Merge ~chad.smith/cloud-init:ubuntu/bionic into cloud-init:ubuntu/bionic
- Git
- lp:~chad.smith/cloud-init
- ubuntu/bionic
- Merge into ubuntu/bionic
Proposed by
Chad Smith
Status: | Merged | ||||||||||||||||||||
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Merged at revision: | d39e24e74c4f0486ceb9aa4a1db77c7a537db996 | ||||||||||||||||||||
Proposed branch: | ~chad.smith/cloud-init:ubuntu/bionic | ||||||||||||||||||||
Merge into: | cloud-init:ubuntu/bionic | ||||||||||||||||||||
Diff against target: |
8915 lines (+3780/-1106) 115 files modified
ChangeLog (+226/-0) cloudinit/cmd/devel/logs.py (+48/-11) cloudinit/cmd/devel/tests/test_logs.py (+18/-3) cloudinit/cmd/main.py (+1/-1) cloudinit/config/cc_lxd.py (+56/-8) cloudinit/config/cc_mounts.py (+45/-30) cloudinit/config/cc_phone_home.py (+4/-3) cloudinit/config/cc_resizefs.py (+1/-1) cloudinit/config/cc_users_groups.py (+6/-2) cloudinit/config/schema.py (+46/-18) cloudinit/distros/__init__.py (+1/-1) cloudinit/distros/freebsd.py (+1/-1) cloudinit/ec2_utils.py (+6/-8) cloudinit/handlers/upstart_job.py (+1/-1) cloudinit/net/__init__.py (+6/-2) cloudinit/net/eni.py (+17/-3) cloudinit/net/netplan.py (+14/-8) cloudinit/net/sysconfig.py (+7/-0) cloudinit/netinfo.py (+31/-11) cloudinit/sources/DataSourceAltCloud.py (+8/-8) cloudinit/sources/DataSourceAzure.py (+62/-22) cloudinit/sources/DataSourceCloudStack.py (+10/-21) cloudinit/sources/DataSourceConfigDrive.py (+10/-5) cloudinit/sources/DataSourceEc2.py (+15/-33) cloudinit/sources/DataSourceMAAS.py (+1/-1) cloudinit/sources/DataSourceNoCloud.py (+2/-2) cloudinit/sources/DataSourceOpenNebula.py (+1/-1) cloudinit/sources/DataSourceOpenStack.py (+127/-55) cloudinit/sources/DataSourceSmartOS.py (+47/-12) cloudinit/sources/__init__.py (+76/-0) cloudinit/sources/helpers/azure.py (+3/-2) cloudinit/sources/tests/test_init.py (+87/-2) cloudinit/stages.py (+17/-9) cloudinit/tests/helpers.py (+10/-2) cloudinit/tests/test_netinfo.py (+46/-1) cloudinit/tests/test_url_helper.py (+27/-1) cloudinit/tests/test_util.py (+77/-1) cloudinit/tests/test_version.py (+17/-0) cloudinit/url_helper.py (+28/-1) cloudinit/user_data.py (+16/-12) cloudinit/util.py (+152/-64) cloudinit/version.py (+5/-1) debian/changelog (+67/-3) debian/patches/openstack-no-network-config.patch (+2/-4) doc/examples/cloud-config-user-groups.txt (+20/-7) doc/rtd/topics/datasources.rst (+97/-0) doc/rtd/topics/datasources/cloudstack.rst (+20/-6) doc/rtd/topics/datasources/ec2.rst (+30/-0) doc/rtd/topics/datasources/openstack.rst (+21/-2) 
doc/rtd/topics/network-config-format-v1.rst (+27/-0) doc/rtd/topics/network-config-format-v2.rst (+6/-0) doc/rtd/topics/tests.rst (+6/-1) integration-requirements.txt (+1/-1) packages/bddeb (+36/-4) packages/brpm (+3/-3) packages/debian/changelog.in (+1/-1) packages/debian/rules.in (+2/-0) packages/redhat/cloud-init.spec.in (+7/-0) packages/suse/cloud-init.spec.in (+28/-42) setup.py (+14/-3) systemd/cloud-config.service.tmpl (+1/-0) tests/cloud_tests/args.py (+3/-0) tests/cloud_tests/collect.py (+3/-2) tests/cloud_tests/platforms/instances.py (+29/-10) tests/cloud_tests/platforms/lxd/instance.py (+1/-1) tests/cloud_tests/releases.yaml (+16/-0) tests/cloud_tests/stage.py (+12/-3) tests/cloud_tests/testcases.yaml (+2/-2) tests/cloud_tests/testcases/base.py (+21/-0) tests/cloud_tests/testcases/modules/byobu.py (+1/-2) tests/cloud_tests/testcases/modules/byobu.yaml (+0/-3) tests/cloud_tests/testcases/modules/ca_certs.py (+17/-4) tests/cloud_tests/testcases/modules/ca_certs.yaml (+6/-2) tests/cloud_tests/testcases/modules/ntp.py (+2/-3) tests/cloud_tests/testcases/modules/ntp_chrony.py (+12/-1) tests/cloud_tests/testcases/modules/package_update_upgrade_install.py (+6/-8) tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml (+3/-6) tests/cloud_tests/testcases/modules/salt_minion.py (+1/-2) tests/cloud_tests/testcases/modules/salt_minion.yaml (+12/-5) tests/cloud_tests/verify.py (+46/-1) tests/data/netinfo/netdev-formatted-output-down (+8/-0) tests/data/netinfo/new-ifconfig-output-down (+15/-0) tests/data/netinfo/sample-ipaddrshow-output-down (+8/-0) tests/unittests/test__init__.py (+4/-4) tests/unittests/test_data.py (+21/-3) tests/unittests/test_datasource/test_aliyun.py (+0/-2) tests/unittests/test_datasource/test_azure.py (+207/-68) tests/unittests/test_datasource/test_azure_helper.py (+1/-1) tests/unittests/test_datasource/test_common.py (+1/-0) tests/unittests/test_datasource/test_ec2.py (+0/-12) tests/unittests/test_datasource/test_gce.py (+0/-1) 
tests/unittests/test_datasource/test_openstack.py (+215/-20) tests/unittests/test_datasource/test_scaleway.py (+0/-3) tests/unittests/test_datasource/test_smartos.py (+26/-0) tests/unittests/test_distros/test_create_users.py (+8/-0) tests/unittests/test_ds_identify.py (+141/-10) tests/unittests/test_ec2_util.py (+0/-9) tests/unittests/test_handler/test_handler_apt_conf_v1.py (+6/-10) tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py (+0/-7) tests/unittests/test_handler/test_handler_apt_source_v1.py (+10/-17) tests/unittests/test_handler/test_handler_apt_source_v3.py (+10/-17) tests/unittests/test_handler/test_handler_chef.py (+12/-4) tests/unittests/test_handler/test_handler_lxd.py (+64/-16) tests/unittests/test_handler/test_handler_mounts.py (+100/-4) tests/unittests/test_handler/test_handler_ntp.py (+22/-31) tests/unittests/test_handler/test_handler_resizefs.py (+1/-1) tests/unittests/test_handler/test_schema.py (+33/-6) tests/unittests/test_net.py (+63/-8) tests/unittests/test_runs/test_simple_run.py (+30/-2) tests/unittests/test_util.py (+114/-3) tools/ds-identify (+64/-28) tools/read-dependencies (+6/-2) tools/run-centos (+30/-310) tools/run-container (+590/-0) tox.ini (+9/-7) |
||||||||||||||||||||
Related bugs: |
|
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Server Team CI bot | continuous-integration | Approve | |
cloud-init Commiters | Pending | ||
Review via email: mp+348362@code.launchpad.net |
Commit message
cloud-init 18.3 new-upstream-
Description of the change
To post a comment you must log in.
Revision history for this message
Server Team CI bot (server-team-bot) wrote : | # |
review:
Approve
(continuous-integration)
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | diff --git a/ChangeLog b/ChangeLog | |||
2 | index daa7ccf..72c5287 100644 | |||
3 | --- a/ChangeLog | |||
4 | +++ b/ChangeLog | |||
5 | @@ -1,3 +1,229 @@ | |||
6 | 1 | 18.3: | ||
7 | 2 | - docs: represent sudo:false in docs for user_groups config module | ||
8 | 3 | - Explicitly prevent `sudo` access for user module | ||
9 | 4 | [Jacob Bednarz] (LP: #1771468) | ||
10 | 5 | - lxd: Delete default network and detach device if lxd-init created them. | ||
11 | 6 | (LP: #1776958) | ||
12 | 7 | - openstack: avoid unneeded metadata probe on non-openstack platforms | ||
13 | 8 | (LP: #1776701) | ||
14 | 9 | - stages: fix tracebacks if a module stage is undefined or empty | ||
15 | 10 | [Robert Schweikert] (LP: #1770462) | ||
16 | 11 | - Be more safe on string/bytes when writing multipart user-data to disk. | ||
17 | 12 | (LP: #1768600) | ||
18 | 13 | - Fix get_proc_env for pids that have non-utf8 content in environment. | ||
19 | 14 | (LP: #1775371) | ||
20 | 15 | - tests: fix salt_minion integration test on bionic and later | ||
21 | 16 | - tests: provide human-readable integration test summary when --verbose | ||
22 | 17 | - tests: skip chrony integration tests on lxd running artful or older | ||
23 | 18 | - test: add optional --preserve-instance arg to integraiton tests | ||
24 | 19 | - netplan: fix mtu if provided by network config for all rendered types | ||
25 | 20 | (LP: #1774666) | ||
26 | 21 | - tests: remove pip install workarounds for pylxd, take upstream fix. | ||
27 | 22 | - subp: support combine_capture argument. | ||
28 | 23 | - tests: ordered tox dependencies for pylxd install | ||
29 | 24 | - util: add get_linux_distro function to replace platform.dist | ||
30 | 25 | [Robert Schweikert] (LP: #1745235) | ||
31 | 26 | - pyflakes: fix unused variable references identified by pyflakes 2.0.0. | ||
32 | 27 | - - Do not use the systemd_prefix macro, not available in this environment | ||
33 | 28 | [Robert Schweikert] | ||
34 | 29 | - doc: Add config info to ec2, openstack and cloudstack datasource docs | ||
35 | 30 | - Enable SmartOS network metadata to work with netplan via per-subnet | ||
36 | 31 | routes [Dan McDonald] (LP: #1763512) | ||
37 | 32 | - openstack: Allow discovery in init-local using dhclient in a sandbox. | ||
38 | 33 | (LP: #1749717) | ||
39 | 34 | - tests: Avoid using https in httpretty, improve HttPretty test case. | ||
40 | 35 | (LP: #1771659) | ||
41 | 36 | - yaml_load/schema: Add invalid line and column nums to error message | ||
42 | 37 | - Azure: Ignore NTFS mount errors when checking ephemeral drive | ||
43 | 38 | [Paul Meyer] | ||
44 | 39 | - packages/brpm: Get proper dependencies for cmdline distro. | ||
45 | 40 | - packages: Make rpm spec files patch in package version like in debs. | ||
46 | 41 | - tools/run-container: replace tools/run-centos with more generic. | ||
47 | 42 | - Update version.version_string to contain packaged version. (LP: #1770712) | ||
48 | 43 | - cc_mounts: Do not add devices to fstab that are already present. | ||
49 | 44 | [Lars Kellogg-Stedman] | ||
50 | 45 | - ds-identify: ensure that we have certain tokens in PATH. (LP: #1771382) | ||
51 | 46 | - tests: enable Ubuntu Cosmic in integration tests [Joshua Powers] | ||
52 | 47 | - read_file_or_url: move to url_helper, fix bug in its FileResponse. | ||
53 | 48 | - cloud_tests: help pylint [Ryan Harper] | ||
54 | 49 | - flake8: fix flake8 errors in previous commit. | ||
55 | 50 | - typos: Fix spelling mistakes in cc_mounts.py log messages [Stephen Ford] | ||
56 | 51 | - tests: restructure SSH and initial connections [Joshua Powers] | ||
57 | 52 | - ds-identify: recognize container-other as a container, test SmartOS. | ||
58 | 53 | - cloud-config.service: run After snap.seeded.service. (LP: #1767131) | ||
59 | 54 | - tests: do not rely on host /proc/cmdline in test_net.py | ||
60 | 55 | [Lars Kellogg-Stedman] (LP: #1769952) | ||
61 | 56 | - ds-identify: Remove dupe call to is_ds_enabled, improve debug message. | ||
62 | 57 | - SmartOS: fix get_interfaces for nics that do not have addr_assign_type. | ||
63 | 58 | - tests: fix package and ca_cert cloud_tests on bionic | ||
64 | 59 | (LP: #1769985) | ||
65 | 60 | - ds-identify: make shellcheck 0.4.6 happy with ds-identify. | ||
66 | 61 | - pycodestyle: Fix deprecated string literals, move away from flake8. | ||
67 | 62 | - azure: Add reported ready marker file. [Joshua Chan] (LP: #1765214) | ||
68 | 63 | - tools: Support adding a release suffix through packages/bddeb. | ||
69 | 64 | - FreeBSD: Invoke growfs on ufs filesystems such that it does not prompt. | ||
70 | 65 | [Harm Weites] (LP: #1404745) | ||
71 | 66 | - tools: Re-use the orig tarball in packages/bddeb if it is around. | ||
72 | 67 | - netinfo: fix netdev_pformat when a nic does not have an address | ||
73 | 68 | assigned. (LP: #1766302) | ||
74 | 69 | - collect-logs: add -v flag, write to stderr, limit journal to single | ||
75 | 70 | boot. (LP: #1766335) | ||
76 | 71 | - IBMCloud: Disable config-drive and nocloud only if IBMCloud is enabled. | ||
77 | 72 | (LP: #1766401) | ||
78 | 73 | - Add reporting events and log_time around early source of blocking time | ||
79 | 74 | [Ryan Harper] | ||
80 | 75 | - IBMCloud: recognize provisioning environment during debug boots. | ||
81 | 76 | (LP: #1767166) | ||
82 | 77 | - net: detect unstable network names and trigger a settle if needed | ||
83 | 78 | [Ryan Harper] (LP: #1766287) | ||
84 | 79 | - IBMCloud: improve documentation in datasource. | ||
85 | 80 | - sysconfig: dhcp6 subnet type should not imply dhcpv4 [Vitaly Kuznetsov] | ||
86 | 81 | - packages/debian/control.in: add missing dependency on iproute2. | ||
87 | 82 | (LP: #1766711) | ||
88 | 83 | - DataSourceSmartOS: add locking of serial device. | ||
89 | 84 | [Mike Gerdts] (LP: #1746605) | ||
90 | 85 | - DataSourceSmartOS: sdc:hostname is ignored [Mike Gerdts] (LP: #1765085) | ||
91 | 86 | - DataSourceSmartOS: list() should always return a list | ||
92 | 87 | [Mike Gerdts] (LP: #1763480) | ||
93 | 88 | - schema: in validation, raise ImportError if strict but no jsonschema. | ||
94 | 89 | - set_passwords: Add newline to end of sshd config, only restart if | ||
95 | 90 | updated. (LP: #1677205) | ||
96 | 91 | - pylint: pay attention to unused variable warnings. | ||
97 | 92 | - doc: Add documentation for AliYun datasource. [Junjie Wang] | ||
98 | 93 | - Schema: do not warn on duplicate items in commands. (LP: #1764264) | ||
99 | 94 | - net: Depend on iproute2's ip instead of net-tools ifconfig or route | ||
100 | 95 | - DataSourceSmartOS: fix hang when metadata service is down | ||
101 | 96 | [Mike Gerdts] (LP: #1667735) | ||
102 | 97 | - DataSourceSmartOS: change default fs on ephemeral disk from ext3 to | ||
103 | 98 | ext4. [Mike Gerdts] (LP: #1763511) | ||
104 | 99 | - pycodestyle: Fix invalid escape sequences in string literals. | ||
105 | 100 | - Implement bash completion script for cloud-init command line | ||
106 | 101 | [Ryan Harper] | ||
107 | 102 | - tools: Fix make-tarball cli tool usage for development | ||
108 | 103 | - renderer: support unicode in render_from_file. | ||
109 | 104 | - Implement ntp client spec with auto support for distro selection | ||
110 | 105 | [Ryan Harper] (LP: #1749722) | ||
111 | 106 | - Apport: add Brightbox, IBM, LXD, and OpenTelekomCloud to list of clouds. | ||
112 | 107 | - tests: fix ec2 integration network metadata validation | ||
113 | 108 | - tests: fix integration tests to support lxd 3.0 release | ||
114 | 109 | - correct documentation to match correct attribute name usage. | ||
115 | 110 | [Dominic Schlegel] (LP: #1420018) | ||
116 | 111 | - cc_resizefs, util: handle no /dev/zfs [Ryan Harper] | ||
117 | 112 | - doc: Fix links in OpenStack datasource documentation. | ||
118 | 113 | [Dominic Schlegel] (LP: #1721660) | ||
119 | 114 | - docs: represent sudo:false in docs for user_groups config module | ||
120 | 115 | - Explicitly prevent `sudo` access for user module | ||
121 | 116 | [Jacob Bednarz] (LP: #1771468) | ||
122 | 117 | - lxd: Delete default network and detach device if lxd-init created them. | ||
123 | 118 | (LP: #1776958) | ||
124 | 119 | - openstack: avoid unneeded metadata probe on non-openstack platforms | ||
125 | 120 | (LP: #1776701) | ||
126 | 121 | - stages: fix tracebacks if a module stage is undefined or empty | ||
127 | 122 | [Robert Schweikert] (LP: #1770462) | ||
128 | 123 | - Be more safe on string/bytes when writing multipart user-data to disk. | ||
129 | 124 | (LP: #1768600) | ||
130 | 125 | - Fix get_proc_env for pids that have non-utf8 content in environment. | ||
131 | 126 | (LP: #1775371) | ||
132 | 127 | - tests: fix salt_minion integration test on bionic and later | ||
133 | 128 | - tests: provide human-readable integration test summary when --verbose | ||
134 | 129 | - tests: skip chrony integration tests on lxd running artful or older | ||
135 | 130 | - test: add optional --preserve-instance arg to integraiton tests | ||
136 | 131 | - netplan: fix mtu if provided by network config for all rendered types | ||
137 | 132 | (LP: #1774666) | ||
138 | 133 | - tests: remove pip install workarounds for pylxd, take upstream fix. | ||
139 | 134 | - subp: support combine_capture argument. | ||
140 | 135 | - tests: ordered tox dependencies for pylxd install | ||
141 | 136 | - util: add get_linux_distro function to replace platform.dist | ||
142 | 137 | [Robert Schweikert] (LP: #1745235) | ||
143 | 138 | - pyflakes: fix unused variable references identified by pyflakes 2.0.0. | ||
144 | 139 | - - Do not use the systemd_prefix macro, not available in this environment | ||
145 | 140 | [Robert Schweikert] | ||
146 | 141 | - doc: Add config info to ec2, openstack and cloudstack datasource docs | ||
147 | 142 | - Enable SmartOS network metadata to work with netplan via per-subnet | ||
148 | 143 | routes [Dan McDonald] (LP: #1763512) | ||
149 | 144 | - openstack: Allow discovery in init-local using dhclient in a sandbox. | ||
150 | 145 | (LP: #1749717) | ||
151 | 146 | - tests: Avoid using https in httpretty, improve HttPretty test case. | ||
152 | 147 | (LP: #1771659) | ||
153 | 148 | - yaml_load/schema: Add invalid line and column nums to error message | ||
154 | 149 | - Azure: Ignore NTFS mount errors when checking ephemeral drive | ||
155 | 150 | [Paul Meyer] | ||
156 | 151 | - packages/brpm: Get proper dependencies for cmdline distro. | ||
157 | 152 | - packages: Make rpm spec files patch in package version like in debs. | ||
158 | 153 | - tools/run-container: replace tools/run-centos with more generic. | ||
159 | 154 | - Update version.version_string to contain packaged version. (LP: #1770712) | ||
160 | 155 | - cc_mounts: Do not add devices to fstab that are already present. | ||
161 | 156 | [Lars Kellogg-Stedman] | ||
162 | 157 | - ds-identify: ensure that we have certain tokens in PATH. (LP: #1771382) | ||
163 | 158 | - tests: enable Ubuntu Cosmic in integration tests [Joshua Powers] | ||
164 | 159 | - read_file_or_url: move to url_helper, fix bug in its FileResponse. | ||
165 | 160 | - cloud_tests: help pylint [Ryan Harper] | ||
166 | 161 | - flake8: fix flake8 errors in previous commit. | ||
167 | 162 | - typos: Fix spelling mistakes in cc_mounts.py log messages [Stephen Ford] | ||
168 | 163 | - tests: restructure SSH and initial connections [Joshua Powers] | ||
169 | 164 | - ds-identify: recognize container-other as a container, test SmartOS. | ||
170 | 165 | - cloud-config.service: run After snap.seeded.service. (LP: #1767131) | ||
171 | 166 | - tests: do not rely on host /proc/cmdline in test_net.py | ||
172 | 167 | [Lars Kellogg-Stedman] (LP: #1769952) | ||
173 | 168 | - ds-identify: Remove dupe call to is_ds_enabled, improve debug message. | ||
174 | 169 | - SmartOS: fix get_interfaces for nics that do not have addr_assign_type. | ||
175 | 170 | - tests: fix package and ca_cert cloud_tests on bionic | ||
176 | 171 | (LP: #1769985) | ||
177 | 172 | - ds-identify: make shellcheck 0.4.6 happy with ds-identify. | ||
178 | 173 | - pycodestyle: Fix deprecated string literals, move away from flake8. | ||
179 | 174 | - azure: Add reported ready marker file. [Joshua Chan] (LP: #1765214) | ||
180 | 175 | - tools: Support adding a release suffix through packages/bddeb. | ||
181 | 176 | - FreeBSD: Invoke growfs on ufs filesystems such that it does not prompt. | ||
182 | 177 | [Harm Weites] (LP: #1404745) | ||
183 | 178 | - tools: Re-use the orig tarball in packages/bddeb if it is around. | ||
184 | 179 | - netinfo: fix netdev_pformat when a nic does not have an address | ||
185 | 180 | assigned. (LP: #1766302) | ||
186 | 181 | - collect-logs: add -v flag, write to stderr, limit journal to single | ||
187 | 182 | boot. (LP: #1766335) | ||
188 | 183 | - IBMCloud: Disable config-drive and nocloud only if IBMCloud is enabled. | ||
189 | 184 | (LP: #1766401) | ||
190 | 185 | - Add reporting events and log_time around early source of blocking time | ||
191 | 186 | [Ryan Harper] | ||
192 | 187 | - IBMCloud: recognize provisioning environment during debug boots. | ||
193 | 188 | (LP: #1767166) | ||
194 | 189 | - net: detect unstable network names and trigger a settle if needed | ||
195 | 190 | [Ryan Harper] (LP: #1766287) | ||
196 | 191 | - IBMCloud: improve documentation in datasource. | ||
197 | 192 | - sysconfig: dhcp6 subnet type should not imply dhcpv4 [Vitaly Kuznetsov] | ||
198 | 193 | - packages/debian/control.in: add missing dependency on iproute2. | ||
199 | 194 | (LP: #1766711) | ||
200 | 195 | - DataSourceSmartOS: add locking of serial device. | ||
201 | 196 | [Mike Gerdts] (LP: #1746605) | ||
202 | 197 | - DataSourceSmartOS: sdc:hostname is ignored [Mike Gerdts] (LP: #1765085) | ||
203 | 198 | - DataSourceSmartOS: list() should always return a list | ||
204 | 199 | [Mike Gerdts] (LP: #1763480) | ||
205 | 200 | - schema: in validation, raise ImportError if strict but no jsonschema. | ||
206 | 201 | - set_passwords: Add newline to end of sshd config, only restart if | ||
207 | 202 | updated. (LP: #1677205) | ||
208 | 203 | - pylint: pay attention to unused variable warnings. | ||
209 | 204 | - doc: Add documentation for AliYun datasource. [Junjie Wang] | ||
210 | 205 | - Schema: do not warn on duplicate items in commands. (LP: #1764264) | ||
211 | 206 | - net: Depend on iproute2's ip instead of net-tools ifconfig or route | ||
212 | 207 | - DataSourceSmartOS: fix hang when metadata service is down | ||
213 | 208 | [Mike Gerdts] (LP: #1667735) | ||
214 | 209 | - DataSourceSmartOS: change default fs on ephemeral disk from ext3 to | ||
215 | 210 | ext4. [Mike Gerdts] (LP: #1763511) | ||
216 | 211 | - pycodestyle: Fix invalid escape sequences in string literals. | ||
217 | 212 | - Implement bash completion script for cloud-init command line | ||
218 | 213 | [Ryan Harper] | ||
219 | 214 | - tools: Fix make-tarball cli tool usage for development | ||
220 | 215 | - renderer: support unicode in render_from_file. | ||
221 | 216 | - Implement ntp client spec with auto support for distro selection | ||
222 | 217 | [Ryan Harper] (LP: #1749722) | ||
223 | 218 | - Apport: add Brightbox, IBM, LXD, and OpenTelekomCloud to list of clouds. | ||
224 | 219 | - tests: fix ec2 integration network metadata validation | ||
225 | 220 | - tests: fix integration tests to support lxd 3.0 release | ||
226 | 221 | - correct documentation to match correct attribute name usage. | ||
227 | 222 | [Dominic Schlegel] (LP: #1420018) | ||
228 | 223 | - cc_resizefs, util: handle no /dev/zfs [Ryan Harper] | ||
229 | 224 | - doc: Fix links in OpenStack datasource documentation. | ||
230 | 225 | [Dominic Schlegel] (LP: #1721660) | ||
231 | 226 | |||
232 | 1 | 18.2: | 227 | 18.2: |
233 | 2 | - Hetzner: Exit early if dmi system-manufacturer is not Hetzner. | 228 | - Hetzner: Exit early if dmi system-manufacturer is not Hetzner. |
234 | 3 | - Add missing dependency on isc-dhcp-client to trunk ubuntu packaging. | 229 | - Add missing dependency on isc-dhcp-client to trunk ubuntu packaging. |
235 | diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py | |||
236 | index 35ca478..df72520 100644 | |||
237 | --- a/cloudinit/cmd/devel/logs.py | |||
238 | +++ b/cloudinit/cmd/devel/logs.py | |||
239 | @@ -11,6 +11,7 @@ from cloudinit.temp_utils import tempdir | |||
240 | 11 | from datetime import datetime | 11 | from datetime import datetime |
241 | 12 | import os | 12 | import os |
242 | 13 | import shutil | 13 | import shutil |
243 | 14 | import sys | ||
244 | 14 | 15 | ||
245 | 15 | 16 | ||
246 | 16 | CLOUDINIT_LOGS = ['/var/log/cloud-init.log', '/var/log/cloud-init-output.log'] | 17 | CLOUDINIT_LOGS = ['/var/log/cloud-init.log', '/var/log/cloud-init-output.log'] |
247 | @@ -31,6 +32,8 @@ def get_parser(parser=None): | |||
248 | 31 | parser = argparse.ArgumentParser( | 32 | parser = argparse.ArgumentParser( |
249 | 32 | prog='collect-logs', | 33 | prog='collect-logs', |
250 | 33 | description='Collect and tar all cloud-init debug info') | 34 | description='Collect and tar all cloud-init debug info') |
251 | 35 | parser.add_argument('--verbose', '-v', action='count', default=0, | ||
252 | 36 | dest='verbosity', help="Be more verbose.") | ||
253 | 34 | parser.add_argument( | 37 | parser.add_argument( |
254 | 35 | "--tarfile", '-t', default='cloud-init.tar.gz', | 38 | "--tarfile", '-t', default='cloud-init.tar.gz', |
255 | 36 | help=('The tarfile to create containing all collected logs.' | 39 | help=('The tarfile to create containing all collected logs.' |
256 | @@ -43,17 +46,33 @@ def get_parser(parser=None): | |||
257 | 43 | return parser | 46 | return parser |
258 | 44 | 47 | ||
259 | 45 | 48 | ||
261 | 46 | def _write_command_output_to_file(cmd, filename): | 49 | def _write_command_output_to_file(cmd, filename, msg, verbosity): |
262 | 47 | """Helper which runs a command and writes output or error to filename.""" | 50 | """Helper which runs a command and writes output or error to filename.""" |
263 | 48 | try: | 51 | try: |
264 | 49 | out, _ = subp(cmd) | 52 | out, _ = subp(cmd) |
265 | 50 | except ProcessExecutionError as e: | 53 | except ProcessExecutionError as e: |
266 | 51 | write_file(filename, str(e)) | 54 | write_file(filename, str(e)) |
267 | 55 | _debug("collecting %s failed.\n" % msg, 1, verbosity) | ||
268 | 52 | else: | 56 | else: |
269 | 53 | write_file(filename, out) | 57 | write_file(filename, out) |
270 | 58 | _debug("collected %s\n" % msg, 1, verbosity) | ||
271 | 59 | return out | ||
272 | 54 | 60 | ||
273 | 55 | 61 | ||
275 | 56 | def collect_logs(tarfile, include_userdata): | 62 | def _debug(msg, level, verbosity): |
276 | 63 | if level <= verbosity: | ||
277 | 64 | sys.stderr.write(msg) | ||
278 | 65 | |||
279 | 66 | |||
280 | 67 | def _collect_file(path, out_dir, verbosity): | ||
281 | 68 | if os.path.isfile(path): | ||
282 | 69 | copy(path, out_dir) | ||
283 | 70 | _debug("collected file: %s\n" % path, 1, verbosity) | ||
284 | 71 | else: | ||
285 | 72 | _debug("file %s did not exist\n" % path, 2, verbosity) | ||
286 | 73 | |||
287 | 74 | |||
288 | 75 | def collect_logs(tarfile, include_userdata, verbosity=0): | ||
289 | 57 | """Collect all cloud-init logs and tar them up into the provided tarfile. | 76 | """Collect all cloud-init logs and tar them up into the provided tarfile. |
290 | 58 | 77 | ||
291 | 59 | @param tarfile: The path of the tar-gzipped file to create. | 78 | @param tarfile: The path of the tar-gzipped file to create. |
292 | @@ -64,28 +83,46 @@ def collect_logs(tarfile, include_userdata): | |||
293 | 64 | log_dir = 'cloud-init-logs-{0}'.format(date) | 83 | log_dir = 'cloud-init-logs-{0}'.format(date) |
294 | 65 | with tempdir(dir='/tmp') as tmp_dir: | 84 | with tempdir(dir='/tmp') as tmp_dir: |
295 | 66 | log_dir = os.path.join(tmp_dir, log_dir) | 85 | log_dir = os.path.join(tmp_dir, log_dir) |
297 | 67 | _write_command_output_to_file( | 86 | version = _write_command_output_to_file( |
298 | 87 | ['cloud-init', '--version'], | ||
299 | 88 | os.path.join(log_dir, 'version'), | ||
300 | 89 | "cloud-init --version", verbosity) | ||
301 | 90 | dpkg_ver = _write_command_output_to_file( | ||
302 | 68 | ['dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'], | 91 | ['dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'], |
304 | 69 | os.path.join(log_dir, 'version')) | 92 | os.path.join(log_dir, 'dpkg-version'), |
305 | 93 | "dpkg version", verbosity) | ||
306 | 94 | if not version: | ||
307 | 95 | version = dpkg_ver if dpkg_ver else "not-available" | ||
308 | 96 | _debug("collected cloud-init version: %s\n" % version, 1, verbosity) | ||
309 | 70 | _write_command_output_to_file( | 97 | _write_command_output_to_file( |
311 | 71 | ['dmesg'], os.path.join(log_dir, 'dmesg.txt')) | 98 | ['dmesg'], os.path.join(log_dir, 'dmesg.txt'), |
312 | 99 | "dmesg output", verbosity) | ||
313 | 72 | _write_command_output_to_file( | 100 | _write_command_output_to_file( |
316 | 73 | ['journalctl', '-o', 'short-precise'], | 101 | ['journalctl', '--boot=0', '-o', 'short-precise'], |
317 | 74 | os.path.join(log_dir, 'journal.txt')) | 102 | os.path.join(log_dir, 'journal.txt'), |
318 | 103 | "systemd journal of current boot", verbosity) | ||
319 | 104 | |||
320 | 75 | for log in CLOUDINIT_LOGS: | 105 | for log in CLOUDINIT_LOGS: |
322 | 76 | copy(log, log_dir) | 106 | _collect_file(log, log_dir, verbosity) |
323 | 77 | if include_userdata: | 107 | if include_userdata: |
325 | 78 | copy(USER_DATA_FILE, log_dir) | 108 | _collect_file(USER_DATA_FILE, log_dir, verbosity) |
326 | 79 | run_dir = os.path.join(log_dir, 'run') | 109 | run_dir = os.path.join(log_dir, 'run') |
327 | 80 | ensure_dir(run_dir) | 110 | ensure_dir(run_dir) |
329 | 81 | shutil.copytree(CLOUDINIT_RUN_DIR, os.path.join(run_dir, 'cloud-init')) | 111 | if os.path.exists(CLOUDINIT_RUN_DIR): |
330 | 112 | shutil.copytree(CLOUDINIT_RUN_DIR, | ||
331 | 113 | os.path.join(run_dir, 'cloud-init')) | ||
332 | 114 | _debug("collected dir %s\n" % CLOUDINIT_RUN_DIR, 1, verbosity) | ||
333 | 115 | else: | ||
334 | 116 | _debug("directory '%s' did not exist\n" % CLOUDINIT_RUN_DIR, 1, | ||
335 | 117 | verbosity) | ||
336 | 82 | with chdir(tmp_dir): | 118 | with chdir(tmp_dir): |
337 | 83 | subp(['tar', 'czvf', tarfile, log_dir.replace(tmp_dir + '/', '')]) | 119 | subp(['tar', 'czvf', tarfile, log_dir.replace(tmp_dir + '/', '')]) |
338 | 120 | sys.stderr.write("Wrote %s\n" % tarfile) | ||
339 | 84 | 121 | ||
340 | 85 | 122 | ||
341 | 86 | def handle_collect_logs_args(name, args): | 123 | def handle_collect_logs_args(name, args): |
342 | 87 | """Handle calls to 'cloud-init collect-logs' as a subcommand.""" | 124 | """Handle calls to 'cloud-init collect-logs' as a subcommand.""" |
344 | 88 | collect_logs(args.tarfile, args.userdata) | 125 | collect_logs(args.tarfile, args.userdata, args.verbosity) |
345 | 89 | 126 | ||
346 | 90 | 127 | ||
347 | 91 | def main(): | 128 | def main(): |
348 | diff --git a/cloudinit/cmd/devel/tests/test_logs.py b/cloudinit/cmd/devel/tests/test_logs.py | |||
349 | index dc4947c..98b4756 100644 | |||
350 | --- a/cloudinit/cmd/devel/tests/test_logs.py | |||
351 | +++ b/cloudinit/cmd/devel/tests/test_logs.py | |||
352 | @@ -4,6 +4,7 @@ from cloudinit.cmd.devel import logs | |||
353 | 4 | from cloudinit.util import ensure_dir, load_file, subp, write_file | 4 | from cloudinit.util import ensure_dir, load_file, subp, write_file |
354 | 5 | from cloudinit.tests.helpers import FilesystemMockingTestCase, wrap_and_call | 5 | from cloudinit.tests.helpers import FilesystemMockingTestCase, wrap_and_call |
355 | 6 | from datetime import datetime | 6 | from datetime import datetime |
356 | 7 | import mock | ||
357 | 7 | import os | 8 | import os |
358 | 8 | 9 | ||
359 | 9 | 10 | ||
360 | @@ -27,11 +28,13 @@ class TestCollectLogs(FilesystemMockingTestCase): | |||
361 | 27 | date = datetime.utcnow().date().strftime('%Y-%m-%d') | 28 | date = datetime.utcnow().date().strftime('%Y-%m-%d') |
362 | 28 | date_logdir = 'cloud-init-logs-{0}'.format(date) | 29 | date_logdir = 'cloud-init-logs-{0}'.format(date) |
363 | 29 | 30 | ||
364 | 31 | version_out = '/usr/bin/cloud-init 18.2fake\n' | ||
365 | 30 | expected_subp = { | 32 | expected_subp = { |
366 | 31 | ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'): | 33 | ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'): |
367 | 32 | '0.7fake\n', | 34 | '0.7fake\n', |
368 | 35 | ('cloud-init', '--version'): version_out, | ||
369 | 33 | ('dmesg',): 'dmesg-out\n', | 36 | ('dmesg',): 'dmesg-out\n', |
371 | 34 | ('journalctl', '-o', 'short-precise'): 'journal-out\n', | 37 | ('journalctl', '--boot=0', '-o', 'short-precise'): 'journal-out\n', |
372 | 35 | ('tar', 'czvf', output_tarfile, date_logdir): '' | 38 | ('tar', 'czvf', output_tarfile, date_logdir): '' |
373 | 36 | } | 39 | } |
374 | 37 | 40 | ||
375 | @@ -44,9 +47,12 @@ class TestCollectLogs(FilesystemMockingTestCase): | |||
376 | 44 | subp(cmd) # Pass through tar cmd so we can check output | 47 | subp(cmd) # Pass through tar cmd so we can check output |
377 | 45 | return expected_subp[cmd_tuple], '' | 48 | return expected_subp[cmd_tuple], '' |
378 | 46 | 49 | ||
379 | 50 | fake_stderr = mock.MagicMock() | ||
380 | 51 | |||
381 | 47 | wrap_and_call( | 52 | wrap_and_call( |
382 | 48 | 'cloudinit.cmd.devel.logs', | 53 | 'cloudinit.cmd.devel.logs', |
383 | 49 | {'subp': {'side_effect': fake_subp}, | 54 | {'subp': {'side_effect': fake_subp}, |
384 | 55 | 'sys.stderr': {'new': fake_stderr}, | ||
385 | 50 | 'CLOUDINIT_LOGS': {'new': [log1, log2]}, | 56 | 'CLOUDINIT_LOGS': {'new': [log1, log2]}, |
386 | 51 | 'CLOUDINIT_RUN_DIR': {'new': self.run_dir}}, | 57 | 'CLOUDINIT_RUN_DIR': {'new': self.run_dir}}, |
387 | 52 | logs.collect_logs, output_tarfile, include_userdata=False) | 58 | logs.collect_logs, output_tarfile, include_userdata=False) |
388 | @@ -55,7 +61,9 @@ class TestCollectLogs(FilesystemMockingTestCase): | |||
389 | 55 | out_logdir = self.tmp_path(date_logdir, self.new_root) | 61 | out_logdir = self.tmp_path(date_logdir, self.new_root) |
390 | 56 | self.assertEqual( | 62 | self.assertEqual( |
391 | 57 | '0.7fake\n', | 63 | '0.7fake\n', |
393 | 58 | load_file(os.path.join(out_logdir, 'version'))) | 64 | load_file(os.path.join(out_logdir, 'dpkg-version'))) |
394 | 65 | self.assertEqual(version_out, | ||
395 | 66 | load_file(os.path.join(out_logdir, 'version'))) | ||
396 | 59 | self.assertEqual( | 67 | self.assertEqual( |
397 | 60 | 'cloud-init-log', | 68 | 'cloud-init-log', |
398 | 61 | load_file(os.path.join(out_logdir, 'cloud-init.log'))) | 69 | load_file(os.path.join(out_logdir, 'cloud-init.log'))) |
399 | @@ -72,6 +80,7 @@ class TestCollectLogs(FilesystemMockingTestCase): | |||
400 | 72 | 'results', | 80 | 'results', |
401 | 73 | load_file( | 81 | load_file( |
402 | 74 | os.path.join(out_logdir, 'run', 'cloud-init', 'results.json'))) | 82 | os.path.join(out_logdir, 'run', 'cloud-init', 'results.json'))) |
403 | 83 | fake_stderr.write.assert_any_call('Wrote %s\n' % output_tarfile) | ||
404 | 75 | 84 | ||
405 | 76 | def test_collect_logs_includes_optional_userdata(self): | 85 | def test_collect_logs_includes_optional_userdata(self): |
406 | 77 | """collect-logs include userdata when --include-userdata is set.""" | 86 | """collect-logs include userdata when --include-userdata is set.""" |
407 | @@ -88,11 +97,13 @@ class TestCollectLogs(FilesystemMockingTestCase): | |||
408 | 88 | date = datetime.utcnow().date().strftime('%Y-%m-%d') | 97 | date = datetime.utcnow().date().strftime('%Y-%m-%d') |
409 | 89 | date_logdir = 'cloud-init-logs-{0}'.format(date) | 98 | date_logdir = 'cloud-init-logs-{0}'.format(date) |
410 | 90 | 99 | ||
411 | 100 | version_out = '/usr/bin/cloud-init 18.2fake\n' | ||
412 | 91 | expected_subp = { | 101 | expected_subp = { |
413 | 92 | ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'): | 102 | ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'): |
414 | 93 | '0.7fake', | 103 | '0.7fake', |
415 | 104 | ('cloud-init', '--version'): version_out, | ||
416 | 94 | ('dmesg',): 'dmesg-out\n', | 105 | ('dmesg',): 'dmesg-out\n', |
418 | 95 | ('journalctl', '-o', 'short-precise'): 'journal-out\n', | 106 | ('journalctl', '--boot=0', '-o', 'short-precise'): 'journal-out\n', |
419 | 96 | ('tar', 'czvf', output_tarfile, date_logdir): '' | 107 | ('tar', 'czvf', output_tarfile, date_logdir): '' |
420 | 97 | } | 108 | } |
421 | 98 | 109 | ||
422 | @@ -105,9 +116,12 @@ class TestCollectLogs(FilesystemMockingTestCase): | |||
423 | 105 | subp(cmd) # Pass through tar cmd so we can check output | 116 | subp(cmd) # Pass through tar cmd so we can check output |
424 | 106 | return expected_subp[cmd_tuple], '' | 117 | return expected_subp[cmd_tuple], '' |
425 | 107 | 118 | ||
426 | 119 | fake_stderr = mock.MagicMock() | ||
427 | 120 | |||
428 | 108 | wrap_and_call( | 121 | wrap_and_call( |
429 | 109 | 'cloudinit.cmd.devel.logs', | 122 | 'cloudinit.cmd.devel.logs', |
430 | 110 | {'subp': {'side_effect': fake_subp}, | 123 | {'subp': {'side_effect': fake_subp}, |
431 | 124 | 'sys.stderr': {'new': fake_stderr}, | ||
432 | 111 | 'CLOUDINIT_LOGS': {'new': [log1, log2]}, | 125 | 'CLOUDINIT_LOGS': {'new': [log1, log2]}, |
433 | 112 | 'CLOUDINIT_RUN_DIR': {'new': self.run_dir}, | 126 | 'CLOUDINIT_RUN_DIR': {'new': self.run_dir}, |
434 | 113 | 'USER_DATA_FILE': {'new': userdata}}, | 127 | 'USER_DATA_FILE': {'new': userdata}}, |
435 | @@ -118,3 +132,4 @@ class TestCollectLogs(FilesystemMockingTestCase): | |||
436 | 118 | self.assertEqual( | 132 | self.assertEqual( |
437 | 119 | 'user-data', | 133 | 'user-data', |
438 | 120 | load_file(os.path.join(out_logdir, 'user-data.txt'))) | 134 | load_file(os.path.join(out_logdir, 'user-data.txt'))) |
439 | 135 | fake_stderr.write.assert_any_call('Wrote %s\n' % output_tarfile) | ||
440 | diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py | |||
441 | index 3f2dbb9..d6ba90f 100644 | |||
442 | --- a/cloudinit/cmd/main.py | |||
443 | +++ b/cloudinit/cmd/main.py | |||
444 | @@ -187,7 +187,7 @@ def attempt_cmdline_url(path, network=True, cmdline=None): | |||
445 | 187 | data = None | 187 | data = None |
446 | 188 | header = b'#cloud-config' | 188 | header = b'#cloud-config' |
447 | 189 | try: | 189 | try: |
449 | 190 | resp = util.read_file_or_url(**kwargs) | 190 | resp = url_helper.read_file_or_url(**kwargs) |
450 | 191 | if resp.ok(): | 191 | if resp.ok(): |
451 | 192 | data = resp.contents | 192 | data = resp.contents |
452 | 193 | if not resp.contents.startswith(header): | 193 | if not resp.contents.startswith(header): |
453 | diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py | |||
454 | index 09374d2..ac72ac4 100644 | |||
455 | --- a/cloudinit/config/cc_lxd.py | |||
456 | +++ b/cloudinit/config/cc_lxd.py | |||
457 | @@ -47,11 +47,16 @@ lxd-bridge will be configured accordingly. | |||
458 | 47 | domain: <domain> | 47 | domain: <domain> |
459 | 48 | """ | 48 | """ |
460 | 49 | 49 | ||
461 | 50 | from cloudinit import log as logging | ||
462 | 50 | from cloudinit import util | 51 | from cloudinit import util |
463 | 51 | import os | 52 | import os |
464 | 52 | 53 | ||
465 | 53 | distros = ['ubuntu'] | 54 | distros = ['ubuntu'] |
466 | 54 | 55 | ||
467 | 56 | LOG = logging.getLogger(__name__) | ||
468 | 57 | |||
469 | 58 | _DEFAULT_NETWORK_NAME = "lxdbr0" | ||
470 | 59 | |||
471 | 55 | 60 | ||
472 | 56 | def handle(name, cfg, cloud, log, args): | 61 | def handle(name, cfg, cloud, log, args): |
473 | 57 | # Get config | 62 | # Get config |
474 | @@ -109,6 +114,7 @@ def handle(name, cfg, cloud, log, args): | |||
475 | 109 | # Set up lxd-bridge if bridge config is given | 114 | # Set up lxd-bridge if bridge config is given |
476 | 110 | dconf_comm = "debconf-communicate" | 115 | dconf_comm = "debconf-communicate" |
477 | 111 | if bridge_cfg: | 116 | if bridge_cfg: |
478 | 117 | net_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME) | ||
479 | 112 | if os.path.exists("/etc/default/lxd-bridge") \ | 118 | if os.path.exists("/etc/default/lxd-bridge") \ |
480 | 113 | and util.which(dconf_comm): | 119 | and util.which(dconf_comm): |
481 | 114 | # Bridge configured through packaging | 120 | # Bridge configured through packaging |
482 | @@ -135,15 +141,18 @@ def handle(name, cfg, cloud, log, args): | |||
483 | 135 | else: | 141 | else: |
484 | 136 | # Built-in LXD bridge support | 142 | # Built-in LXD bridge support |
485 | 137 | cmd_create, cmd_attach = bridge_to_cmd(bridge_cfg) | 143 | cmd_create, cmd_attach = bridge_to_cmd(bridge_cfg) |
486 | 144 | maybe_cleanup_default( | ||
487 | 145 | net_name=net_name, did_init=bool(init_cfg), | ||
488 | 146 | create=bool(cmd_create), attach=bool(cmd_attach)) | ||
489 | 138 | if cmd_create: | 147 | if cmd_create: |
490 | 139 | log.debug("Creating lxd bridge: %s" % | 148 | log.debug("Creating lxd bridge: %s" % |
491 | 140 | " ".join(cmd_create)) | 149 | " ".join(cmd_create)) |
493 | 141 | util.subp(cmd_create) | 150 | _lxc(cmd_create) |
494 | 142 | 151 | ||
495 | 143 | if cmd_attach: | 152 | if cmd_attach: |
496 | 144 | log.debug("Setting up default lxd bridge: %s" % | 153 | log.debug("Setting up default lxd bridge: %s" % |
497 | 145 | " ".join(cmd_create)) | 154 | " ".join(cmd_create)) |
499 | 146 | util.subp(cmd_attach) | 155 | _lxc(cmd_attach) |
500 | 147 | 156 | ||
501 | 148 | elif bridge_cfg: | 157 | elif bridge_cfg: |
502 | 149 | raise RuntimeError( | 158 | raise RuntimeError( |
503 | @@ -204,10 +213,10 @@ def bridge_to_cmd(bridge_cfg): | |||
504 | 204 | if bridge_cfg.get("mode") == "none": | 213 | if bridge_cfg.get("mode") == "none": |
505 | 205 | return None, None | 214 | return None, None |
506 | 206 | 215 | ||
508 | 207 | bridge_name = bridge_cfg.get("name", "lxdbr0") | 216 | bridge_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME) |
509 | 208 | cmd_create = [] | 217 | cmd_create = [] |
512 | 209 | cmd_attach = ["lxc", "network", "attach-profile", bridge_name, | 218 | cmd_attach = ["network", "attach-profile", bridge_name, |
513 | 210 | "default", "eth0", "--force-local"] | 219 | "default", "eth0"] |
514 | 211 | 220 | ||
515 | 212 | if bridge_cfg.get("mode") == "existing": | 221 | if bridge_cfg.get("mode") == "existing": |
516 | 213 | return None, cmd_attach | 222 | return None, cmd_attach |
517 | @@ -215,7 +224,7 @@ def bridge_to_cmd(bridge_cfg): | |||
518 | 215 | if bridge_cfg.get("mode") != "new": | 224 | if bridge_cfg.get("mode") != "new": |
519 | 216 | raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode")) | 225 | raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode")) |
520 | 217 | 226 | ||
522 | 218 | cmd_create = ["lxc", "network", "create", bridge_name] | 227 | cmd_create = ["network", "create", bridge_name] |
523 | 219 | 228 | ||
524 | 220 | if bridge_cfg.get("ipv4_address") and bridge_cfg.get("ipv4_netmask"): | 229 | if bridge_cfg.get("ipv4_address") and bridge_cfg.get("ipv4_netmask"): |
525 | 221 | cmd_create.append("ipv4.address=%s/%s" % | 230 | cmd_create.append("ipv4.address=%s/%s" % |
526 | @@ -247,8 +256,47 @@ def bridge_to_cmd(bridge_cfg): | |||
527 | 247 | if bridge_cfg.get("domain"): | 256 | if bridge_cfg.get("domain"): |
528 | 248 | cmd_create.append("dns.domain=%s" % bridge_cfg.get("domain")) | 257 | cmd_create.append("dns.domain=%s" % bridge_cfg.get("domain")) |
529 | 249 | 258 | ||
530 | 250 | cmd_create.append("--force-local") | ||
531 | 251 | |||
532 | 252 | return cmd_create, cmd_attach | 259 | return cmd_create, cmd_attach |
533 | 253 | 260 | ||
534 | 261 | |||
535 | 262 | def _lxc(cmd): | ||
536 | 263 | env = {'LC_ALL': 'C'} | ||
537 | 264 | util.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env) | ||
538 | 265 | |||
539 | 266 | |||
540 | 267 | def maybe_cleanup_default(net_name, did_init, create, attach, | ||
541 | 268 | profile="default", nic_name="eth0"): | ||
542 | 269 | """Newer versions of lxc (3.0.1+) create a lxdbr0 network when | ||
543 | 270 | 'lxd init --auto' is run. Older versions did not. | ||
544 | 271 | |||
545 | 272 | By removing ay that lxd-init created, we simply leave the add/attach | ||
546 | 273 | code in-tact. | ||
547 | 274 | |||
548 | 275 | https://github.com/lxc/lxd/issues/4649""" | ||
549 | 276 | if net_name != _DEFAULT_NETWORK_NAME or not did_init: | ||
550 | 277 | return | ||
551 | 278 | |||
552 | 279 | fail_assume_enoent = " failed. Assuming it did not exist." | ||
553 | 280 | succeeded = " succeeded." | ||
554 | 281 | if create: | ||
555 | 282 | msg = "Deletion of lxd network '%s'" % net_name | ||
556 | 283 | try: | ||
557 | 284 | _lxc(["network", "delete", net_name]) | ||
558 | 285 | LOG.debug(msg + succeeded) | ||
559 | 286 | except util.ProcessExecutionError as e: | ||
560 | 287 | if e.exit_code != 1: | ||
561 | 288 | raise e | ||
562 | 289 | LOG.debug(msg + fail_assume_enoent) | ||
563 | 290 | |||
564 | 291 | if attach: | ||
565 | 292 | msg = "Removal of device '%s' from profile '%s'" % (nic_name, profile) | ||
566 | 293 | try: | ||
567 | 294 | _lxc(["profile", "device", "remove", profile, nic_name]) | ||
568 | 295 | LOG.debug(msg + succeeded) | ||
569 | 296 | except util.ProcessExecutionError as e: | ||
570 | 297 | if e.exit_code != 1: | ||
571 | 298 | raise e | ||
572 | 299 | LOG.debug(msg + fail_assume_enoent) | ||
573 | 300 | |||
574 | 301 | |||
575 | 254 | # vi: ts=4 expandtab | 302 | # vi: ts=4 expandtab |
576 | diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py | |||
577 | index f14a4fc..339baba 100644 | |||
578 | --- a/cloudinit/config/cc_mounts.py | |||
579 | +++ b/cloudinit/config/cc_mounts.py | |||
580 | @@ -76,6 +76,7 @@ DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$" | |||
581 | 76 | DEVICE_NAME_RE = re.compile(DEVICE_NAME_FILTER) | 76 | DEVICE_NAME_RE = re.compile(DEVICE_NAME_FILTER) |
582 | 77 | WS = re.compile("[%s]+" % (whitespace)) | 77 | WS = re.compile("[%s]+" % (whitespace)) |
583 | 78 | FSTAB_PATH = "/etc/fstab" | 78 | FSTAB_PATH = "/etc/fstab" |
584 | 79 | MNT_COMMENT = "comment=cloudconfig" | ||
585 | 79 | 80 | ||
586 | 80 | LOG = logging.getLogger(__name__) | 81 | LOG = logging.getLogger(__name__) |
587 | 81 | 82 | ||
588 | @@ -232,8 +233,8 @@ def setup_swapfile(fname, size=None, maxsize=None): | |||
589 | 232 | if str(size).lower() == "auto": | 233 | if str(size).lower() == "auto": |
590 | 233 | try: | 234 | try: |
591 | 234 | memsize = util.read_meminfo()['total'] | 235 | memsize = util.read_meminfo()['total'] |
594 | 235 | except IOError as e: | 236 | except IOError: |
595 | 236 | LOG.debug("Not creating swap. failed to read meminfo") | 237 | LOG.debug("Not creating swap: failed to read meminfo") |
596 | 237 | return | 238 | return |
597 | 238 | 239 | ||
598 | 239 | util.ensure_dir(tdir) | 240 | util.ensure_dir(tdir) |
599 | @@ -280,17 +281,17 @@ def handle_swapcfg(swapcfg): | |||
600 | 280 | 281 | ||
601 | 281 | if os.path.exists(fname): | 282 | if os.path.exists(fname): |
602 | 282 | if not os.path.exists("/proc/swaps"): | 283 | if not os.path.exists("/proc/swaps"): |
605 | 283 | LOG.debug("swap file %s existed. no /proc/swaps. Being safe.", | 284 | LOG.debug("swap file %s exists, but no /proc/swaps exists, " |
606 | 284 | fname) | 285 | "being safe", fname) |
607 | 285 | return fname | 286 | return fname |
608 | 286 | try: | 287 | try: |
609 | 287 | for line in util.load_file("/proc/swaps").splitlines(): | 288 | for line in util.load_file("/proc/swaps").splitlines(): |
610 | 288 | if line.startswith(fname + " "): | 289 | if line.startswith(fname + " "): |
612 | 289 | LOG.debug("swap file %s already in use.", fname) | 290 | LOG.debug("swap file %s already in use", fname) |
613 | 290 | return fname | 291 | return fname |
615 | 291 | LOG.debug("swap file %s existed, but not in /proc/swaps", fname) | 292 | LOG.debug("swap file %s exists, but not in /proc/swaps", fname) |
616 | 292 | except Exception: | 293 | except Exception: |
618 | 293 | LOG.warning("swap file %s existed. Error reading /proc/swaps", | 294 | LOG.warning("swap file %s exists. Error reading /proc/swaps", |
619 | 294 | fname) | 295 | fname) |
620 | 295 | return fname | 296 | return fname |
621 | 296 | 297 | ||
622 | @@ -327,6 +328,22 @@ def handle(_name, cfg, cloud, log, _args): | |||
623 | 327 | 328 | ||
624 | 328 | LOG.debug("mounts configuration is %s", cfgmnt) | 329 | LOG.debug("mounts configuration is %s", cfgmnt) |
625 | 329 | 330 | ||
626 | 331 | fstab_lines = [] | ||
627 | 332 | fstab_devs = {} | ||
628 | 333 | fstab_removed = [] | ||
629 | 334 | |||
630 | 335 | for line in util.load_file(FSTAB_PATH).splitlines(): | ||
631 | 336 | if MNT_COMMENT in line: | ||
632 | 337 | fstab_removed.append(line) | ||
633 | 338 | continue | ||
634 | 339 | |||
635 | 340 | try: | ||
636 | 341 | toks = WS.split(line) | ||
637 | 342 | except Exception: | ||
638 | 343 | pass | ||
639 | 344 | fstab_devs[toks[0]] = line | ||
640 | 345 | fstab_lines.append(line) | ||
641 | 346 | |||
642 | 330 | for i in range(len(cfgmnt)): | 347 | for i in range(len(cfgmnt)): |
643 | 331 | # skip something that wasn't a list | 348 | # skip something that wasn't a list |
644 | 332 | if not isinstance(cfgmnt[i], list): | 349 | if not isinstance(cfgmnt[i], list): |
645 | @@ -336,12 +353,17 @@ def handle(_name, cfg, cloud, log, _args): | |||
646 | 336 | 353 | ||
647 | 337 | start = str(cfgmnt[i][0]) | 354 | start = str(cfgmnt[i][0]) |
648 | 338 | sanitized = sanitize_devname(start, cloud.device_name_to_device, log) | 355 | sanitized = sanitize_devname(start, cloud.device_name_to_device, log) |
649 | 356 | if sanitized != start: | ||
650 | 357 | log.debug("changed %s => %s" % (start, sanitized)) | ||
651 | 358 | |||
652 | 339 | if sanitized is None: | 359 | if sanitized is None: |
654 | 340 | log.debug("Ignorming nonexistant named mount %s", start) | 360 | log.debug("Ignoring nonexistent named mount %s", start) |
655 | 361 | continue | ||
656 | 362 | elif sanitized in fstab_devs: | ||
657 | 363 | log.info("Device %s already defined in fstab: %s", | ||
658 | 364 | sanitized, fstab_devs[sanitized]) | ||
659 | 341 | continue | 365 | continue |
660 | 342 | 366 | ||
661 | 343 | if sanitized != start: | ||
662 | 344 | log.debug("changed %s => %s" % (start, sanitized)) | ||
663 | 345 | cfgmnt[i][0] = sanitized | 367 | cfgmnt[i][0] = sanitized |
664 | 346 | 368 | ||
665 | 347 | # in case the user did not quote a field (likely fs-freq, fs_passno) | 369 | # in case the user did not quote a field (likely fs-freq, fs_passno) |
666 | @@ -373,11 +395,17 @@ def handle(_name, cfg, cloud, log, _args): | |||
667 | 373 | for defmnt in defmnts: | 395 | for defmnt in defmnts: |
668 | 374 | start = defmnt[0] | 396 | start = defmnt[0] |
669 | 375 | sanitized = sanitize_devname(start, cloud.device_name_to_device, log) | 397 | sanitized = sanitize_devname(start, cloud.device_name_to_device, log) |
670 | 376 | if sanitized is None: | ||
671 | 377 | log.debug("Ignoring nonexistant default named mount %s", start) | ||
672 | 378 | continue | ||
673 | 379 | if sanitized != start: | 398 | if sanitized != start: |
674 | 380 | log.debug("changed default device %s => %s" % (start, sanitized)) | 399 | log.debug("changed default device %s => %s" % (start, sanitized)) |
675 | 400 | |||
676 | 401 | if sanitized is None: | ||
677 | 402 | log.debug("Ignoring nonexistent default named mount %s", start) | ||
678 | 403 | continue | ||
679 | 404 | elif sanitized in fstab_devs: | ||
680 | 405 | log.debug("Device %s already defined in fstab: %s", | ||
681 | 406 | sanitized, fstab_devs[sanitized]) | ||
682 | 407 | continue | ||
683 | 408 | |||
684 | 381 | defmnt[0] = sanitized | 409 | defmnt[0] = sanitized |
685 | 382 | 410 | ||
686 | 383 | cfgmnt_has = False | 411 | cfgmnt_has = False |
687 | @@ -397,7 +425,7 @@ def handle(_name, cfg, cloud, log, _args): | |||
688 | 397 | actlist = [] | 425 | actlist = [] |
689 | 398 | for x in cfgmnt: | 426 | for x in cfgmnt: |
690 | 399 | if x[1] is None: | 427 | if x[1] is None: |
692 | 400 | log.debug("Skipping non-existent device named %s", x[0]) | 428 | log.debug("Skipping nonexistent device named %s", x[0]) |
693 | 401 | else: | 429 | else: |
694 | 402 | actlist.append(x) | 430 | actlist.append(x) |
695 | 403 | 431 | ||
696 | @@ -406,34 +434,21 @@ def handle(_name, cfg, cloud, log, _args): | |||
697 | 406 | actlist.append([swapret, "none", "swap", "sw", "0", "0"]) | 434 | actlist.append([swapret, "none", "swap", "sw", "0", "0"]) |
698 | 407 | 435 | ||
699 | 408 | if len(actlist) == 0: | 436 | if len(actlist) == 0: |
701 | 409 | log.debug("No modifications to fstab needed.") | 437 | log.debug("No modifications to fstab needed") |
702 | 410 | return | 438 | return |
703 | 411 | 439 | ||
704 | 412 | comment = "comment=cloudconfig" | ||
705 | 413 | cc_lines = [] | 440 | cc_lines = [] |
706 | 414 | needswap = False | 441 | needswap = False |
707 | 415 | dirs = [] | 442 | dirs = [] |
708 | 416 | for line in actlist: | 443 | for line in actlist: |
709 | 417 | # write 'comment' in the fs_mntops, entry, claiming this | 444 | # write 'comment' in the fs_mntops, entry, claiming this |
711 | 418 | line[3] = "%s,%s" % (line[3], comment) | 445 | line[3] = "%s,%s" % (line[3], MNT_COMMENT) |
712 | 419 | if line[2] == "swap": | 446 | if line[2] == "swap": |
713 | 420 | needswap = True | 447 | needswap = True |
714 | 421 | if line[1].startswith("/"): | 448 | if line[1].startswith("/"): |
715 | 422 | dirs.append(line[1]) | 449 | dirs.append(line[1]) |
716 | 423 | cc_lines.append('\t'.join(line)) | 450 | cc_lines.append('\t'.join(line)) |
717 | 424 | 451 | ||
718 | 425 | fstab_lines = [] | ||
719 | 426 | removed = [] | ||
720 | 427 | for line in util.load_file(FSTAB_PATH).splitlines(): | ||
721 | 428 | try: | ||
722 | 429 | toks = WS.split(line) | ||
723 | 430 | if toks[3].find(comment) != -1: | ||
724 | 431 | removed.append(line) | ||
725 | 432 | continue | ||
726 | 433 | except Exception: | ||
727 | 434 | pass | ||
728 | 435 | fstab_lines.append(line) | ||
729 | 436 | |||
730 | 437 | for d in dirs: | 452 | for d in dirs: |
731 | 438 | try: | 453 | try: |
732 | 439 | util.ensure_dir(d) | 454 | util.ensure_dir(d) |
733 | @@ -441,7 +456,7 @@ def handle(_name, cfg, cloud, log, _args): | |||
734 | 441 | util.logexc(log, "Failed to make '%s' config-mount", d) | 456 | util.logexc(log, "Failed to make '%s' config-mount", d) |
735 | 442 | 457 | ||
736 | 443 | sadds = [WS.sub(" ", n) for n in cc_lines] | 458 | sadds = [WS.sub(" ", n) for n in cc_lines] |
738 | 444 | sdrops = [WS.sub(" ", n) for n in removed] | 459 | sdrops = [WS.sub(" ", n) for n in fstab_removed] |
739 | 445 | 460 | ||
740 | 446 | sops = (["- " + drop for drop in sdrops if drop not in sadds] + | 461 | sops = (["- " + drop for drop in sdrops if drop not in sadds] + |
741 | 447 | ["+ " + add for add in sadds if add not in sdrops]) | 462 | ["+ " + add for add in sadds if add not in sdrops]) |
742 | diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py | |||
743 | index 878069b..3be0d1c 100644 | |||
744 | --- a/cloudinit/config/cc_phone_home.py | |||
745 | +++ b/cloudinit/config/cc_phone_home.py | |||
746 | @@ -41,6 +41,7 @@ keys to post. Available keys are: | |||
747 | 41 | """ | 41 | """ |
748 | 42 | 42 | ||
749 | 43 | from cloudinit import templater | 43 | from cloudinit import templater |
750 | 44 | from cloudinit import url_helper | ||
751 | 44 | from cloudinit import util | 45 | from cloudinit import util |
752 | 45 | 46 | ||
753 | 46 | from cloudinit.settings import PER_INSTANCE | 47 | from cloudinit.settings import PER_INSTANCE |
754 | @@ -136,9 +137,9 @@ def handle(name, cfg, cloud, log, args): | |||
755 | 136 | } | 137 | } |
756 | 137 | url = templater.render_string(url, url_params) | 138 | url = templater.render_string(url, url_params) |
757 | 138 | try: | 139 | try: |
761 | 139 | util.read_file_or_url(url, data=real_submit_keys, | 140 | url_helper.read_file_or_url( |
762 | 140 | retries=tries, sec_between=3, | 141 | url, data=real_submit_keys, retries=tries, sec_between=3, |
763 | 141 | ssl_details=util.fetch_ssl_details(cloud.paths)) | 142 | ssl_details=util.fetch_ssl_details(cloud.paths)) |
764 | 142 | except Exception: | 143 | except Exception: |
765 | 143 | util.logexc(log, "Failed to post phone home data to %s in %s tries", | 144 | util.logexc(log, "Failed to post phone home data to %s in %s tries", |
766 | 144 | url, tries) | 145 | url, tries) |
767 | diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py | |||
768 | index 82f29e1..2edddd0 100644 | |||
769 | --- a/cloudinit/config/cc_resizefs.py | |||
770 | +++ b/cloudinit/config/cc_resizefs.py | |||
771 | @@ -81,7 +81,7 @@ def _resize_xfs(mount_point, devpth): | |||
772 | 81 | 81 | ||
773 | 82 | 82 | ||
774 | 83 | def _resize_ufs(mount_point, devpth): | 83 | def _resize_ufs(mount_point, devpth): |
776 | 84 | return ('growfs', devpth) | 84 | return ('growfs', '-y', devpth) |
777 | 85 | 85 | ||
778 | 86 | 86 | ||
779 | 87 | def _resize_zfs(mount_point, devpth): | 87 | def _resize_zfs(mount_point, devpth): |
780 | diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py | |||
781 | index b215e95..c95bdaa 100644 | |||
782 | --- a/cloudinit/config/cc_users_groups.py | |||
783 | +++ b/cloudinit/config/cc_users_groups.py | |||
784 | @@ -54,8 +54,9 @@ config keys for an entry in ``users`` are as follows: | |||
785 | 54 | - ``ssh_authorized_keys``: Optional. List of ssh keys to add to user's | 54 | - ``ssh_authorized_keys``: Optional. List of ssh keys to add to user's |
786 | 55 | authkeys file. Default: none | 55 | authkeys file. Default: none |
787 | 56 | - ``ssh_import_id``: Optional. SSH id to import for user. Default: none | 56 | - ``ssh_import_id``: Optional. SSH id to import for user. Default: none |
790 | 57 | - ``sudo``: Optional. Sudo rule to use, or list of sudo rules to use. | 57 | - ``sudo``: Optional. Sudo rule to use, list of sudo rules to use or False. |
791 | 58 | Default: none. | 58 | Default: none. An absence of sudo key, or a value of none or false |
792 | 59 | will result in no sudo rules being written for the user. | ||
793 | 59 | - ``system``: Optional. Create user as system user with no home directory. | 60 | - ``system``: Optional. Create user as system user with no home directory. |
794 | 60 | Default: false | 61 | Default: false |
795 | 61 | - ``uid``: Optional. The user's ID. Default: The next available value. | 62 | - ``uid``: Optional. The user's ID. Default: The next available value. |
796 | @@ -82,6 +83,9 @@ config keys for an entry in ``users`` are as follows: | |||
797 | 82 | 83 | ||
798 | 83 | users: | 84 | users: |
799 | 84 | - default | 85 | - default |
800 | 86 | # User explicitly omitted from sudo permission; also default behavior. | ||
801 | 87 | - name: <some_restricted_user> | ||
802 | 88 | sudo: false | ||
803 | 85 | - name: <username> | 89 | - name: <username> |
804 | 86 | expiredate: <date> | 90 | expiredate: <date> |
805 | 87 | gecos: <comment> | 91 | gecos: <comment> |
806 | diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py | |||
807 | index 76826e0..080a6d0 100644 | |||
808 | --- a/cloudinit/config/schema.py | |||
809 | +++ b/cloudinit/config/schema.py | |||
810 | @@ -4,7 +4,7 @@ | |||
811 | 4 | from __future__ import print_function | 4 | from __future__ import print_function |
812 | 5 | 5 | ||
813 | 6 | from cloudinit import importer | 6 | from cloudinit import importer |
815 | 7 | from cloudinit.util import find_modules, read_file_or_url | 7 | from cloudinit.util import find_modules, load_file |
816 | 8 | 8 | ||
817 | 9 | import argparse | 9 | import argparse |
818 | 10 | from collections import defaultdict | 10 | from collections import defaultdict |
819 | @@ -93,20 +93,33 @@ def validate_cloudconfig_schema(config, schema, strict=False): | |||
820 | 93 | def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors): | 93 | def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors): |
821 | 94 | """Return contents of the cloud-config file annotated with schema errors. | 94 | """Return contents of the cloud-config file annotated with schema errors. |
822 | 95 | 95 | ||
824 | 96 | @param cloudconfig: YAML-loaded object from the original_content. | 96 | @param cloudconfig: YAML-loaded dict from the original_content or empty |
825 | 97 | dict if unparseable. | ||
826 | 97 | @param original_content: The contents of a cloud-config file | 98 | @param original_content: The contents of a cloud-config file |
827 | 98 | @param schema_errors: List of tuples from a JSONSchemaValidationError. The | 99 | @param schema_errors: List of tuples from a JSONSchemaValidationError. The |
828 | 99 | tuples consist of (schemapath, error_message). | 100 | tuples consist of (schemapath, error_message). |
829 | 100 | """ | 101 | """ |
830 | 101 | if not schema_errors: | 102 | if not schema_errors: |
831 | 102 | return original_content | 103 | return original_content |
833 | 103 | schemapaths = _schemapath_for_cloudconfig(cloudconfig, original_content) | 104 | schemapaths = {} |
834 | 105 | if cloudconfig: | ||
835 | 106 | schemapaths = _schemapath_for_cloudconfig( | ||
836 | 107 | cloudconfig, original_content) | ||
837 | 104 | errors_by_line = defaultdict(list) | 108 | errors_by_line = defaultdict(list) |
838 | 105 | error_count = 1 | 109 | error_count = 1 |
839 | 106 | error_footer = [] | 110 | error_footer = [] |
840 | 107 | annotated_content = [] | 111 | annotated_content = [] |
841 | 108 | for path, msg in schema_errors: | 112 | for path, msg in schema_errors: |
843 | 109 | errors_by_line[schemapaths[path]].append(msg) | 113 | match = re.match(r'format-l(?P<line>\d+)\.c(?P<col>\d+).*', path) |
844 | 114 | if match: | ||
845 | 115 | line, col = match.groups() | ||
846 | 116 | errors_by_line[int(line)].append(msg) | ||
847 | 117 | else: | ||
848 | 118 | col = None | ||
849 | 119 | errors_by_line[schemapaths[path]].append(msg) | ||
850 | 120 | if col is not None: | ||
851 | 121 | msg = 'Line {line} column {col}: {msg}'.format( | ||
852 | 122 | line=line, col=col, msg=msg) | ||
853 | 110 | error_footer.append('# E{0}: {1}'.format(error_count, msg)) | 123 | error_footer.append('# E{0}: {1}'.format(error_count, msg)) |
854 | 111 | error_count += 1 | 124 | error_count += 1 |
855 | 112 | lines = original_content.decode().split('\n') | 125 | lines = original_content.decode().split('\n') |
856 | @@ -139,21 +152,34 @@ def validate_cloudconfig_file(config_path, schema, annotate=False): | |||
857 | 139 | """ | 152 | """ |
858 | 140 | if not os.path.exists(config_path): | 153 | if not os.path.exists(config_path): |
859 | 141 | raise RuntimeError('Configfile {0} does not exist'.format(config_path)) | 154 | raise RuntimeError('Configfile {0} does not exist'.format(config_path)) |
861 | 142 | content = read_file_or_url('file://{0}'.format(config_path)).contents | 155 | content = load_file(config_path, decode=False) |
862 | 143 | if not content.startswith(CLOUD_CONFIG_HEADER): | 156 | if not content.startswith(CLOUD_CONFIG_HEADER): |
863 | 144 | errors = ( | 157 | errors = ( |
865 | 145 | ('header', 'File {0} needs to begin with "{1}"'.format( | 158 | ('format-l1.c1', 'File {0} needs to begin with "{1}"'.format( |
866 | 146 | config_path, CLOUD_CONFIG_HEADER.decode())),) | 159 | config_path, CLOUD_CONFIG_HEADER.decode())),) |
869 | 147 | raise SchemaValidationError(errors) | 160 | error = SchemaValidationError(errors) |
870 | 148 | 161 | if annotate: | |
871 | 162 | print(annotated_cloudconfig_file({}, content, error.schema_errors)) | ||
872 | 163 | raise error | ||
873 | 149 | try: | 164 | try: |
874 | 150 | cloudconfig = yaml.safe_load(content) | 165 | cloudconfig = yaml.safe_load(content) |
881 | 151 | except yaml.parser.ParserError as e: | 166 | except (yaml.YAMLError) as e: |
882 | 152 | errors = ( | 167 | line = column = 1 |
883 | 153 | ('format', 'File {0} is not valid yaml. {1}'.format( | 168 | mark = None |
884 | 154 | config_path, str(e))),) | 169 | if hasattr(e, 'context_mark') and getattr(e, 'context_mark'): |
885 | 155 | raise SchemaValidationError(errors) | 170 | mark = getattr(e, 'context_mark') |
886 | 156 | 171 | elif hasattr(e, 'problem_mark') and getattr(e, 'problem_mark'): | |
887 | 172 | mark = getattr(e, 'problem_mark') | ||
888 | 173 | if mark: | ||
889 | 174 | line = mark.line + 1 | ||
890 | 175 | column = mark.column + 1 | ||
891 | 176 | errors = (('format-l{line}.c{col}'.format(line=line, col=column), | ||
892 | 177 | 'File {0} is not valid yaml. {1}'.format( | ||
893 | 178 | config_path, str(e))),) | ||
894 | 179 | error = SchemaValidationError(errors) | ||
895 | 180 | if annotate: | ||
896 | 181 | print(annotated_cloudconfig_file({}, content, error.schema_errors)) | ||
897 | 182 | raise error | ||
898 | 157 | try: | 183 | try: |
899 | 158 | validate_cloudconfig_schema( | 184 | validate_cloudconfig_schema( |
900 | 159 | cloudconfig, schema, strict=True) | 185 | cloudconfig, schema, strict=True) |
901 | @@ -176,7 +202,7 @@ def _schemapath_for_cloudconfig(config, original_content): | |||
902 | 176 | list_index = 0 | 202 | list_index = 0 |
903 | 177 | RE_YAML_INDENT = r'^(\s*)' | 203 | RE_YAML_INDENT = r'^(\s*)' |
904 | 178 | scopes = [] | 204 | scopes = [] |
906 | 179 | for line_number, line in enumerate(content_lines): | 205 | for line_number, line in enumerate(content_lines, 1): |
907 | 180 | indent_depth = len(re.match(RE_YAML_INDENT, line).groups()[0]) | 206 | indent_depth = len(re.match(RE_YAML_INDENT, line).groups()[0]) |
908 | 181 | line = line.strip() | 207 | line = line.strip() |
909 | 182 | if not line or line.startswith('#'): | 208 | if not line or line.startswith('#'): |
910 | @@ -208,8 +234,8 @@ def _schemapath_for_cloudconfig(config, original_content): | |||
911 | 208 | scopes.append((indent_depth + 2, key + '.0')) | 234 | scopes.append((indent_depth + 2, key + '.0')) |
912 | 209 | for inner_list_index in range(0, len(yaml.safe_load(value))): | 235 | for inner_list_index in range(0, len(yaml.safe_load(value))): |
913 | 210 | list_key = key + '.' + str(inner_list_index) | 236 | list_key = key + '.' + str(inner_list_index) |
916 | 211 | schema_line_numbers[list_key] = line_number + 1 | 237 | schema_line_numbers[list_key] = line_number |
917 | 212 | schema_line_numbers[key] = line_number + 1 | 238 | schema_line_numbers[key] = line_number |
918 | 213 | return schema_line_numbers | 239 | return schema_line_numbers |
919 | 214 | 240 | ||
920 | 215 | 241 | ||
921 | @@ -337,9 +363,11 @@ def handle_schema_args(name, args): | |||
922 | 337 | try: | 363 | try: |
923 | 338 | validate_cloudconfig_file( | 364 | validate_cloudconfig_file( |
924 | 339 | args.config_file, full_schema, args.annotate) | 365 | args.config_file, full_schema, args.annotate) |
926 | 340 | except (SchemaValidationError, RuntimeError) as e: | 366 | except SchemaValidationError as e: |
927 | 341 | if not args.annotate: | 367 | if not args.annotate: |
928 | 342 | error(str(e)) | 368 | error(str(e)) |
929 | 369 | except RuntimeError as e: | ||
930 | 370 | error(str(e)) | ||
931 | 343 | else: | 371 | else: |
932 | 344 | print("Valid cloud-config file {0}".format(args.config_file)) | 372 | print("Valid cloud-config file {0}".format(args.config_file)) |
933 | 345 | if args.doc: | 373 | if args.doc: |
934 | diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py | |||
935 | index 6c22b07..ab0b077 100755 | |||
936 | --- a/cloudinit/distros/__init__.py | |||
937 | +++ b/cloudinit/distros/__init__.py | |||
938 | @@ -531,7 +531,7 @@ class Distro(object): | |||
939 | 531 | self.lock_passwd(name) | 531 | self.lock_passwd(name) |
940 | 532 | 532 | ||
941 | 533 | # Configure sudo access | 533 | # Configure sudo access |
943 | 534 | if 'sudo' in kwargs: | 534 | if 'sudo' in kwargs and kwargs['sudo'] is not False: |
944 | 535 | self.write_sudo_rules(name, kwargs['sudo']) | 535 | self.write_sudo_rules(name, kwargs['sudo']) |
945 | 536 | 536 | ||
946 | 537 | # Import SSH keys | 537 | # Import SSH keys |
947 | diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py | |||
948 | index 5b1718a..ff22d56 100644 | |||
949 | --- a/cloudinit/distros/freebsd.py | |||
950 | +++ b/cloudinit/distros/freebsd.py | |||
951 | @@ -266,7 +266,7 @@ class Distro(distros.Distro): | |||
952 | 266 | self.lock_passwd(name) | 266 | self.lock_passwd(name) |
953 | 267 | 267 | ||
954 | 268 | # Configure sudo access | 268 | # Configure sudo access |
956 | 269 | if 'sudo' in kwargs: | 269 | if 'sudo' in kwargs and kwargs['sudo'] is not False: |
957 | 270 | self.write_sudo_rules(name, kwargs['sudo']) | 270 | self.write_sudo_rules(name, kwargs['sudo']) |
958 | 271 | 271 | ||
959 | 272 | # Import SSH keys | 272 | # Import SSH keys |
960 | diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py | |||
961 | index dc3f0fc..3b7b17f 100644 | |||
962 | --- a/cloudinit/ec2_utils.py | |||
963 | +++ b/cloudinit/ec2_utils.py | |||
964 | @@ -150,11 +150,9 @@ def get_instance_userdata(api_version='latest', | |||
965 | 150 | # NOT_FOUND occurs) and just in that case returning an empty string. | 150 | # NOT_FOUND occurs) and just in that case returning an empty string. |
966 | 151 | exception_cb = functools.partial(_skip_retry_on_codes, | 151 | exception_cb = functools.partial(_skip_retry_on_codes, |
967 | 152 | SKIP_USERDATA_CODES) | 152 | SKIP_USERDATA_CODES) |
973 | 153 | response = util.read_file_or_url(ud_url, | 153 | response = url_helper.read_file_or_url( |
974 | 154 | ssl_details=ssl_details, | 154 | ud_url, ssl_details=ssl_details, timeout=timeout, |
975 | 155 | timeout=timeout, | 155 | retries=retries, exception_cb=exception_cb) |
971 | 156 | retries=retries, | ||
972 | 157 | exception_cb=exception_cb) | ||
976 | 158 | user_data = response.contents | 156 | user_data = response.contents |
977 | 159 | except url_helper.UrlError as e: | 157 | except url_helper.UrlError as e: |
978 | 160 | if e.code not in SKIP_USERDATA_CODES: | 158 | if e.code not in SKIP_USERDATA_CODES: |
979 | @@ -169,9 +167,9 @@ def _get_instance_metadata(tree, api_version='latest', | |||
980 | 169 | ssl_details=None, timeout=5, retries=5, | 167 | ssl_details=None, timeout=5, retries=5, |
981 | 170 | leaf_decoder=None): | 168 | leaf_decoder=None): |
982 | 171 | md_url = url_helper.combine_url(metadata_address, api_version, tree) | 169 | md_url = url_helper.combine_url(metadata_address, api_version, tree) |
986 | 172 | caller = functools.partial(util.read_file_or_url, | 170 | caller = functools.partial( |
987 | 173 | ssl_details=ssl_details, timeout=timeout, | 171 | url_helper.read_file_or_url, ssl_details=ssl_details, |
988 | 174 | retries=retries) | 172 | timeout=timeout, retries=retries) |
989 | 175 | 173 | ||
990 | 176 | def mcaller(url): | 174 | def mcaller(url): |
991 | 177 | return caller(url).contents | 175 | return caller(url).contents |
992 | diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py | |||
993 | index 1ca92d4..dc33876 100644 | |||
994 | --- a/cloudinit/handlers/upstart_job.py | |||
995 | +++ b/cloudinit/handlers/upstart_job.py | |||
996 | @@ -97,7 +97,7 @@ def _has_suitable_upstart(): | |||
997 | 97 | else: | 97 | else: |
998 | 98 | util.logexc(LOG, "dpkg --compare-versions failed [%s]", | 98 | util.logexc(LOG, "dpkg --compare-versions failed [%s]", |
999 | 99 | e.exit_code) | 99 | e.exit_code) |
1001 | 100 | except Exception as e: | 100 | except Exception: |
1002 | 101 | util.logexc(LOG, "dpkg --compare-versions failed") | 101 | util.logexc(LOG, "dpkg --compare-versions failed") |
1003 | 102 | return False | 102 | return False |
1004 | 103 | else: | 103 | else: |
1005 | diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py | |||
1006 | index 43226bd..3ffde52 100644 | |||
1007 | --- a/cloudinit/net/__init__.py | |||
1008 | +++ b/cloudinit/net/__init__.py | |||
1009 | @@ -359,8 +359,12 @@ def interface_has_own_mac(ifname, strict=False): | |||
1010 | 359 | 1: randomly generated 3: set using dev_set_mac_address""" | 359 | 1: randomly generated 3: set using dev_set_mac_address""" |
1011 | 360 | 360 | ||
1012 | 361 | assign_type = read_sys_net_int(ifname, "addr_assign_type") | 361 | assign_type = read_sys_net_int(ifname, "addr_assign_type") |
1015 | 362 | if strict and assign_type is None: | 362 | if assign_type is None: |
1016 | 363 | raise ValueError("%s had no addr_assign_type.") | 363 | # None is returned if this nic had no 'addr_assign_type' entry. |
1017 | 364 | # if strict, raise an error, if not return True. | ||
1018 | 365 | if strict: | ||
1019 | 366 | raise ValueError("%s had no addr_assign_type.") | ||
1020 | 367 | return True | ||
1021 | 364 | return assign_type in (0, 1, 3) | 368 | return assign_type in (0, 1, 3) |
1022 | 365 | 369 | ||
1023 | 366 | 370 | ||
1024 | diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py | |||
1025 | index c6a71d1..bd20a36 100644 | |||
1026 | --- a/cloudinit/net/eni.py | |||
1027 | +++ b/cloudinit/net/eni.py | |||
1028 | @@ -10,9 +10,12 @@ from . import ParserError | |||
1029 | 10 | from . import renderer | 10 | from . import renderer |
1030 | 11 | from .network_state import subnet_is_ipv6 | 11 | from .network_state import subnet_is_ipv6 |
1031 | 12 | 12 | ||
1032 | 13 | from cloudinit import log as logging | ||
1033 | 13 | from cloudinit import util | 14 | from cloudinit import util |
1034 | 14 | 15 | ||
1035 | 15 | 16 | ||
1036 | 17 | LOG = logging.getLogger(__name__) | ||
1037 | 18 | |||
1038 | 16 | NET_CONFIG_COMMANDS = [ | 19 | NET_CONFIG_COMMANDS = [ |
1039 | 17 | "pre-up", "up", "post-up", "down", "pre-down", "post-down", | 20 | "pre-up", "up", "post-up", "down", "pre-down", "post-down", |
1040 | 18 | ] | 21 | ] |
1041 | @@ -61,7 +64,7 @@ def _iface_add_subnet(iface, subnet): | |||
1042 | 61 | 64 | ||
1043 | 62 | 65 | ||
1044 | 63 | # TODO: switch to valid_map for attrs | 66 | # TODO: switch to valid_map for attrs |
1046 | 64 | def _iface_add_attrs(iface, index): | 67 | def _iface_add_attrs(iface, index, ipv4_subnet_mtu): |
1047 | 65 | # If the index is non-zero, this is an alias interface. Alias interfaces | 68 | # If the index is non-zero, this is an alias interface. Alias interfaces |
1048 | 66 | # represent additional interface addresses, and should not have additional | 69 | # represent additional interface addresses, and should not have additional |
1049 | 67 | # attributes. (extra attributes here are almost always either incorrect, | 70 | # attributes. (extra attributes here are almost always either incorrect, |
1050 | @@ -100,6 +103,13 @@ def _iface_add_attrs(iface, index): | |||
1051 | 100 | value = 'on' if iface[key] else 'off' | 103 | value = 'on' if iface[key] else 'off' |
1052 | 101 | if not value or key in ignore_map: | 104 | if not value or key in ignore_map: |
1053 | 102 | continue | 105 | continue |
1054 | 106 | if key == 'mtu' and ipv4_subnet_mtu: | ||
1055 | 107 | if value != ipv4_subnet_mtu: | ||
1056 | 108 | LOG.warning( | ||
1057 | 109 | "Network config: ignoring %s device-level mtu:%s because" | ||
1058 | 110 | " ipv4 subnet-level mtu:%s provided.", | ||
1059 | 111 | iface['name'], value, ipv4_subnet_mtu) | ||
1060 | 112 | continue | ||
1061 | 103 | if key in multiline_keys: | 113 | if key in multiline_keys: |
1062 | 104 | for v in value: | 114 | for v in value: |
1063 | 105 | content.append(" {0} {1}".format(renames.get(key, key), v)) | 115 | content.append(" {0} {1}".format(renames.get(key, key), v)) |
1064 | @@ -377,12 +387,15 @@ class Renderer(renderer.Renderer): | |||
1065 | 377 | subnets = iface.get('subnets', {}) | 387 | subnets = iface.get('subnets', {}) |
1066 | 378 | if subnets: | 388 | if subnets: |
1067 | 379 | for index, subnet in enumerate(subnets): | 389 | for index, subnet in enumerate(subnets): |
1068 | 390 | ipv4_subnet_mtu = None | ||
1069 | 380 | iface['index'] = index | 391 | iface['index'] = index |
1070 | 381 | iface['mode'] = subnet['type'] | 392 | iface['mode'] = subnet['type'] |
1071 | 382 | iface['control'] = subnet.get('control', 'auto') | 393 | iface['control'] = subnet.get('control', 'auto') |
1072 | 383 | subnet_inet = 'inet' | 394 | subnet_inet = 'inet' |
1073 | 384 | if subnet_is_ipv6(subnet): | 395 | if subnet_is_ipv6(subnet): |
1074 | 385 | subnet_inet += '6' | 396 | subnet_inet += '6' |
1075 | 397 | else: | ||
1076 | 398 | ipv4_subnet_mtu = subnet.get('mtu') | ||
1077 | 386 | iface['inet'] = subnet_inet | 399 | iface['inet'] = subnet_inet |
1078 | 387 | if subnet['type'].startswith('dhcp'): | 400 | if subnet['type'].startswith('dhcp'): |
1079 | 388 | iface['mode'] = 'dhcp' | 401 | iface['mode'] = 'dhcp' |
1080 | @@ -397,7 +410,7 @@ class Renderer(renderer.Renderer): | |||
1081 | 397 | _iface_start_entry( | 410 | _iface_start_entry( |
1082 | 398 | iface, index, render_hwaddress=render_hwaddress) + | 411 | iface, index, render_hwaddress=render_hwaddress) + |
1083 | 399 | _iface_add_subnet(iface, subnet) + | 412 | _iface_add_subnet(iface, subnet) + |
1085 | 400 | _iface_add_attrs(iface, index) | 413 | _iface_add_attrs(iface, index, ipv4_subnet_mtu) |
1086 | 401 | ) | 414 | ) |
1087 | 402 | for route in subnet.get('routes', []): | 415 | for route in subnet.get('routes', []): |
1088 | 403 | lines.extend(self._render_route(route, indent=" ")) | 416 | lines.extend(self._render_route(route, indent=" ")) |
1089 | @@ -409,7 +422,8 @@ class Renderer(renderer.Renderer): | |||
1090 | 409 | if 'bond-master' in iface or 'bond-slaves' in iface: | 422 | if 'bond-master' in iface or 'bond-slaves' in iface: |
1091 | 410 | lines.append("auto {name}".format(**iface)) | 423 | lines.append("auto {name}".format(**iface)) |
1092 | 411 | lines.append("iface {name} {inet} {mode}".format(**iface)) | 424 | lines.append("iface {name} {inet} {mode}".format(**iface)) |
1094 | 412 | lines.extend(_iface_add_attrs(iface, index=0)) | 425 | lines.extend( |
1095 | 426 | _iface_add_attrs(iface, index=0, ipv4_subnet_mtu=None)) | ||
1096 | 413 | sections.append(lines) | 427 | sections.append(lines) |
1097 | 414 | return sections | 428 | return sections |
1098 | 415 | 429 | ||
1099 | diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py | |||
1100 | index 6344348..4014363 100644 | |||
1101 | --- a/cloudinit/net/netplan.py | |||
1102 | +++ b/cloudinit/net/netplan.py | |||
1103 | @@ -34,7 +34,7 @@ def _get_params_dict_by_match(config, match): | |||
1104 | 34 | if key.startswith(match)) | 34 | if key.startswith(match)) |
1105 | 35 | 35 | ||
1106 | 36 | 36 | ||
1108 | 37 | def _extract_addresses(config, entry): | 37 | def _extract_addresses(config, entry, ifname): |
1109 | 38 | """This method parse a cloudinit.net.network_state dictionary (config) and | 38 | """This method parse a cloudinit.net.network_state dictionary (config) and |
1110 | 39 | maps netstate keys/values into a dictionary (entry) to represent | 39 | maps netstate keys/values into a dictionary (entry) to represent |
1111 | 40 | netplan yaml. | 40 | netplan yaml. |
1112 | @@ -124,6 +124,15 @@ def _extract_addresses(config, entry): | |||
1113 | 124 | 124 | ||
1114 | 125 | addresses.append(addr) | 125 | addresses.append(addr) |
1115 | 126 | 126 | ||
1116 | 127 | if 'mtu' in config: | ||
1117 | 128 | entry_mtu = entry.get('mtu') | ||
1118 | 129 | if entry_mtu and config['mtu'] != entry_mtu: | ||
1119 | 130 | LOG.warning( | ||
1120 | 131 | "Network config: ignoring %s device-level mtu:%s because" | ||
1121 | 132 | " ipv4 subnet-level mtu:%s provided.", | ||
1122 | 133 | ifname, config['mtu'], entry_mtu) | ||
1123 | 134 | else: | ||
1124 | 135 | entry['mtu'] = config['mtu'] | ||
1125 | 127 | if len(addresses) > 0: | 136 | if len(addresses) > 0: |
1126 | 128 | entry.update({'addresses': addresses}) | 137 | entry.update({'addresses': addresses}) |
1127 | 129 | if len(routes) > 0: | 138 | if len(routes) > 0: |
1128 | @@ -262,10 +271,7 @@ class Renderer(renderer.Renderer): | |||
1129 | 262 | else: | 271 | else: |
1130 | 263 | del eth['match'] | 272 | del eth['match'] |
1131 | 264 | del eth['set-name'] | 273 | del eth['set-name'] |
1136 | 265 | if 'mtu' in ifcfg: | 274 | _extract_addresses(ifcfg, eth, ifname) |
1133 | 266 | eth['mtu'] = ifcfg.get('mtu') | ||
1134 | 267 | |||
1135 | 268 | _extract_addresses(ifcfg, eth) | ||
1137 | 269 | ethernets.update({ifname: eth}) | 275 | ethernets.update({ifname: eth}) |
1138 | 270 | 276 | ||
1139 | 271 | elif if_type == 'bond': | 277 | elif if_type == 'bond': |
1140 | @@ -288,7 +294,7 @@ class Renderer(renderer.Renderer): | |||
1141 | 288 | slave_interfaces = ifcfg.get('bond-slaves') | 294 | slave_interfaces = ifcfg.get('bond-slaves') |
1142 | 289 | if slave_interfaces == 'none': | 295 | if slave_interfaces == 'none': |
1143 | 290 | _extract_bond_slaves_by_name(interfaces, bond, ifname) | 296 | _extract_bond_slaves_by_name(interfaces, bond, ifname) |
1145 | 291 | _extract_addresses(ifcfg, bond) | 297 | _extract_addresses(ifcfg, bond, ifname) |
1146 | 292 | bonds.update({ifname: bond}) | 298 | bonds.update({ifname: bond}) |
1147 | 293 | 299 | ||
1148 | 294 | elif if_type == 'bridge': | 300 | elif if_type == 'bridge': |
1149 | @@ -321,7 +327,7 @@ class Renderer(renderer.Renderer): | |||
1150 | 321 | 327 | ||
1151 | 322 | if len(br_config) > 0: | 328 | if len(br_config) > 0: |
1152 | 323 | bridge.update({'parameters': br_config}) | 329 | bridge.update({'parameters': br_config}) |
1154 | 324 | _extract_addresses(ifcfg, bridge) | 330 | _extract_addresses(ifcfg, bridge, ifname) |
1155 | 325 | bridges.update({ifname: bridge}) | 331 | bridges.update({ifname: bridge}) |
1156 | 326 | 332 | ||
1157 | 327 | elif if_type == 'vlan': | 333 | elif if_type == 'vlan': |
1158 | @@ -333,7 +339,7 @@ class Renderer(renderer.Renderer): | |||
1159 | 333 | macaddr = ifcfg.get('mac_address', None) | 339 | macaddr = ifcfg.get('mac_address', None) |
1160 | 334 | if macaddr is not None: | 340 | if macaddr is not None: |
1161 | 335 | vlan['macaddress'] = macaddr.lower() | 341 | vlan['macaddress'] = macaddr.lower() |
1163 | 336 | _extract_addresses(ifcfg, vlan) | 342 | _extract_addresses(ifcfg, vlan, ifname) |
1164 | 337 | vlans.update({ifname: vlan}) | 343 | vlans.update({ifname: vlan}) |
1165 | 338 | 344 | ||
1166 | 339 | # inject global nameserver values under each all interface which | 345 | # inject global nameserver values under each all interface which |
1167 | diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py | |||
1168 | index e53b9f1..3d71923 100644 | |||
1169 | --- a/cloudinit/net/sysconfig.py | |||
1170 | +++ b/cloudinit/net/sysconfig.py | |||
1171 | @@ -304,6 +304,13 @@ class Renderer(renderer.Renderer): | |||
1172 | 304 | mtu_key = 'IPV6_MTU' | 304 | mtu_key = 'IPV6_MTU' |
1173 | 305 | iface_cfg['IPV6INIT'] = True | 305 | iface_cfg['IPV6INIT'] = True |
1174 | 306 | if 'mtu' in subnet: | 306 | if 'mtu' in subnet: |
1175 | 307 | mtu_mismatch = bool(mtu_key in iface_cfg and | ||
1176 | 308 | subnet['mtu'] != iface_cfg[mtu_key]) | ||
1177 | 309 | if mtu_mismatch: | ||
1178 | 310 | LOG.warning( | ||
1179 | 311 | 'Network config: ignoring %s device-level mtu:%s' | ||
1180 | 312 | ' because ipv4 subnet-level mtu:%s provided.', | ||
1181 | 313 | iface_cfg.name, iface_cfg[mtu_key], subnet['mtu']) | ||
1182 | 307 | iface_cfg[mtu_key] = subnet['mtu'] | 314 | iface_cfg[mtu_key] = subnet['mtu'] |
1183 | 308 | elif subnet_type == 'manual': | 315 | elif subnet_type == 'manual': |
1184 | 309 | # If the subnet has an MTU setting, then ONBOOT=True | 316 | # If the subnet has an MTU setting, then ONBOOT=True |
1185 | diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py | |||
1186 | index f090616..9ff929c 100644 | |||
1187 | --- a/cloudinit/netinfo.py | |||
1188 | +++ b/cloudinit/netinfo.py | |||
1189 | @@ -138,7 +138,7 @@ def _netdev_info_ifconfig(ifconfig_data): | |||
1190 | 138 | elif toks[i].startswith("scope:"): | 138 | elif toks[i].startswith("scope:"): |
1191 | 139 | devs[curdev]['ipv6'][-1]['scope6'] = toks[i].lstrip("scope:") | 139 | devs[curdev]['ipv6'][-1]['scope6'] = toks[i].lstrip("scope:") |
1192 | 140 | elif toks[i] == "scopeid": | 140 | elif toks[i] == "scopeid": |
1194 | 141 | res = re.match(".*<(\S+)>", toks[i + 1]) | 141 | res = re.match(r'.*<(\S+)>', toks[i + 1]) |
1195 | 142 | if res: | 142 | if res: |
1196 | 143 | devs[curdev]['ipv6'][-1]['scope6'] = res.group(1) | 143 | devs[curdev]['ipv6'][-1]['scope6'] = res.group(1) |
1197 | 144 | return devs | 144 | return devs |
1198 | @@ -158,12 +158,28 @@ def netdev_info(empty=""): | |||
1199 | 158 | LOG.warning( | 158 | LOG.warning( |
1200 | 159 | "Could not print networks: missing 'ip' and 'ifconfig' commands") | 159 | "Could not print networks: missing 'ip' and 'ifconfig' commands") |
1201 | 160 | 160 | ||
1207 | 161 | if empty != "": | 161 | if empty == "": |
1208 | 162 | for (_devname, dev) in devs.items(): | 162 | return devs |
1204 | 163 | for field in dev: | ||
1205 | 164 | if dev[field] == "": | ||
1206 | 165 | dev[field] = empty | ||
1209 | 166 | 163 | ||
1210 | 164 | recurse_types = (dict, tuple, list) | ||
1211 | 165 | |||
1212 | 166 | def fill(data, new_val="", empty_vals=("", b"")): | ||
1213 | 167 | """Recursively replace 'empty_vals' in data (dict, tuple, list) | ||
1214 | 168 | with new_val""" | ||
1215 | 169 | if isinstance(data, dict): | ||
1216 | 170 | myiter = data.items() | ||
1217 | 171 | elif isinstance(data, (tuple, list)): | ||
1218 | 172 | myiter = enumerate(data) | ||
1219 | 173 | else: | ||
1220 | 174 | raise TypeError("Unexpected input to fill") | ||
1221 | 175 | |||
1222 | 176 | for key, val in myiter: | ||
1223 | 177 | if val in empty_vals: | ||
1224 | 178 | data[key] = new_val | ||
1225 | 179 | elif isinstance(val, recurse_types): | ||
1226 | 180 | fill(val, new_val) | ||
1227 | 181 | |||
1228 | 182 | fill(devs, new_val=empty) | ||
1229 | 167 | return devs | 183 | return devs |
1230 | 168 | 184 | ||
1231 | 169 | 185 | ||
1232 | @@ -353,8 +369,9 @@ def getgateway(): | |||
1233 | 353 | 369 | ||
1234 | 354 | def netdev_pformat(): | 370 | def netdev_pformat(): |
1235 | 355 | lines = [] | 371 | lines = [] |
1236 | 372 | empty = "." | ||
1237 | 356 | try: | 373 | try: |
1239 | 357 | netdev = netdev_info(empty=".") | 374 | netdev = netdev_info(empty=empty) |
1240 | 358 | except Exception as e: | 375 | except Exception as e: |
1241 | 359 | lines.append( | 376 | lines.append( |
1242 | 360 | util.center( | 377 | util.center( |
1243 | @@ -368,12 +385,15 @@ def netdev_pformat(): | |||
1244 | 368 | for (dev, data) in sorted(netdev.items()): | 385 | for (dev, data) in sorted(netdev.items()): |
1245 | 369 | for addr in data.get('ipv4'): | 386 | for addr in data.get('ipv4'): |
1246 | 370 | tbl.add_row( | 387 | tbl.add_row( |
1249 | 371 | [dev, data["up"], addr["ip"], addr["mask"], | 388 | (dev, data["up"], addr["ip"], addr["mask"], |
1250 | 372 | addr.get('scope', '.'), data["hwaddr"]]) | 389 | addr.get('scope', empty), data["hwaddr"])) |
1251 | 373 | for addr in data.get('ipv6'): | 390 | for addr in data.get('ipv6'): |
1252 | 374 | tbl.add_row( | 391 | tbl.add_row( |
1255 | 375 | [dev, data["up"], addr["ip"], ".", addr["scope6"], | 392 | (dev, data["up"], addr["ip"], empty, addr["scope6"], |
1256 | 376 | data["hwaddr"]]) | 393 | data["hwaddr"])) |
1257 | 394 | if len(data.get('ipv6')) + len(data.get('ipv4')) == 0: | ||
1258 | 395 | tbl.add_row((dev, data["up"], empty, empty, empty, | ||
1259 | 396 | data["hwaddr"])) | ||
1260 | 377 | netdev_s = tbl.get_string() | 397 | netdev_s = tbl.get_string() |
1261 | 378 | max_len = len(max(netdev_s.splitlines(), key=len)) | 398 | max_len = len(max(netdev_s.splitlines(), key=len)) |
1262 | 379 | header = util.center("Net device info", "+", max_len) | 399 | header = util.center("Net device info", "+", max_len) |
1263 | diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py | |||
1264 | index f6e86f3..24fd65f 100644 | |||
1265 | --- a/cloudinit/sources/DataSourceAltCloud.py | |||
1266 | +++ b/cloudinit/sources/DataSourceAltCloud.py | |||
1267 | @@ -184,11 +184,11 @@ class DataSourceAltCloud(sources.DataSource): | |||
1268 | 184 | cmd = CMD_PROBE_FLOPPY | 184 | cmd = CMD_PROBE_FLOPPY |
1269 | 185 | (cmd_out, _err) = util.subp(cmd) | 185 | (cmd_out, _err) = util.subp(cmd) |
1270 | 186 | LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out) | 186 | LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out) |
1273 | 187 | except ProcessExecutionError as _err: | 187 | except ProcessExecutionError as e: |
1274 | 188 | util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err) | 188 | util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e) |
1275 | 189 | return False | 189 | return False |
1278 | 190 | except OSError as _err: | 190 | except OSError as e: |
1279 | 191 | util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err) | 191 | util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e) |
1280 | 192 | return False | 192 | return False |
1281 | 193 | 193 | ||
1282 | 194 | floppy_dev = '/dev/fd0' | 194 | floppy_dev = '/dev/fd0' |
1283 | @@ -197,11 +197,11 @@ class DataSourceAltCloud(sources.DataSource): | |||
1284 | 197 | try: | 197 | try: |
1285 | 198 | (cmd_out, _err) = util.udevadm_settle(exists=floppy_dev, timeout=5) | 198 | (cmd_out, _err) = util.udevadm_settle(exists=floppy_dev, timeout=5) |
1286 | 199 | LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out) | 199 | LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out) |
1289 | 200 | except ProcessExecutionError as _err: | 200 | except ProcessExecutionError as e: |
1290 | 201 | util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err) | 201 | util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e) |
1291 | 202 | return False | 202 | return False |
1294 | 203 | except OSError as _err: | 203 | except OSError as e: |
1295 | 204 | util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err) | 204 | util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e) |
1296 | 205 | return False | 205 | return False |
1297 | 206 | 206 | ||
1298 | 207 | try: | 207 | try: |
1299 | diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py | |||
1300 | index a71197a..7007d9e 100644 | |||
1301 | --- a/cloudinit/sources/DataSourceAzure.py | |||
1302 | +++ b/cloudinit/sources/DataSourceAzure.py | |||
1303 | @@ -48,6 +48,7 @@ DEFAULT_FS = 'ext4' | |||
1304 | 48 | # DMI chassis-asset-tag is set static for all azure instances | 48 | # DMI chassis-asset-tag is set static for all azure instances |
1305 | 49 | AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77' | 49 | AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77' |
1306 | 50 | REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds" | 50 | REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds" |
1307 | 51 | REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready" | ||
1308 | 51 | IMDS_URL = "http://169.254.169.254/metadata/reprovisiondata" | 52 | IMDS_URL = "http://169.254.169.254/metadata/reprovisiondata" |
1309 | 52 | 53 | ||
1310 | 53 | 54 | ||
1311 | @@ -207,6 +208,7 @@ BUILTIN_CLOUD_CONFIG = { | |||
1312 | 207 | } | 208 | } |
1313 | 208 | 209 | ||
1314 | 209 | DS_CFG_PATH = ['datasource', DS_NAME] | 210 | DS_CFG_PATH = ['datasource', DS_NAME] |
1315 | 211 | DS_CFG_KEY_PRESERVE_NTFS = 'never_destroy_ntfs' | ||
1316 | 210 | DEF_EPHEMERAL_LABEL = 'Temporary Storage' | 212 | DEF_EPHEMERAL_LABEL = 'Temporary Storage' |
1317 | 211 | 213 | ||
1318 | 212 | # The redacted password fails to meet password complexity requirements | 214 | # The redacted password fails to meet password complexity requirements |
1319 | @@ -393,14 +395,9 @@ class DataSourceAzure(sources.DataSource): | |||
1320 | 393 | if found == ddir: | 395 | if found == ddir: |
1321 | 394 | LOG.debug("using files cached in %s", ddir) | 396 | LOG.debug("using files cached in %s", ddir) |
1322 | 395 | 397 | ||
1331 | 396 | # azure / hyper-v provides random data here | 398 | seed = _get_random_seed() |
1332 | 397 | # TODO. find the seed on FreeBSD platform | 399 | if seed: |
1333 | 398 | # now update ds_cfg to reflect contents pass in config | 400 | self.metadata['random_seed'] = seed |
1326 | 399 | if not util.is_FreeBSD(): | ||
1327 | 400 | seed = util.load_file("/sys/firmware/acpi/tables/OEM0", | ||
1328 | 401 | quiet=True, decode=False) | ||
1329 | 402 | if seed: | ||
1330 | 403 | self.metadata['random_seed'] = seed | ||
1334 | 404 | 401 | ||
1335 | 405 | user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {}) | 402 | user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {}) |
1336 | 406 | self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg]) | 403 | self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg]) |
1337 | @@ -436,11 +433,12 @@ class DataSourceAzure(sources.DataSource): | |||
1338 | 436 | LOG.debug("negotiating already done for %s", | 433 | LOG.debug("negotiating already done for %s", |
1339 | 437 | self.get_instance_id()) | 434 | self.get_instance_id()) |
1340 | 438 | 435 | ||
1342 | 439 | def _poll_imds(self, report_ready=True): | 436 | def _poll_imds(self): |
1343 | 440 | """Poll IMDS for the new provisioning data until we get a valid | 437 | """Poll IMDS for the new provisioning data until we get a valid |
1344 | 441 | response. Then return the returned JSON object.""" | 438 | response. Then return the returned JSON object.""" |
1345 | 442 | url = IMDS_URL + "?api-version=2017-04-02" | 439 | url = IMDS_URL + "?api-version=2017-04-02" |
1346 | 443 | headers = {"Metadata": "true"} | 440 | headers = {"Metadata": "true"} |
1347 | 441 | report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE)) | ||
1348 | 444 | LOG.debug("Start polling IMDS") | 442 | LOG.debug("Start polling IMDS") |
1349 | 445 | 443 | ||
1350 | 446 | def exc_cb(msg, exception): | 444 | def exc_cb(msg, exception): |
1351 | @@ -450,13 +448,17 @@ class DataSourceAzure(sources.DataSource): | |||
1352 | 450 | # call DHCP and setup the ephemeral network to acquire the new IP. | 448 | # call DHCP and setup the ephemeral network to acquire the new IP. |
1353 | 451 | return False | 449 | return False |
1354 | 452 | 450 | ||
1355 | 453 | need_report = report_ready | ||
1356 | 454 | while True: | 451 | while True: |
1357 | 455 | try: | 452 | try: |
1358 | 456 | with EphemeralDHCPv4() as lease: | 453 | with EphemeralDHCPv4() as lease: |
1360 | 457 | if need_report: | 454 | if report_ready: |
1361 | 455 | path = REPORTED_READY_MARKER_FILE | ||
1362 | 456 | LOG.info( | ||
1363 | 457 | "Creating a marker file to report ready: %s", path) | ||
1364 | 458 | util.write_file(path, "{pid}: {time}\n".format( | ||
1365 | 459 | pid=os.getpid(), time=time())) | ||
1366 | 458 | self._report_ready(lease=lease) | 460 | self._report_ready(lease=lease) |
1368 | 459 | need_report = False | 461 | report_ready = False |
1369 | 460 | return readurl(url, timeout=1, headers=headers, | 462 | return readurl(url, timeout=1, headers=headers, |
1370 | 461 | exception_cb=exc_cb, infinite=True).contents | 463 | exception_cb=exc_cb, infinite=True).contents |
1371 | 462 | except UrlError: | 464 | except UrlError: |
1372 | @@ -490,8 +492,10 @@ class DataSourceAzure(sources.DataSource): | |||
1373 | 490 | if (cfg.get('PreprovisionedVm') is True or | 492 | if (cfg.get('PreprovisionedVm') is True or |
1374 | 491 | os.path.isfile(path)): | 493 | os.path.isfile(path)): |
1375 | 492 | if not os.path.isfile(path): | 494 | if not os.path.isfile(path): |
1378 | 493 | LOG.info("Creating a marker file to poll imds") | 495 | LOG.info("Creating a marker file to poll imds: %s", |
1379 | 494 | util.write_file(path, "%s: %s\n" % (os.getpid(), time())) | 496 | path) |
1380 | 497 | util.write_file(path, "{pid}: {time}\n".format( | ||
1381 | 498 | pid=os.getpid(), time=time())) | ||
1382 | 495 | return True | 499 | return True |
1383 | 496 | return False | 500 | return False |
1384 | 497 | 501 | ||
1385 | @@ -526,11 +530,14 @@ class DataSourceAzure(sources.DataSource): | |||
1386 | 526 | "Error communicating with Azure fabric; You may experience." | 530 | "Error communicating with Azure fabric; You may experience." |
1387 | 527 | "connectivity issues.", exc_info=True) | 531 | "connectivity issues.", exc_info=True) |
1388 | 528 | return False | 532 | return False |
1389 | 533 | util.del_file(REPORTED_READY_MARKER_FILE) | ||
1390 | 529 | util.del_file(REPROVISION_MARKER_FILE) | 534 | util.del_file(REPROVISION_MARKER_FILE) |
1391 | 530 | return fabric_data | 535 | return fabric_data |
1392 | 531 | 536 | ||
1393 | 532 | def activate(self, cfg, is_new_instance): | 537 | def activate(self, cfg, is_new_instance): |
1395 | 533 | address_ephemeral_resize(is_new_instance=is_new_instance) | 538 | address_ephemeral_resize(is_new_instance=is_new_instance, |
1396 | 539 | preserve_ntfs=self.ds_cfg.get( | ||
1397 | 540 | DS_CFG_KEY_PRESERVE_NTFS, False)) | ||
1398 | 534 | return | 541 | return |
1399 | 535 | 542 | ||
1400 | 536 | @property | 543 | @property |
1401 | @@ -574,17 +581,29 @@ def _has_ntfs_filesystem(devpath): | |||
1402 | 574 | return os.path.realpath(devpath) in ntfs_devices | 581 | return os.path.realpath(devpath) in ntfs_devices |
1403 | 575 | 582 | ||
1404 | 576 | 583 | ||
1407 | 577 | def can_dev_be_reformatted(devpath): | 584 | def can_dev_be_reformatted(devpath, preserve_ntfs): |
1408 | 578 | """Determine if block device devpath is newly formatted ephemeral. | 585 | """Determine if the ephemeral drive at devpath should be reformatted. |
1409 | 579 | 586 | ||
1411 | 580 | A newly formatted disk will: | 587 | A fresh ephemeral disk is formatted by Azure and will: |
1412 | 581 | a.) have a partition table (dos or gpt) | 588 | a.) have a partition table (dos or gpt) |
1413 | 582 | b.) have 1 partition that is ntfs formatted, or | 589 | b.) have 1 partition that is ntfs formatted, or |
1414 | 583 | have 2 partitions with the second partition ntfs formatted. | 590 | have 2 partitions with the second partition ntfs formatted. |
1415 | 584 | (larger instances with >2TB ephemeral disk have gpt, and will | 591 | (larger instances with >2TB ephemeral disk have gpt, and will |
1416 | 585 | have a microsoft reserved partition as part 1. LP: #1686514) | 592 | have a microsoft reserved partition as part 1. LP: #1686514) |
1417 | 586 | c.) the ntfs partition will have no files other than possibly | 593 | c.) the ntfs partition will have no files other than possibly |
1419 | 587 | 'dataloss_warning_readme.txt'""" | 594 | 'dataloss_warning_readme.txt' |
1420 | 595 | |||
1421 | 596 | User can indicate that NTFS should never be destroyed by setting | ||
1422 | 597 | DS_CFG_KEY_PRESERVE_NTFS in dscfg. | ||
1423 | 598 | If data is found on NTFS, user is warned to set DS_CFG_KEY_PRESERVE_NTFS | ||
1424 | 599 | to make sure cloud-init does not accidentally wipe their data. | ||
1425 | 600 | If cloud-init cannot mount the disk to check for data, destruction | ||
1426 | 601 | will be allowed, unless the dscfg key is set.""" | ||
1427 | 602 | if preserve_ntfs: | ||
1428 | 603 | msg = ('config says to never destroy NTFS (%s.%s), skipping checks' % | ||
1429 | 604 | (".".join(DS_CFG_PATH), DS_CFG_KEY_PRESERVE_NTFS)) | ||
1430 | 605 | return False, msg | ||
1431 | 606 | |||
1432 | 588 | if not os.path.exists(devpath): | 607 | if not os.path.exists(devpath): |
1433 | 589 | return False, 'device %s does not exist' % devpath | 608 | return False, 'device %s does not exist' % devpath |
1434 | 590 | 609 | ||
1435 | @@ -617,18 +636,27 @@ def can_dev_be_reformatted(devpath): | |||
1436 | 617 | bmsg = ('partition %s (%s) on device %s was ntfs formatted' % | 636 | bmsg = ('partition %s (%s) on device %s was ntfs formatted' % |
1437 | 618 | (cand_part, cand_path, devpath)) | 637 | (cand_part, cand_path, devpath)) |
1438 | 619 | try: | 638 | try: |
1440 | 620 | file_count = util.mount_cb(cand_path, count_files) | 639 | file_count = util.mount_cb(cand_path, count_files, mtype="ntfs", |
1441 | 640 | update_env_for_mount={'LANG': 'C'}) | ||
1442 | 621 | except util.MountFailedError as e: | 641 | except util.MountFailedError as e: |
1443 | 642 | if "mount: unknown filesystem type 'ntfs'" in str(e): | ||
1444 | 643 | return True, (bmsg + ' but this system cannot mount NTFS,' | ||
1445 | 644 | ' assuming there are no important files.' | ||
1446 | 645 | ' Formatting allowed.') | ||
1447 | 622 | return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e) | 646 | return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e) |
1448 | 623 | 647 | ||
1449 | 624 | if file_count != 0: | 648 | if file_count != 0: |
1450 | 649 | LOG.warning("it looks like you're using NTFS on the ephemeral disk, " | ||
1451 | 650 | 'to ensure that filesystem does not get wiped, set ' | ||
1452 | 651 | '%s.%s in config', '.'.join(DS_CFG_PATH), | ||
1453 | 652 | DS_CFG_KEY_PRESERVE_NTFS) | ||
1454 | 625 | return False, bmsg + ' but had %d files on it.' % file_count | 653 | return False, bmsg + ' but had %d files on it.' % file_count |
1455 | 626 | 654 | ||
1456 | 627 | return True, bmsg + ' and had no important files. Safe for reformatting.' | 655 | return True, bmsg + ' and had no important files. Safe for reformatting.' |
1457 | 628 | 656 | ||
1458 | 629 | 657 | ||
1459 | 630 | def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, | 658 | def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, |
1461 | 631 | is_new_instance=False): | 659 | is_new_instance=False, preserve_ntfs=False): |
1462 | 632 | # wait for ephemeral disk to come up | 660 | # wait for ephemeral disk to come up |
1463 | 633 | naplen = .2 | 661 | naplen = .2 |
1464 | 634 | missing = util.wait_for_files([devpath], maxwait=maxwait, naplen=naplen, | 662 | missing = util.wait_for_files([devpath], maxwait=maxwait, naplen=naplen, |
1465 | @@ -644,7 +672,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, | |||
1466 | 644 | if is_new_instance: | 672 | if is_new_instance: |
1467 | 645 | result, msg = (True, "First instance boot.") | 673 | result, msg = (True, "First instance boot.") |
1468 | 646 | else: | 674 | else: |
1470 | 647 | result, msg = can_dev_be_reformatted(devpath) | 675 | result, msg = can_dev_be_reformatted(devpath, preserve_ntfs) |
1471 | 648 | 676 | ||
1472 | 649 | LOG.debug("reformattable=%s: %s", result, msg) | 677 | LOG.debug("reformattable=%s: %s", result, msg) |
1473 | 650 | if not result: | 678 | if not result: |
1474 | @@ -958,6 +986,18 @@ def _check_freebsd_cdrom(cdrom_dev): | |||
1475 | 958 | return False | 986 | return False |
1476 | 959 | 987 | ||
1477 | 960 | 988 | ||
1478 | 989 | def _get_random_seed(): | ||
1479 | 990 | """Return content random seed file if available, otherwise, | ||
1480 | 991 | return None.""" | ||
1481 | 992 | # azure / hyper-v provides random data here | ||
1482 | 993 | # TODO. find the seed on FreeBSD platform | ||
1483 | 994 | # now update ds_cfg to reflect contents pass in config | ||
1484 | 995 | if util.is_FreeBSD(): | ||
1485 | 996 | return None | ||
1486 | 997 | return util.load_file("/sys/firmware/acpi/tables/OEM0", | ||
1487 | 998 | quiet=True, decode=False) | ||
1488 | 999 | |||
1489 | 1000 | |||
1490 | 961 | def list_possible_azure_ds_devs(): | 1001 | def list_possible_azure_ds_devs(): |
1491 | 962 | devlist = [] | 1002 | devlist = [] |
1492 | 963 | if util.is_FreeBSD(): | 1003 | if util.is_FreeBSD(): |
1493 | diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py | |||
1494 | index 0df545f..d4b758f 100644 | |||
1495 | --- a/cloudinit/sources/DataSourceCloudStack.py | |||
1496 | +++ b/cloudinit/sources/DataSourceCloudStack.py | |||
1497 | @@ -68,6 +68,10 @@ class DataSourceCloudStack(sources.DataSource): | |||
1498 | 68 | 68 | ||
1499 | 69 | dsname = 'CloudStack' | 69 | dsname = 'CloudStack' |
1500 | 70 | 70 | ||
1501 | 71 | # Setup read_url parameters per get_url_params. | ||
1502 | 72 | url_max_wait = 120 | ||
1503 | 73 | url_timeout = 50 | ||
1504 | 74 | |||
1505 | 71 | def __init__(self, sys_cfg, distro, paths): | 75 | def __init__(self, sys_cfg, distro, paths): |
1506 | 72 | sources.DataSource.__init__(self, sys_cfg, distro, paths) | 76 | sources.DataSource.__init__(self, sys_cfg, distro, paths) |
1507 | 73 | self.seed_dir = os.path.join(paths.seed_dir, 'cs') | 77 | self.seed_dir = os.path.join(paths.seed_dir, 'cs') |
1508 | @@ -80,33 +84,18 @@ class DataSourceCloudStack(sources.DataSource): | |||
1509 | 80 | self.metadata_address = "http://%s/" % (self.vr_addr,) | 84 | self.metadata_address = "http://%s/" % (self.vr_addr,) |
1510 | 81 | self.cfg = {} | 85 | self.cfg = {} |
1511 | 82 | 86 | ||
1519 | 83 | def _get_url_settings(self): | 87 | def wait_for_metadata_service(self): |
1520 | 84 | mcfg = self.ds_cfg | 88 | url_params = self.get_url_params() |
1514 | 85 | max_wait = 120 | ||
1515 | 86 | try: | ||
1516 | 87 | max_wait = int(mcfg.get("max_wait", max_wait)) | ||
1517 | 88 | except Exception: | ||
1518 | 89 | util.logexc(LOG, "Failed to get max wait. using %s", max_wait) | ||
1521 | 90 | 89 | ||
1523 | 91 | if max_wait == 0: | 90 | if url_params.max_wait_seconds <= 0: |
1524 | 92 | return False | 91 | return False |
1525 | 93 | 92 | ||
1526 | 94 | timeout = 50 | ||
1527 | 95 | try: | ||
1528 | 96 | timeout = int(mcfg.get("timeout", timeout)) | ||
1529 | 97 | except Exception: | ||
1530 | 98 | util.logexc(LOG, "Failed to get timeout, using %s", timeout) | ||
1531 | 99 | |||
1532 | 100 | return (max_wait, timeout) | ||
1533 | 101 | |||
1534 | 102 | def wait_for_metadata_service(self): | ||
1535 | 103 | (max_wait, timeout) = self._get_url_settings() | ||
1536 | 104 | |||
1537 | 105 | urls = [uhelp.combine_url(self.metadata_address, | 93 | urls = [uhelp.combine_url(self.metadata_address, |
1538 | 106 | 'latest/meta-data/instance-id')] | 94 | 'latest/meta-data/instance-id')] |
1539 | 107 | start_time = time.time() | 95 | start_time = time.time() |
1542 | 108 | url = uhelp.wait_for_url(urls=urls, max_wait=max_wait, | 96 | url = uhelp.wait_for_url( |
1543 | 109 | timeout=timeout, status_cb=LOG.warn) | 97 | urls=urls, max_wait=url_params.max_wait_seconds, |
1544 | 98 | timeout=url_params.timeout_seconds, status_cb=LOG.warn) | ||
1545 | 110 | 99 | ||
1546 | 111 | if url: | 100 | if url: |
1547 | 112 | LOG.debug("Using metadata source: '%s'", url) | 101 | LOG.debug("Using metadata source: '%s'", url) |
1548 | diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py | |||
1549 | index c7b5fe5..4cb2897 100644 | |||
1550 | --- a/cloudinit/sources/DataSourceConfigDrive.py | |||
1551 | +++ b/cloudinit/sources/DataSourceConfigDrive.py | |||
1552 | @@ -43,7 +43,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): | |||
1553 | 43 | self.version = None | 43 | self.version = None |
1554 | 44 | self.ec2_metadata = None | 44 | self.ec2_metadata = None |
1555 | 45 | self._network_config = None | 45 | self._network_config = None |
1557 | 46 | self.network_json = None | 46 | self.network_json = sources.UNSET |
1558 | 47 | self.network_eni = None | 47 | self.network_eni = None |
1559 | 48 | self.known_macs = None | 48 | self.known_macs = None |
1560 | 49 | self.files = {} | 49 | self.files = {} |
1561 | @@ -69,7 +69,8 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): | |||
1562 | 69 | util.logexc(LOG, "Failed reading config drive from %s", sdir) | 69 | util.logexc(LOG, "Failed reading config drive from %s", sdir) |
1563 | 70 | 70 | ||
1564 | 71 | if not found: | 71 | if not found: |
1566 | 72 | for dev in find_candidate_devs(): | 72 | dslist = self.sys_cfg.get('datasource_list') |
1567 | 73 | for dev in find_candidate_devs(dslist=dslist): | ||
1568 | 73 | try: | 74 | try: |
1569 | 74 | # Set mtype if freebsd and turn off sync | 75 | # Set mtype if freebsd and turn off sync |
1570 | 75 | if dev.startswith("/dev/cd"): | 76 | if dev.startswith("/dev/cd"): |
1571 | @@ -148,7 +149,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): | |||
1572 | 148 | @property | 149 | @property |
1573 | 149 | def network_config(self): | 150 | def network_config(self): |
1574 | 150 | if self._network_config is None: | 151 | if self._network_config is None: |
1576 | 151 | if self.network_json is not None: | 152 | if self.network_json not in (None, sources.UNSET): |
1577 | 152 | LOG.debug("network config provided via network_json") | 153 | LOG.debug("network config provided via network_json") |
1578 | 153 | self._network_config = openstack.convert_net_json( | 154 | self._network_config = openstack.convert_net_json( |
1579 | 154 | self.network_json, known_macs=self.known_macs) | 155 | self.network_json, known_macs=self.known_macs) |
1580 | @@ -211,7 +212,7 @@ def write_injected_files(files): | |||
1581 | 211 | util.logexc(LOG, "Failed writing file: %s", filename) | 212 | util.logexc(LOG, "Failed writing file: %s", filename) |
1582 | 212 | 213 | ||
1583 | 213 | 214 | ||
1585 | 214 | def find_candidate_devs(probe_optical=True): | 215 | def find_candidate_devs(probe_optical=True, dslist=None): |
1586 | 215 | """Return a list of devices that may contain the config drive. | 216 | """Return a list of devices that may contain the config drive. |
1587 | 216 | 217 | ||
1588 | 217 | The returned list is sorted by search order where the first item has | 218 | The returned list is sorted by search order where the first item has |
1589 | @@ -227,6 +228,9 @@ def find_candidate_devs(probe_optical=True): | |||
1590 | 227 | * either vfat or iso9660 formated | 228 | * either vfat or iso9660 formated |
1591 | 228 | * labeled with 'config-2' or 'CONFIG-2' | 229 | * labeled with 'config-2' or 'CONFIG-2' |
1592 | 229 | """ | 230 | """ |
1593 | 231 | if dslist is None: | ||
1594 | 232 | dslist = [] | ||
1595 | 233 | |||
1596 | 230 | # query optical drive to get it in blkid cache for 2.6 kernels | 234 | # query optical drive to get it in blkid cache for 2.6 kernels |
1597 | 231 | if probe_optical: | 235 | if probe_optical: |
1598 | 232 | for device in OPTICAL_DEVICES: | 236 | for device in OPTICAL_DEVICES: |
1599 | @@ -257,7 +261,8 @@ def find_candidate_devs(probe_optical=True): | |||
1600 | 257 | devices = [d for d in candidates | 261 | devices = [d for d in candidates |
1601 | 258 | if d in by_label or not util.is_partition(d)] | 262 | if d in by_label or not util.is_partition(d)] |
1602 | 259 | 263 | ||
1604 | 260 | if devices: | 264 | LOG.debug("devices=%s dslist=%s", devices, dslist) |
1605 | 265 | if devices and "IBMCloud" in dslist: | ||
1606 | 261 | # IBMCloud uses config-2 label, but limited to a single UUID. | 266 | # IBMCloud uses config-2 label, but limited to a single UUID. |
1607 | 262 | ibm_platform, ibm_path = get_ibm_platform() | 267 | ibm_platform, ibm_path = get_ibm_platform() |
1608 | 263 | if ibm_path in devices: | 268 | if ibm_path in devices: |
1609 | diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py | |||
1610 | index 21e9ef8..968ab3f 100644 | |||
1611 | --- a/cloudinit/sources/DataSourceEc2.py | |||
1612 | +++ b/cloudinit/sources/DataSourceEc2.py | |||
1613 | @@ -27,8 +27,6 @@ SKIP_METADATA_URL_CODES = frozenset([uhelp.NOT_FOUND]) | |||
1614 | 27 | STRICT_ID_PATH = ("datasource", "Ec2", "strict_id") | 27 | STRICT_ID_PATH = ("datasource", "Ec2", "strict_id") |
1615 | 28 | STRICT_ID_DEFAULT = "warn" | 28 | STRICT_ID_DEFAULT = "warn" |
1616 | 29 | 29 | ||
1617 | 30 | _unset = "_unset" | ||
1618 | 31 | |||
1619 | 32 | 30 | ||
1620 | 33 | class Platforms(object): | 31 | class Platforms(object): |
1621 | 34 | # TODO Rename and move to cloudinit.cloud.CloudNames | 32 | # TODO Rename and move to cloudinit.cloud.CloudNames |
1622 | @@ -59,15 +57,16 @@ class DataSourceEc2(sources.DataSource): | |||
1623 | 59 | # for extended metadata content. IPv6 support comes in 2016-09-02 | 57 | # for extended metadata content. IPv6 support comes in 2016-09-02 |
1624 | 60 | extended_metadata_versions = ['2016-09-02'] | 58 | extended_metadata_versions = ['2016-09-02'] |
1625 | 61 | 59 | ||
1626 | 60 | # Setup read_url parameters per get_url_params. | ||
1627 | 61 | url_max_wait = 120 | ||
1628 | 62 | url_timeout = 50 | ||
1629 | 63 | |||
1630 | 62 | _cloud_platform = None | 64 | _cloud_platform = None |
1631 | 63 | 65 | ||
1633 | 64 | _network_config = _unset # Used for caching calculated network config v1 | 66 | _network_config = sources.UNSET # Used to cache calculated network cfg v1 |
1634 | 65 | 67 | ||
1635 | 66 | # Whether we want to get network configuration from the metadata service. | 68 | # Whether we want to get network configuration from the metadata service. |
1640 | 67 | get_network_metadata = False | 69 | perform_dhcp_setup = False |
1637 | 68 | |||
1638 | 69 | # Track the discovered fallback nic for use in configuration generation. | ||
1639 | 70 | _fallback_interface = None | ||
1641 | 71 | 70 | ||
1642 | 72 | def __init__(self, sys_cfg, distro, paths): | 71 | def __init__(self, sys_cfg, distro, paths): |
1643 | 73 | super(DataSourceEc2, self).__init__(sys_cfg, distro, paths) | 72 | super(DataSourceEc2, self).__init__(sys_cfg, distro, paths) |
1644 | @@ -98,7 +97,7 @@ class DataSourceEc2(sources.DataSource): | |||
1645 | 98 | elif self.cloud_platform == Platforms.NO_EC2_METADATA: | 97 | elif self.cloud_platform == Platforms.NO_EC2_METADATA: |
1646 | 99 | return False | 98 | return False |
1647 | 100 | 99 | ||
1649 | 101 | if self.get_network_metadata: # Setup networking in init-local stage. | 100 | if self.perform_dhcp_setup: # Setup networking in init-local stage. |
1650 | 102 | if util.is_FreeBSD(): | 101 | if util.is_FreeBSD(): |
1651 | 103 | LOG.debug("FreeBSD doesn't support running dhclient with -sf") | 102 | LOG.debug("FreeBSD doesn't support running dhclient with -sf") |
1652 | 104 | return False | 103 | return False |
1653 | @@ -158,27 +157,11 @@ class DataSourceEc2(sources.DataSource): | |||
1654 | 158 | else: | 157 | else: |
1655 | 159 | return self.metadata['instance-id'] | 158 | return self.metadata['instance-id'] |
1656 | 160 | 159 | ||
1657 | 161 | def _get_url_settings(self): | ||
1658 | 162 | mcfg = self.ds_cfg | ||
1659 | 163 | max_wait = 120 | ||
1660 | 164 | try: | ||
1661 | 165 | max_wait = int(mcfg.get("max_wait", max_wait)) | ||
1662 | 166 | except Exception: | ||
1663 | 167 | util.logexc(LOG, "Failed to get max wait. using %s", max_wait) | ||
1664 | 168 | |||
1665 | 169 | timeout = 50 | ||
1666 | 170 | try: | ||
1667 | 171 | timeout = max(0, int(mcfg.get("timeout", timeout))) | ||
1668 | 172 | except Exception: | ||
1669 | 173 | util.logexc(LOG, "Failed to get timeout, using %s", timeout) | ||
1670 | 174 | |||
1671 | 175 | return (max_wait, timeout) | ||
1672 | 176 | |||
1673 | 177 | def wait_for_metadata_service(self): | 160 | def wait_for_metadata_service(self): |
1674 | 178 | mcfg = self.ds_cfg | 161 | mcfg = self.ds_cfg |
1675 | 179 | 162 | ||
1678 | 180 | (max_wait, timeout) = self._get_url_settings() | 163 | url_params = self.get_url_params() |
1679 | 181 | if max_wait <= 0: | 164 | if url_params.max_wait_seconds <= 0: |
1680 | 182 | return False | 165 | return False |
1681 | 183 | 166 | ||
1682 | 184 | # Remove addresses from the list that wont resolve. | 167 | # Remove addresses from the list that wont resolve. |
1683 | @@ -205,7 +188,8 @@ class DataSourceEc2(sources.DataSource): | |||
1684 | 205 | 188 | ||
1685 | 206 | start_time = time.time() | 189 | start_time = time.time() |
1686 | 207 | url = uhelp.wait_for_url( | 190 | url = uhelp.wait_for_url( |
1688 | 208 | urls=urls, max_wait=max_wait, timeout=timeout, status_cb=LOG.warn) | 191 | urls=urls, max_wait=url_params.max_wait_seconds, |
1689 | 192 | timeout=url_params.timeout_seconds, status_cb=LOG.warn) | ||
1690 | 209 | 193 | ||
1691 | 210 | if url: | 194 | if url: |
1692 | 211 | self.metadata_address = url2base[url] | 195 | self.metadata_address = url2base[url] |
1693 | @@ -310,11 +294,11 @@ class DataSourceEc2(sources.DataSource): | |||
1694 | 310 | @property | 294 | @property |
1695 | 311 | def network_config(self): | 295 | def network_config(self): |
1696 | 312 | """Return a network config dict for rendering ENI or netplan files.""" | 296 | """Return a network config dict for rendering ENI or netplan files.""" |
1698 | 313 | if self._network_config != _unset: | 297 | if self._network_config != sources.UNSET: |
1699 | 314 | return self._network_config | 298 | return self._network_config |
1700 | 315 | 299 | ||
1701 | 316 | if self.metadata is None: | 300 | if self.metadata is None: |
1703 | 317 | # this would happen if get_data hadn't been called. leave as _unset | 301 | # this would happen if get_data hadn't been called. leave as UNSET |
1704 | 318 | LOG.warning( | 302 | LOG.warning( |
1705 | 319 | "Unexpected call to network_config when metadata is None.") | 303 | "Unexpected call to network_config when metadata is None.") |
1706 | 320 | return None | 304 | return None |
1707 | @@ -353,9 +337,7 @@ class DataSourceEc2(sources.DataSource): | |||
1708 | 353 | self._fallback_interface = _legacy_fbnic | 337 | self._fallback_interface = _legacy_fbnic |
1709 | 354 | self.fallback_nic = None | 338 | self.fallback_nic = None |
1710 | 355 | else: | 339 | else: |
1714 | 356 | self._fallback_interface = net.find_fallback_nic() | 340 | return super(DataSourceEc2, self).fallback_interface |
1712 | 357 | if self._fallback_interface is None: | ||
1713 | 358 | LOG.warning("Did not find a fallback interface on EC2.") | ||
1715 | 359 | return self._fallback_interface | 341 | return self._fallback_interface |
1716 | 360 | 342 | ||
1717 | 361 | def _crawl_metadata(self): | 343 | def _crawl_metadata(self): |
1718 | @@ -390,7 +372,7 @@ class DataSourceEc2Local(DataSourceEc2): | |||
1719 | 390 | metadata service. If the metadata service provides network configuration | 372 | metadata service. If the metadata service provides network configuration |
1720 | 391 | then render the network configuration for that instance based on metadata. | 373 | then render the network configuration for that instance based on metadata. |
1721 | 392 | """ | 374 | """ |
1723 | 393 | get_network_metadata = True # Get metadata network config if present | 375 | perform_dhcp_setup = True # Use dhcp before querying metadata |
1724 | 394 | 376 | ||
1725 | 395 | def get_data(self): | 377 | def get_data(self): |
1726 | 396 | supported_platforms = (Platforms.AWS,) | 378 | supported_platforms = (Platforms.AWS,) |
1727 | diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py | |||
1728 | index aa56add..bcb3854 100644 | |||
1729 | --- a/cloudinit/sources/DataSourceMAAS.py | |||
1730 | +++ b/cloudinit/sources/DataSourceMAAS.py | |||
1731 | @@ -198,7 +198,7 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None, | |||
1732 | 198 | If version is None, then <version>/ will not be used. | 198 | If version is None, then <version>/ will not be used. |
1733 | 199 | """ | 199 | """ |
1734 | 200 | if read_file_or_url is None: | 200 | if read_file_or_url is None: |
1736 | 201 | read_file_or_url = util.read_file_or_url | 201 | read_file_or_url = url_helper.read_file_or_url |
1737 | 202 | 202 | ||
1738 | 203 | if seed_url.endswith("/"): | 203 | if seed_url.endswith("/"): |
1739 | 204 | seed_url = seed_url[:-1] | 204 | seed_url = seed_url[:-1] |
1740 | diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py | |||
1741 | index 5d3a8dd..2daea59 100644 | |||
1742 | --- a/cloudinit/sources/DataSourceNoCloud.py | |||
1743 | +++ b/cloudinit/sources/DataSourceNoCloud.py | |||
1744 | @@ -78,7 +78,7 @@ class DataSourceNoCloud(sources.DataSource): | |||
1745 | 78 | LOG.debug("Using seeded data from %s", path) | 78 | LOG.debug("Using seeded data from %s", path) |
1746 | 79 | mydata = _merge_new_seed(mydata, seeded) | 79 | mydata = _merge_new_seed(mydata, seeded) |
1747 | 80 | break | 80 | break |
1749 | 81 | except ValueError as e: | 81 | except ValueError: |
1750 | 82 | pass | 82 | pass |
1751 | 83 | 83 | ||
1752 | 84 | # If the datasource config had a 'seedfrom' entry, then that takes | 84 | # If the datasource config had a 'seedfrom' entry, then that takes |
1753 | @@ -117,7 +117,7 @@ class DataSourceNoCloud(sources.DataSource): | |||
1754 | 117 | try: | 117 | try: |
1755 | 118 | seeded = util.mount_cb(dev, _pp2d_callback, | 118 | seeded = util.mount_cb(dev, _pp2d_callback, |
1756 | 119 | pp2d_kwargs) | 119 | pp2d_kwargs) |
1758 | 120 | except ValueError as e: | 120 | except ValueError: |
1759 | 121 | if dev in label_list: | 121 | if dev in label_list: |
1760 | 122 | LOG.warning("device %s with label=%s not a" | 122 | LOG.warning("device %s with label=%s not a" |
1761 | 123 | "valid seed.", dev, label) | 123 | "valid seed.", dev, label) |
1762 | diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py | |||
1763 | index d4a4111..16c1078 100644 | |||
1764 | --- a/cloudinit/sources/DataSourceOpenNebula.py | |||
1765 | +++ b/cloudinit/sources/DataSourceOpenNebula.py | |||
1766 | @@ -378,7 +378,7 @@ def read_context_disk_dir(source_dir, asuser=None): | |||
1767 | 378 | if asuser is not None: | 378 | if asuser is not None: |
1768 | 379 | try: | 379 | try: |
1769 | 380 | pwd.getpwnam(asuser) | 380 | pwd.getpwnam(asuser) |
1771 | 381 | except KeyError as e: | 381 | except KeyError: |
1772 | 382 | raise BrokenContextDiskDir( | 382 | raise BrokenContextDiskDir( |
1773 | 383 | "configured user '{user}' does not exist".format( | 383 | "configured user '{user}' does not exist".format( |
1774 | 384 | user=asuser)) | 384 | user=asuser)) |
1775 | diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py | |||
1776 | index fb166ae..365af96 100644 | |||
1777 | --- a/cloudinit/sources/DataSourceOpenStack.py | |||
1778 | +++ b/cloudinit/sources/DataSourceOpenStack.py | |||
1779 | @@ -7,6 +7,7 @@ | |||
1780 | 7 | import time | 7 | import time |
1781 | 8 | 8 | ||
1782 | 9 | from cloudinit import log as logging | 9 | from cloudinit import log as logging |
1783 | 10 | from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError | ||
1784 | 10 | from cloudinit import sources | 11 | from cloudinit import sources |
1785 | 11 | from cloudinit import url_helper | 12 | from cloudinit import url_helper |
1786 | 12 | from cloudinit import util | 13 | from cloudinit import util |
1787 | @@ -22,51 +23,37 @@ DEFAULT_METADATA = { | |||
1788 | 22 | "instance-id": DEFAULT_IID, | 23 | "instance-id": DEFAULT_IID, |
1789 | 23 | } | 24 | } |
1790 | 24 | 25 | ||
1791 | 26 | # OpenStack DMI constants | ||
1792 | 27 | DMI_PRODUCT_NOVA = 'OpenStack Nova' | ||
1793 | 28 | DMI_PRODUCT_COMPUTE = 'OpenStack Compute' | ||
1794 | 29 | VALID_DMI_PRODUCT_NAMES = [DMI_PRODUCT_NOVA, DMI_PRODUCT_COMPUTE] | ||
1795 | 30 | DMI_ASSET_TAG_OPENTELEKOM = 'OpenTelekomCloud' | ||
1796 | 31 | VALID_DMI_ASSET_TAGS = [DMI_ASSET_TAG_OPENTELEKOM] | ||
1797 | 32 | |||
1798 | 25 | 33 | ||
1799 | 26 | class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): | 34 | class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): |
1800 | 27 | 35 | ||
1801 | 28 | dsname = "OpenStack" | 36 | dsname = "OpenStack" |
1802 | 29 | 37 | ||
1803 | 38 | _network_config = sources.UNSET # Used to cache calculated network cfg v1 | ||
1804 | 39 | |||
1805 | 40 | # Whether we want to get network configuration from the metadata service. | ||
1806 | 41 | perform_dhcp_setup = False | ||
1807 | 42 | |||
1808 | 30 | def __init__(self, sys_cfg, distro, paths): | 43 | def __init__(self, sys_cfg, distro, paths): |
1809 | 31 | super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths) | 44 | super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths) |
1810 | 32 | self.metadata_address = None | 45 | self.metadata_address = None |
1811 | 33 | self.ssl_details = util.fetch_ssl_details(self.paths) | 46 | self.ssl_details = util.fetch_ssl_details(self.paths) |
1812 | 34 | self.version = None | 47 | self.version = None |
1813 | 35 | self.files = {} | 48 | self.files = {} |
1815 | 36 | self.ec2_metadata = None | 49 | self.ec2_metadata = sources.UNSET |
1816 | 50 | self.network_json = sources.UNSET | ||
1817 | 37 | 51 | ||
1818 | 38 | def __str__(self): | 52 | def __str__(self): |
1819 | 39 | root = sources.DataSource.__str__(self) | 53 | root = sources.DataSource.__str__(self) |
1820 | 40 | mstr = "%s [%s,ver=%s]" % (root, self.dsmode, self.version) | 54 | mstr = "%s [%s,ver=%s]" % (root, self.dsmode, self.version) |
1821 | 41 | return mstr | 55 | return mstr |
1822 | 42 | 56 | ||
1823 | 43 | def _get_url_settings(self): | ||
1824 | 44 | # TODO(harlowja): this is shared with ec2 datasource, we should just | ||
1825 | 45 | # move it to a shared location instead... | ||
1826 | 46 | # Note: the defaults here are different though. | ||
1827 | 47 | |||
1828 | 48 | # max_wait < 0 indicates do not wait | ||
1829 | 49 | max_wait = -1 | ||
1830 | 50 | timeout = 10 | ||
1831 | 51 | retries = 5 | ||
1832 | 52 | |||
1833 | 53 | try: | ||
1834 | 54 | max_wait = int(self.ds_cfg.get("max_wait", max_wait)) | ||
1835 | 55 | except Exception: | ||
1836 | 56 | util.logexc(LOG, "Failed to get max wait. using %s", max_wait) | ||
1837 | 57 | |||
1838 | 58 | try: | ||
1839 | 59 | timeout = max(0, int(self.ds_cfg.get("timeout", timeout))) | ||
1840 | 60 | except Exception: | ||
1841 | 61 | util.logexc(LOG, "Failed to get timeout, using %s", timeout) | ||
1842 | 62 | |||
1843 | 63 | try: | ||
1844 | 64 | retries = int(self.ds_cfg.get("retries", retries)) | ||
1845 | 65 | except Exception: | ||
1846 | 66 | util.logexc(LOG, "Failed to get retries. using %s", retries) | ||
1847 | 67 | |||
1848 | 68 | return (max_wait, timeout, retries) | ||
1849 | 69 | |||
1850 | 70 | def wait_for_metadata_service(self): | 57 | def wait_for_metadata_service(self): |
1851 | 71 | urls = self.ds_cfg.get("metadata_urls", [DEF_MD_URL]) | 58 | urls = self.ds_cfg.get("metadata_urls", [DEF_MD_URL]) |
1852 | 72 | filtered = [x for x in urls if util.is_resolvable_url(x)] | 59 | filtered = [x for x in urls if util.is_resolvable_url(x)] |
1853 | @@ -86,10 +73,11 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): | |||
1854 | 86 | md_urls.append(md_url) | 73 | md_urls.append(md_url) |
1855 | 87 | url2base[md_url] = url | 74 | url2base[md_url] = url |
1856 | 88 | 75 | ||
1858 | 89 | (max_wait, timeout, _retries) = self._get_url_settings() | 76 | url_params = self.get_url_params() |
1859 | 90 | start_time = time.time() | 77 | start_time = time.time() |
1862 | 91 | avail_url = url_helper.wait_for_url(urls=md_urls, max_wait=max_wait, | 78 | avail_url = url_helper.wait_for_url( |
1863 | 92 | timeout=timeout) | 79 | urls=md_urls, max_wait=url_params.max_wait_seconds, |
1864 | 80 | timeout=url_params.timeout_seconds) | ||
1865 | 93 | if avail_url: | 81 | if avail_url: |
1866 | 94 | LOG.debug("Using metadata source: '%s'", url2base[avail_url]) | 82 | LOG.debug("Using metadata source: '%s'", url2base[avail_url]) |
1867 | 95 | else: | 83 | else: |
1868 | @@ -99,38 +87,66 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): | |||
1869 | 99 | self.metadata_address = url2base.get(avail_url) | 87 | self.metadata_address = url2base.get(avail_url) |
1870 | 100 | return bool(avail_url) | 88 | return bool(avail_url) |
1871 | 101 | 89 | ||
1878 | 102 | def _get_data(self): | 90 | def check_instance_id(self, sys_cfg): |
1879 | 103 | try: | 91 | # quickly (local check only) if self.instance_id is still valid |
1880 | 104 | if not self.wait_for_metadata_service(): | 92 | return sources.instance_id_matches_system_uuid(self.get_instance_id()) |
1875 | 105 | return False | ||
1876 | 106 | except IOError: | ||
1877 | 107 | return False | ||
1881 | 108 | 93 | ||
1883 | 109 | (_max_wait, timeout, retries) = self._get_url_settings() | 94 | @property |
1884 | 95 | def network_config(self): | ||
1885 | 96 | """Return a network config dict for rendering ENI or netplan files.""" | ||
1886 | 97 | if self._network_config != sources.UNSET: | ||
1887 | 98 | return self._network_config | ||
1888 | 99 | |||
1889 | 100 | # RELEASE_BLOCKER: SRU to Xenial and Artful SRU should not provide | ||
1890 | 101 | # network_config by default unless configured in /etc/cloud/cloud.cfg*. | ||
1891 | 102 | # Patch Xenial and Artful before release to default to False. | ||
1892 | 103 | if util.is_false(self.ds_cfg.get('apply_network_config', True)): | ||
1893 | 104 | self._network_config = None | ||
1894 | 105 | return self._network_config | ||
1895 | 106 | if self.network_json == sources.UNSET: | ||
1896 | 107 | # this would happen if get_data hadn't been called. leave as UNSET | ||
1897 | 108 | LOG.warning( | ||
1898 | 109 | 'Unexpected call to network_config when network_json is None.') | ||
1899 | 110 | return None | ||
1900 | 111 | |||
1901 | 112 | LOG.debug('network config provided via network_json') | ||
1902 | 113 | self._network_config = openstack.convert_net_json( | ||
1903 | 114 | self.network_json, known_macs=None) | ||
1904 | 115 | return self._network_config | ||
1905 | 110 | 116 | ||
1919 | 111 | try: | 117 | def _get_data(self): |
1920 | 112 | results = util.log_time(LOG.debug, | 118 | """Crawl metadata, parse and persist that data for this instance. |
1921 | 113 | 'Crawl of openstack metadata service', | 119 | |
1922 | 114 | read_metadata_service, | 120 | @return: True when metadata discovered indicates OpenStack datasource. |
1923 | 115 | args=[self.metadata_address], | 121 | False when unable to contact metadata service or when metadata |
1924 | 116 | kwargs={'ssl_details': self.ssl_details, | 122 | format is invalid or disabled. |
1925 | 117 | 'retries': retries, | 123 | """ |
1926 | 118 | 'timeout': timeout}) | 124 | if not detect_openstack(): |
1914 | 119 | except openstack.NonReadable: | ||
1915 | 120 | return False | ||
1916 | 121 | except (openstack.BrokenMetadata, IOError): | ||
1917 | 122 | util.logexc(LOG, "Broken metadata address %s", | ||
1918 | 123 | self.metadata_address) | ||
1927 | 124 | return False | 125 | return False |
1928 | 126 | if self.perform_dhcp_setup: # Setup networking in init-local stage. | ||
1929 | 127 | try: | ||
1930 | 128 | with EphemeralDHCPv4(self.fallback_interface): | ||
1931 | 129 | results = util.log_time( | ||
1932 | 130 | logfunc=LOG.debug, msg='Crawl of metadata service', | ||
1933 | 131 | func=self._crawl_metadata) | ||
1934 | 132 | except (NoDHCPLeaseError, sources.InvalidMetaDataException) as e: | ||
1935 | 133 | util.logexc(LOG, str(e)) | ||
1936 | 134 | return False | ||
1937 | 135 | else: | ||
1938 | 136 | try: | ||
1939 | 137 | results = self._crawl_metadata() | ||
1940 | 138 | except sources.InvalidMetaDataException as e: | ||
1941 | 139 | util.logexc(LOG, str(e)) | ||
1942 | 140 | return False | ||
1943 | 125 | 141 | ||
1944 | 126 | self.dsmode = self._determine_dsmode([results.get('dsmode')]) | 142 | self.dsmode = self._determine_dsmode([results.get('dsmode')]) |
1945 | 127 | if self.dsmode == sources.DSMODE_DISABLED: | 143 | if self.dsmode == sources.DSMODE_DISABLED: |
1946 | 128 | return False | 144 | return False |
1947 | 129 | |||
1948 | 130 | md = results.get('metadata', {}) | 145 | md = results.get('metadata', {}) |
1949 | 131 | md = util.mergemanydict([md, DEFAULT_METADATA]) | 146 | md = util.mergemanydict([md, DEFAULT_METADATA]) |
1950 | 132 | self.metadata = md | 147 | self.metadata = md |
1951 | 133 | self.ec2_metadata = results.get('ec2-metadata') | 148 | self.ec2_metadata = results.get('ec2-metadata') |
1952 | 149 | self.network_json = results.get('networkdata') | ||
1953 | 134 | self.userdata_raw = results.get('userdata') | 150 | self.userdata_raw = results.get('userdata') |
1954 | 135 | self.version = results['version'] | 151 | self.version = results['version'] |
1955 | 136 | self.files.update(results.get('files', {})) | 152 | self.files.update(results.get('files', {})) |
1956 | @@ -145,9 +161,50 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): | |||
1957 | 145 | 161 | ||
1958 | 146 | return True | 162 | return True |
1959 | 147 | 163 | ||
1963 | 148 | def check_instance_id(self, sys_cfg): | 164 | def _crawl_metadata(self): |
1964 | 149 | # quickly (local check only) if self.instance_id is still valid | 165 | """Crawl metadata service when available. |
1965 | 150 | return sources.instance_id_matches_system_uuid(self.get_instance_id()) | 166 | |
1966 | 167 | @returns: Dictionary with all metadata discovered for this datasource. | ||
1967 | 168 | @raise: InvalidMetaDataException on unreadable or broken | ||
1968 | 169 | metadata. | ||
1969 | 170 | """ | ||
1970 | 171 | try: | ||
1971 | 172 | if not self.wait_for_metadata_service(): | ||
1972 | 173 | raise sources.InvalidMetaDataException( | ||
1973 | 174 | 'No active metadata service found') | ||
1974 | 175 | except IOError as e: | ||
1975 | 176 | raise sources.InvalidMetaDataException( | ||
1976 | 177 | 'IOError contacting metadata service: {error}'.format( | ||
1977 | 178 | error=str(e))) | ||
1978 | 179 | |||
1979 | 180 | url_params = self.get_url_params() | ||
1980 | 181 | |||
1981 | 182 | try: | ||
1982 | 183 | result = util.log_time( | ||
1983 | 184 | LOG.debug, 'Crawl of openstack metadata service', | ||
1984 | 185 | read_metadata_service, args=[self.metadata_address], | ||
1985 | 186 | kwargs={'ssl_details': self.ssl_details, | ||
1986 | 187 | 'retries': url_params.num_retries, | ||
1987 | 188 | 'timeout': url_params.timeout_seconds}) | ||
1988 | 189 | except openstack.NonReadable as e: | ||
1989 | 190 | raise sources.InvalidMetaDataException(str(e)) | ||
1990 | 191 | except (openstack.BrokenMetadata, IOError): | ||
1991 | 192 | msg = 'Broken metadata address {addr}'.format( | ||
1992 | 193 | addr=self.metadata_address) | ||
1993 | 194 | raise sources.InvalidMetaDataException(msg) | ||
1994 | 195 | return result | ||
1995 | 196 | |||
1996 | 197 | |||
1997 | 198 | class DataSourceOpenStackLocal(DataSourceOpenStack): | ||
1998 | 199 | """Run in init-local using a dhcp discovery prior to metadata crawl. | ||
1999 | 200 | |||
2000 | 201 | In init-local, no network is available. This subclass sets up minimal | ||
2001 | 202 | networking with dhclient on a viable nic so that it can talk to the | ||
2002 | 203 | metadata service. If the metadata service provides network configuration | ||
2003 | 204 | then render the network configuration for that instance based on metadata. | ||
2004 | 205 | """ | ||
2005 | 206 | |||
2006 | 207 | perform_dhcp_setup = True # Get metadata network config if present | ||
2007 | 151 | 208 | ||
2008 | 152 | 209 | ||
2009 | 153 | def read_metadata_service(base_url, ssl_details=None, | 210 | def read_metadata_service(base_url, ssl_details=None, |
2010 | @@ -157,8 +214,23 @@ def read_metadata_service(base_url, ssl_details=None, | |||
2011 | 157 | return reader.read_v2() | 214 | return reader.read_v2() |
2012 | 158 | 215 | ||
2013 | 159 | 216 | ||
2014 | 217 | def detect_openstack(): | ||
2015 | 218 | """Return True when a potential OpenStack platform is detected.""" | ||
2016 | 219 | if not util.is_x86(): | ||
2017 | 220 | return True # Non-Intel cpus don't properly report dmi product names | ||
2018 | 221 | product_name = util.read_dmi_data('system-product-name') | ||
2019 | 222 | if product_name in VALID_DMI_PRODUCT_NAMES: | ||
2020 | 223 | return True | ||
2021 | 224 | elif util.read_dmi_data('chassis-asset-tag') in VALID_DMI_ASSET_TAGS: | ||
2022 | 225 | return True | ||
2023 | 226 | elif util.get_proc_env(1).get('product_name') == DMI_PRODUCT_NOVA: | ||
2024 | 227 | return True | ||
2025 | 228 | return False | ||
2026 | 229 | |||
2027 | 230 | |||
2028 | 160 | # Used to match classes to dependencies | 231 | # Used to match classes to dependencies |
2029 | 161 | datasources = [ | 232 | datasources = [ |
2030 | 233 | (DataSourceOpenStackLocal, (sources.DEP_FILESYSTEM,)), | ||
2031 | 162 | (DataSourceOpenStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), | 234 | (DataSourceOpenStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), |
2032 | 163 | ] | 235 | ] |
2033 | 164 | 236 | ||
2034 | diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py | |||
2035 | index 4ea00eb..f92e8b5 100644 | |||
2036 | --- a/cloudinit/sources/DataSourceSmartOS.py | |||
2037 | +++ b/cloudinit/sources/DataSourceSmartOS.py | |||
2038 | @@ -17,7 +17,7 @@ | |||
2039 | 17 | # of a serial console. | 17 | # of a serial console. |
2040 | 18 | # | 18 | # |
2041 | 19 | # Certain behavior is defined by the DataDictionary | 19 | # Certain behavior is defined by the DataDictionary |
2043 | 20 | # http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html | 20 | # https://eng.joyent.com/mdata/datadict.html |
2044 | 21 | # Comments with "@datadictionary" are snippets of the definition | 21 | # Comments with "@datadictionary" are snippets of the definition |
2045 | 22 | 22 | ||
2046 | 23 | import base64 | 23 | import base64 |
2047 | @@ -165,9 +165,8 @@ class DataSourceSmartOS(sources.DataSource): | |||
2048 | 165 | 165 | ||
2049 | 166 | dsname = "Joyent" | 166 | dsname = "Joyent" |
2050 | 167 | 167 | ||
2054 | 168 | _unset = "_unset" | 168 | smartos_type = sources.UNSET |
2055 | 169 | smartos_type = _unset | 169 | md_client = sources.UNSET |
2053 | 170 | md_client = _unset | ||
2056 | 171 | 170 | ||
2057 | 172 | def __init__(self, sys_cfg, distro, paths): | 171 | def __init__(self, sys_cfg, distro, paths): |
2058 | 173 | sources.DataSource.__init__(self, sys_cfg, distro, paths) | 172 | sources.DataSource.__init__(self, sys_cfg, distro, paths) |
2059 | @@ -189,12 +188,12 @@ class DataSourceSmartOS(sources.DataSource): | |||
2060 | 189 | return "%s [client=%s]" % (root, self.md_client) | 188 | return "%s [client=%s]" % (root, self.md_client) |
2061 | 190 | 189 | ||
2062 | 191 | def _init(self): | 190 | def _init(self): |
2064 | 192 | if self.smartos_type == self._unset: | 191 | if self.smartos_type == sources.UNSET: |
2065 | 193 | self.smartos_type = get_smartos_environ() | 192 | self.smartos_type = get_smartos_environ() |
2066 | 194 | if self.smartos_type is None: | 193 | if self.smartos_type is None: |
2067 | 195 | self.md_client = None | 194 | self.md_client = None |
2068 | 196 | 195 | ||
2070 | 197 | if self.md_client == self._unset: | 196 | if self.md_client == sources.UNSET: |
2071 | 198 | self.md_client = jmc_client_factory( | 197 | self.md_client = jmc_client_factory( |
2072 | 199 | smartos_type=self.smartos_type, | 198 | smartos_type=self.smartos_type, |
2073 | 200 | metadata_sockfile=self.ds_cfg['metadata_sockfile'], | 199 | metadata_sockfile=self.ds_cfg['metadata_sockfile'], |
2074 | @@ -299,6 +298,7 @@ class DataSourceSmartOS(sources.DataSource): | |||
2075 | 299 | self.userdata_raw = ud | 298 | self.userdata_raw = ud |
2076 | 300 | self.vendordata_raw = md['vendor-data'] | 299 | self.vendordata_raw = md['vendor-data'] |
2077 | 301 | self.network_data = md['network-data'] | 300 | self.network_data = md['network-data'] |
2078 | 301 | self.routes_data = md['routes'] | ||
2079 | 302 | 302 | ||
2080 | 303 | self._set_provisioned() | 303 | self._set_provisioned() |
2081 | 304 | return True | 304 | return True |
2082 | @@ -322,7 +322,8 @@ class DataSourceSmartOS(sources.DataSource): | |||
2083 | 322 | convert_smartos_network_data( | 322 | convert_smartos_network_data( |
2084 | 323 | network_data=self.network_data, | 323 | network_data=self.network_data, |
2085 | 324 | dns_servers=self.metadata['dns_servers'], | 324 | dns_servers=self.metadata['dns_servers'], |
2087 | 325 | dns_domain=self.metadata['dns_domain'])) | 325 | dns_domain=self.metadata['dns_domain'], |
2088 | 326 | routes=self.routes_data)) | ||
2089 | 326 | return self._network_config | 327 | return self._network_config |
2090 | 327 | 328 | ||
2091 | 328 | 329 | ||
2092 | @@ -745,7 +746,7 @@ def get_smartos_environ(uname_version=None, product_name=None): | |||
2093 | 745 | # report 'BrandZ virtual linux' as the kernel version | 746 | # report 'BrandZ virtual linux' as the kernel version |
2094 | 746 | if uname_version is None: | 747 | if uname_version is None: |
2095 | 747 | uname_version = uname[3] | 748 | uname_version = uname[3] |
2097 | 748 | if uname_version.lower() == 'brandz virtual linux': | 749 | if uname_version == 'BrandZ virtual linux': |
2098 | 749 | return SMARTOS_ENV_LX_BRAND | 750 | return SMARTOS_ENV_LX_BRAND |
2099 | 750 | 751 | ||
2100 | 751 | if product_name is None: | 752 | if product_name is None: |
2101 | @@ -753,7 +754,7 @@ def get_smartos_environ(uname_version=None, product_name=None): | |||
2102 | 753 | else: | 754 | else: |
2103 | 754 | system_type = product_name | 755 | system_type = product_name |
2104 | 755 | 756 | ||
2106 | 756 | if system_type and 'smartdc' in system_type.lower(): | 757 | if system_type and system_type.startswith('SmartDC'): |
2107 | 757 | return SMARTOS_ENV_KVM | 758 | return SMARTOS_ENV_KVM |
2108 | 758 | 759 | ||
2109 | 759 | return None | 760 | return None |
2110 | @@ -761,7 +762,8 @@ def get_smartos_environ(uname_version=None, product_name=None): | |||
2111 | 761 | 762 | ||
2112 | 762 | # Convert SMARTOS 'sdc:nics' data to network_config yaml | 763 | # Convert SMARTOS 'sdc:nics' data to network_config yaml |
2113 | 763 | def convert_smartos_network_data(network_data=None, | 764 | def convert_smartos_network_data(network_data=None, |
2115 | 764 | dns_servers=None, dns_domain=None): | 765 | dns_servers=None, dns_domain=None, |
2116 | 766 | routes=None): | ||
2117 | 765 | """Return a dictionary of network_config by parsing provided | 767 | """Return a dictionary of network_config by parsing provided |
2118 | 766 | SMARTOS sdc:nics configuration data | 768 | SMARTOS sdc:nics configuration data |
2119 | 767 | 769 | ||
2120 | @@ -779,6 +781,10 @@ def convert_smartos_network_data(network_data=None, | |||
2121 | 779 | keys are related to ip configuration. For each ip in the 'ips' list | 781 | keys are related to ip configuration. For each ip in the 'ips' list |
2122 | 780 | we create a subnet entry under 'subnets' pairing the ip to a one in | 782 | we create a subnet entry under 'subnets' pairing the ip to a one in |
2123 | 781 | the 'gateways' list. | 783 | the 'gateways' list. |
2124 | 784 | |||
2125 | 785 | Each route in sdc:routes is mapped to a route on each interface. | ||
2126 | 786 | The sdc:routes properties 'dst' and 'gateway' map to 'network' and | ||
2127 | 787 | 'gateway'. The 'linklocal' sdc:routes property is ignored. | ||
2128 | 782 | """ | 788 | """ |
2129 | 783 | 789 | ||
2130 | 784 | valid_keys = { | 790 | valid_keys = { |
2131 | @@ -801,6 +807,10 @@ def convert_smartos_network_data(network_data=None, | |||
2132 | 801 | 'scope', | 807 | 'scope', |
2133 | 802 | 'type', | 808 | 'type', |
2134 | 803 | ], | 809 | ], |
2135 | 810 | 'route': [ | ||
2136 | 811 | 'network', | ||
2137 | 812 | 'gateway', | ||
2138 | 813 | ], | ||
2139 | 804 | } | 814 | } |
2140 | 805 | 815 | ||
2141 | 806 | if dns_servers: | 816 | if dns_servers: |
2142 | @@ -815,6 +825,9 @@ def convert_smartos_network_data(network_data=None, | |||
2143 | 815 | else: | 825 | else: |
2144 | 816 | dns_domain = [] | 826 | dns_domain = [] |
2145 | 817 | 827 | ||
2146 | 828 | if not routes: | ||
2147 | 829 | routes = [] | ||
2148 | 830 | |||
2149 | 818 | def is_valid_ipv4(addr): | 831 | def is_valid_ipv4(addr): |
2150 | 819 | return '.' in addr | 832 | return '.' in addr |
2151 | 820 | 833 | ||
2152 | @@ -841,6 +854,7 @@ def convert_smartos_network_data(network_data=None, | |||
2153 | 841 | if ip == "dhcp": | 854 | if ip == "dhcp": |
2154 | 842 | subnet = {'type': 'dhcp4'} | 855 | subnet = {'type': 'dhcp4'} |
2155 | 843 | else: | 856 | else: |
2156 | 857 | routeents = [] | ||
2157 | 844 | subnet = dict((k, v) for k, v in nic.items() | 858 | subnet = dict((k, v) for k, v in nic.items() |
2158 | 845 | if k in valid_keys['subnet']) | 859 | if k in valid_keys['subnet']) |
2159 | 846 | subnet.update({ | 860 | subnet.update({ |
2160 | @@ -862,6 +876,25 @@ def convert_smartos_network_data(network_data=None, | |||
2161 | 862 | pgws[proto]['gw'] = gateways[0] | 876 | pgws[proto]['gw'] = gateways[0] |
2162 | 863 | subnet.update({'gateway': pgws[proto]['gw']}) | 877 | subnet.update({'gateway': pgws[proto]['gw']}) |
2163 | 864 | 878 | ||
2164 | 879 | for route in routes: | ||
2165 | 880 | rcfg = dict((k, v) for k, v in route.items() | ||
2166 | 881 | if k in valid_keys['route']) | ||
2167 | 882 | # Linux uses the value of 'gateway' to determine | ||
2168 | 883 | # automatically if the route is a forward/next-hop | ||
2169 | 884 | # (non-local IP for gateway) or an interface/resolver | ||
2170 | 885 | # (local IP for gateway). So we can ignore the | ||
2171 | 886 | # 'interface' attribute of sdc:routes, because SDC | ||
2172 | 887 | # guarantees that the gateway is a local IP for | ||
2173 | 888 | # "interface=true". | ||
2174 | 889 | # | ||
2175 | 890 | # Eventually we should be smart and compare "gateway" | ||
2176 | 891 | # to see if it's in the prefix. We can then smartly | ||
2177 | 892 | # add or not-add this route. But for now, | ||
2178 | 893 | # when in doubt, use brute force! Routes for everyone! | ||
2179 | 894 | rcfg.update({'network': route['dst']}) | ||
2180 | 895 | routeents.append(rcfg) | ||
2181 | 896 | subnet.update({'routes': routeents}) | ||
2182 | 897 | |||
2183 | 865 | subnets.append(subnet) | 898 | subnets.append(subnet) |
2184 | 866 | cfg.update({'subnets': subnets}) | 899 | cfg.update({'subnets': subnets}) |
2185 | 867 | config.append(cfg) | 900 | config.append(cfg) |
2186 | @@ -905,12 +938,14 @@ if __name__ == "__main__": | |||
2187 | 905 | keyname = SMARTOS_ATTRIB_JSON[key] | 938 | keyname = SMARTOS_ATTRIB_JSON[key] |
2188 | 906 | data[key] = client.get_json(keyname) | 939 | data[key] = client.get_json(keyname) |
2189 | 907 | elif key == "network_config": | 940 | elif key == "network_config": |
2191 | 908 | for depkey in ('network-data', 'dns_servers', 'dns_domain'): | 941 | for depkey in ('network-data', 'dns_servers', 'dns_domain', |
2192 | 942 | 'routes'): | ||
2193 | 909 | load_key(client, depkey, data) | 943 | load_key(client, depkey, data) |
2194 | 910 | data[key] = convert_smartos_network_data( | 944 | data[key] = convert_smartos_network_data( |
2195 | 911 | network_data=data['network-data'], | 945 | network_data=data['network-data'], |
2196 | 912 | dns_servers=data['dns_servers'], | 946 | dns_servers=data['dns_servers'], |
2198 | 913 | dns_domain=data['dns_domain']) | 947 | dns_domain=data['dns_domain'], |
2199 | 948 | routes=data['routes']) | ||
2200 | 914 | else: | 949 | else: |
2201 | 915 | if key in SMARTOS_ATTRIB_MAP: | 950 | if key in SMARTOS_ATTRIB_MAP: |
2202 | 916 | keyname, strip = SMARTOS_ATTRIB_MAP[key] | 951 | keyname, strip = SMARTOS_ATTRIB_MAP[key] |
2203 | diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py | |||
2204 | index df0b374..90d7457 100644 | |||
2205 | --- a/cloudinit/sources/__init__.py | |||
2206 | +++ b/cloudinit/sources/__init__.py | |||
2207 | @@ -9,6 +9,7 @@ | |||
2208 | 9 | # This file is part of cloud-init. See LICENSE file for license information. | 9 | # This file is part of cloud-init. See LICENSE file for license information. |
2209 | 10 | 10 | ||
2210 | 11 | import abc | 11 | import abc |
2211 | 12 | from collections import namedtuple | ||
2212 | 12 | import copy | 13 | import copy |
2213 | 13 | import json | 14 | import json |
2214 | 14 | import os | 15 | import os |
2215 | @@ -17,6 +18,7 @@ import six | |||
2216 | 17 | from cloudinit.atomic_helper import write_json | 18 | from cloudinit.atomic_helper import write_json |
2217 | 18 | from cloudinit import importer | 19 | from cloudinit import importer |
2218 | 19 | from cloudinit import log as logging | 20 | from cloudinit import log as logging |
2219 | 21 | from cloudinit import net | ||
2220 | 20 | from cloudinit import type_utils | 22 | from cloudinit import type_utils |
2221 | 21 | from cloudinit import user_data as ud | 23 | from cloudinit import user_data as ud |
2222 | 22 | from cloudinit import util | 24 | from cloudinit import util |
2223 | @@ -41,6 +43,8 @@ INSTANCE_JSON_FILE = 'instance-data.json' | |||
2224 | 41 | # Key which can be provide a cloud's official product name to cloud-init | 43 | # Key which can be provide a cloud's official product name to cloud-init |
2225 | 42 | METADATA_CLOUD_NAME_KEY = 'cloud-name' | 44 | METADATA_CLOUD_NAME_KEY = 'cloud-name' |
2226 | 43 | 45 | ||
2227 | 46 | UNSET = "_unset" | ||
2228 | 47 | |||
2229 | 44 | LOG = logging.getLogger(__name__) | 48 | LOG = logging.getLogger(__name__) |
2230 | 45 | 49 | ||
2231 | 46 | 50 | ||
2232 | @@ -48,6 +52,11 @@ class DataSourceNotFoundException(Exception): | |||
2233 | 48 | pass | 52 | pass |
2234 | 49 | 53 | ||
2235 | 50 | 54 | ||
2236 | 55 | class InvalidMetaDataException(Exception): | ||
2237 | 56 | """Raised when metadata is broken, unavailable or disabled.""" | ||
2238 | 57 | pass | ||
2239 | 58 | |||
2240 | 59 | |||
2241 | 51 | def process_base64_metadata(metadata, key_path=''): | 60 | def process_base64_metadata(metadata, key_path=''): |
2242 | 52 | """Strip ci-b64 prefix and return metadata with base64-encoded-keys set.""" | 61 | """Strip ci-b64 prefix and return metadata with base64-encoded-keys set.""" |
2243 | 53 | md_copy = copy.deepcopy(metadata) | 62 | md_copy = copy.deepcopy(metadata) |
2244 | @@ -68,6 +77,10 @@ def process_base64_metadata(metadata, key_path=''): | |||
2245 | 68 | return md_copy | 77 | return md_copy |
2246 | 69 | 78 | ||
2247 | 70 | 79 | ||
2248 | 80 | URLParams = namedtuple( | ||
2249 | 81 | 'URLParms', ['max_wait_seconds', 'timeout_seconds', 'num_retries']) | ||
2250 | 82 | |||
2251 | 83 | |||
2252 | 71 | @six.add_metaclass(abc.ABCMeta) | 84 | @six.add_metaclass(abc.ABCMeta) |
2253 | 72 | class DataSource(object): | 85 | class DataSource(object): |
2254 | 73 | 86 | ||
2255 | @@ -81,6 +94,14 @@ class DataSource(object): | |||
2256 | 81 | # Cached cloud_name as determined by _get_cloud_name | 94 | # Cached cloud_name as determined by _get_cloud_name |
2257 | 82 | _cloud_name = None | 95 | _cloud_name = None |
2258 | 83 | 96 | ||
2259 | 97 | # Track the discovered fallback nic for use in configuration generation. | ||
2260 | 98 | _fallback_interface = None | ||
2261 | 99 | |||
2262 | 100 | # read_url_params | ||
2263 | 101 | url_max_wait = -1 # max_wait < 0 means do not wait | ||
2264 | 102 | url_timeout = 10 # timeout for each metadata url read attempt | ||
2265 | 103 | url_retries = 5 # number of times to retry url upon 404 | ||
2266 | 104 | |||
2267 | 84 | def __init__(self, sys_cfg, distro, paths, ud_proc=None): | 105 | def __init__(self, sys_cfg, distro, paths, ud_proc=None): |
2268 | 85 | self.sys_cfg = sys_cfg | 106 | self.sys_cfg = sys_cfg |
2269 | 86 | self.distro = distro | 107 | self.distro = distro |
2270 | @@ -128,6 +149,14 @@ class DataSource(object): | |||
2271 | 128 | 'meta-data': self.metadata, | 149 | 'meta-data': self.metadata, |
2272 | 129 | 'user-data': self.get_userdata_raw(), | 150 | 'user-data': self.get_userdata_raw(), |
2273 | 130 | 'vendor-data': self.get_vendordata_raw()}} | 151 | 'vendor-data': self.get_vendordata_raw()}} |
2274 | 152 | if hasattr(self, 'network_json'): | ||
2275 | 153 | network_json = getattr(self, 'network_json') | ||
2276 | 154 | if network_json != UNSET: | ||
2277 | 155 | instance_data['ds']['network_json'] = network_json | ||
2278 | 156 | if hasattr(self, 'ec2_metadata'): | ||
2279 | 157 | ec2_metadata = getattr(self, 'ec2_metadata') | ||
2280 | 158 | if ec2_metadata != UNSET: | ||
2281 | 159 | instance_data['ds']['ec2_metadata'] = ec2_metadata | ||
2282 | 131 | instance_data.update( | 160 | instance_data.update( |
2283 | 132 | self._get_standardized_metadata()) | 161 | self._get_standardized_metadata()) |
2284 | 133 | try: | 162 | try: |
2285 | @@ -149,6 +178,42 @@ class DataSource(object): | |||
2286 | 149 | 'Subclasses of DataSource must implement _get_data which' | 178 | 'Subclasses of DataSource must implement _get_data which' |
2287 | 150 | ' sets self.metadata, vendordata_raw and userdata_raw.') | 179 | ' sets self.metadata, vendordata_raw and userdata_raw.') |
2288 | 151 | 180 | ||
2289 | 181 | def get_url_params(self): | ||
2290 | 182 | """Return the Datasource's prefered url_read parameters. | ||
2291 | 183 | |||
2292 | 184 | Subclasses may override url_max_wait, url_timeout, url_retries. | ||
2293 | 185 | |||
2294 | 186 | @return: A URLParams object with max_wait_seconds, timeout_seconds, | ||
2295 | 187 | num_retries. | ||
2296 | 188 | """ | ||
2297 | 189 | max_wait = self.url_max_wait | ||
2298 | 190 | try: | ||
2299 | 191 | max_wait = int(self.ds_cfg.get("max_wait", self.url_max_wait)) | ||
2300 | 192 | except ValueError: | ||
2301 | 193 | util.logexc( | ||
2302 | 194 | LOG, "Config max_wait '%s' is not an int, using default '%s'", | ||
2303 | 195 | self.ds_cfg.get("max_wait"), max_wait) | ||
2304 | 196 | |||
2305 | 197 | timeout = self.url_timeout | ||
2306 | 198 | try: | ||
2307 | 199 | timeout = max( | ||
2308 | 200 | 0, int(self.ds_cfg.get("timeout", self.url_timeout))) | ||
2309 | 201 | except ValueError: | ||
2310 | 202 | timeout = self.url_timeout | ||
2311 | 203 | util.logexc( | ||
2312 | 204 | LOG, "Config timeout '%s' is not an int, using default '%s'", | ||
2313 | 205 | self.ds_cfg.get('timeout'), timeout) | ||
2314 | 206 | |||
2315 | 207 | retries = self.url_retries | ||
2316 | 208 | try: | ||
2317 | 209 | retries = int(self.ds_cfg.get("retries", self.url_retries)) | ||
2318 | 210 | except Exception: | ||
2319 | 211 | util.logexc( | ||
2320 | 212 | LOG, "Config retries '%s' is not an int, using default '%s'", | ||
2321 | 213 | self.ds_cfg.get('retries'), retries) | ||
2322 | 214 | |||
2323 | 215 | return URLParams(max_wait, timeout, retries) | ||
2324 | 216 | |||
2325 | 152 | def get_userdata(self, apply_filter=False): | 217 | def get_userdata(self, apply_filter=False): |
2326 | 153 | if self.userdata is None: | 218 | if self.userdata is None: |
2327 | 154 | self.userdata = self.ud_proc.process(self.get_userdata_raw()) | 219 | self.userdata = self.ud_proc.process(self.get_userdata_raw()) |
2328 | @@ -162,6 +227,17 @@ class DataSource(object): | |||
2329 | 162 | return self.vendordata | 227 | return self.vendordata |
2330 | 163 | 228 | ||
2331 | 164 | @property | 229 | @property |
2332 | 230 | def fallback_interface(self): | ||
2333 | 231 | """Determine the network interface used during local network config.""" | ||
2334 | 232 | if self._fallback_interface is None: | ||
2335 | 233 | self._fallback_interface = net.find_fallback_nic() | ||
2336 | 234 | if self._fallback_interface is None: | ||
2337 | 235 | LOG.warning( | ||
2338 | 236 | "Did not find a fallback interface on %s.", | ||
2339 | 237 | self.cloud_name) | ||
2340 | 238 | return self._fallback_interface | ||
2341 | 239 | |||
2342 | 240 | @property | ||
2343 | 165 | def cloud_name(self): | 241 | def cloud_name(self): |
2344 | 166 | """Return lowercase cloud name as determined by the datasource. | 242 | """Return lowercase cloud name as determined by the datasource. |
2345 | 167 | 243 | ||
2346 | diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py | |||
2347 | index 90c12df..e5696b1 100644 | |||
2348 | --- a/cloudinit/sources/helpers/azure.py | |||
2349 | +++ b/cloudinit/sources/helpers/azure.py | |||
2350 | @@ -14,6 +14,7 @@ from cloudinit import temp_utils | |||
2351 | 14 | from contextlib import contextmanager | 14 | from contextlib import contextmanager |
2352 | 15 | from xml.etree import ElementTree | 15 | from xml.etree import ElementTree |
2353 | 16 | 16 | ||
2354 | 17 | from cloudinit import url_helper | ||
2355 | 17 | from cloudinit import util | 18 | from cloudinit import util |
2356 | 18 | 19 | ||
2357 | 19 | LOG = logging.getLogger(__name__) | 20 | LOG = logging.getLogger(__name__) |
2358 | @@ -55,14 +56,14 @@ class AzureEndpointHttpClient(object): | |||
2359 | 55 | if secure: | 56 | if secure: |
2360 | 56 | headers = self.headers.copy() | 57 | headers = self.headers.copy() |
2361 | 57 | headers.update(self.extra_secure_headers) | 58 | headers.update(self.extra_secure_headers) |
2363 | 58 | return util.read_file_or_url(url, headers=headers) | 59 | return url_helper.read_file_or_url(url, headers=headers) |
2364 | 59 | 60 | ||
2365 | 60 | def post(self, url, data=None, extra_headers=None): | 61 | def post(self, url, data=None, extra_headers=None): |
2366 | 61 | headers = self.headers | 62 | headers = self.headers |
2367 | 62 | if extra_headers is not None: | 63 | if extra_headers is not None: |
2368 | 63 | headers = self.headers.copy() | 64 | headers = self.headers.copy() |
2369 | 64 | headers.update(extra_headers) | 65 | headers.update(extra_headers) |
2371 | 65 | return util.read_file_or_url(url, data=data, headers=headers) | 66 | return url_helper.read_file_or_url(url, data=data, headers=headers) |
2372 | 66 | 67 | ||
2373 | 67 | 68 | ||
2374 | 68 | class GoalState(object): | 69 | class GoalState(object): |
2375 | diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py | |||
2376 | index 452e921..d5bc98a 100644 | |||
2377 | --- a/cloudinit/sources/tests/test_init.py | |||
2378 | +++ b/cloudinit/sources/tests/test_init.py | |||
2379 | @@ -17,6 +17,7 @@ from cloudinit import util | |||
2380 | 17 | class DataSourceTestSubclassNet(DataSource): | 17 | class DataSourceTestSubclassNet(DataSource): |
2381 | 18 | 18 | ||
2382 | 19 | dsname = 'MyTestSubclass' | 19 | dsname = 'MyTestSubclass' |
2383 | 20 | url_max_wait = 55 | ||
2384 | 20 | 21 | ||
2385 | 21 | def __init__(self, sys_cfg, distro, paths, custom_userdata=None): | 22 | def __init__(self, sys_cfg, distro, paths, custom_userdata=None): |
2386 | 22 | super(DataSourceTestSubclassNet, self).__init__( | 23 | super(DataSourceTestSubclassNet, self).__init__( |
2387 | @@ -70,8 +71,7 @@ class TestDataSource(CiTestCase): | |||
2388 | 70 | """Init uses DataSource.dsname for sourcing ds_cfg.""" | 71 | """Init uses DataSource.dsname for sourcing ds_cfg.""" |
2389 | 71 | sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}} | 72 | sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}} |
2390 | 72 | distro = 'distrotest' # generally should be a Distro object | 73 | distro = 'distrotest' # generally should be a Distro object |
2393 | 73 | paths = Paths({}) | 74 | datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths) |
2392 | 74 | datasource = DataSourceTestSubclassNet(sys_cfg, distro, paths) | ||
2394 | 75 | self.assertEqual({'key2': False}, datasource.ds_cfg) | 75 | self.assertEqual({'key2': False}, datasource.ds_cfg) |
2395 | 76 | 76 | ||
2396 | 77 | def test_str_is_classname(self): | 77 | def test_str_is_classname(self): |
2397 | @@ -81,6 +81,91 @@ class TestDataSource(CiTestCase): | |||
2398 | 81 | 'DataSourceTestSubclassNet', | 81 | 'DataSourceTestSubclassNet', |
2399 | 82 | str(DataSourceTestSubclassNet('', '', self.paths))) | 82 | str(DataSourceTestSubclassNet('', '', self.paths))) |
2400 | 83 | 83 | ||
2401 | 84 | def test_datasource_get_url_params_defaults(self): | ||
2402 | 85 | """get_url_params default url config settings for the datasource.""" | ||
2403 | 86 | params = self.datasource.get_url_params() | ||
2404 | 87 | self.assertEqual(params.max_wait_seconds, self.datasource.url_max_wait) | ||
2405 | 88 | self.assertEqual(params.timeout_seconds, self.datasource.url_timeout) | ||
2406 | 89 | self.assertEqual(params.num_retries, self.datasource.url_retries) | ||
2407 | 90 | |||
2408 | 91 | def test_datasource_get_url_params_subclassed(self): | ||
2409 | 92 | """Subclasses can override get_url_params defaults.""" | ||
2410 | 93 | sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}} | ||
2411 | 94 | distro = 'distrotest' # generally should be a Distro object | ||
2412 | 95 | datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths) | ||
2413 | 96 | expected = (datasource.url_max_wait, datasource.url_timeout, | ||
2414 | 97 | datasource.url_retries) | ||
2415 | 98 | url_params = datasource.get_url_params() | ||
2416 | 99 | self.assertNotEqual(self.datasource.get_url_params(), url_params) | ||
2417 | 100 | self.assertEqual(expected, url_params) | ||
2418 | 101 | |||
2419 | 102 | def test_datasource_get_url_params_ds_config_override(self): | ||
2420 | 103 | """Datasource configuration options can override url param defaults.""" | ||
2421 | 104 | sys_cfg = { | ||
2422 | 105 | 'datasource': { | ||
2423 | 106 | 'MyTestSubclass': { | ||
2424 | 107 | 'max_wait': '1', 'timeout': '2', 'retries': '3'}}} | ||
2425 | 108 | datasource = DataSourceTestSubclassNet( | ||
2426 | 109 | sys_cfg, self.distro, self.paths) | ||
2427 | 110 | expected = (1, 2, 3) | ||
2428 | 111 | url_params = datasource.get_url_params() | ||
2429 | 112 | self.assertNotEqual( | ||
2430 | 113 | (datasource.url_max_wait, datasource.url_timeout, | ||
2431 | 114 | datasource.url_retries), | ||
2432 | 115 | url_params) | ||
2433 | 116 | self.assertEqual(expected, url_params) | ||
2434 | 117 | |||
2435 | 118 | def test_datasource_get_url_params_is_zero_or_greater(self): | ||
2436 | 119 | """get_url_params ignores timeouts with a value below 0.""" | ||
2437 | 120 | # Set an override that is below 0 which gets ignored. | ||
2438 | 121 | sys_cfg = {'datasource': {'_undef': {'timeout': '-1'}}} | ||
2439 | 122 | datasource = DataSource(sys_cfg, self.distro, self.paths) | ||
2440 | 123 | (_max_wait, timeout, _retries) = datasource.get_url_params() | ||
2441 | 124 | self.assertEqual(0, timeout) | ||
2442 | 125 | |||
2443 | 126 | def test_datasource_get_url_uses_defaults_on_errors(self): | ||
2444 | 127 | """On invalid system config values for url_params defaults are used.""" | ||
2445 | 128 | # All invalid values should be logged | ||
2446 | 129 | sys_cfg = {'datasource': { | ||
2447 | 130 | '_undef': { | ||
2448 | 131 | 'max_wait': 'nope', 'timeout': 'bug', 'retries': 'nonint'}}} | ||
2449 | 132 | datasource = DataSource(sys_cfg, self.distro, self.paths) | ||
2450 | 133 | url_params = datasource.get_url_params() | ||
2451 | 134 | expected = (datasource.url_max_wait, datasource.url_timeout, | ||
2452 | 135 | datasource.url_retries) | ||
2453 | 136 | self.assertEqual(expected, url_params) | ||
2454 | 137 | logs = self.logs.getvalue() | ||
2455 | 138 | expected_logs = [ | ||
2456 | 139 | "Config max_wait 'nope' is not an int, using default '-1'", | ||
2457 | 140 | "Config timeout 'bug' is not an int, using default '10'", | ||
2458 | 141 | "Config retries 'nonint' is not an int, using default '5'", | ||
2459 | 142 | ] | ||
2460 | 143 | for log in expected_logs: | ||
2461 | 144 | self.assertIn(log, logs) | ||
2462 | 145 | |||
2463 | 146 | @mock.patch('cloudinit.sources.net.find_fallback_nic') | ||
2464 | 147 | def test_fallback_interface_is_discovered(self, m_get_fallback_nic): | ||
2465 | 148 | """The fallback_interface is discovered via find_fallback_nic.""" | ||
2466 | 149 | m_get_fallback_nic.return_value = 'nic9' | ||
2467 | 150 | self.assertEqual('nic9', self.datasource.fallback_interface) | ||
2468 | 151 | |||
2469 | 152 | @mock.patch('cloudinit.sources.net.find_fallback_nic') | ||
2470 | 153 | def test_fallback_interface_logs_undiscovered(self, m_get_fallback_nic): | ||
2471 | 154 | """Log a warning when fallback_interface can not discover the nic.""" | ||
2472 | 155 | self.datasource._cloud_name = 'MySupahCloud' | ||
2473 | 156 | m_get_fallback_nic.return_value = None # Couldn't discover nic | ||
2474 | 157 | self.assertIsNone(self.datasource.fallback_interface) | ||
2475 | 158 | self.assertEqual( | ||
2476 | 159 | 'WARNING: Did not find a fallback interface on MySupahCloud.\n', | ||
2477 | 160 | self.logs.getvalue()) | ||
2478 | 161 | |||
2479 | 162 | @mock.patch('cloudinit.sources.net.find_fallback_nic') | ||
2480 | 163 | def test_wb_fallback_interface_is_cached(self, m_get_fallback_nic): | ||
2481 | 164 | """The fallback_interface is cached and won't be rediscovered.""" | ||
2482 | 165 | self.datasource._fallback_interface = 'nic10' | ||
2483 | 166 | self.assertEqual('nic10', self.datasource.fallback_interface) | ||
2484 | 167 | m_get_fallback_nic.assert_not_called() | ||
2485 | 168 | |||
2486 | 84 | def test__get_data_unimplemented(self): | 169 | def test__get_data_unimplemented(self): |
2487 | 85 | """Raise an error when _get_data is not implemented.""" | 170 | """Raise an error when _get_data is not implemented.""" |
2488 | 86 | with self.assertRaises(NotImplementedError) as context_manager: | 171 | with self.assertRaises(NotImplementedError) as context_manager: |
2489 | diff --git a/cloudinit/stages.py b/cloudinit/stages.py | |||
2490 | index bc4ebc8..286607b 100644 | |||
2491 | --- a/cloudinit/stages.py | |||
2492 | +++ b/cloudinit/stages.py | |||
2493 | @@ -362,16 +362,22 @@ class Init(object): | |||
2494 | 362 | self._store_vendordata() | 362 | self._store_vendordata() |
2495 | 363 | 363 | ||
2496 | 364 | def setup_datasource(self): | 364 | def setup_datasource(self): |
2500 | 365 | if self.datasource is None: | 365 | with events.ReportEventStack("setup-datasource", |
2501 | 366 | raise RuntimeError("Datasource is None, cannot setup.") | 366 | "setting up datasource", |
2502 | 367 | self.datasource.setup(is_new_instance=self.is_new_instance()) | 367 | parent=self.reporter): |
2503 | 368 | if self.datasource is None: | ||
2504 | 369 | raise RuntimeError("Datasource is None, cannot setup.") | ||
2505 | 370 | self.datasource.setup(is_new_instance=self.is_new_instance()) | ||
2506 | 368 | 371 | ||
2507 | 369 | def activate_datasource(self): | 372 | def activate_datasource(self): |
2513 | 370 | if self.datasource is None: | 373 | with events.ReportEventStack("activate-datasource", |
2514 | 371 | raise RuntimeError("Datasource is None, cannot activate.") | 374 | "activating datasource", |
2515 | 372 | self.datasource.activate(cfg=self.cfg, | 375 | parent=self.reporter): |
2516 | 373 | is_new_instance=self.is_new_instance()) | 376 | if self.datasource is None: |
2517 | 374 | self._write_to_cache() | 377 | raise RuntimeError("Datasource is None, cannot activate.") |
2518 | 378 | self.datasource.activate(cfg=self.cfg, | ||
2519 | 379 | is_new_instance=self.is_new_instance()) | ||
2520 | 380 | self._write_to_cache() | ||
2521 | 375 | 381 | ||
2522 | 376 | def _store_userdata(self): | 382 | def _store_userdata(self): |
2523 | 377 | raw_ud = self.datasource.get_userdata_raw() | 383 | raw_ud = self.datasource.get_userdata_raw() |
2524 | @@ -691,7 +697,9 @@ class Modules(object): | |||
2525 | 691 | module_list = [] | 697 | module_list = [] |
2526 | 692 | if name not in self.cfg: | 698 | if name not in self.cfg: |
2527 | 693 | return module_list | 699 | return module_list |
2529 | 694 | cfg_mods = self.cfg[name] | 700 | cfg_mods = self.cfg.get(name) |
2530 | 701 | if not cfg_mods: | ||
2531 | 702 | return module_list | ||
2532 | 695 | # Create 'module_list', an array of hashes | 703 | # Create 'module_list', an array of hashes |
2533 | 696 | # Where hash['mod'] = module name | 704 | # Where hash['mod'] = module name |
2534 | 697 | # hash['freq'] = frequency | 705 | # hash['freq'] = frequency |
2535 | diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py | |||
2536 | index 117a9cf..5bfe7fa 100644 | |||
2537 | --- a/cloudinit/tests/helpers.py | |||
2538 | +++ b/cloudinit/tests/helpers.py | |||
2539 | @@ -3,6 +3,7 @@ | |||
2540 | 3 | from __future__ import print_function | 3 | from __future__ import print_function |
2541 | 4 | 4 | ||
2542 | 5 | import functools | 5 | import functools |
2543 | 6 | import httpretty | ||
2544 | 6 | import logging | 7 | import logging |
2545 | 7 | import os | 8 | import os |
2546 | 8 | import shutil | 9 | import shutil |
2547 | @@ -111,12 +112,12 @@ class TestCase(unittest2.TestCase): | |||
2548 | 111 | super(TestCase, self).setUp() | 112 | super(TestCase, self).setUp() |
2549 | 112 | self.reset_global_state() | 113 | self.reset_global_state() |
2550 | 113 | 114 | ||
2552 | 114 | def add_patch(self, target, attr, **kwargs): | 115 | def add_patch(self, target, attr, *args, **kwargs): |
2553 | 115 | """Patches specified target object and sets it as attr on test | 116 | """Patches specified target object and sets it as attr on test |
2554 | 116 | instance also schedules cleanup""" | 117 | instance also schedules cleanup""" |
2555 | 117 | if 'autospec' not in kwargs: | 118 | if 'autospec' not in kwargs: |
2556 | 118 | kwargs['autospec'] = True | 119 | kwargs['autospec'] = True |
2558 | 119 | m = mock.patch(target, **kwargs) | 120 | m = mock.patch(target, *args, **kwargs) |
2559 | 120 | p = m.start() | 121 | p = m.start() |
2560 | 121 | self.addCleanup(m.stop) | 122 | self.addCleanup(m.stop) |
2561 | 122 | setattr(self, attr, p) | 123 | setattr(self, attr, p) |
2562 | @@ -303,14 +304,21 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): | |||
2563 | 303 | class HttprettyTestCase(CiTestCase): | 304 | class HttprettyTestCase(CiTestCase): |
2564 | 304 | # necessary as http_proxy gets in the way of httpretty | 305 | # necessary as http_proxy gets in the way of httpretty |
2565 | 305 | # https://github.com/gabrielfalcao/HTTPretty/issues/122 | 306 | # https://github.com/gabrielfalcao/HTTPretty/issues/122 |
2566 | 307 | # Also make sure that allow_net_connect is set to False. | ||
2567 | 308 | # And make sure reset and enable/disable are done. | ||
2568 | 306 | 309 | ||
2569 | 307 | def setUp(self): | 310 | def setUp(self): |
2570 | 308 | self.restore_proxy = os.environ.get('http_proxy') | 311 | self.restore_proxy = os.environ.get('http_proxy') |
2571 | 309 | if self.restore_proxy is not None: | 312 | if self.restore_proxy is not None: |
2572 | 310 | del os.environ['http_proxy'] | 313 | del os.environ['http_proxy'] |
2573 | 311 | super(HttprettyTestCase, self).setUp() | 314 | super(HttprettyTestCase, self).setUp() |
2574 | 315 | httpretty.HTTPretty.allow_net_connect = False | ||
2575 | 316 | httpretty.reset() | ||
2576 | 317 | httpretty.enable() | ||
2577 | 312 | 318 | ||
2578 | 313 | def tearDown(self): | 319 | def tearDown(self): |
2579 | 320 | httpretty.disable() | ||
2580 | 321 | httpretty.reset() | ||
2581 | 314 | if self.restore_proxy: | 322 | if self.restore_proxy: |
2582 | 315 | os.environ['http_proxy'] = self.restore_proxy | 323 | os.environ['http_proxy'] = self.restore_proxy |
2583 | 316 | super(HttprettyTestCase, self).tearDown() | 324 | super(HttprettyTestCase, self).tearDown() |
2584 | diff --git a/cloudinit/tests/test_netinfo.py b/cloudinit/tests/test_netinfo.py | |||
2585 | index 2537c1c..d76e768 100644 | |||
2586 | --- a/cloudinit/tests/test_netinfo.py | |||
2587 | +++ b/cloudinit/tests/test_netinfo.py | |||
2588 | @@ -4,7 +4,7 @@ | |||
2589 | 4 | 4 | ||
2590 | 5 | from copy import copy | 5 | from copy import copy |
2591 | 6 | 6 | ||
2593 | 7 | from cloudinit.netinfo import netdev_pformat, route_pformat | 7 | from cloudinit.netinfo import netdev_info, netdev_pformat, route_pformat |
2594 | 8 | from cloudinit.tests.helpers import CiTestCase, mock, readResource | 8 | from cloudinit.tests.helpers import CiTestCase, mock, readResource |
2595 | 9 | 9 | ||
2596 | 10 | 10 | ||
2597 | @@ -73,6 +73,51 @@ class TestNetInfo(CiTestCase): | |||
2598 | 73 | 73 | ||
2599 | 74 | @mock.patch('cloudinit.netinfo.util.which') | 74 | @mock.patch('cloudinit.netinfo.util.which') |
2600 | 75 | @mock.patch('cloudinit.netinfo.util.subp') | 75 | @mock.patch('cloudinit.netinfo.util.subp') |
2601 | 76 | def test_netdev_info_nettools_down(self, m_subp, m_which): | ||
2602 | 77 | """test netdev_info using nettools and down interfaces.""" | ||
2603 | 78 | m_subp.return_value = ( | ||
2604 | 79 | readResource("netinfo/new-ifconfig-output-down"), "") | ||
2605 | 80 | m_which.side_effect = lambda x: x if x == 'ifconfig' else None | ||
2606 | 81 | self.assertEqual( | ||
2607 | 82 | {'eth0': {'ipv4': [], 'ipv6': [], | ||
2608 | 83 | 'hwaddr': '00:16:3e:de:51:a6', 'up': False}, | ||
2609 | 84 | 'lo': {'ipv4': [{'ip': '127.0.0.1', 'mask': '255.0.0.0'}], | ||
2610 | 85 | 'ipv6': [{'ip': '::1/128', 'scope6': 'host'}], | ||
2611 | 86 | 'hwaddr': '.', 'up': True}}, | ||
2612 | 87 | netdev_info(".")) | ||
2613 | 88 | |||
2614 | 89 | @mock.patch('cloudinit.netinfo.util.which') | ||
2615 | 90 | @mock.patch('cloudinit.netinfo.util.subp') | ||
2616 | 91 | def test_netdev_info_iproute_down(self, m_subp, m_which): | ||
2617 | 92 | """Test netdev_info with ip and down interfaces.""" | ||
2618 | 93 | m_subp.return_value = ( | ||
2619 | 94 | readResource("netinfo/sample-ipaddrshow-output-down"), "") | ||
2620 | 95 | m_which.side_effect = lambda x: x if x == 'ip' else None | ||
2621 | 96 | self.assertEqual( | ||
2622 | 97 | {'lo': {'ipv4': [{'ip': '127.0.0.1', 'bcast': '.', | ||
2623 | 98 | 'mask': '255.0.0.0', 'scope': 'host'}], | ||
2624 | 99 | 'ipv6': [{'ip': '::1/128', 'scope6': 'host'}], | ||
2625 | 100 | 'hwaddr': '.', 'up': True}, | ||
2626 | 101 | 'eth0': {'ipv4': [], 'ipv6': [], | ||
2627 | 102 | 'hwaddr': '00:16:3e:de:51:a6', 'up': False}}, | ||
2628 | 103 | netdev_info(".")) | ||
2629 | 104 | |||
2630 | 105 | @mock.patch('cloudinit.netinfo.netdev_info') | ||
2631 | 106 | def test_netdev_pformat_with_down(self, m_netdev_info): | ||
2632 | 107 | """test netdev_pformat when netdev_info returns 'down' interfaces.""" | ||
2633 | 108 | m_netdev_info.return_value = ( | ||
2634 | 109 | {'lo': {'ipv4': [{'ip': '127.0.0.1', 'mask': '255.0.0.0', | ||
2635 | 110 | 'scope': 'host'}], | ||
2636 | 111 | 'ipv6': [{'ip': '::1/128', 'scope6': 'host'}], | ||
2637 | 112 | 'hwaddr': '.', 'up': True}, | ||
2638 | 113 | 'eth0': {'ipv4': [], 'ipv6': [], | ||
2639 | 114 | 'hwaddr': '00:16:3e:de:51:a6', 'up': False}}) | ||
2640 | 115 | self.assertEqual( | ||
2641 | 116 | readResource("netinfo/netdev-formatted-output-down"), | ||
2642 | 117 | netdev_pformat()) | ||
2643 | 118 | |||
2644 | 119 | @mock.patch('cloudinit.netinfo.util.which') | ||
2645 | 120 | @mock.patch('cloudinit.netinfo.util.subp') | ||
2646 | 76 | def test_route_nettools_pformat(self, m_subp, m_which): | 121 | def test_route_nettools_pformat(self, m_subp, m_which): |
2647 | 77 | """route_pformat properly rendering nettools route info.""" | 122 | """route_pformat properly rendering nettools route info.""" |
2648 | 78 | 123 | ||
2649 | diff --git a/cloudinit/tests/test_url_helper.py b/cloudinit/tests/test_url_helper.py | |||
2650 | index b778a3a..113249d 100644 | |||
2651 | --- a/cloudinit/tests/test_url_helper.py | |||
2652 | +++ b/cloudinit/tests/test_url_helper.py | |||
2653 | @@ -1,7 +1,10 @@ | |||
2654 | 1 | # This file is part of cloud-init. See LICENSE file for license information. | 1 | # This file is part of cloud-init. See LICENSE file for license information. |
2655 | 2 | 2 | ||
2657 | 3 | from cloudinit.url_helper import oauth_headers | 3 | from cloudinit.url_helper import oauth_headers, read_file_or_url |
2658 | 4 | from cloudinit.tests.helpers import CiTestCase, mock, skipIf | 4 | from cloudinit.tests.helpers import CiTestCase, mock, skipIf |
2659 | 5 | from cloudinit import util | ||
2660 | 6 | |||
2661 | 7 | import httpretty | ||
2662 | 5 | 8 | ||
2663 | 6 | 9 | ||
2664 | 7 | try: | 10 | try: |
2665 | @@ -38,3 +41,26 @@ class TestOAuthHeaders(CiTestCase): | |||
2666 | 38 | 'url', 'consumer_key', 'token_key', 'token_secret', | 41 | 'url', 'consumer_key', 'token_key', 'token_secret', |
2667 | 39 | 'consumer_secret') | 42 | 'consumer_secret') |
2668 | 40 | self.assertEqual('url', return_value) | 43 | self.assertEqual('url', return_value) |
2669 | 44 | |||
2670 | 45 | |||
2671 | 46 | class TestReadFileOrUrl(CiTestCase): | ||
2672 | 47 | def test_read_file_or_url_str_from_file(self): | ||
2673 | 48 | """Test that str(result.contents) on file is text version of contents. | ||
2674 | 49 | It should not be "b'data'", but just "'data'" """ | ||
2675 | 50 | tmpf = self.tmp_path("myfile1") | ||
2676 | 51 | data = b'This is my file content\n' | ||
2677 | 52 | util.write_file(tmpf, data, omode="wb") | ||
2678 | 53 | result = read_file_or_url("file://%s" % tmpf) | ||
2679 | 54 | self.assertEqual(result.contents, data) | ||
2680 | 55 | self.assertEqual(str(result), data.decode('utf-8')) | ||
2681 | 56 | |||
2682 | 57 | @httpretty.activate | ||
2683 | 58 | def test_read_file_or_url_str_from_url(self): | ||
2684 | 59 | """Test that str(result.contents) on url is text version of contents. | ||
2685 | 60 | It should not be "b'data'", but just "'data'" """ | ||
2686 | 61 | url = 'http://hostname/path' | ||
2687 | 62 | data = b'This is my url content\n' | ||
2688 | 63 | httpretty.register_uri(httpretty.GET, url, data) | ||
2689 | 64 | result = read_file_or_url(url) | ||
2690 | 65 | self.assertEqual(result.contents, data) | ||
2691 | 66 | self.assertEqual(str(result), data.decode('utf-8')) | ||
2692 | diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py | |||
2693 | index 3c05a43..17853fc 100644 | |||
2694 | --- a/cloudinit/tests/test_util.py | |||
2695 | +++ b/cloudinit/tests/test_util.py | |||
2696 | @@ -3,11 +3,12 @@ | |||
2697 | 3 | """Tests for cloudinit.util""" | 3 | """Tests for cloudinit.util""" |
2698 | 4 | 4 | ||
2699 | 5 | import logging | 5 | import logging |
2701 | 6 | from textwrap import dedent | 6 | import platform |
2702 | 7 | 7 | ||
2703 | 8 | import cloudinit.util as util | 8 | import cloudinit.util as util |
2704 | 9 | 9 | ||
2705 | 10 | from cloudinit.tests.helpers import CiTestCase, mock | 10 | from cloudinit.tests.helpers import CiTestCase, mock |
2706 | 11 | from textwrap import dedent | ||
2707 | 11 | 12 | ||
2708 | 12 | LOG = logging.getLogger(__name__) | 13 | LOG = logging.getLogger(__name__) |
2709 | 13 | 14 | ||
2710 | @@ -16,6 +17,29 @@ MOUNT_INFO = [ | |||
2711 | 16 | '153 68 254:0 / /home rw,relatime shared:101 - xfs /dev/sda2 rw,attr2' | 17 | '153 68 254:0 / /home rw,relatime shared:101 - xfs /dev/sda2 rw,attr2' |
2712 | 17 | ] | 18 | ] |
2713 | 18 | 19 | ||
2714 | 20 | OS_RELEASE_SLES = dedent("""\ | ||
2715 | 21 | NAME="SLES"\n | ||
2716 | 22 | VERSION="12-SP3"\n | ||
2717 | 23 | VERSION_ID="12.3"\n | ||
2718 | 24 | PRETTY_NAME="SUSE Linux Enterprise Server 12 SP3"\n | ||
2719 | 25 | ID="sles"\nANSI_COLOR="0;32"\n | ||
2720 | 26 | CPE_NAME="cpe:/o:suse:sles:12:sp3"\n | ||
2721 | 27 | """) | ||
2722 | 28 | |||
2723 | 29 | OS_RELEASE_UBUNTU = dedent("""\ | ||
2724 | 30 | NAME="Ubuntu"\n | ||
2725 | 31 | VERSION="16.04.3 LTS (Xenial Xerus)"\n | ||
2726 | 32 | ID=ubuntu\n | ||
2727 | 33 | ID_LIKE=debian\n | ||
2728 | 34 | PRETTY_NAME="Ubuntu 16.04.3 LTS"\n | ||
2729 | 35 | VERSION_ID="16.04"\n | ||
2730 | 36 | HOME_URL="http://www.ubuntu.com/"\n | ||
2731 | 37 | SUPPORT_URL="http://help.ubuntu.com/"\n | ||
2732 | 38 | BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"\n | ||
2733 | 39 | VERSION_CODENAME=xenial\n | ||
2734 | 40 | UBUNTU_CODENAME=xenial\n | ||
2735 | 41 | """) | ||
2736 | 42 | |||
2737 | 19 | 43 | ||
2738 | 20 | class FakeCloud(object): | 44 | class FakeCloud(object): |
2739 | 21 | 45 | ||
2740 | @@ -261,4 +285,56 @@ class TestUdevadmSettle(CiTestCase): | |||
2741 | 261 | self.assertRaises(util.ProcessExecutionError, util.udevadm_settle) | 285 | self.assertRaises(util.ProcessExecutionError, util.udevadm_settle) |
2742 | 262 | 286 | ||
2743 | 263 | 287 | ||
2744 | 288 | @mock.patch('os.path.exists') | ||
2745 | 289 | class TestGetLinuxDistro(CiTestCase): | ||
2746 | 290 | |||
2747 | 291 | @classmethod | ||
2748 | 292 | def os_release_exists(self, path): | ||
2749 | 293 | """Side effect function""" | ||
2750 | 294 | if path == '/etc/os-release': | ||
2751 | 295 | return 1 | ||
2752 | 296 | |||
2753 | 297 | @mock.patch('cloudinit.util.load_file') | ||
2754 | 298 | def test_get_linux_distro_quoted_name(self, m_os_release, m_path_exists): | ||
2755 | 299 | """Verify we get the correct name if the os-release file has | ||
2756 | 300 | the distro name in quotes""" | ||
2757 | 301 | m_os_release.return_value = OS_RELEASE_SLES | ||
2758 | 302 | m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists | ||
2759 | 303 | dist = util.get_linux_distro() | ||
2760 | 304 | self.assertEqual(('sles', '12.3', platform.machine()), dist) | ||
2761 | 305 | |||
2762 | 306 | @mock.patch('cloudinit.util.load_file') | ||
2763 | 307 | def test_get_linux_distro_bare_name(self, m_os_release, m_path_exists): | ||
2764 | 308 | """Verify we get the correct name if the os-release file does not | ||
2765 | 309 | have the distro name in quotes""" | ||
2766 | 310 | m_os_release.return_value = OS_RELEASE_UBUNTU | ||
2767 | 311 | m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists | ||
2768 | 312 | dist = util.get_linux_distro() | ||
2769 | 313 | self.assertEqual(('ubuntu', '16.04', platform.machine()), dist) | ||
2770 | 314 | |||
2771 | 315 | @mock.patch('platform.dist') | ||
2772 | 316 | def test_get_linux_distro_no_data(self, m_platform_dist, m_path_exists): | ||
2773 | 317 | """Verify we get no information if os-release does not exist""" | ||
2774 | 318 | m_platform_dist.return_value = ('', '', '') | ||
2775 | 319 | m_path_exists.return_value = 0 | ||
2776 | 320 | dist = util.get_linux_distro() | ||
2777 | 321 | self.assertEqual(('', '', ''), dist) | ||
2778 | 322 | |||
2779 | 323 | @mock.patch('platform.dist') | ||
2780 | 324 | def test_get_linux_distro_no_impl(self, m_platform_dist, m_path_exists): | ||
2781 | 325 | """Verify we get an empty tuple when no information exists and | ||
2782 | 326 | Exceptions are not propagated""" | ||
2783 | 327 | m_platform_dist.side_effect = Exception() | ||
2784 | 328 | m_path_exists.return_value = 0 | ||
2785 | 329 | dist = util.get_linux_distro() | ||
2786 | 330 | self.assertEqual(('', '', ''), dist) | ||
2787 | 331 | |||
2788 | 332 | @mock.patch('platform.dist') | ||
2789 | 333 | def test_get_linux_distro_plat_data(self, m_platform_dist, m_path_exists): | ||
2790 | 334 | """Verify we get the correct platform information""" | ||
2791 | 335 | m_platform_dist.return_value = ('foo', '1.1', 'aarch64') | ||
2792 | 336 | m_path_exists.return_value = 0 | ||
2793 | 337 | dist = util.get_linux_distro() | ||
2794 | 338 | self.assertEqual(('foo', '1.1', 'aarch64'), dist) | ||
2795 | 339 | |||
2796 | 264 | # vi: ts=4 expandtab | 340 | # vi: ts=4 expandtab |
2797 | diff --git a/tests/unittests/test_version.py b/cloudinit/tests/test_version.py | |||
2798 | index d012f69..a96c2a4 100644 | |||
2799 | --- a/tests/unittests/test_version.py | |||
2800 | +++ b/cloudinit/tests/test_version.py | |||
2801 | @@ -3,6 +3,8 @@ | |||
2802 | 3 | from cloudinit.tests.helpers import CiTestCase | 3 | from cloudinit.tests.helpers import CiTestCase |
2803 | 4 | from cloudinit import version | 4 | from cloudinit import version |
2804 | 5 | 5 | ||
2805 | 6 | import mock | ||
2806 | 7 | |||
2807 | 6 | 8 | ||
2808 | 7 | class TestExportsFeatures(CiTestCase): | 9 | class TestExportsFeatures(CiTestCase): |
2809 | 8 | def test_has_network_config_v1(self): | 10 | def test_has_network_config_v1(self): |
2810 | @@ -11,4 +13,19 @@ class TestExportsFeatures(CiTestCase): | |||
2811 | 11 | def test_has_network_config_v2(self): | 13 | def test_has_network_config_v2(self): |
2812 | 12 | self.assertIn('NETWORK_CONFIG_V2', version.FEATURES) | 14 | self.assertIn('NETWORK_CONFIG_V2', version.FEATURES) |
2813 | 13 | 15 | ||
2814 | 16 | |||
2815 | 17 | class TestVersionString(CiTestCase): | ||
2816 | 18 | @mock.patch("cloudinit.version._PACKAGED_VERSION", | ||
2817 | 19 | "17.2-3-gb05b9972-0ubuntu1") | ||
2818 | 20 | def test_package_version_respected(self): | ||
2819 | 21 | """If _PACKAGED_VERSION is filled in, then it should be returned.""" | ||
2820 | 22 | self.assertEqual("17.2-3-gb05b9972-0ubuntu1", version.version_string()) | ||
2821 | 23 | |||
2822 | 24 | @mock.patch("cloudinit.version._PACKAGED_VERSION", "@@PACKAGED_VERSION@@") | ||
2823 | 25 | @mock.patch("cloudinit.version.__VERSION__", "17.2") | ||
2824 | 26 | def test_package_version_skipped(self): | ||
2825 | 27 | """If _PACKAGED_VERSION is not modified, then return __VERSION__.""" | ||
2826 | 28 | self.assertEqual("17.2", version.version_string()) | ||
2827 | 29 | |||
2828 | 30 | |||
2829 | 14 | # vi: ts=4 expandtab | 31 | # vi: ts=4 expandtab |
2830 | diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py | |||
2831 | index 1de07b1..8067979 100644 | |||
2832 | --- a/cloudinit/url_helper.py | |||
2833 | +++ b/cloudinit/url_helper.py | |||
2834 | @@ -15,6 +15,7 @@ import six | |||
2835 | 15 | import time | 15 | import time |
2836 | 16 | 16 | ||
2837 | 17 | from email.utils import parsedate | 17 | from email.utils import parsedate |
2838 | 18 | from errno import ENOENT | ||
2839 | 18 | from functools import partial | 19 | from functools import partial |
2840 | 19 | from itertools import count | 20 | from itertools import count |
2841 | 20 | from requests import exceptions | 21 | from requests import exceptions |
2842 | @@ -80,6 +81,32 @@ def combine_url(base, *add_ons): | |||
2843 | 80 | return url | 81 | return url |
2844 | 81 | 82 | ||
2845 | 82 | 83 | ||
2846 | 84 | def read_file_or_url(url, timeout=5, retries=10, | ||
2847 | 85 | headers=None, data=None, sec_between=1, ssl_details=None, | ||
2848 | 86 | headers_cb=None, exception_cb=None): | ||
2849 | 87 | url = url.lstrip() | ||
2850 | 88 | if url.startswith("/"): | ||
2851 | 89 | url = "file://%s" % url | ||
2852 | 90 | if url.lower().startswith("file://"): | ||
2853 | 91 | if data: | ||
2854 | 92 | LOG.warning("Unable to post data to file resource %s", url) | ||
2855 | 93 | file_path = url[len("file://"):] | ||
2856 | 94 | try: | ||
2857 | 95 | with open(file_path, "rb") as fp: | ||
2858 | 96 | contents = fp.read() | ||
2859 | 97 | except IOError as e: | ||
2860 | 98 | code = e.errno | ||
2861 | 99 | if e.errno == ENOENT: | ||
2862 | 100 | code = NOT_FOUND | ||
2863 | 101 | raise UrlError(cause=e, code=code, headers=None, url=url) | ||
2864 | 102 | return FileResponse(file_path, contents=contents) | ||
2865 | 103 | else: | ||
2866 | 104 | return readurl(url, timeout=timeout, retries=retries, headers=headers, | ||
2867 | 105 | headers_cb=headers_cb, data=data, | ||
2868 | 106 | sec_between=sec_between, ssl_details=ssl_details, | ||
2869 | 107 | exception_cb=exception_cb) | ||
2870 | 108 | |||
2871 | 109 | |||
2872 | 83 | # Made to have same accessors as UrlResponse so that the | 110 | # Made to have same accessors as UrlResponse so that the |
2873 | 84 | # read_file_or_url can return this or that object and the | 111 | # read_file_or_url can return this or that object and the |
2874 | 85 | # 'user' of those objects will not need to know the difference. | 112 | # 'user' of those objects will not need to know the difference. |
2875 | @@ -96,7 +123,7 @@ class StringResponse(object): | |||
2876 | 96 | return True | 123 | return True |
2877 | 97 | 124 | ||
2878 | 98 | def __str__(self): | 125 | def __str__(self): |
2880 | 99 | return self.contents | 126 | return self.contents.decode('utf-8') |
2881 | 100 | 127 | ||
2882 | 101 | 128 | ||
2883 | 102 | class FileResponse(StringResponse): | 129 | class FileResponse(StringResponse): |
2884 | diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py | |||
2885 | index cc55daf..ed83d2d 100644 | |||
2886 | --- a/cloudinit/user_data.py | |||
2887 | +++ b/cloudinit/user_data.py | |||
2888 | @@ -19,7 +19,7 @@ import six | |||
2889 | 19 | 19 | ||
2890 | 20 | from cloudinit import handlers | 20 | from cloudinit import handlers |
2891 | 21 | from cloudinit import log as logging | 21 | from cloudinit import log as logging |
2893 | 22 | from cloudinit.url_helper import UrlError | 22 | from cloudinit.url_helper import read_file_or_url, UrlError |
2894 | 23 | from cloudinit import util | 23 | from cloudinit import util |
2895 | 24 | 24 | ||
2896 | 25 | LOG = logging.getLogger(__name__) | 25 | LOG = logging.getLogger(__name__) |
2897 | @@ -224,8 +224,8 @@ class UserDataProcessor(object): | |||
2898 | 224 | content = util.load_file(include_once_fn) | 224 | content = util.load_file(include_once_fn) |
2899 | 225 | else: | 225 | else: |
2900 | 226 | try: | 226 | try: |
2903 | 227 | resp = util.read_file_or_url(include_url, | 227 | resp = read_file_or_url(include_url, |
2904 | 228 | ssl_details=self.ssl_details) | 228 | ssl_details=self.ssl_details) |
2905 | 229 | if include_once_on and resp.ok(): | 229 | if include_once_on and resp.ok(): |
2906 | 230 | util.write_file(include_once_fn, resp.contents, | 230 | util.write_file(include_once_fn, resp.contents, |
2907 | 231 | mode=0o600) | 231 | mode=0o600) |
2908 | @@ -337,8 +337,10 @@ def is_skippable(part): | |||
2909 | 337 | 337 | ||
2910 | 338 | # Coverts a raw string into a mime message | 338 | # Coverts a raw string into a mime message |
2911 | 339 | def convert_string(raw_data, content_type=NOT_MULTIPART_TYPE): | 339 | def convert_string(raw_data, content_type=NOT_MULTIPART_TYPE): |
2912 | 340 | """convert a string (more likely bytes) or a message into | ||
2913 | 341 | a mime message.""" | ||
2914 | 340 | if not raw_data: | 342 | if not raw_data: |
2916 | 341 | raw_data = '' | 343 | raw_data = b'' |
2917 | 342 | 344 | ||
2918 | 343 | def create_binmsg(data, content_type): | 345 | def create_binmsg(data, content_type): |
2919 | 344 | maintype, subtype = content_type.split("/", 1) | 346 | maintype, subtype = content_type.split("/", 1) |
2920 | @@ -346,15 +348,17 @@ def convert_string(raw_data, content_type=NOT_MULTIPART_TYPE): | |||
2921 | 346 | msg.set_payload(data) | 348 | msg.set_payload(data) |
2922 | 347 | return msg | 349 | return msg |
2923 | 348 | 350 | ||
2932 | 349 | try: | 351 | if isinstance(raw_data, six.text_type): |
2933 | 350 | data = util.decode_binary(util.decomp_gzip(raw_data)) | 352 | bdata = raw_data.encode('utf-8') |
2934 | 351 | if "mime-version:" in data[0:4096].lower(): | 353 | else: |
2935 | 352 | msg = util.message_from_string(data) | 354 | bdata = raw_data |
2936 | 353 | else: | 355 | bdata = util.decomp_gzip(bdata, decode=False) |
2937 | 354 | msg = create_binmsg(data, content_type) | 356 | if b"mime-version:" in bdata[0:4096].lower(): |
2938 | 355 | except UnicodeDecodeError: | 357 | msg = util.message_from_string(bdata.decode('utf-8')) |
2939 | 356 | msg = create_binmsg(raw_data, content_type) | 358 | else: |
2940 | 359 | msg = create_binmsg(bdata, content_type) | ||
2941 | 357 | 360 | ||
2942 | 358 | return msg | 361 | return msg |
2943 | 359 | 362 | ||
2944 | 363 | |||
2945 | 360 | # vi: ts=4 expandtab | 364 | # vi: ts=4 expandtab |
2946 | diff --git a/cloudinit/util.py b/cloudinit/util.py | |||
2947 | index 2828ca3..6da9511 100644 | |||
2948 | --- a/cloudinit/util.py | |||
2949 | +++ b/cloudinit/util.py | |||
2950 | @@ -576,6 +576,39 @@ def get_cfg_option_int(yobj, key, default=0): | |||
2951 | 576 | return int(get_cfg_option_str(yobj, key, default=default)) | 576 | return int(get_cfg_option_str(yobj, key, default=default)) |
2952 | 577 | 577 | ||
2953 | 578 | 578 | ||
2954 | 579 | def get_linux_distro(): | ||
2955 | 580 | distro_name = '' | ||
2956 | 581 | distro_version = '' | ||
2957 | 582 | if os.path.exists('/etc/os-release'): | ||
2958 | 583 | os_release = load_file('/etc/os-release') | ||
2959 | 584 | for line in os_release.splitlines(): | ||
2960 | 585 | if line.strip().startswith('ID='): | ||
2961 | 586 | distro_name = line.split('=')[-1] | ||
2962 | 587 | distro_name = distro_name.replace('"', '') | ||
2963 | 588 | if line.strip().startswith('VERSION_ID='): | ||
2964 | 589 | # Lets hope for the best that distros stay consistent ;) | ||
2965 | 590 | distro_version = line.split('=')[-1] | ||
2966 | 591 | distro_version = distro_version.replace('"', '') | ||
2967 | 592 | else: | ||
2968 | 593 | dist = ('', '', '') | ||
2969 | 594 | try: | ||
2970 | 595 | # Will be removed in 3.7 | ||
2971 | 596 | dist = platform.dist() # pylint: disable=W1505 | ||
2972 | 597 | except Exception: | ||
2973 | 598 | pass | ||
2974 | 599 | finally: | ||
2975 | 600 | found = None | ||
2976 | 601 | for entry in dist: | ||
2977 | 602 | if entry: | ||
2978 | 603 | found = 1 | ||
2979 | 604 | if not found: | ||
2980 | 605 | LOG.warning('Unable to determine distribution, template ' | ||
2981 | 606 | 'expansion may have unexpected results') | ||
2982 | 607 | return dist | ||
2983 | 608 | |||
2984 | 609 | return (distro_name, distro_version, platform.machine()) | ||
2985 | 610 | |||
2986 | 611 | |||
2987 | 579 | def system_info(): | 612 | def system_info(): |
2988 | 580 | info = { | 613 | info = { |
2989 | 581 | 'platform': platform.platform(), | 614 | 'platform': platform.platform(), |
2990 | @@ -583,19 +616,19 @@ def system_info(): | |||
2991 | 583 | 'release': platform.release(), | 616 | 'release': platform.release(), |
2992 | 584 | 'python': platform.python_version(), | 617 | 'python': platform.python_version(), |
2993 | 585 | 'uname': platform.uname(), | 618 | 'uname': platform.uname(), |
2995 | 586 | 'dist': platform.dist(), # pylint: disable=W1505 | 619 | 'dist': get_linux_distro() |
2996 | 587 | } | 620 | } |
2997 | 588 | system = info['system'].lower() | 621 | system = info['system'].lower() |
2998 | 589 | var = 'unknown' | 622 | var = 'unknown' |
2999 | 590 | if system == "linux": | 623 | if system == "linux": |
3000 | 591 | linux_dist = info['dist'][0].lower() | 624 | linux_dist = info['dist'][0].lower() |
3002 | 592 | if linux_dist in ('centos', 'fedora', 'debian'): | 625 | if linux_dist in ('centos', 'debian', 'fedora', 'rhel', 'suse'): |
3003 | 593 | var = linux_dist | 626 | var = linux_dist |
3004 | 594 | elif linux_dist in ('ubuntu', 'linuxmint', 'mint'): | 627 | elif linux_dist in ('ubuntu', 'linuxmint', 'mint'): |
3005 | 595 | var = 'ubuntu' | 628 | var = 'ubuntu' |
3006 | 596 | elif linux_dist == 'redhat': | 629 | elif linux_dist == 'redhat': |
3007 | 597 | var = 'rhel' | 630 | var = 'rhel' |
3009 | 598 | elif linux_dist == 'suse': | 631 | elif linux_dist in ('opensuse', 'sles'): |
3010 | 599 | var = 'suse' | 632 | var = 'suse' |
3011 | 600 | else: | 633 | else: |
3012 | 601 | var = 'linux' | 634 | var = 'linux' |
3013 | @@ -857,37 +890,6 @@ def fetch_ssl_details(paths=None): | |||
3014 | 857 | return ssl_details | 890 | return ssl_details |
3015 | 858 | 891 | ||
3016 | 859 | 892 | ||
3017 | 860 | def read_file_or_url(url, timeout=5, retries=10, | ||
3018 | 861 | headers=None, data=None, sec_between=1, ssl_details=None, | ||
3019 | 862 | headers_cb=None, exception_cb=None): | ||
3020 | 863 | url = url.lstrip() | ||
3021 | 864 | if url.startswith("/"): | ||
3022 | 865 | url = "file://%s" % url | ||
3023 | 866 | if url.lower().startswith("file://"): | ||
3024 | 867 | if data: | ||
3025 | 868 | LOG.warning("Unable to post data to file resource %s", url) | ||
3026 | 869 | file_path = url[len("file://"):] | ||
3027 | 870 | try: | ||
3028 | 871 | contents = load_file(file_path, decode=False) | ||
3029 | 872 | except IOError as e: | ||
3030 | 873 | code = e.errno | ||
3031 | 874 | if e.errno == ENOENT: | ||
3032 | 875 | code = url_helper.NOT_FOUND | ||
3033 | 876 | raise url_helper.UrlError(cause=e, code=code, headers=None, | ||
3034 | 877 | url=url) | ||
3035 | 878 | return url_helper.FileResponse(file_path, contents=contents) | ||
3036 | 879 | else: | ||
3037 | 880 | return url_helper.readurl(url, | ||
3038 | 881 | timeout=timeout, | ||
3039 | 882 | retries=retries, | ||
3040 | 883 | headers=headers, | ||
3041 | 884 | headers_cb=headers_cb, | ||
3042 | 885 | data=data, | ||
3043 | 886 | sec_between=sec_between, | ||
3044 | 887 | ssl_details=ssl_details, | ||
3045 | 888 | exception_cb=exception_cb) | ||
3046 | 889 | |||
3047 | 890 | |||
3048 | 891 | def load_yaml(blob, default=None, allowed=(dict,)): | 893 | def load_yaml(blob, default=None, allowed=(dict,)): |
3049 | 892 | loaded = default | 894 | loaded = default |
3050 | 893 | blob = decode_binary(blob) | 895 | blob = decode_binary(blob) |
3051 | @@ -905,8 +907,20 @@ def load_yaml(blob, default=None, allowed=(dict,)): | |||
3052 | 905 | " but got %s instead") % | 907 | " but got %s instead") % |
3053 | 906 | (allowed, type_utils.obj_name(converted))) | 908 | (allowed, type_utils.obj_name(converted))) |
3054 | 907 | loaded = converted | 909 | loaded = converted |
3057 | 908 | except (yaml.YAMLError, TypeError, ValueError): | 910 | except (yaml.YAMLError, TypeError, ValueError) as e: |
3058 | 909 | logexc(LOG, "Failed loading yaml blob") | 911 | msg = 'Failed loading yaml blob' |
3059 | 912 | mark = None | ||
3060 | 913 | if hasattr(e, 'context_mark') and getattr(e, 'context_mark'): | ||
3061 | 914 | mark = getattr(e, 'context_mark') | ||
3062 | 915 | elif hasattr(e, 'problem_mark') and getattr(e, 'problem_mark'): | ||
3063 | 916 | mark = getattr(e, 'problem_mark') | ||
3064 | 917 | if mark: | ||
3065 | 918 | msg += ( | ||
3066 | 919 | '. Invalid format at line {line} column {col}: "{err}"'.format( | ||
3067 | 920 | line=mark.line + 1, col=mark.column + 1, err=e)) | ||
3068 | 921 | else: | ||
3069 | 922 | msg += '. {err}'.format(err=e) | ||
3070 | 923 | LOG.warning(msg) | ||
3071 | 910 | return loaded | 924 | return loaded |
3072 | 911 | 925 | ||
3073 | 912 | 926 | ||
3074 | @@ -925,12 +939,14 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0): | |||
3075 | 925 | ud_url = "%s%s%s" % (base, "user-data", ext) | 939 | ud_url = "%s%s%s" % (base, "user-data", ext) |
3076 | 926 | md_url = "%s%s%s" % (base, "meta-data", ext) | 940 | md_url = "%s%s%s" % (base, "meta-data", ext) |
3077 | 927 | 941 | ||
3079 | 928 | md_resp = read_file_or_url(md_url, timeout, retries, file_retries) | 942 | md_resp = url_helper.read_file_or_url(md_url, timeout, retries, |
3080 | 943 | file_retries) | ||
3081 | 929 | md = None | 944 | md = None |
3082 | 930 | if md_resp.ok(): | 945 | if md_resp.ok(): |
3083 | 931 | md = load_yaml(decode_binary(md_resp.contents), default={}) | 946 | md = load_yaml(decode_binary(md_resp.contents), default={}) |
3084 | 932 | 947 | ||
3086 | 933 | ud_resp = read_file_or_url(ud_url, timeout, retries, file_retries) | 948 | ud_resp = url_helper.read_file_or_url(ud_url, timeout, retries, |
3087 | 949 | file_retries) | ||
3088 | 934 | ud = None | 950 | ud = None |
3089 | 935 | if ud_resp.ok(): | 951 | if ud_resp.ok(): |
3090 | 936 | ud = ud_resp.contents | 952 | ud = ud_resp.contents |
3091 | @@ -1154,7 +1170,9 @@ def gethostbyaddr(ip): | |||
3092 | 1154 | 1170 | ||
3093 | 1155 | def is_resolvable_url(url): | 1171 | def is_resolvable_url(url): |
3094 | 1156 | """determine if this url is resolvable (existing or ip).""" | 1172 | """determine if this url is resolvable (existing or ip).""" |
3096 | 1157 | return is_resolvable(urlparse.urlparse(url).hostname) | 1173 | return log_time(logfunc=LOG.debug, msg="Resolving URL: " + url, |
3097 | 1174 | func=is_resolvable, | ||
3098 | 1175 | args=(urlparse.urlparse(url).hostname,)) | ||
3099 | 1158 | 1176 | ||
3100 | 1159 | 1177 | ||
3101 | 1160 | def search_for_mirror(candidates): | 1178 | def search_for_mirror(candidates): |
3102 | @@ -1608,7 +1626,8 @@ def mounts(): | |||
3103 | 1608 | return mounted | 1626 | return mounted |
3104 | 1609 | 1627 | ||
3105 | 1610 | 1628 | ||
3107 | 1611 | def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True): | 1629 | def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True, |
3108 | 1630 | update_env_for_mount=None): | ||
3109 | 1612 | """ | 1631 | """ |
3110 | 1613 | Mount the device, call method 'callback' passing the directory | 1632 | Mount the device, call method 'callback' passing the directory |
3111 | 1614 | in which it was mounted, then unmount. Return whatever 'callback' | 1633 | in which it was mounted, then unmount. Return whatever 'callback' |
3112 | @@ -1670,7 +1689,7 @@ def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True): | |||
3113 | 1670 | mountcmd.extend(['-t', mtype]) | 1689 | mountcmd.extend(['-t', mtype]) |
3114 | 1671 | mountcmd.append(device) | 1690 | mountcmd.append(device) |
3115 | 1672 | mountcmd.append(tmpd) | 1691 | mountcmd.append(tmpd) |
3117 | 1673 | subp(mountcmd) | 1692 | subp(mountcmd, update_env=update_env_for_mount) |
3118 | 1674 | umount = tmpd # This forces it to be unmounted (when set) | 1693 | umount = tmpd # This forces it to be unmounted (when set) |
3119 | 1675 | mountpoint = tmpd | 1694 | mountpoint = tmpd |
3120 | 1676 | break | 1695 | break |
3121 | @@ -1857,9 +1876,55 @@ def subp_blob_in_tempfile(blob, *args, **kwargs): | |||
3122 | 1857 | return subp(*args, **kwargs) | 1876 | return subp(*args, **kwargs) |
3123 | 1858 | 1877 | ||
3124 | 1859 | 1878 | ||
3126 | 1860 | def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, | 1879 | def subp(args, data=None, rcs=None, env=None, capture=True, |
3127 | 1880 | combine_capture=False, shell=False, | ||
3128 | 1861 | logstring=False, decode="replace", target=None, update_env=None, | 1881 | logstring=False, decode="replace", target=None, update_env=None, |
3129 | 1862 | status_cb=None): | 1882 | status_cb=None): |
3130 | 1883 | """Run a subprocess. | ||
3131 | 1884 | |||
3132 | 1885 | :param args: command to run in a list. [cmd, arg1, arg2...] | ||
3133 | 1886 | :param data: input to the command, made available on its stdin. | ||
3134 | 1887 | :param rcs: | ||
3135 | 1888 | a list of allowed return codes. If subprocess exits with a value not | ||
3136 | 1889 | in this list, a ProcessExecutionError will be raised. By default, | ||
3137 | 1890 | data is returned as a string. See 'decode' parameter. | ||
3138 | 1891 | :param env: a dictionary for the command's environment. | ||
3139 | 1892 | :param capture: | ||
3140 | 1893 | boolean indicating if output should be captured. If True, then stderr | ||
3141 | 1894 | and stdout will be returned. If False, they will not be redirected. | ||
3142 | 1895 | :param combine_capture: | ||
3143 | 1896 | boolean indicating if stderr should be redirected to stdout. When True, | ||
3144 | 1897 | interleaved stderr and stdout will be returned as the first element of | ||
3145 | 1898 | a tuple, the second will be empty string or bytes (per decode). | ||
3146 | 1899 | if combine_capture is True, then output is captured independent of | ||
3147 | 1900 | the value of capture. | ||
3148 | 1901 | :param shell: boolean indicating if this should be run with a shell. | ||
3149 | 1902 | :param logstring: | ||
3150 | 1903 | the command will be logged to DEBUG. If it contains info that should | ||
3151 | 1904 | not be logged, then logstring will be logged instead. | ||
3152 | 1905 | :param decode: | ||
3153 | 1906 | if False, no decoding will be done and returned stdout and stderr will | ||
3154 | 1907 | be bytes. Other allowed values are 'strict', 'ignore', and 'replace'. | ||
3155 | 1908 | These values are passed through to bytes().decode() as the 'errors' | ||
3156 | 1909 | parameter. There is no support for decoding to other than utf-8. | ||
3157 | 1910 | :param target: | ||
3158 | 1911 | not supported, kwarg present only to make function signature similar | ||
3159 | 1912 | to curtin's subp. | ||
3160 | 1913 | :param update_env: | ||
3161 | 1914 | update the enviornment for this command with this dictionary. | ||
3162 | 1915 | this will not affect the current processes os.environ. | ||
3163 | 1916 | :param status_cb: | ||
3164 | 1917 | call this fuction with a single string argument before starting | ||
3165 | 1918 | and after finishing. | ||
3166 | 1919 | |||
3167 | 1920 | :return | ||
3168 | 1921 | if not capturing, return is (None, None) | ||
3169 | 1922 | if capturing, stdout and stderr are returned. | ||
3170 | 1923 | if decode: | ||
3171 | 1924 | entries in tuple will be python2 unicode or python3 string | ||
3172 | 1925 | if not decode: | ||
3173 | 1926 | entries in tuple will be python2 string or python3 bytes | ||
3174 | 1927 | """ | ||
3175 | 1863 | 1928 | ||
3176 | 1864 | # not supported in cloud-init (yet), for now kept in the call signature | 1929 | # not supported in cloud-init (yet), for now kept in the call signature |
3177 | 1865 | # to ease maintaining code shared between cloud-init and curtin | 1930 | # to ease maintaining code shared between cloud-init and curtin |
3178 | @@ -1885,7 +1950,8 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, | |||
3179 | 1885 | status_cb('Begin run command: {command}\n'.format(command=command)) | 1950 | status_cb('Begin run command: {command}\n'.format(command=command)) |
3180 | 1886 | if not logstring: | 1951 | if not logstring: |
3181 | 1887 | LOG.debug(("Running command %s with allowed return codes %s" | 1952 | LOG.debug(("Running command %s with allowed return codes %s" |
3183 | 1888 | " (shell=%s, capture=%s)"), args, rcs, shell, capture) | 1953 | " (shell=%s, capture=%s)"), |
3184 | 1954 | args, rcs, shell, 'combine' if combine_capture else capture) | ||
3185 | 1889 | else: | 1955 | else: |
3186 | 1890 | LOG.debug(("Running hidden command to protect sensitive " | 1956 | LOG.debug(("Running hidden command to protect sensitive " |
3187 | 1891 | "input/output logstring: %s"), logstring) | 1957 | "input/output logstring: %s"), logstring) |
3188 | @@ -1896,6 +1962,9 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, | |||
3189 | 1896 | if capture: | 1962 | if capture: |
3190 | 1897 | stdout = subprocess.PIPE | 1963 | stdout = subprocess.PIPE |
3191 | 1898 | stderr = subprocess.PIPE | 1964 | stderr = subprocess.PIPE |
3192 | 1965 | if combine_capture: | ||
3193 | 1966 | stdout = subprocess.PIPE | ||
3194 | 1967 | stderr = subprocess.STDOUT | ||
3195 | 1899 | if data is None: | 1968 | if data is None: |
3196 | 1900 | # using devnull assures any reads get null, rather | 1969 | # using devnull assures any reads get null, rather |
3197 | 1901 | # than possibly waiting on input. | 1970 | # than possibly waiting on input. |
3198 | @@ -1934,10 +2003,11 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, | |||
3199 | 1934 | devnull_fp.close() | 2003 | devnull_fp.close() |
3200 | 1935 | 2004 | ||
3201 | 1936 | # Just ensure blank instead of none. | 2005 | # Just ensure blank instead of none. |
3206 | 1937 | if not out and capture: | 2006 | if capture or combine_capture: |
3207 | 1938 | out = b'' | 2007 | if not out: |
3208 | 1939 | if not err and capture: | 2008 | out = b'' |
3209 | 1940 | err = b'' | 2009 | if not err: |
3210 | 2010 | err = b'' | ||
3211 | 1941 | if decode: | 2011 | if decode: |
3212 | 1942 | def ldecode(data, m='utf-8'): | 2012 | def ldecode(data, m='utf-8'): |
3213 | 1943 | if not isinstance(data, bytes): | 2013 | if not isinstance(data, bytes): |
3214 | @@ -2061,24 +2131,33 @@ def is_container(): | |||
3215 | 2061 | return False | 2131 | return False |
3216 | 2062 | 2132 | ||
3217 | 2063 | 2133 | ||
3219 | 2064 | def get_proc_env(pid): | 2134 | def get_proc_env(pid, encoding='utf-8', errors='replace'): |
3220 | 2065 | """ | 2135 | """ |
3221 | 2066 | Return the environment in a dict that a given process id was started with. | 2136 | Return the environment in a dict that a given process id was started with. |
3222 | 2067 | """ | ||
3223 | 2068 | 2137 | ||
3226 | 2069 | env = {} | 2138 | @param encoding: if true, then decoding will be done with |
3227 | 2070 | fn = os.path.join("/proc/", str(pid), "environ") | 2139 | .decode(encoding, errors) and text will be returned. |
3228 | 2140 | if false then binary will be returned. | ||
3229 | 2141 | @param errors: only used if encoding is true.""" | ||
3230 | 2142 | fn = os.path.join("/proc", str(pid), "environ") | ||
3231 | 2143 | |||
3232 | 2071 | try: | 2144 | try: |
3241 | 2072 | contents = load_file(fn) | 2145 | contents = load_file(fn, decode=False) |
3234 | 2073 | toks = contents.split("\x00") | ||
3235 | 2074 | for tok in toks: | ||
3236 | 2075 | if tok == "": | ||
3237 | 2076 | continue | ||
3238 | 2077 | (name, val) = tok.split("=", 1) | ||
3239 | 2078 | if name: | ||
3240 | 2079 | env[name] = val | ||
3242 | 2080 | except (IOError, OSError): | 2146 | except (IOError, OSError): |
3244 | 2081 | pass | 2147 | return {} |
3245 | 2148 | |||
3246 | 2149 | env = {} | ||
3247 | 2150 | null, equal = (b"\x00", b"=") | ||
3248 | 2151 | if encoding: | ||
3249 | 2152 | null, equal = ("\x00", "=") | ||
3250 | 2153 | contents = contents.decode(encoding, errors) | ||
3251 | 2154 | |||
3252 | 2155 | for tok in contents.split(null): | ||
3253 | 2156 | if not tok: | ||
3254 | 2157 | continue | ||
3255 | 2158 | (name, val) = tok.split(equal, 1) | ||
3256 | 2159 | if name: | ||
3257 | 2160 | env[name] = val | ||
3258 | 2082 | return env | 2161 | return env |
3259 | 2083 | 2162 | ||
3260 | 2084 | 2163 | ||
3261 | @@ -2545,11 +2624,21 @@ def _call_dmidecode(key, dmidecode_path): | |||
3262 | 2545 | if result.replace(".", "") == "": | 2624 | if result.replace(".", "") == "": |
3263 | 2546 | return "" | 2625 | return "" |
3264 | 2547 | return result | 2626 | return result |
3267 | 2548 | except (IOError, OSError) as _err: | 2627 | except (IOError, OSError) as e: |
3268 | 2549 | LOG.debug('failed dmidecode cmd: %s\n%s', cmd, _err) | 2628 | LOG.debug('failed dmidecode cmd: %s\n%s', cmd, e) |
3269 | 2550 | return None | 2629 | return None |
3270 | 2551 | 2630 | ||
3271 | 2552 | 2631 | ||
3272 | 2632 | def is_x86(uname_arch=None): | ||
3273 | 2633 | """Return True if platform is x86-based""" | ||
3274 | 2634 | if uname_arch is None: | ||
3275 | 2635 | uname_arch = os.uname()[4] | ||
3276 | 2636 | x86_arch_match = ( | ||
3277 | 2637 | uname_arch == 'x86_64' or | ||
3278 | 2638 | (uname_arch[0] == 'i' and uname_arch[2:] == '86')) | ||
3279 | 2639 | return x86_arch_match | ||
3280 | 2640 | |||
3281 | 2641 | |||
3282 | 2553 | def read_dmi_data(key): | 2642 | def read_dmi_data(key): |
3283 | 2554 | """ | 2643 | """ |
3284 | 2555 | Wrapper for reading DMI data. | 2644 | Wrapper for reading DMI data. |
3285 | @@ -2577,8 +2666,7 @@ def read_dmi_data(key): | |||
3286 | 2577 | 2666 | ||
3287 | 2578 | # running dmidecode can be problematic on some arches (LP: #1243287) | 2667 | # running dmidecode can be problematic on some arches (LP: #1243287) |
3288 | 2579 | uname_arch = os.uname()[4] | 2668 | uname_arch = os.uname()[4] |
3291 | 2580 | if not (uname_arch == "x86_64" or | 2669 | if not (is_x86(uname_arch) or |
3290 | 2581 | (uname_arch.startswith("i") and uname_arch[2:] == "86") or | ||
3292 | 2582 | uname_arch == 'aarch64' or | 2670 | uname_arch == 'aarch64' or |
3293 | 2583 | uname_arch == 'amd64'): | 2671 | uname_arch == 'amd64'): |
3294 | 2584 | LOG.debug("dmidata is not supported on %s", uname_arch) | 2672 | LOG.debug("dmidata is not supported on %s", uname_arch) |
3295 | diff --git a/cloudinit/version.py b/cloudinit/version.py | |||
3296 | index ccd0f84..3b60fc4 100644 | |||
3297 | --- a/cloudinit/version.py | |||
3298 | +++ b/cloudinit/version.py | |||
3299 | @@ -4,7 +4,8 @@ | |||
3300 | 4 | # | 4 | # |
3301 | 5 | # This file is part of cloud-init. See LICENSE file for license information. | 5 | # This file is part of cloud-init. See LICENSE file for license information. |
3302 | 6 | 6 | ||
3304 | 7 | __VERSION__ = "18.2" | 7 | __VERSION__ = "18.3" |
3305 | 8 | _PACKAGED_VERSION = '@@PACKAGED_VERSION@@' | ||
3306 | 8 | 9 | ||
3307 | 9 | FEATURES = [ | 10 | FEATURES = [ |
3308 | 10 | # supports network config version 1 | 11 | # supports network config version 1 |
3309 | @@ -15,6 +16,9 @@ FEATURES = [ | |||
3310 | 15 | 16 | ||
3311 | 16 | 17 | ||
3312 | 17 | def version_string(): | 18 | def version_string(): |
3313 | 19 | """Extract a version string from cloud-init.""" | ||
3314 | 20 | if not _PACKAGED_VERSION.startswith('@@'): | ||
3315 | 21 | return _PACKAGED_VERSION | ||
3316 | 18 | return __VERSION__ | 22 | return __VERSION__ |
3317 | 19 | 23 | ||
3318 | 20 | # vi: ts=4 expandtab | 24 | # vi: ts=4 expandtab |
3319 | diff --git a/debian/changelog b/debian/changelog | |||
3320 | index 7ac0d4f..9ea98b6 100644 | |||
3321 | --- a/debian/changelog | |||
3322 | +++ b/debian/changelog | |||
3323 | @@ -1,12 +1,76 @@ | |||
3325 | 1 | cloud-init (18.2-27-g6ef92c98-0ubuntu1~18.04.2) UNRELEASED; urgency=medium | 1 | cloud-init (18.3-0ubuntu1~18.04.1) bionic-proposed; urgency=medium |
3326 | 2 | 2 | ||
3327 | 3 | * debian/rules: update version.version_string to contain packaged version. | 3 | * debian/rules: update version.version_string to contain packaged version. |
3328 | 4 | (LP: #1770712) | 4 | (LP: #1770712) |
3329 | 5 | * debian/patches/openstack-no-network-config.patch | 5 | * debian/patches/openstack-no-network-config.patch |
3330 | 6 | add patch to ignore Openstack network_config from network_data.json by | 6 | add patch to ignore Openstack network_config from network_data.json by |
3331 | 7 | default | 7 | default |
3334 | 8 | 8 | * Refresh patches against upstream: | |
3335 | 9 | -- Chad Smith <chad.smith@canonical.com> Thu, 21 Jun 2018 14:27:10 -0600 | 9 | + openstack-no-network-config.patch |
3336 | 10 | * New upstream release. (LP: #1777912) | ||
3337 | 11 | - release 18.3 | ||
3338 | 12 | - docs: represent sudo:false in docs for user_groups config module | ||
3339 | 13 | - Explicitly prevent `sudo` access for user module [Jacob Bednarz] | ||
3340 | 14 | - lxd: Delete default network and detach device if lxd-init created them. | ||
3341 | 15 | - openstack: avoid unneeded metadata probe on non-openstack platforms | ||
3342 | 16 | - stages: fix tracebacks if a module stage is undefined or empty | ||
3343 | 17 | [Robert Schweikert] | ||
3344 | 18 | - Be more safe on string/bytes when writing multipart user-data to disk. | ||
3345 | 19 | - Fix get_proc_env for pids that have non-utf8 content in environment. | ||
3346 | 20 | - tests: fix salt_minion integration test on bionic and later | ||
3347 | 21 | - tests: provide human-readable integration test summary when --verbose | ||
3348 | 22 | - tests: skip chrony integration tests on lxd running artful or older | ||
3349 | 23 | - test: add optional --preserve-instance arg to integraiton tests | ||
3350 | 24 | - netplan: fix mtu if provided by network config for all rendered types | ||
3351 | 25 | - tests: remove pip install workarounds for pylxd, take upstream fix. | ||
3352 | 26 | - subp: support combine_capture argument. | ||
3353 | 27 | - tests: ordered tox dependencies for pylxd install | ||
3354 | 28 | - util: add get_linux_distro function to replace platform.dist | ||
3355 | 29 | [Robert Schweikert] | ||
3356 | 30 | - pyflakes: fix unused variable references identified by pyflakes 2.0.0. | ||
3357 | 31 | - - Do not use the systemd_prefix macro, not available in this environment | ||
3358 | 32 | [Robert Schweikert] | ||
3359 | 33 | - doc: Add config info to ec2, openstack and cloudstack datasource docs | ||
3360 | 34 | - Enable SmartOS network metadata to work with netplan via per-subnet | ||
3361 | 35 | routes [Dan McDonald] | ||
3362 | 36 | - openstack: Allow discovery in init-local using dhclient in a sandbox. | ||
3363 | 37 | - tests: Avoid using https in httpretty, improve HttPretty test case. | ||
3364 | 38 | - yaml_load/schema: Add invalid line and column nums to error message | ||
3365 | 39 | - Azure: Ignore NTFS mount errors when checking ephemeral drive | ||
3366 | 40 | [Paul Meyer] | ||
3367 | 41 | - packages/brpm: Get proper dependencies for cmdline distro. | ||
3368 | 42 | - packages: Make rpm spec files patch in package version like in debs. | ||
3369 | 43 | - tools/run-container: replace tools/run-centos with more generic. | ||
3370 | 44 | - Update version.version_string to contain packaged version. | ||
3371 | 45 | - cc_mounts: Do not add devices to fstab that are already present. | ||
3372 | 46 | [Lars Kellogg-Stedman] | ||
3373 | 47 | - ds-identify: ensure that we have certain tokens in PATH. | ||
3374 | 48 | - tests: enable Ubuntu Cosmic in integration tests [Joshua Powers] | ||
3375 | 49 | - read_file_or_url: move to url_helper, fix bug in its FileResponse. | ||
3376 | 50 | - cloud_tests: help pylint | ||
3377 | 51 | - flake8: fix flake8 errors in previous commit. | ||
3378 | 52 | - typos: Fix spelling mistakes in cc_mounts.py log messages [Stephen Ford] | ||
3379 | 53 | - tests: restructure SSH and initial connections [Joshua Powers] | ||
3380 | 54 | - ds-identify: recognize container-other as a container, test SmartOS. | ||
3381 | 55 | - cloud-config.service: run After snap.seeded.service. | ||
3382 | 56 | - tests: do not rely on host /proc/cmdline in test_net.py | ||
3383 | 57 | [Lars Kellogg-Stedman] | ||
3384 | 58 | - ds-identify: Remove dupe call to is_ds_enabled, improve debug message. | ||
3385 | 59 | - SmartOS: fix get_interfaces for nics that do not have addr_assign_type. | ||
3386 | 60 | - tests: fix package and ca_cert cloud_tests on bionic | ||
3387 | 61 | - ds-identify: make shellcheck 0.4.6 happy with ds-identify. | ||
3388 | 62 | - pycodestyle: Fix deprecated string literals, move away from flake8. | ||
3389 | 63 | - azure: Add reported ready marker file. [Joshua Chan] | ||
3390 | 64 | - tools: Support adding a release suffix through packages/bddeb. | ||
3391 | 65 | - FreeBSD: Invoke growfs on ufs filesystems such that it does not prompt. | ||
3392 | 66 | [Harm Weites] | ||
3393 | 67 | - tools: Re-use the orig tarball in packages/bddeb if it is around. | ||
3394 | 68 | - netinfo: fix netdev_pformat when a nic does not have an address assigned. | ||
3395 | 69 | - collect-logs: add -v flag, write to stderr, limit journal to single boot. | ||
3396 | 70 | - IBMCloud: Disable config-drive and nocloud only if IBMCloud is enabled. | ||
3397 | 71 | - Add reporting events and log_time around early source of blocking time | ||
3398 | 72 | |||
3399 | 73 | -- Chad Smith <chad.smith@canonical.com> Thu, 21 Jun 2018 14:37:06 -0600 | ||
3400 | 10 | 74 | ||
3401 | 11 | cloud-init (18.2-27-g6ef92c98-0ubuntu1~18.04.1) bionic; urgency=medium | 75 | cloud-init (18.2-27-g6ef92c98-0ubuntu1~18.04.1) bionic; urgency=medium |
3402 | 12 | 76 | ||
3403 | diff --git a/debian/patches/openstack-no-network-config.patch b/debian/patches/openstack-no-network-config.patch | |||
3404 | index 6749354..d6560f4 100644 | |||
3405 | --- a/debian/patches/openstack-no-network-config.patch | |||
3406 | +++ b/debian/patches/openstack-no-network-config.patch | |||
3407 | @@ -15,7 +15,7 @@ Author: Chad Smith <chad.smith@canonical.com> | |||
3408 | 15 | 15 | ||
3409 | 16 | --- a/cloudinit/sources/DataSourceOpenStack.py | 16 | --- a/cloudinit/sources/DataSourceOpenStack.py |
3410 | 17 | +++ b/cloudinit/sources/DataSourceOpenStack.py | 17 | +++ b/cloudinit/sources/DataSourceOpenStack.py |
3412 | 18 | @@ -97,10 +97,9 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): | 18 | @@ -97,10 +97,9 @@ class DataSourceOpenStack(openstack.Sour |
3413 | 19 | if self._network_config != sources.UNSET: | 19 | if self._network_config != sources.UNSET: |
3414 | 20 | return self._network_config | 20 | return self._network_config |
3415 | 21 | 21 | ||
3416 | @@ -28,10 +28,9 @@ Author: Chad Smith <chad.smith@canonical.com> | |||
3417 | 28 | self._network_config = None | 28 | self._network_config = None |
3418 | 29 | return self._network_config | 29 | return self._network_config |
3419 | 30 | if self.network_json == sources.UNSET: | 30 | if self.network_json == sources.UNSET: |
3420 | 31 | |||
3421 | 32 | --- a/tests/unittests/test_datasource/test_openstack.py | 31 | --- a/tests/unittests/test_datasource/test_openstack.py |
3422 | 33 | +++ b/tests/unittests/test_datasource/test_openstack.py | 32 | +++ b/tests/unittests/test_datasource/test_openstack.py |
3424 | 34 | @@ -345,6 +345,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): | 33 | @@ -345,6 +345,7 @@ class TestOpenStackDataSource(test_helpe |
3425 | 35 | settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) | 34 | settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) |
3426 | 36 | sample_json = {'links': [{'ethernet_mac_address': 'mymac'}], | 35 | sample_json = {'links': [{'ethernet_mac_address': 'mymac'}], |
3427 | 37 | 'networks': [], 'services': []} | 36 | 'networks': [], 'services': []} |
3428 | @@ -39,4 +38,3 @@ Author: Chad Smith <chad.smith@canonical.com> | |||
3429 | 39 | ds_os.network_json = sample_json | 38 | ds_os.network_json = sample_json |
3430 | 40 | with test_helpers.mock.patch(mock_path) as m_convert_json: | 39 | with test_helpers.mock.patch(mock_path) as m_convert_json: |
3431 | 41 | m_convert_json.return_value = example_cfg | 40 | m_convert_json.return_value = example_cfg |
3432 | 42 | |||
3433 | diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt | |||
3434 | index 7bca24a..01ecad7 100644 | |||
3435 | --- a/doc/examples/cloud-config-user-groups.txt | |||
3436 | +++ b/doc/examples/cloud-config-user-groups.txt | |||
3437 | @@ -30,6 +30,11 @@ users: | |||
3438 | 30 | gecos: Magic Cloud App Daemon User | 30 | gecos: Magic Cloud App Daemon User |
3439 | 31 | inactive: true | 31 | inactive: true |
3440 | 32 | system: true | 32 | system: true |
3441 | 33 | - name: fizzbuzz | ||
3442 | 34 | sudo: False | ||
3443 | 35 | ssh_authorized_keys: | ||
3444 | 36 | - <ssh pub key 1> | ||
3445 | 37 | - <ssh pub key 2> | ||
3446 | 33 | - snapuser: joe@joeuser.io | 38 | - snapuser: joe@joeuser.io |
3447 | 34 | 39 | ||
3448 | 35 | # Valid Values: | 40 | # Valid Values: |
3449 | @@ -71,13 +76,21 @@ users: | |||
3450 | 71 | # no_log_init: When set to true, do not initialize lastlog and faillog database. | 76 | # no_log_init: When set to true, do not initialize lastlog and faillog database. |
3451 | 72 | # ssh_import_id: Optional. Import SSH ids | 77 | # ssh_import_id: Optional. Import SSH ids |
3452 | 73 | # ssh_authorized_keys: Optional. [list] Add keys to user's authorized keys file | 78 | # ssh_authorized_keys: Optional. [list] Add keys to user's authorized keys file |
3460 | 74 | # sudo: Defaults to none. Set to the sudo string you want to use, i.e. | 79 | # sudo: Defaults to none. Accepts a sudo rule string, a list of sudo rule |
3461 | 75 | # ALL=(ALL) NOPASSWD:ALL. To add multiple rules, use the following | 80 | # strings or False to explicitly deny sudo usage. Examples: |
3462 | 76 | # format. | 81 | # |
3463 | 77 | # sudo: | 82 | # Allow a user unrestricted sudo access. |
3464 | 78 | # - ALL=(ALL) NOPASSWD:/bin/mysql | 83 | # sudo: ALL=(ALL) NOPASSWD:ALL |
3465 | 79 | # - ALL=(ALL) ALL | 84 | # |
3466 | 80 | # Note: Please double check your syntax and make sure it is valid. | 85 | # Adding multiple sudo rule strings. |
3467 | 86 | # sudo: | ||
3468 | 87 | # - ALL=(ALL) NOPASSWD:/bin/mysql | ||
3469 | 88 | # - ALL=(ALL) ALL | ||
3470 | 89 | # | ||
3471 | 90 | # Prevent sudo access for a user. | ||
3472 | 91 | # sudo: False | ||
3473 | 92 | # | ||
3474 | 93 | # Note: Please double check your syntax and make sure it is valid. | ||
3475 | 81 | # cloud-init does not parse/check the syntax of the sudo | 94 | # cloud-init does not parse/check the syntax of the sudo |
3476 | 82 | # directive. | 95 | # directive. |
3477 | 83 | # system: Create the user as a system user. This means no home directory. | 96 | # system: Create the user as a system user. This means no home directory. |
3478 | diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst | |||
3479 | index 38ba75d..30e57d8 100644 | |||
3480 | --- a/doc/rtd/topics/datasources.rst | |||
3481 | +++ b/doc/rtd/topics/datasources.rst | |||
3482 | @@ -17,6 +17,103 @@ own way) internally a datasource abstract class was created to allow for a | |||
3483 | 17 | single way to access the different cloud systems methods to provide this data | 17 | single way to access the different cloud systems methods to provide this data |
3484 | 18 | through the typical usage of subclasses. | 18 | through the typical usage of subclasses. |
3485 | 19 | 19 | ||
3486 | 20 | |||
3487 | 21 | instance-data | ||
3488 | 22 | ------------- | ||
3489 | 23 | For reference, cloud-init stores all the metadata, vendordata and userdata | ||
3490 | 24 | provided by a cloud in a json blob at ``/run/cloud-init/instance-data.json``. | ||
3491 | 25 | While the json contains datasource-specific keys and names, cloud-init will | ||
3492 | 26 | maintain a minimal set of standardized keys that will remain stable on any | ||
3493 | 27 | cloud. Standardized instance-data keys will be present under a "v1" key. | ||
3494 | 28 | Any datasource metadata cloud-init consumes will all be present under the | ||
3495 | 29 | "ds" key. | ||
3496 | 30 | |||
3497 | 31 | Below is an instance-data.json example from an OpenStack instance: | ||
3498 | 32 | |||
3499 | 33 | .. sourcecode:: json | ||
3500 | 34 | |||
3501 | 35 | { | ||
3502 | 36 | "base64-encoded-keys": [ | ||
3503 | 37 | "ds/meta-data/random_seed", | ||
3504 | 38 | "ds/user-data" | ||
3505 | 39 | ], | ||
3506 | 40 | "ds": { | ||
3507 | 41 | "ec2_metadata": { | ||
3508 | 42 | "ami-id": "ami-0000032f", | ||
3509 | 43 | "ami-launch-index": "0", | ||
3510 | 44 | "ami-manifest-path": "FIXME", | ||
3511 | 45 | "block-device-mapping": { | ||
3512 | 46 | "ami": "vda", | ||
3513 | 47 | "ephemeral0": "/dev/vdb", | ||
3514 | 48 | "root": "/dev/vda" | ||
3515 | 49 | }, | ||
3516 | 50 | "hostname": "xenial-test.novalocal", | ||
3517 | 51 | "instance-action": "none", | ||
3518 | 52 | "instance-id": "i-0006e030", | ||
3519 | 53 | "instance-type": "m1.small", | ||
3520 | 54 | "local-hostname": "xenial-test.novalocal", | ||
3521 | 55 | "local-ipv4": "10.5.0.6", | ||
3522 | 56 | "placement": { | ||
3523 | 57 | "availability-zone": "None" | ||
3524 | 58 | }, | ||
3525 | 59 | "public-hostname": "xenial-test.novalocal", | ||
3526 | 60 | "public-ipv4": "10.245.162.145", | ||
3527 | 61 | "reservation-id": "r-fxm623oa", | ||
3528 | 62 | "security-groups": "default" | ||
3529 | 63 | }, | ||
3530 | 64 | "meta-data": { | ||
3531 | 65 | "availability_zone": null, | ||
3532 | 66 | "devices": [], | ||
3533 | 67 | "hostname": "xenial-test.novalocal", | ||
3534 | 68 | "instance-id": "3e39d278-0644-4728-9479-678f9212d8f0", | ||
3535 | 69 | "launch_index": 0, | ||
3536 | 70 | "local-hostname": "xenial-test.novalocal", | ||
3537 | 71 | "name": "xenial-test", | ||
3538 | 72 | "project_id": "e0eb2d2538814...", | ||
3539 | 73 | "random_seed": "A6yPN...", | ||
3540 | 74 | "uuid": "3e39d278-0644-4728-9479-678f92..." | ||
3541 | 75 | }, | ||
3542 | 76 | "network_json": { | ||
3543 | 77 | "links": [ | ||
3544 | 78 | { | ||
3545 | 79 | "ethernet_mac_address": "fa:16:3e:7d:74:9b", | ||
3546 | 80 | "id": "tap9ca524d5-6e", | ||
3547 | 81 | "mtu": 8958, | ||
3548 | 82 | "type": "ovs", | ||
3549 | 83 | "vif_id": "9ca524d5-6e5a-4809-936a-6901..." | ||
3550 | 84 | } | ||
3551 | 85 | ], | ||
3552 | 86 | "networks": [ | ||
3553 | 87 | { | ||
3554 | 88 | "id": "network0", | ||
3555 | 89 | "link": "tap9ca524d5-6e", | ||
3556 | 90 | "network_id": "c6adfc18-9753-42eb-b3ea-18b57e6b837f", | ||
3557 | 91 | "type": "ipv4_dhcp" | ||
3558 | 92 | } | ||
3559 | 93 | ], | ||
3560 | 94 | "services": [ | ||
3561 | 95 | { | ||
3562 | 96 | "address": "10.10.160.2", | ||
3563 | 97 | "type": "dns" | ||
3564 | 98 | } | ||
3565 | 99 | ] | ||
3566 | 100 | }, | ||
3567 | 101 | "user-data": "I2Nsb3VkLWNvbmZpZ...", | ||
3568 | 102 | "vendor-data": null | ||
3569 | 103 | }, | ||
3570 | 104 | "v1": { | ||
3571 | 105 | "availability-zone": null, | ||
3572 | 106 | "cloud-name": "openstack", | ||
3573 | 107 | "instance-id": "3e39d278-0644-4728-9479-678f9212d8f0", | ||
3574 | 108 | "local-hostname": "xenial-test", | ||
3575 | 109 | "region": null | ||
3576 | 110 | } | ||
3577 | 111 | } | ||
3578 | 112 | |||
3579 | 113 | |||
3580 | 114 | |||
3581 | 115 | Datasource API | ||
3582 | 116 | -------------- | ||
3583 | 20 | The current interface that a datasource object must provide is the following: | 117 | The current interface that a datasource object must provide is the following: |
3584 | 21 | 118 | ||
3585 | 22 | .. sourcecode:: python | 119 | .. sourcecode:: python |
3586 | diff --git a/doc/rtd/topics/datasources/cloudstack.rst b/doc/rtd/topics/datasources/cloudstack.rst | |||
3587 | index 225093a..a3101ed 100644 | |||
3588 | --- a/doc/rtd/topics/datasources/cloudstack.rst | |||
3589 | +++ b/doc/rtd/topics/datasources/cloudstack.rst | |||
3590 | @@ -4,7 +4,9 @@ CloudStack | |||
3591 | 4 | ========== | 4 | ========== |
3592 | 5 | 5 | ||
3593 | 6 | `Apache CloudStack`_ exposes user-data, meta-data, user password and account | 6 | `Apache CloudStack`_ exposes user-data, meta-data, user password and account |
3595 | 7 | sshkey through the Virtual-Router. For more details on meta-data and user-data, | 7 | sshkey through the Virtual-Router. The datasource obtains the VR address via |
3596 | 8 | dhcp lease information given to the instance. | ||
3597 | 9 | For more details on meta-data and user-data, | ||
3598 | 8 | refer to the `CloudStack Administrator Guide`_. | 10 | refer to the `CloudStack Administrator Guide`_. |
3599 | 9 | 11 | ||
3600 | 10 | URLs to access user-data and meta-data from the Virtual Machine. Here 10.1.1.1 | 12 | URLs to access user-data and meta-data from the Virtual Machine. Here 10.1.1.1 |
3601 | @@ -18,14 +20,26 @@ is the Virtual Router IP: | |||
3602 | 18 | 20 | ||
3603 | 19 | Configuration | 21 | Configuration |
3604 | 20 | ------------- | 22 | ------------- |
3605 | 23 | The following configuration can be set for the datasource in system | ||
3606 | 24 | configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`). | ||
3607 | 21 | 25 | ||
3609 | 22 | Apache CloudStack datasource can be configured as follows: | 26 | The settings that may be configured are: |
3610 | 23 | 27 | ||
3612 | 24 | .. code:: yaml | 28 | * **max_wait**: the maximum amount of clock time in seconds that should be |
3613 | 29 | spent searching metadata_urls. A value less than zero will result in only | ||
3614 | 30 | one request being made, to the first in the list. (default: 120) | ||
3615 | 31 | * **timeout**: the timeout value provided to urlopen for each individual http | ||
3616 | 32 | request. This is used both when selecting a metadata_url and when crawling | ||
3617 | 33 | the metadata service. (default: 50) | ||
3618 | 25 | 34 | ||
3622 | 26 | datasource: | 35 | An example configuration with the default values is provided below: |
3623 | 27 | CloudStack: {} | 36 | |
3624 | 28 | None: {} | 37 | .. sourcecode:: yaml |
3625 | 38 | |||
3626 | 39 | datasource: | ||
3627 | 40 | CloudStack: | ||
3628 | 41 | max_wait: 120 | ||
3629 | 42 | timeout: 50 | ||
3630 | 29 | datasource_list: | 43 | datasource_list: |
3631 | 30 | - CloudStack | 44 | - CloudStack |
3632 | 31 | 45 | ||
3633 | diff --git a/doc/rtd/topics/datasources/ec2.rst b/doc/rtd/topics/datasources/ec2.rst | |||
3634 | index 3bc66e1..64c325d 100644 | |||
3635 | --- a/doc/rtd/topics/datasources/ec2.rst | |||
3636 | +++ b/doc/rtd/topics/datasources/ec2.rst | |||
3637 | @@ -60,4 +60,34 @@ To see which versions are supported from your cloud provider use the following U | |||
3638 | 60 | ... | 60 | ... |
3639 | 61 | latest | 61 | latest |
3640 | 62 | 62 | ||
3641 | 63 | |||
3642 | 64 | |||
3643 | 65 | Configuration | ||
3644 | 66 | ------------- | ||
3645 | 67 | The following configuration can be set for the datasource in system | ||
3646 | 68 | configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`). | ||
3647 | 69 | |||
3648 | 70 | The settings that may be configured are: | ||
3649 | 71 | |||
3650 | 72 | * **metadata_urls**: This list of urls will be searched for an Ec2 | ||
3651 | 73 | metadata service. The first entry that successfully returns a 200 response | ||
3652 | 74 | for <url>/<version>/meta-data/instance-id will be selected. | ||
3653 | 75 | (default: ['http://169.254.169.254', 'http://instance-data:8773']). | ||
3654 | 76 | * **max_wait**: the maximum amount of clock time in seconds that should be | ||
3655 | 77 | spent searching metadata_urls. A value less than zero will result in only | ||
3656 | 78 | one request being made, to the first in the list. (default: 120) | ||
3657 | 79 | * **timeout**: the timeout value provided to urlopen for each individual http | ||
3658 | 80 | request. This is used both when selecting a metadata_url and when crawling | ||
3659 | 81 | the metadata service. (default: 50) | ||
3660 | 82 | |||
3661 | 83 | An example configuration with the default values is provided below: | ||
3662 | 84 | |||
3663 | 85 | .. sourcecode:: yaml | ||
3664 | 86 | |||
3665 | 87 | datasource: | ||
3666 | 88 | Ec2: | ||
3667 | 89 | metadata_urls: ["http://169.254.169.254:80", "http://instance-data:8773"] | ||
3668 | 90 | max_wait: 120 | ||
3669 | 91 | timeout: 50 | ||
3670 | 92 | |||
3671 | 63 | .. vi: textwidth=78 | 93 | .. vi: textwidth=78 |
3672 | diff --git a/doc/rtd/topics/datasources/openstack.rst b/doc/rtd/topics/datasources/openstack.rst | |||
3673 | index 43592de..421da08 100644 | |||
3674 | --- a/doc/rtd/topics/datasources/openstack.rst | |||
3675 | +++ b/doc/rtd/topics/datasources/openstack.rst | |||
3676 | @@ -7,6 +7,21 @@ This datasource supports reading data from the | |||
3677 | 7 | `OpenStack Metadata Service | 7 | `OpenStack Metadata Service |
3678 | 8 | <https://docs.openstack.org/nova/latest/admin/networking-nova.html#metadata-service>`_. | 8 | <https://docs.openstack.org/nova/latest/admin/networking-nova.html#metadata-service>`_. |
3679 | 9 | 9 | ||
3680 | 10 | Discovery | ||
3681 | 11 | ------------- | ||
3682 | 12 | To determine whether a platform looks like it may be OpenStack, cloud-init | ||
3683 | 13 | checks the following environment attributes as a potential OpenStack platform: | ||
3684 | 14 | |||
3685 | 15 | * Maybe OpenStack if | ||
3686 | 16 | |||
3687 | 17 | * **non-x86 cpu architecture**: because DMI data is buggy on some arches | ||
3688 | 18 | * Is OpenStack **if x86 architecture and ANY** of the following | ||
3689 | 19 | |||
3690 | 20 | * **/proc/1/environ**: Nova-lxd contains *product_name=OpenStack Nova* | ||
3691 | 21 | * **DMI product_name**: Either *Openstack Nova* or *OpenStack Compute* | ||
3692 | 22 | * **DMI chassis_asset_tag** is *OpenTelekomCloud* | ||
3693 | 23 | |||
3694 | 24 | |||
3695 | 10 | Configuration | 25 | Configuration |
3696 | 11 | ------------- | 26 | ------------- |
3697 | 12 | The following configuration can be set for the datasource in system | 27 | The following configuration can be set for the datasource in system |
3698 | @@ -25,18 +40,22 @@ The settings that may be configured are: | |||
3699 | 25 | the metadata service. (default: 10) | 40 | the metadata service. (default: 10) |
3700 | 26 | * **retries**: The number of retries that should be done for an http request. | 41 | * **retries**: The number of retries that should be done for an http request. |
3701 | 27 | This value is used only after metadata_url is selected. (default: 5) | 42 | This value is used only after metadata_url is selected. (default: 5) |
3702 | 43 | * **apply_network_config**: A boolean specifying whether to configure the | ||
3703 | 44 | network for the instance based on network_data.json provided by the | ||
3704 | 45 | metadata service. When False, only configure dhcp on the primary nic for | ||
3705 | 46 | this instance. (default: True) | ||
3706 | 28 | 47 | ||
3708 | 29 | An example configuration with the default values is provided as example below: | 48 | An example configuration with the default values is provided below: |
3709 | 30 | 49 | ||
3710 | 31 | .. sourcecode:: yaml | 50 | .. sourcecode:: yaml |
3711 | 32 | 51 | ||
3712 | 33 | #cloud-config | ||
3713 | 34 | datasource: | 52 | datasource: |
3714 | 35 | OpenStack: | 53 | OpenStack: |
3715 | 36 | metadata_urls: ["http://169.254.169.254"] | 54 | metadata_urls: ["http://169.254.169.254"] |
3716 | 37 | max_wait: -1 | 55 | max_wait: -1 |
3717 | 38 | timeout: 10 | 56 | timeout: 10 |
3718 | 39 | retries: 5 | 57 | retries: 5 |
3719 | 58 | apply_network_config: True | ||
3720 | 40 | 59 | ||
3721 | 41 | 60 | ||
3722 | 42 | Vendor Data | 61 | Vendor Data |
3723 | diff --git a/doc/rtd/topics/network-config-format-v1.rst b/doc/rtd/topics/network-config-format-v1.rst | |||
3724 | index 2f8ab54..3b0148c 100644 | |||
3725 | --- a/doc/rtd/topics/network-config-format-v1.rst | |||
3726 | +++ b/doc/rtd/topics/network-config-format-v1.rst | |||
3727 | @@ -130,6 +130,18 @@ the bond interfaces. | |||
3728 | 130 | The ``bond_interfaces`` key accepts a list of network device ``name`` values | 130 | The ``bond_interfaces`` key accepts a list of network device ``name`` values |
3729 | 131 | from the configuration. This list may be empty. | 131 | from the configuration. This list may be empty. |
3730 | 132 | 132 | ||
3731 | 133 | **mtu**: *<MTU SizeBytes>* | ||
3732 | 134 | |||
3733 | 135 | The MTU key represents a device's Maximum Transmission Unit, the largest size | ||
3734 | 136 | packet or frame, specified in octets (eight-bit bytes), that can be sent in a | ||
3735 | 137 | packet- or frame-based network. Specifying ``mtu`` is optional. | ||
3736 | 138 | |||
3737 | 139 | .. note:: | ||
3738 | 140 | |||
3739 | 141 | The possible supported values of a device's MTU is not available at | ||
3740 | 142 | configuration time. It's possible to specify a value too large or to | ||
3741 | 143 | small for a device and may be ignored by the device. | ||
3742 | 144 | |||
3743 | 133 | **params**: *<Dictionary of key: value bonding parameter pairs>* | 145 | **params**: *<Dictionary of key: value bonding parameter pairs>* |
3744 | 134 | 146 | ||
3745 | 135 | The ``params`` key in a bond holds a dictionary of bonding parameters. | 147 | The ``params`` key in a bond holds a dictionary of bonding parameters. |
3746 | @@ -268,6 +280,21 @@ Type ``vlan`` requires the following keys: | |||
3747 | 268 | - ``vlan_link``: Specify the underlying link via its ``name``. | 280 | - ``vlan_link``: Specify the underlying link via its ``name``. |
3748 | 269 | - ``vlan_id``: Specify the VLAN numeric id. | 281 | - ``vlan_id``: Specify the VLAN numeric id. |
3749 | 270 | 282 | ||
3750 | 283 | The following optional keys are supported: | ||
3751 | 284 | |||
3752 | 285 | **mtu**: *<MTU SizeBytes>* | ||
3753 | 286 | |||
3754 | 287 | The MTU key represents a device's Maximum Transmission Unit, the largest size | ||
3755 | 288 | packet or frame, specified in octets (eight-bit bytes), that can be sent in a | ||
3756 | 289 | packet- or frame-based network. Specifying ``mtu`` is optional. | ||
3757 | 290 | |||
3758 | 291 | .. note:: | ||
3759 | 292 | |||
3760 | 293 | The possible supported values of a device's MTU are not available at | ||
3761 | 294 | configuration time. It's possible to specify a value too large or too | ||
3762 | 295 | small for a device and may be ignored by the device. | ||
3763 | 296 | |||
3764 | 297 | |||
3765 | 271 | **VLAN Example**:: | 298 | **VLAN Example**:: |
3766 | 272 | 299 | ||
3767 | 273 | network: | 300 | network: |
3768 | diff --git a/doc/rtd/topics/network-config-format-v2.rst b/doc/rtd/topics/network-config-format-v2.rst | |||
3769 | index 335d236..ea370ef 100644 | |||
3770 | --- a/doc/rtd/topics/network-config-format-v2.rst | |||
3771 | +++ b/doc/rtd/topics/network-config-format-v2.rst | |||
3772 | @@ -174,6 +174,12 @@ recognized by ``inet_pton(3)`` | |||
3773 | 174 | Example for IPv4: ``gateway4: 172.16.0.1`` | 174 | Example for IPv4: ``gateway4: 172.16.0.1`` |
3774 | 175 | Example for IPv6: ``gateway6: 2001:4::1`` | 175 | Example for IPv6: ``gateway6: 2001:4::1`` |
3775 | 176 | 176 | ||
3776 | 177 | **mtu**: *<MTU SizeBytes>* | ||
3777 | 178 | |||
3778 | 179 | The MTU key represents a device's Maximum Transmission Unit, the largest size | ||
3779 | 180 | packet or frame, specified in octets (eight-bit bytes), that can be sent in a | ||
3780 | 181 | packet- or frame-based network. Specifying ``mtu`` is optional. | ||
3781 | 182 | |||
3782 | 177 | **nameservers**: *<(mapping)>* | 183 | **nameservers**: *<(mapping)>* |
3783 | 178 | 184 | ||
3784 | 179 | Set DNS servers and search domains, for manual address configuration. There | 185 | Set DNS servers and search domains, for manual address configuration. There |
3785 | diff --git a/doc/rtd/topics/tests.rst b/doc/rtd/topics/tests.rst | |||
3786 | index cac4a6e..b83bd89 100644 | |||
3787 | --- a/doc/rtd/topics/tests.rst | |||
3788 | +++ b/doc/rtd/topics/tests.rst | |||
3789 | @@ -58,7 +58,8 @@ explaining how to run one or the other independently. | |||
3790 | 58 | $ tox -e citest -- run --verbose \ | 58 | $ tox -e citest -- run --verbose \ |
3791 | 59 | --os-name stretch --os-name xenial \ | 59 | --os-name stretch --os-name xenial \ |
3792 | 60 | --deb cloud-init_0.7.8~my_patch_all.deb \ | 60 | --deb cloud-init_0.7.8~my_patch_all.deb \ |
3794 | 61 | --preserve-data --data-dir ~/collection | 61 | --preserve-data --data-dir ~/collection \ |
3795 | 62 | --preserve-instance | ||
3796 | 62 | 63 | ||
3797 | 63 | The above command will do the following: | 64 | The above command will do the following: |
3798 | 64 | 65 | ||
3799 | @@ -76,6 +77,10 @@ The above command will do the following: | |||
3800 | 76 | * ``--preserve-data`` always preserve collected data, do not remove data | 77 | * ``--preserve-data`` always preserve collected data, do not remove data |
3801 | 77 | after successful test run | 78 | after successful test run |
3802 | 78 | 79 | ||
3803 | 80 | * ``--preserve-instance`` do not destroy the instance after test to allow | ||
3804 | 81 | for debugging the stopped instance during integration test development. By | ||
3805 | 82 | default, test instances are destroyed after the test completes. | ||
3806 | 83 | |||
3807 | 79 | * ``--data-dir ~/collection`` write collected data into `~/collection`, | 84 | * ``--data-dir ~/collection`` write collected data into `~/collection`, |
3808 | 80 | rather than using a temporary directory | 85 | rather than using a temporary directory |
3809 | 81 | 86 | ||
3810 | diff --git a/integration-requirements.txt b/integration-requirements.txt | |||
3811 | index df3a73e..e5bb5b2 100644 | |||
3812 | --- a/integration-requirements.txt | |||
3813 | +++ b/integration-requirements.txt | |||
3814 | @@ -13,7 +13,7 @@ paramiko==2.4.0 | |||
3815 | 13 | 13 | ||
3816 | 14 | # lxd backend | 14 | # lxd backend |
3817 | 15 | # 04/03/2018: enables use of lxd 3.0 | 15 | # 04/03/2018: enables use of lxd 3.0 |
3819 | 16 | git+https://github.com/lxc/pylxd.git@1a85a12a23401de6e96b1aeaf59ecbff2e88f49d | 16 | git+https://github.com/lxc/pylxd.git@4b8ab1802f9aee4eb29cf7b119dae0aa47150779 |
3820 | 17 | 17 | ||
3821 | 18 | 18 | ||
3822 | 19 | # finds latest image information | 19 | # finds latest image information |
3823 | diff --git a/packages/bddeb b/packages/bddeb | |||
3824 | index 4f2e2dd..95602a0 100755 | |||
3825 | --- a/packages/bddeb | |||
3826 | +++ b/packages/bddeb | |||
3827 | @@ -1,11 +1,14 @@ | |||
3828 | 1 | #!/usr/bin/env python3 | 1 | #!/usr/bin/env python3 |
3829 | 2 | 2 | ||
3830 | 3 | import argparse | 3 | import argparse |
3831 | 4 | import csv | ||
3832 | 4 | import json | 5 | import json |
3833 | 5 | import os | 6 | import os |
3834 | 6 | import shutil | 7 | import shutil |
3835 | 7 | import sys | 8 | import sys |
3836 | 8 | 9 | ||
3837 | 10 | UNRELEASED = "UNRELEASED" | ||
3838 | 11 | |||
3839 | 9 | 12 | ||
3840 | 10 | def find_root(): | 13 | def find_root(): |
3841 | 11 | # expected path is in <top_dir>/packages/ | 14 | # expected path is in <top_dir>/packages/ |
3842 | @@ -28,6 +31,24 @@ if "avoid-pep8-E402-import-not-top-of-file": | |||
3843 | 28 | DEBUILD_ARGS = ["-S", "-d"] | 31 | DEBUILD_ARGS = ["-S", "-d"] |
3844 | 29 | 32 | ||
3845 | 30 | 33 | ||
3846 | 34 | def get_release_suffix(release): | ||
3847 | 35 | """Given ubuntu release (xenial), return a suffix for package (~16.04.1)""" | ||
3848 | 36 | csv_path = "/usr/share/distro-info/ubuntu.csv" | ||
3849 | 37 | rels = {} | ||
3850 | 38 | # fields are version, codename, series, created, release, eol, eol-server | ||
3851 | 39 | if os.path.exists(csv_path): | ||
3852 | 40 | with open(csv_path, "r") as fp: | ||
3853 | 41 | # version has "16.04 LTS" or "16.10", so drop "LTS" portion. | ||
3854 | 42 | rels = {row['series']: row['version'].replace(' LTS', '') | ||
3855 | 43 | for row in csv.DictReader(fp)} | ||
3856 | 44 | if release in rels: | ||
3857 | 45 | return "~%s.1" % rels[release] | ||
3858 | 46 | elif release != UNRELEASED: | ||
3859 | 47 | print("missing distro-info-data package, unable to give " | ||
3860 | 48 | "per-release suffix.\n") | ||
3861 | 49 | return "" | ||
3862 | 50 | |||
3863 | 51 | |||
3864 | 31 | def run_helper(helper, args=None, strip=True): | 52 | def run_helper(helper, args=None, strip=True): |
3865 | 32 | if args is None: | 53 | if args is None: |
3866 | 33 | args = [] | 54 | args = [] |
3867 | @@ -117,7 +138,7 @@ def get_parser(): | |||
3868 | 117 | 138 | ||
3869 | 118 | parser.add_argument("--release", dest="release", | 139 | parser.add_argument("--release", dest="release", |
3870 | 119 | help=("build with changelog referencing RELEASE"), | 140 | help=("build with changelog referencing RELEASE"), |
3872 | 120 | default="UNRELEASED") | 141 | default=UNRELEASED) |
3873 | 121 | 142 | ||
3874 | 122 | for ent in DEBUILD_ARGS: | 143 | for ent in DEBUILD_ARGS: |
3875 | 123 | parser.add_argument(ent, dest="debuild_args", action='append_const', | 144 | parser.add_argument(ent, dest="debuild_args", action='append_const', |
3876 | @@ -148,7 +169,10 @@ def main(): | |||
3877 | 148 | if args.verbose: | 169 | if args.verbose: |
3878 | 149 | capture = False | 170 | capture = False |
3879 | 150 | 171 | ||
3881 | 151 | templ_data = {'debian_release': args.release} | 172 | templ_data = { |
3882 | 173 | 'debian_release': args.release, | ||
3883 | 174 | 'release_suffix': get_release_suffix(args.release)} | ||
3884 | 175 | |||
3885 | 152 | with temp_utils.tempdir() as tdir: | 176 | with temp_utils.tempdir() as tdir: |
3886 | 153 | 177 | ||
3887 | 154 | # output like 0.7.6-1022-g36e92d3 | 178 | # output like 0.7.6-1022-g36e92d3 |
3888 | @@ -157,10 +181,18 @@ def main(): | |||
3889 | 157 | # This is really only a temporary archive | 181 | # This is really only a temporary archive |
3890 | 158 | # since we will extract it then add in the debian | 182 | # since we will extract it then add in the debian |
3891 | 159 | # folder, then re-archive it for debian happiness | 183 | # folder, then re-archive it for debian happiness |
3892 | 160 | print("Creating a temporary tarball using the 'make-tarball' helper") | ||
3893 | 161 | tarball = "cloud-init_%s.orig.tar.gz" % ver_data['version_long'] | 184 | tarball = "cloud-init_%s.orig.tar.gz" % ver_data['version_long'] |
3894 | 162 | tarball_fp = util.abs_join(tdir, tarball) | 185 | tarball_fp = util.abs_join(tdir, tarball) |
3896 | 163 | run_helper('make-tarball', ['--long', '--output=' + tarball_fp]) | 186 | path = None |
3897 | 187 | for pd in ("./", "../", "../dl/"): | ||
3898 | 188 | if os.path.exists(pd + tarball): | ||
3899 | 189 | path = pd + tarball | ||
3900 | 190 | print("Using existing tarball %s" % path) | ||
3901 | 191 | shutil.copy(path, tarball_fp) | ||
3902 | 192 | break | ||
3903 | 193 | if path is None: | ||
3904 | 194 | print("Creating a temp tarball using the 'make-tarball' helper") | ||
3905 | 195 | run_helper('make-tarball', ['--long', '--output=' + tarball_fp]) | ||
3906 | 164 | 196 | ||
3907 | 165 | print("Extracting temporary tarball %r" % (tarball)) | 197 | print("Extracting temporary tarball %r" % (tarball)) |
3908 | 166 | cmd = ['tar', '-xvzf', tarball_fp, '-C', tdir] | 198 | cmd = ['tar', '-xvzf', tarball_fp, '-C', tdir] |
3909 | diff --git a/packages/brpm b/packages/brpm | |||
3910 | index 3439cf3..a154ef2 100755 | |||
3911 | --- a/packages/brpm | |||
3912 | +++ b/packages/brpm | |||
3913 | @@ -42,13 +42,13 @@ def run_helper(helper, args=None, strip=True): | |||
3914 | 42 | return stdout | 42 | return stdout |
3915 | 43 | 43 | ||
3916 | 44 | 44 | ||
3918 | 45 | def read_dependencies(requirements_file='requirements.txt'): | 45 | def read_dependencies(distro, requirements_file='requirements.txt'): |
3919 | 46 | """Returns the Python package dependencies from requirements.txt files. | 46 | """Returns the Python package dependencies from requirements.txt files. |
3920 | 47 | 47 | ||
3921 | 48 | @returns a tuple of (requirements, test_requirements) | 48 | @returns a tuple of (requirements, test_requirements) |
3922 | 49 | """ | 49 | """ |
3923 | 50 | pkg_deps = run_helper( | 50 | pkg_deps = run_helper( |
3925 | 51 | 'read-dependencies', args=['--distro', 'redhat']).splitlines() | 51 | 'read-dependencies', args=['--distro', distro]).splitlines() |
3926 | 52 | test_deps = run_helper( | 52 | test_deps = run_helper( |
3927 | 53 | 'read-dependencies', args=[ | 53 | 'read-dependencies', args=[ |
3928 | 54 | '--requirements-file', 'test-requirements.txt', | 54 | '--requirements-file', 'test-requirements.txt', |
3929 | @@ -83,7 +83,7 @@ def generate_spec_contents(args, version_data, tmpl_fn, top_dir, arc_fn): | |||
3930 | 83 | rpm_upstream_version = version_data['version'] | 83 | rpm_upstream_version = version_data['version'] |
3931 | 84 | subs['rpm_upstream_version'] = rpm_upstream_version | 84 | subs['rpm_upstream_version'] = rpm_upstream_version |
3932 | 85 | 85 | ||
3934 | 86 | deps, test_deps = read_dependencies() | 86 | deps, test_deps = read_dependencies(distro=args.distro) |
3935 | 87 | subs['buildrequires'] = deps + test_deps | 87 | subs['buildrequires'] = deps + test_deps |
3936 | 88 | subs['requires'] = deps | 88 | subs['requires'] = deps |
3937 | 89 | 89 | ||
3938 | diff --git a/packages/debian/changelog.in b/packages/debian/changelog.in | |||
3939 | index bdf8d56..930322f 100644 | |||
3940 | --- a/packages/debian/changelog.in | |||
3941 | +++ b/packages/debian/changelog.in | |||
3942 | @@ -1,5 +1,5 @@ | |||
3943 | 1 | ## template:basic | 1 | ## template:basic |
3945 | 2 | cloud-init (${version_long}-1~bddeb) ${debian_release}; urgency=low | 2 | cloud-init (${version_long}-1~bddeb${release_suffix}) ${debian_release}; urgency=low |
3946 | 3 | 3 | ||
3947 | 4 | * build | 4 | * build |
3948 | 5 | 5 | ||
3949 | diff --git a/packages/debian/rules.in b/packages/debian/rules.in | |||
3950 | index 4aa907e..e542c7f 100755 | |||
3951 | --- a/packages/debian/rules.in | |||
3952 | +++ b/packages/debian/rules.in | |||
3953 | @@ -3,6 +3,7 @@ | |||
3954 | 3 | INIT_SYSTEM ?= systemd | 3 | INIT_SYSTEM ?= systemd |
3955 | 4 | export PYBUILD_INSTALL_ARGS=--init-system=$(INIT_SYSTEM) | 4 | export PYBUILD_INSTALL_ARGS=--init-system=$(INIT_SYSTEM) |
3956 | 5 | PYVER ?= python${pyver} | 5 | PYVER ?= python${pyver} |
3957 | 6 | DEB_VERSION := $(shell dpkg-parsechangelog --show-field=Version) | ||
3958 | 6 | 7 | ||
3959 | 7 | %: | 8 | %: |
3960 | 8 | dh $@ --with $(PYVER),systemd --buildsystem pybuild | 9 | dh $@ --with $(PYVER),systemd --buildsystem pybuild |
3961 | @@ -14,6 +15,7 @@ override_dh_install: | |||
3962 | 14 | cp tools/21-cloudinit.conf debian/cloud-init/etc/rsyslog.d/21-cloudinit.conf | 15 | cp tools/21-cloudinit.conf debian/cloud-init/etc/rsyslog.d/21-cloudinit.conf |
3963 | 15 | install -D ./tools/Z99-cloud-locale-test.sh debian/cloud-init/etc/profile.d/Z99-cloud-locale-test.sh | 16 | install -D ./tools/Z99-cloud-locale-test.sh debian/cloud-init/etc/profile.d/Z99-cloud-locale-test.sh |
3964 | 16 | install -D ./tools/Z99-cloudinit-warnings.sh debian/cloud-init/etc/profile.d/Z99-cloudinit-warnings.sh | 17 | install -D ./tools/Z99-cloudinit-warnings.sh debian/cloud-init/etc/profile.d/Z99-cloudinit-warnings.sh |
3965 | 18 | flist=$$(find $(CURDIR)/debian/ -type f -name version.py) && sed -i 's,@@PACKAGED_VERSION@@,$(DEB_VERSION),' $${flist:-did-not-find-version-py-for-replacement} | ||
3966 | 17 | 19 | ||
3967 | 18 | override_dh_auto_test: | 20 | override_dh_auto_test: |
3968 | 19 | ifeq (,$(findstring nocheck,$(DEB_BUILD_OPTIONS))) | 21 | ifeq (,$(findstring nocheck,$(DEB_BUILD_OPTIONS))) |
3969 | diff --git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in | |||
3970 | index 91faf3c..a3a6d1e 100644 | |||
3971 | --- a/packages/redhat/cloud-init.spec.in | |||
3972 | +++ b/packages/redhat/cloud-init.spec.in | |||
3973 | @@ -115,6 +115,13 @@ rm -rf $RPM_BUILD_ROOT%{python_sitelib}/tests | |||
3974 | 115 | mkdir -p $RPM_BUILD_ROOT/%{_sharedstatedir}/cloud | 115 | mkdir -p $RPM_BUILD_ROOT/%{_sharedstatedir}/cloud |
3975 | 116 | mkdir -p $RPM_BUILD_ROOT/%{_libexecdir}/%{name} | 116 | mkdir -p $RPM_BUILD_ROOT/%{_libexecdir}/%{name} |
3976 | 117 | 117 | ||
3977 | 118 | # patch in the full version to version.py | ||
3978 | 119 | version_pys=$(cd "$RPM_BUILD_ROOT" && find . -name version.py -type f) | ||
3979 | 120 | [ -n "$version_pys" ] || | ||
3980 | 121 | { echo "failed to find 'version.py' to patch with version." 1>&2; exit 1; } | ||
3981 | 122 | ( cd "$RPM_BUILD_ROOT" && | ||
3982 | 123 | sed -i "s,@@PACKAGED_VERSION@@,%{version}-%{release}," $version_pys ) | ||
3983 | 124 | |||
3984 | 118 | %clean | 125 | %clean |
3985 | 119 | rm -rf $RPM_BUILD_ROOT | 126 | rm -rf $RPM_BUILD_ROOT |
3986 | 120 | 127 | ||
3987 | diff --git a/packages/suse/cloud-init.spec.in b/packages/suse/cloud-init.spec.in | |||
3988 | index bbb965a..e781d74 100644 | |||
3989 | --- a/packages/suse/cloud-init.spec.in | |||
3990 | +++ b/packages/suse/cloud-init.spec.in | |||
3991 | @@ -5,7 +5,7 @@ | |||
3992 | 5 | # Or: http://www.rpm.org/max-rpm/ch-rpm-inside.html | 5 | # Or: http://www.rpm.org/max-rpm/ch-rpm-inside.html |
3993 | 6 | 6 | ||
3994 | 7 | Name: cloud-init | 7 | Name: cloud-init |
3996 | 8 | Version: {{version}} | 8 | Version: {{rpm_upstream_version}} |
3997 | 9 | Release: 1{{subrelease}}%{?dist} | 9 | Release: 1{{subrelease}}%{?dist} |
3998 | 10 | Summary: Cloud instance init scripts | 10 | Summary: Cloud instance init scripts |
3999 | 11 | 11 | ||
4000 | @@ -16,22 +16,13 @@ URL: http://launchpad.net/cloud-init | |||
4001 | 16 | Source0: {{archive_name}} | 16 | Source0: {{archive_name}} |
4002 | 17 | BuildRoot: %{_tmppath}/%{name}-%{version}-build | 17 | BuildRoot: %{_tmppath}/%{name}-%{version}-build |
4003 | 18 | 18 | ||
4004 | 19 | %if 0%{?suse_version} && 0%{?suse_version} <= 1110 | ||
4005 | 20 | %{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")} | ||
4006 | 21 | %else | ||
4007 | 22 | BuildArch: noarch | 19 | BuildArch: noarch |
4009 | 23 | %endif | 20 | |
4010 | 24 | 21 | ||
4011 | 25 | {% for r in buildrequires %} | 22 | {% for r in buildrequires %} |
4012 | 26 | BuildRequires: {{r}} | 23 | BuildRequires: {{r}} |
4013 | 27 | {% endfor %} | 24 | {% endfor %} |
4014 | 28 | 25 | ||
4015 | 29 | %if 0%{?suse_version} && 0%{?suse_version} <= 1210 | ||
4016 | 30 | %define initsys sysvinit | ||
4017 | 31 | %else | ||
4018 | 32 | %define initsys systemd | ||
4019 | 33 | %endif | ||
4020 | 34 | |||
4021 | 35 | # Install pypi 'dynamic' requirements | 26 | # Install pypi 'dynamic' requirements |
4022 | 36 | {% for r in requires %} | 27 | {% for r in requires %} |
4023 | 37 | Requires: {{r}} | 28 | Requires: {{r}} |
4024 | @@ -39,7 +30,7 @@ Requires: {{r}} | |||
4025 | 39 | 30 | ||
4026 | 40 | # Custom patches | 31 | # Custom patches |
4027 | 41 | {% for p in patches %} | 32 | {% for p in patches %} |
4029 | 42 | Patch{{loop.index0}: {{p}} | 33 | Patch{{loop.index0}}: {{p}} |
4030 | 43 | {% endfor %} | 34 | {% endfor %} |
4031 | 44 | 35 | ||
4032 | 45 | %description | 36 | %description |
4033 | @@ -63,35 +54,21 @@ end for | |||
4034 | 63 | %{__python} setup.py install \ | 54 | %{__python} setup.py install \ |
4035 | 64 | --skip-build --root=%{buildroot} --prefix=%{_prefix} \ | 55 | --skip-build --root=%{buildroot} --prefix=%{_prefix} \ |
4036 | 65 | --record-rpm=INSTALLED_FILES --install-lib=%{python_sitelib} \ | 56 | --record-rpm=INSTALLED_FILES --install-lib=%{python_sitelib} \ |
4038 | 66 | --init-system=%{initsys} | 57 | --init-system=systemd |
4039 | 58 | |||
4040 | 59 | # Move udev rules | ||
4041 | 60 | mkdir -p %{buildroot}/usr/lib/udev/rules.d/ | ||
4042 | 61 | mv %{buildroot}/lib/udev/rules.d/* %{buildroot}/usr/lib/udev/rules.d/ | ||
4043 | 67 | 62 | ||
4044 | 68 | # Remove non-SUSE templates | 63 | # Remove non-SUSE templates |
4045 | 69 | rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.debian.* | 64 | rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.debian.* |
4046 | 70 | rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.redhat.* | 65 | rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.redhat.* |
4047 | 71 | rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.ubuntu.* | 66 | rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.ubuntu.* |
4048 | 72 | 67 | ||
4049 | 73 | # Remove cloud-init tests | ||
4050 | 74 | rm -r %{buildroot}/%{python_sitelib}/tests | ||
4051 | 75 | |||
4052 | 76 | # Move sysvinit scripts to the correct place and create symbolic links | ||
4053 | 77 | %if %{initsys} == sysvinit | ||
4054 | 78 | mkdir -p %{buildroot}/%{_initddir} | ||
4055 | 79 | mv %{buildroot}%{_sysconfdir}/rc.d/init.d/* %{buildroot}%{_initddir}/ | ||
4056 | 80 | rmdir %{buildroot}%{_sysconfdir}/rc.d/init.d | ||
4057 | 81 | rmdir %{buildroot}%{_sysconfdir}/rc.d | ||
4058 | 82 | |||
4059 | 83 | mkdir -p %{buildroot}/%{_sbindir} | ||
4060 | 84 | pushd %{buildroot}/%{_initddir} | ||
4061 | 85 | for file in * ; do | ||
4062 | 86 | ln -s %{_initddir}/${file} %{buildroot}/%{_sbindir}/rc${file} | ||
4063 | 87 | done | ||
4064 | 88 | popd | ||
4065 | 89 | %endif | ||
4066 | 90 | |||
4067 | 91 | # Move documentation | 68 | # Move documentation |
4068 | 92 | mkdir -p %{buildroot}/%{_defaultdocdir} | 69 | mkdir -p %{buildroot}/%{_defaultdocdir} |
4069 | 93 | mv %{buildroot}/usr/share/doc/cloud-init %{buildroot}/%{_defaultdocdir} | 70 | mv %{buildroot}/usr/share/doc/cloud-init %{buildroot}/%{_defaultdocdir} |
4071 | 94 | for doc in TODO LICENSE ChangeLog requirements.txt; do | 71 | for doc in LICENSE ChangeLog requirements.txt; do |
4072 | 95 | cp ${doc} %{buildroot}/%{_defaultdocdir}/cloud-init | 72 | cp ${doc} %{buildroot}/%{_defaultdocdir}/cloud-init |
4073 | 96 | done | 73 | done |
4074 | 97 | 74 | ||
4075 | @@ -102,29 +79,35 @@ done | |||
4076 | 102 | 79 | ||
4077 | 103 | mkdir -p %{buildroot}/var/lib/cloud | 80 | mkdir -p %{buildroot}/var/lib/cloud |
4078 | 104 | 81 | ||
4079 | 82 | # patch in the full version to version.py | ||
4080 | 83 | version_pys=$(cd "%{buildroot}" && find . -name version.py -type f) | ||
4081 | 84 | [ -n "$version_pys" ] || | ||
4082 | 85 | { echo "failed to find 'version.py' to patch with version." 1>&2; exit 1; } | ||
4083 | 86 | ( cd "%{buildroot}" && | ||
4084 | 87 | sed -i "s,@@PACKAGED_VERSION@@,%{version}-%{release}," $version_pys ) | ||
4085 | 88 | |||
4086 | 105 | %postun | 89 | %postun |
4087 | 106 | %insserv_cleanup | 90 | %insserv_cleanup |
4088 | 107 | 91 | ||
4089 | 108 | %files | 92 | %files |
4090 | 109 | 93 | ||
4091 | 110 | # Sysvinit scripts | ||
4092 | 111 | %if %{initsys} == sysvinit | ||
4093 | 112 | %attr(0755, root, root) %{_initddir}/cloud-config | ||
4094 | 113 | %attr(0755, root, root) %{_initddir}/cloud-final | ||
4095 | 114 | %attr(0755, root, root) %{_initddir}/cloud-init-local | ||
4096 | 115 | %attr(0755, root, root) %{_initddir}/cloud-init | ||
4097 | 116 | |||
4098 | 117 | %{_sbindir}/rccloud-* | ||
4099 | 118 | %endif | ||
4100 | 119 | |||
4101 | 120 | # Program binaries | 94 | # Program binaries |
4102 | 121 | %{_bindir}/cloud-init* | 95 | %{_bindir}/cloud-init* |
4103 | 122 | 96 | ||
4104 | 97 | # systemd files | ||
4105 | 98 | /usr/lib/systemd/system-generators/* | ||
4106 | 99 | /usr/lib/systemd/system/* | ||
4107 | 100 | |||
4108 | 123 | # There doesn't seem to be an agreed upon place for these | 101 | # There doesn't seem to be an agreed upon place for these |
4109 | 124 | # although it appears the standard says /usr/lib but rpmbuild | 102 | # although it appears the standard says /usr/lib but rpmbuild |
4110 | 125 | # will try /usr/lib64 ?? | 103 | # will try /usr/lib64 ?? |
4111 | 126 | /usr/lib/%{name}/uncloud-init | 104 | /usr/lib/%{name}/uncloud-init |
4112 | 127 | /usr/lib/%{name}/write-ssh-key-fingerprints | 105 | /usr/lib/%{name}/write-ssh-key-fingerprints |
4113 | 106 | /usr/lib/%{name}/ds-identify | ||
4114 | 107 | |||
4115 | 108 | # udev rules | ||
4116 | 109 | /usr/lib/udev/rules.d/66-azure-ephemeral.rules | ||
4117 | 110 | |||
4118 | 128 | 111 | ||
4119 | 129 | # Docs | 112 | # Docs |
4120 | 130 | %doc %{_defaultdocdir}/cloud-init/* | 113 | %doc %{_defaultdocdir}/cloud-init/* |
4121 | @@ -138,6 +121,9 @@ mkdir -p %{buildroot}/var/lib/cloud | |||
4122 | 138 | %config(noreplace) %{_sysconfdir}/cloud/templates/* | 121 | %config(noreplace) %{_sysconfdir}/cloud/templates/* |
4123 | 139 | %{_sysconfdir}/bash_completion.d/cloud-init | 122 | %{_sysconfdir}/bash_completion.d/cloud-init |
4124 | 140 | 123 | ||
4125 | 124 | %{_sysconfdir}/dhcp/dhclient-exit-hooks.d/hook-dhclient | ||
4126 | 125 | %{_sysconfdir}/NetworkManager/dispatcher.d/hook-network-manager | ||
4127 | 126 | |||
4128 | 141 | # Python code is here... | 127 | # Python code is here... |
4129 | 142 | %{python_sitelib}/* | 128 | %{python_sitelib}/* |
4130 | 143 | 129 | ||
4131 | diff --git a/setup.py b/setup.py | |||
4132 | index 85b2337..5ed8eae 100755 | |||
4133 | --- a/setup.py | |||
4134 | +++ b/setup.py | |||
4135 | @@ -25,7 +25,7 @@ from distutils.errors import DistutilsArgError | |||
4136 | 25 | import subprocess | 25 | import subprocess |
4137 | 26 | 26 | ||
4138 | 27 | RENDERED_TMPD_PREFIX = "RENDERED_TEMPD" | 27 | RENDERED_TMPD_PREFIX = "RENDERED_TEMPD" |
4140 | 28 | 28 | VARIANT = None | |
4141 | 29 | 29 | ||
4142 | 30 | def is_f(p): | 30 | def is_f(p): |
4143 | 31 | return os.path.isfile(p) | 31 | return os.path.isfile(p) |
4144 | @@ -114,10 +114,20 @@ def render_tmpl(template): | |||
4145 | 114 | atexit.register(shutil.rmtree, tmpd) | 114 | atexit.register(shutil.rmtree, tmpd) |
4146 | 115 | bname = os.path.basename(template).rstrip(tmpl_ext) | 115 | bname = os.path.basename(template).rstrip(tmpl_ext) |
4147 | 116 | fpath = os.path.join(tmpd, bname) | 116 | fpath = os.path.join(tmpd, bname) |
4149 | 117 | tiny_p([sys.executable, './tools/render-cloudcfg', template, fpath]) | 117 | if VARIANT: |
4150 | 118 | tiny_p([sys.executable, './tools/render-cloudcfg', '--variant', | ||
4151 | 119 | VARIANT, template, fpath]) | ||
4152 | 120 | else: | ||
4153 | 121 | tiny_p([sys.executable, './tools/render-cloudcfg', template, fpath]) | ||
4154 | 118 | # return path relative to setup.py | 122 | # return path relative to setup.py |
4155 | 119 | return os.path.join(os.path.basename(tmpd), bname) | 123 | return os.path.join(os.path.basename(tmpd), bname) |
4156 | 120 | 124 | ||
4157 | 125 | # User can set the variant for template rendering | ||
4158 | 126 | if '--distro' in sys.argv: | ||
4159 | 127 | idx = sys.argv.index('--distro') | ||
4160 | 128 | VARIANT = sys.argv[idx+1] | ||
4161 | 129 | del sys.argv[idx+1] | ||
4162 | 130 | sys.argv.remove('--distro') | ||
4163 | 121 | 131 | ||
4164 | 122 | INITSYS_FILES = { | 132 | INITSYS_FILES = { |
4165 | 123 | 'sysvinit': [f for f in glob('sysvinit/redhat/*') if is_f(f)], | 133 | 'sysvinit': [f for f in glob('sysvinit/redhat/*') if is_f(f)], |
4166 | @@ -260,7 +270,7 @@ requirements = read_requires() | |||
4167 | 260 | setuptools.setup( | 270 | setuptools.setup( |
4168 | 261 | name='cloud-init', | 271 | name='cloud-init', |
4169 | 262 | version=get_version(), | 272 | version=get_version(), |
4171 | 263 | description='EC2 initialisation magic', | 273 | description='Cloud instance initialisation magic', |
4172 | 264 | author='Scott Moser', | 274 | author='Scott Moser', |
4173 | 265 | author_email='scott.moser@canonical.com', | 275 | author_email='scott.moser@canonical.com', |
4174 | 266 | url='http://launchpad.net/cloud-init/', | 276 | url='http://launchpad.net/cloud-init/', |
4175 | @@ -277,4 +287,5 @@ setuptools.setup( | |||
4176 | 277 | } | 287 | } |
4177 | 278 | ) | 288 | ) |
4178 | 279 | 289 | ||
4179 | 290 | |||
4180 | 280 | # vi: ts=4 expandtab | 291 | # vi: ts=4 expandtab |
4181 | diff --git a/systemd/cloud-config.service.tmpl b/systemd/cloud-config.service.tmpl | |||
4182 | index bdee3ce..9d928ca 100644 | |||
4183 | --- a/systemd/cloud-config.service.tmpl | |||
4184 | +++ b/systemd/cloud-config.service.tmpl | |||
4185 | @@ -2,6 +2,7 @@ | |||
4186 | 2 | [Unit] | 2 | [Unit] |
4187 | 3 | Description=Apply the settings specified in cloud-config | 3 | Description=Apply the settings specified in cloud-config |
4188 | 4 | After=network-online.target cloud-config.target | 4 | After=network-online.target cloud-config.target |
4189 | 5 | After=snapd.seeded.service | ||
4190 | 5 | Wants=network-online.target cloud-config.target | 6 | Wants=network-online.target cloud-config.target |
4191 | 6 | 7 | ||
4192 | 7 | [Service] | 8 | [Service] |
4193 | diff --git a/tests/cloud_tests/args.py b/tests/cloud_tests/args.py | |||
4194 | index c6c1877..ab34549 100644 | |||
4195 | --- a/tests/cloud_tests/args.py | |||
4196 | +++ b/tests/cloud_tests/args.py | |||
4197 | @@ -62,6 +62,9 @@ ARG_SETS = { | |||
4198 | 62 | (('-d', '--data-dir'), | 62 | (('-d', '--data-dir'), |
4199 | 63 | {'help': 'directory to store test data in', | 63 | {'help': 'directory to store test data in', |
4200 | 64 | 'action': 'store', 'metavar': 'DIR', 'required': False}), | 64 | 'action': 'store', 'metavar': 'DIR', 'required': False}), |
4201 | 65 | (('--preserve-instance',), | ||
4202 | 66 | {'help': 'do not destroy the instance under test', | ||
4203 | 67 | 'action': 'store_true', 'default': False, 'required': False}), | ||
4204 | 65 | (('--preserve-data',), | 68 | (('--preserve-data',), |
4205 | 66 | {'help': 'do not remove collected data after successful run', | 69 | {'help': 'do not remove collected data after successful run', |
4206 | 67 | 'action': 'store_true', 'default': False, 'required': False}),), | 70 | 'action': 'store_true', 'default': False, 'required': False}),), |
4207 | diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py | |||
4208 | index 1ba7285..75b5061 100644 | |||
4209 | --- a/tests/cloud_tests/collect.py | |||
4210 | +++ b/tests/cloud_tests/collect.py | |||
4211 | @@ -42,7 +42,7 @@ def collect_console(instance, base_dir): | |||
4212 | 42 | @param base_dir: directory to write console log to | 42 | @param base_dir: directory to write console log to |
4213 | 43 | """ | 43 | """ |
4214 | 44 | logfile = os.path.join(base_dir, 'console.log') | 44 | logfile = os.path.join(base_dir, 'console.log') |
4216 | 45 | LOG.debug('getting console log for %s to %s', instance, logfile) | 45 | LOG.debug('getting console log for %s to %s', instance.name, logfile) |
4217 | 46 | try: | 46 | try: |
4218 | 47 | data = instance.console_log() | 47 | data = instance.console_log() |
4219 | 48 | except NotImplementedError as e: | 48 | except NotImplementedError as e: |
4220 | @@ -93,7 +93,8 @@ def collect_test_data(args, snapshot, os_name, test_name): | |||
4221 | 93 | # create test instance | 93 | # create test instance |
4222 | 94 | component = PlatformComponent( | 94 | component = PlatformComponent( |
4223 | 95 | partial(platforms.get_instance, snapshot, user_data, | 95 | partial(platforms.get_instance, snapshot, user_data, |
4225 | 96 | block=True, start=False, use_desc=test_name)) | 96 | block=True, start=False, use_desc=test_name), |
4226 | 97 | preserve_instance=args.preserve_instance) | ||
4227 | 97 | 98 | ||
4228 | 98 | LOG.info('collecting test data for test: %s', test_name) | 99 | LOG.info('collecting test data for test: %s', test_name) |
4229 | 99 | with component as instance: | 100 | with component as instance: |
4230 | diff --git a/tests/cloud_tests/platforms/instances.py b/tests/cloud_tests/platforms/instances.py | |||
4231 | index cc439d2..95bc3b1 100644 | |||
4232 | --- a/tests/cloud_tests/platforms/instances.py | |||
4233 | +++ b/tests/cloud_tests/platforms/instances.py | |||
4234 | @@ -87,7 +87,12 @@ class Instance(TargetBase): | |||
4235 | 87 | self._ssh_client = None | 87 | self._ssh_client = None |
4236 | 88 | 88 | ||
4237 | 89 | def _ssh_connect(self): | 89 | def _ssh_connect(self): |
4239 | 90 | """Connect via SSH.""" | 90 | """Connect via SSH. |
4240 | 91 | |||
4241 | 92 | Attempt to SSH to the client on the specific IP and port. If it | ||
4242 | 93 | fails in some manner, then retry 2 more times for a total of 3 | ||
4243 | 94 | attempts; sleeping a few seconds between attempts. | ||
4244 | 95 | """ | ||
4245 | 91 | if self._ssh_client: | 96 | if self._ssh_client: |
4246 | 92 | return self._ssh_client | 97 | return self._ssh_client |
4247 | 93 | 98 | ||
4248 | @@ -98,21 +103,22 @@ class Instance(TargetBase): | |||
4249 | 98 | client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) | 103 | client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) |
4250 | 99 | private_key = paramiko.RSAKey.from_private_key_file(self.ssh_key_file) | 104 | private_key = paramiko.RSAKey.from_private_key_file(self.ssh_key_file) |
4251 | 100 | 105 | ||
4253 | 101 | retries = 30 | 106 | retries = 3 |
4254 | 102 | while retries: | 107 | while retries: |
4255 | 103 | try: | 108 | try: |
4256 | 104 | client.connect(username=self.ssh_username, | 109 | client.connect(username=self.ssh_username, |
4257 | 105 | hostname=self.ssh_ip, port=self.ssh_port, | 110 | hostname=self.ssh_ip, port=self.ssh_port, |
4259 | 106 | pkey=private_key, banner_timeout=30) | 111 | pkey=private_key) |
4260 | 107 | self._ssh_client = client | 112 | self._ssh_client = client |
4261 | 108 | return client | 113 | return client |
4262 | 109 | except (ConnectionRefusedError, AuthenticationException, | 114 | except (ConnectionRefusedError, AuthenticationException, |
4263 | 110 | BadHostKeyException, ConnectionResetError, SSHException, | 115 | BadHostKeyException, ConnectionResetError, SSHException, |
4264 | 111 | OSError): | 116 | OSError): |
4265 | 112 | retries -= 1 | 117 | retries -= 1 |
4267 | 113 | time.sleep(10) | 118 | LOG.debug('Retrying ssh connection on connect failure') |
4268 | 119 | time.sleep(3) | ||
4269 | 114 | 120 | ||
4271 | 115 | ssh_cmd = 'Failed ssh connection to %s@%s:%s after 300 seconds' % ( | 121 | ssh_cmd = 'Failed ssh connection to %s@%s:%s after 3 retries' % ( |
4272 | 116 | self.ssh_username, self.ssh_ip, self.ssh_port | 122 | self.ssh_username, self.ssh_ip, self.ssh_port |
4273 | 117 | ) | 123 | ) |
4274 | 118 | raise util.InTargetExecuteError(b'', b'', 1, ssh_cmd, 'ssh') | 124 | raise util.InTargetExecuteError(b'', b'', 1, ssh_cmd, 'ssh') |
4275 | @@ -128,18 +134,31 @@ class Instance(TargetBase): | |||
4276 | 128 | return ' '.join(l for l in test.strip().splitlines() | 134 | return ' '.join(l for l in test.strip().splitlines() |
4277 | 129 | if not l.lstrip().startswith('#')) | 135 | if not l.lstrip().startswith('#')) |
4278 | 130 | 136 | ||
4280 | 131 | time = self.config['boot_timeout'] | 137 | boot_timeout = self.config['boot_timeout'] |
4281 | 132 | tests = [self.config['system_ready_script']] | 138 | tests = [self.config['system_ready_script']] |
4282 | 133 | if wait_for_cloud_init: | 139 | if wait_for_cloud_init: |
4283 | 134 | tests.append(self.config['cloud_init_ready_script']) | 140 | tests.append(self.config['cloud_init_ready_script']) |
4284 | 135 | 141 | ||
4285 | 136 | formatted_tests = ' && '.join(clean_test(t) for t in tests) | 142 | formatted_tests = ' && '.join(clean_test(t) for t in tests) |
4286 | 137 | cmd = ('i=0; while [ $i -lt {time} ] && i=$(($i+1)); do {test} && ' | 143 | cmd = ('i=0; while [ $i -lt {time} ] && i=$(($i+1)); do {test} && ' |
4288 | 138 | 'exit 0; sleep 1; done; exit 1').format(time=time, | 144 | 'exit 0; sleep 1; done; exit 1').format(time=boot_timeout, |
4289 | 139 | test=formatted_tests) | 145 | test=formatted_tests) |
4290 | 140 | 146 | ||
4294 | 141 | if self.execute(cmd, rcs=(0, 1))[-1] != 0: | 147 | end_time = time.time() + boot_timeout |
4295 | 142 | raise OSError('timeout: after {}s system not started'.format(time)) | 148 | while True: |
4296 | 143 | 149 | try: | |
4297 | 150 | return_code = self.execute( | ||
4298 | 151 | cmd, rcs=(0, 1), description='wait for instance start' | ||
4299 | 152 | )[-1] | ||
4300 | 153 | if return_code == 0: | ||
4301 | 154 | break | ||
4302 | 155 | except util.InTargetExecuteError: | ||
4303 | 156 | LOG.warning("failed to connect via SSH") | ||
4304 | 157 | |||
4305 | 158 | if time.time() < end_time: | ||
4306 | 159 | time.sleep(3) | ||
4307 | 160 | else: | ||
4308 | 161 | raise util.PlatformError('ssh', 'after %ss instance is not ' | ||
4309 | 162 | 'reachable' % boot_timeout) | ||
4310 | 144 | 163 | ||
4311 | 145 | # vi: ts=4 expandtab | 164 | # vi: ts=4 expandtab |
4312 | diff --git a/tests/cloud_tests/platforms/lxd/instance.py b/tests/cloud_tests/platforms/lxd/instance.py | |||
4313 | index 1c17c78..d396519 100644 | |||
4314 | --- a/tests/cloud_tests/platforms/lxd/instance.py | |||
4315 | +++ b/tests/cloud_tests/platforms/lxd/instance.py | |||
4316 | @@ -208,7 +208,7 @@ def _has_proper_console_support(): | |||
4317 | 208 | if 'console' not in info.get('api_extensions', []): | 208 | if 'console' not in info.get('api_extensions', []): |
4318 | 209 | reason = "LXD server does not support console api extension" | 209 | reason = "LXD server does not support console api extension" |
4319 | 210 | else: | 210 | else: |
4321 | 211 | dver = info.get('environment', {}).get('driver_version', "") | 211 | dver = str(info.get('environment', {}).get('driver_version', "")) |
4322 | 212 | if dver.startswith("2.") or dver.startswith("1."): | 212 | if dver.startswith("2.") or dver.startswith("1."): |
4323 | 213 | reason = "LXD Driver version not 3.x+ (%s)" % dver | 213 | reason = "LXD Driver version not 3.x+ (%s)" % dver |
4324 | 214 | else: | 214 | else: |
4325 | diff --git a/tests/cloud_tests/releases.yaml b/tests/cloud_tests/releases.yaml | |||
4326 | index c7dcbe8..defae02 100644 | |||
4327 | --- a/tests/cloud_tests/releases.yaml | |||
4328 | +++ b/tests/cloud_tests/releases.yaml | |||
4329 | @@ -129,6 +129,22 @@ features: | |||
4330 | 129 | 129 | ||
4331 | 130 | releases: | 130 | releases: |
4332 | 131 | # UBUNTU ================================================================= | 131 | # UBUNTU ================================================================= |
4333 | 132 | cosmic: | ||
4334 | 133 | # EOL: Jul 2019 | ||
4335 | 134 | default: | ||
4336 | 135 | enabled: true | ||
4337 | 136 | release: cosmic | ||
4338 | 137 | version: 18.10 | ||
4339 | 138 | os: ubuntu | ||
4340 | 139 | feature_groups: | ||
4341 | 140 | - base | ||
4342 | 141 | - debian_base | ||
4343 | 142 | - ubuntu_specific | ||
4344 | 143 | lxd: | ||
4345 | 144 | sstreams_server: https://cloud-images.ubuntu.com/daily | ||
4346 | 145 | alias: cosmic | ||
4347 | 146 | setup_overrides: null | ||
4348 | 147 | override_templates: false | ||
4349 | 132 | bionic: | 148 | bionic: |
4350 | 133 | # EOL: Apr 2023 | 149 | # EOL: Apr 2023 |
4351 | 134 | default: | 150 | default: |
4352 | diff --git a/tests/cloud_tests/stage.py b/tests/cloud_tests/stage.py | |||
4353 | index 74a7d46..d64a1dc 100644 | |||
4354 | --- a/tests/cloud_tests/stage.py | |||
4355 | +++ b/tests/cloud_tests/stage.py | |||
4356 | @@ -12,9 +12,15 @@ from tests.cloud_tests import LOG | |||
4357 | 12 | class PlatformComponent(object): | 12 | class PlatformComponent(object): |
4358 | 13 | """Context manager to safely handle platform components.""" | 13 | """Context manager to safely handle platform components.""" |
4359 | 14 | 14 | ||
4362 | 15 | def __init__(self, get_func): | 15 | def __init__(self, get_func, preserve_instance=False): |
4363 | 16 | """Store get_<platform component> function as partial with no args.""" | 16 | """Store get_<platform component> function as partial with no args. |
4364 | 17 | |||
4365 | 18 | @param get_func: Callable returning an instance from the platform. | ||
4366 | 19 | @param preserve_instance: Boolean, when True, do not destroy instance | ||
4367 | 20 | after test. Used for test development. | ||
4368 | 21 | """ | ||
4369 | 17 | self.get_func = get_func | 22 | self.get_func = get_func |
4370 | 23 | self.preserve_instance = preserve_instance | ||
4371 | 18 | 24 | ||
4372 | 19 | def __enter__(self): | 25 | def __enter__(self): |
4373 | 20 | """Create instance of platform component.""" | 26 | """Create instance of platform component.""" |
4374 | @@ -24,7 +30,10 @@ class PlatformComponent(object): | |||
4375 | 24 | def __exit__(self, etype, value, trace): | 30 | def __exit__(self, etype, value, trace): |
4376 | 25 | """Destroy instance.""" | 31 | """Destroy instance.""" |
4377 | 26 | if self.instance is not None: | 32 | if self.instance is not None: |
4379 | 27 | self.instance.destroy() | 33 | if self.preserve_instance: |
4380 | 34 | LOG.info('Preserving test instance %s', self.instance.name) | ||
4381 | 35 | else: | ||
4382 | 36 | self.instance.destroy() | ||
4383 | 28 | 37 | ||
4384 | 29 | 38 | ||
4385 | 30 | def run_single(name, call): | 39 | def run_single(name, call): |
4386 | diff --git a/tests/cloud_tests/testcases.yaml b/tests/cloud_tests/testcases.yaml | |||
4387 | index a3e2990..a16d1dd 100644 | |||
4388 | --- a/tests/cloud_tests/testcases.yaml | |||
4389 | +++ b/tests/cloud_tests/testcases.yaml | |||
4390 | @@ -24,9 +24,9 @@ base_test_data: | |||
4391 | 24 | status.json: | | 24 | status.json: | |
4392 | 25 | #!/bin/sh | 25 | #!/bin/sh |
4393 | 26 | cat /run/cloud-init/status.json | 26 | cat /run/cloud-init/status.json |
4395 | 27 | cloud-init-version: | | 27 | package-versions: | |
4396 | 28 | #!/bin/sh | 28 | #!/bin/sh |
4398 | 29 | dpkg-query -W -f='${Version}' cloud-init | 29 | dpkg-query --show |
4399 | 30 | system.journal.gz: | | 30 | system.journal.gz: | |
4400 | 31 | #!/bin/sh | 31 | #!/bin/sh |
4401 | 32 | [ -d /run/systemd ] || { echo "not systemd."; exit 0; } | 32 | [ -d /run/systemd ] || { echo "not systemd."; exit 0; } |
4402 | diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py | |||
4403 | index 0d1916b..696db8d 100644 | |||
4404 | --- a/tests/cloud_tests/testcases/base.py | |||
4405 | +++ b/tests/cloud_tests/testcases/base.py | |||
4406 | @@ -31,6 +31,27 @@ class CloudTestCase(unittest.TestCase): | |||
4407 | 31 | def is_distro(self, distro_name): | 31 | def is_distro(self, distro_name): |
4408 | 32 | return self.os_cfg['os'] == distro_name | 32 | return self.os_cfg['os'] == distro_name |
4409 | 33 | 33 | ||
4410 | 34 | def assertPackageInstalled(self, name, version=None): | ||
4411 | 35 | """Check dpkg-query --show output for matching package name. | ||
4412 | 36 | |||
4413 | 37 | @param name: package base name | ||
4414 | 38 | @param version: string representing a package version or part of a | ||
4415 | 39 | version. | ||
4416 | 40 | """ | ||
4417 | 41 | pkg_out = self.get_data_file('package-versions') | ||
4418 | 42 | pkg_match = re.search( | ||
4419 | 43 | '^%s\t(?P<version>.*)$' % name, pkg_out, re.MULTILINE) | ||
4420 | 44 | if pkg_match: | ||
4421 | 45 | installed_version = pkg_match.group('version') | ||
4422 | 46 | if not version: | ||
4423 | 47 | return # Success | ||
4424 | 48 | if installed_version.startswith(version): | ||
4425 | 49 | return # Success | ||
4426 | 50 | raise AssertionError( | ||
4427 | 51 | 'Expected package version %s-%s not found. Found %s' % | ||
4428 | 52 | name, version, installed_version) | ||
4429 | 53 | raise AssertionError('Package not installed: %s' % name) | ||
4430 | 54 | |||
4431 | 34 | def os_version_cmp(self, cmp_version): | 55 | def os_version_cmp(self, cmp_version): |
4432 | 35 | """Compare the version of the test to comparison_version. | 56 | """Compare the version of the test to comparison_version. |
4433 | 36 | 57 | ||
4434 | diff --git a/tests/cloud_tests/testcases/modules/byobu.py b/tests/cloud_tests/testcases/modules/byobu.py | |||
4435 | index 005ca01..74d0529 100644 | |||
4436 | --- a/tests/cloud_tests/testcases/modules/byobu.py | |||
4437 | +++ b/tests/cloud_tests/testcases/modules/byobu.py | |||
4438 | @@ -9,8 +9,7 @@ class TestByobu(base.CloudTestCase): | |||
4439 | 9 | 9 | ||
4440 | 10 | def test_byobu_installed(self): | 10 | def test_byobu_installed(self): |
4441 | 11 | """Test byobu installed.""" | 11 | """Test byobu installed.""" |
4444 | 12 | out = self.get_data_file('byobu_installed') | 12 | self.assertPackageInstalled('byobu') |
4443 | 13 | self.assertIn('/usr/bin/byobu', out) | ||
4445 | 14 | 13 | ||
4446 | 15 | def test_byobu_profile_enabled(self): | 14 | def test_byobu_profile_enabled(self): |
4447 | 16 | """Test byobu profile.d file exists.""" | 15 | """Test byobu profile.d file exists.""" |
4448 | diff --git a/tests/cloud_tests/testcases/modules/byobu.yaml b/tests/cloud_tests/testcases/modules/byobu.yaml | |||
4449 | index a9aa1f3..d002a61 100644 | |||
4450 | --- a/tests/cloud_tests/testcases/modules/byobu.yaml | |||
4451 | +++ b/tests/cloud_tests/testcases/modules/byobu.yaml | |||
4452 | @@ -7,9 +7,6 @@ cloud_config: | | |||
4453 | 7 | #cloud-config | 7 | #cloud-config |
4454 | 8 | byobu_by_default: enable | 8 | byobu_by_default: enable |
4455 | 9 | collect_scripts: | 9 | collect_scripts: |
4456 | 10 | byobu_installed: | | ||
4457 | 11 | #!/bin/bash | ||
4458 | 12 | which byobu | ||
4459 | 13 | byobu_profile_enabled: | | 10 | byobu_profile_enabled: | |
4460 | 14 | #!/bin/bash | 11 | #!/bin/bash |
4461 | 15 | ls /etc/profile.d/Z97-byobu.sh | 12 | ls /etc/profile.d/Z97-byobu.sh |
4462 | diff --git a/tests/cloud_tests/testcases/modules/ca_certs.py b/tests/cloud_tests/testcases/modules/ca_certs.py | |||
4463 | index e75f041..6b56f63 100644 | |||
4464 | --- a/tests/cloud_tests/testcases/modules/ca_certs.py | |||
4465 | +++ b/tests/cloud_tests/testcases/modules/ca_certs.py | |||
4466 | @@ -7,10 +7,23 @@ from tests.cloud_tests.testcases import base | |||
4467 | 7 | class TestCaCerts(base.CloudTestCase): | 7 | class TestCaCerts(base.CloudTestCase): |
4468 | 8 | """Test ca certs module.""" | 8 | """Test ca certs module.""" |
4469 | 9 | 9 | ||
4474 | 10 | def test_cert_count(self): | 10 | def test_certs_updated(self): |
4475 | 11 | """Test the count is proper.""" | 11 | """Test certs have been updated in /etc/ssl/certs.""" |
4476 | 12 | out = self.get_data_file('cert_count') | 12 | out = self.get_data_file('cert_links') |
4477 | 13 | self.assertEqual(5, int(out)) | 13 | # Bionic update-ca-certificates creates less links debian #895075 |
4478 | 14 | unlinked_files = [] | ||
4479 | 15 | links = {} | ||
4480 | 16 | for cert_line in out.splitlines(): | ||
4481 | 17 | if '->' in cert_line: | ||
4482 | 18 | fname, _sep, link = cert_line.split() | ||
4483 | 19 | links[fname] = link | ||
4484 | 20 | else: | ||
4485 | 21 | unlinked_files.append(cert_line) | ||
4486 | 22 | self.assertEqual(['ca-certificates.crt'], unlinked_files) | ||
4487 | 23 | self.assertEqual('cloud-init-ca-certs.pem', links['a535c1f3.0']) | ||
4488 | 24 | self.assertEqual( | ||
4489 | 25 | '/usr/share/ca-certificates/cloud-init-ca-certs.crt', | ||
4490 | 26 | links['cloud-init-ca-certs.pem']) | ||
4491 | 14 | 27 | ||
4492 | 15 | def test_cert_installed(self): | 28 | def test_cert_installed(self): |
4493 | 16 | """Test line from our cert exists.""" | 29 | """Test line from our cert exists.""" |
4494 | diff --git a/tests/cloud_tests/testcases/modules/ca_certs.yaml b/tests/cloud_tests/testcases/modules/ca_certs.yaml | |||
4495 | index d939f43..2cd9155 100644 | |||
4496 | --- a/tests/cloud_tests/testcases/modules/ca_certs.yaml | |||
4497 | +++ b/tests/cloud_tests/testcases/modules/ca_certs.yaml | |||
4498 | @@ -43,9 +43,13 @@ cloud_config: | | |||
4499 | 43 | DiH5uEqBXExjrj0FslxcVKdVj5glVcSmkLwZKbEU1OKwleT/iXFhvooWhQ== | 43 | DiH5uEqBXExjrj0FslxcVKdVj5glVcSmkLwZKbEU1OKwleT/iXFhvooWhQ== |
4500 | 44 | -----END CERTIFICATE----- | 44 | -----END CERTIFICATE----- |
4501 | 45 | collect_scripts: | 45 | collect_scripts: |
4503 | 46 | cert_count: | | 46 | cert_links: | |
4504 | 47 | #!/bin/bash | 47 | #!/bin/bash |
4506 | 48 | ls -l /etc/ssl/certs | wc -l | 48 | # links printed <filename> -> <link target> |
4507 | 49 | # non-links printed <filename> | ||
4508 | 50 | for file in `ls /etc/ssl/certs`; do | ||
4509 | 51 | [ -h /etc/ssl/certs/$file ] && echo -n $file ' -> ' && readlink /etc/ssl/certs/$file || echo $file; | ||
4510 | 52 | done | ||
4511 | 49 | cert: | | 53 | cert: | |
4512 | 50 | #!/bin/bash | 54 | #!/bin/bash |
4513 | 51 | md5sum /etc/ssl/certs/ca-certificates.crt | 55 | md5sum /etc/ssl/certs/ca-certificates.crt |
4514 | diff --git a/tests/cloud_tests/testcases/modules/ntp.py b/tests/cloud_tests/testcases/modules/ntp.py | |||
4515 | index b50e52f..c63cc15 100644 | |||
4516 | --- a/tests/cloud_tests/testcases/modules/ntp.py | |||
4517 | +++ b/tests/cloud_tests/testcases/modules/ntp.py | |||
4518 | @@ -9,15 +9,14 @@ class TestNtp(base.CloudTestCase): | |||
4519 | 9 | 9 | ||
4520 | 10 | def test_ntp_installed(self): | 10 | def test_ntp_installed(self): |
4521 | 11 | """Test ntp installed""" | 11 | """Test ntp installed""" |
4524 | 12 | out = self.get_data_file('ntp_installed') | 12 | self.assertPackageInstalled('ntp') |
4523 | 13 | self.assertEqual(0, int(out)) | ||
4525 | 14 | 13 | ||
4526 | 15 | def test_ntp_dist_entries(self): | 14 | def test_ntp_dist_entries(self): |
4527 | 16 | """Test dist config file is empty""" | 15 | """Test dist config file is empty""" |
4528 | 17 | out = self.get_data_file('ntp_conf_dist_empty') | 16 | out = self.get_data_file('ntp_conf_dist_empty') |
4529 | 18 | self.assertEqual(0, int(out)) | 17 | self.assertEqual(0, int(out)) |
4530 | 19 | 18 | ||
4532 | 20 | def test_ntp_entires(self): | 19 | def test_ntp_entries(self): |
4533 | 21 | """Test config entries""" | 20 | """Test config entries""" |
4534 | 22 | out = self.get_data_file('ntp_conf_pool_list') | 21 | out = self.get_data_file('ntp_conf_pool_list') |
4535 | 23 | self.assertIn('pool.ntp.org iburst', out) | 22 | self.assertIn('pool.ntp.org iburst', out) |
4536 | diff --git a/tests/cloud_tests/testcases/modules/ntp_chrony.py b/tests/cloud_tests/testcases/modules/ntp_chrony.py | |||
4537 | index 461630a..7d34177 100644 | |||
4538 | --- a/tests/cloud_tests/testcases/modules/ntp_chrony.py | |||
4539 | +++ b/tests/cloud_tests/testcases/modules/ntp_chrony.py | |||
4540 | @@ -1,13 +1,24 @@ | |||
4541 | 1 | # This file is part of cloud-init. See LICENSE file for license information. | 1 | # This file is part of cloud-init. See LICENSE file for license information. |
4542 | 2 | 2 | ||
4543 | 3 | """cloud-init Integration Test Verify Script.""" | 3 | """cloud-init Integration Test Verify Script.""" |
4544 | 4 | import unittest | ||
4545 | 5 | |||
4546 | 4 | from tests.cloud_tests.testcases import base | 6 | from tests.cloud_tests.testcases import base |
4547 | 5 | 7 | ||
4548 | 6 | 8 | ||
4549 | 7 | class TestNtpChrony(base.CloudTestCase): | 9 | class TestNtpChrony(base.CloudTestCase): |
4550 | 8 | """Test ntp module with chrony client""" | 10 | """Test ntp module with chrony client""" |
4551 | 9 | 11 | ||
4553 | 10 | def test_chrony_entires(self): | 12 | def setUp(self): |
4554 | 13 | """Skip this suite of tests on lxd and artful or older.""" | ||
4555 | 14 | if self.platform == 'lxd': | ||
4556 | 15 | if self.is_distro('ubuntu') and self.os_version_cmp('artful') <= 0: | ||
4557 | 16 | raise unittest.SkipTest( | ||
4558 | 17 | 'No support for chrony on containers <= artful.' | ||
4559 | 18 | ' LP: #1589780') | ||
4560 | 19 | return super(TestNtpChrony, self).setUp() | ||
4561 | 20 | |||
4562 | 21 | def test_chrony_entries(self): | ||
4563 | 11 | """Test chrony config entries""" | 22 | """Test chrony config entries""" |
4564 | 12 | out = self.get_data_file('chrony_conf') | 23 | out = self.get_data_file('chrony_conf') |
4565 | 13 | self.assertIn('.pool.ntp.org', out) | 24 | self.assertIn('.pool.ntp.org', out) |
4566 | diff --git a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py b/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py | |||
4567 | index a92dec2..fecad76 100644 | |||
4568 | --- a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py | |||
4569 | +++ b/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py | |||
4570 | @@ -7,15 +7,13 @@ from tests.cloud_tests.testcases import base | |||
4571 | 7 | class TestPackageInstallUpdateUpgrade(base.CloudTestCase): | 7 | class TestPackageInstallUpdateUpgrade(base.CloudTestCase): |
4572 | 8 | """Test package install update upgrade module.""" | 8 | """Test package install update upgrade module.""" |
4573 | 9 | 9 | ||
4578 | 10 | def test_installed_htop(self): | 10 | def test_installed_sl(self): |
4579 | 11 | """Test htop got installed.""" | 11 | """Test sl got installed.""" |
4580 | 12 | out = self.get_data_file('dpkg_htop') | 12 | self.assertPackageInstalled('sl') |
4577 | 13 | self.assertEqual(1, int(out)) | ||
4581 | 14 | 13 | ||
4582 | 15 | def test_installed_tree(self): | 14 | def test_installed_tree(self): |
4583 | 16 | """Test tree got installed.""" | 15 | """Test tree got installed.""" |
4586 | 17 | out = self.get_data_file('dpkg_tree') | 16 | self.assertPackageInstalled('tree') |
4585 | 18 | self.assertEqual(1, int(out)) | ||
4587 | 19 | 17 | ||
4588 | 20 | def test_apt_history(self): | 18 | def test_apt_history(self): |
4589 | 21 | """Test apt history for update command.""" | 19 | """Test apt history for update command.""" |
4590 | @@ -23,13 +21,13 @@ class TestPackageInstallUpdateUpgrade(base.CloudTestCase): | |||
4591 | 23 | self.assertIn( | 21 | self.assertIn( |
4592 | 24 | 'Commandline: /usr/bin/apt-get --option=Dpkg::Options' | 22 | 'Commandline: /usr/bin/apt-get --option=Dpkg::Options' |
4593 | 25 | '::=--force-confold --option=Dpkg::options::=--force-unsafe-io ' | 23 | '::=--force-confold --option=Dpkg::options::=--force-unsafe-io ' |
4595 | 26 | '--assume-yes --quiet install htop tree', out) | 24 | '--assume-yes --quiet install sl tree', out) |
4596 | 27 | 25 | ||
4597 | 28 | def test_cloud_init_output(self): | 26 | def test_cloud_init_output(self): |
4598 | 29 | """Test cloud-init-output for install & upgrade stuff.""" | 27 | """Test cloud-init-output for install & upgrade stuff.""" |
4599 | 30 | out = self.get_data_file('cloud-init-output.log') | 28 | out = self.get_data_file('cloud-init-output.log') |
4600 | 31 | self.assertIn('Setting up tree (', out) | 29 | self.assertIn('Setting up tree (', out) |
4602 | 32 | self.assertIn('Setting up htop (', out) | 30 | self.assertIn('Setting up sl (', out) |
4603 | 33 | self.assertIn('Reading package lists...', out) | 31 | self.assertIn('Reading package lists...', out) |
4604 | 34 | self.assertIn('Building dependency tree...', out) | 32 | self.assertIn('Building dependency tree...', out) |
4605 | 35 | self.assertIn('Reading state information...', out) | 33 | self.assertIn('Reading state information...', out) |
4606 | diff --git a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml b/tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml | |||
4607 | index 71d24b8..dd79e43 100644 | |||
4608 | --- a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml | |||
4609 | +++ b/tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml | |||
4610 | @@ -15,7 +15,7 @@ required_features: | |||
4611 | 15 | cloud_config: | | 15 | cloud_config: | |
4612 | 16 | #cloud-config | 16 | #cloud-config |
4613 | 17 | packages: | 17 | packages: |
4615 | 18 | - htop | 18 | - sl |
4616 | 19 | - tree | 19 | - tree |
4617 | 20 | package_update: true | 20 | package_update: true |
4618 | 21 | package_upgrade: true | 21 | package_upgrade: true |
4619 | @@ -23,11 +23,8 @@ collect_scripts: | |||
4620 | 23 | apt_history_cmdline: | | 23 | apt_history_cmdline: | |
4621 | 24 | #!/bin/bash | 24 | #!/bin/bash |
4622 | 25 | grep ^Commandline: /var/log/apt/history.log | 25 | grep ^Commandline: /var/log/apt/history.log |
4624 | 26 | dpkg_htop: | | 26 | dpkg_show: | |
4625 | 27 | #!/bin/bash | 27 | #!/bin/bash |
4630 | 28 | dpkg -l | grep htop | wc -l | 28 | dpkg-query --show |
4627 | 29 | dpkg_tree: | | ||
4628 | 30 | #!/bin/bash | ||
4629 | 31 | dpkg -l | grep tree | wc -l | ||
4631 | 32 | 29 | ||
4632 | 33 | # vi: ts=4 expandtab | 30 | # vi: ts=4 expandtab |
4633 | diff --git a/tests/cloud_tests/testcases/modules/salt_minion.py b/tests/cloud_tests/testcases/modules/salt_minion.py | |||
4634 | index 70917a4..fc9688e 100644 | |||
4635 | --- a/tests/cloud_tests/testcases/modules/salt_minion.py | |||
4636 | +++ b/tests/cloud_tests/testcases/modules/salt_minion.py | |||
4637 | @@ -33,7 +33,6 @@ class Test(base.CloudTestCase): | |||
4638 | 33 | 33 | ||
4639 | 34 | def test_minion_installed(self): | 34 | def test_minion_installed(self): |
4640 | 35 | """Test if the salt-minion package is installed""" | 35 | """Test if the salt-minion package is installed""" |
4643 | 36 | out = self.get_data_file('minion_installed') | 36 | self.assertPackageInstalled('salt-minion') |
4642 | 37 | self.assertEqual(1, int(out)) | ||
4644 | 38 | 37 | ||
4645 | 39 | # vi: ts=4 expandtab | 38 | # vi: ts=4 expandtab |
4646 | diff --git a/tests/cloud_tests/testcases/modules/salt_minion.yaml b/tests/cloud_tests/testcases/modules/salt_minion.yaml | |||
4647 | index f20b976..9227147 100644 | |||
4648 | --- a/tests/cloud_tests/testcases/modules/salt_minion.yaml | |||
4649 | +++ b/tests/cloud_tests/testcases/modules/salt_minion.yaml | |||
4650 | @@ -28,15 +28,22 @@ collect_scripts: | |||
4651 | 28 | cat /etc/salt/minion_id | 28 | cat /etc/salt/minion_id |
4652 | 29 | minion.pem: | | 29 | minion.pem: | |
4653 | 30 | #!/bin/bash | 30 | #!/bin/bash |
4655 | 31 | cat /etc/salt/pki/minion/minion.pem | 31 | PRIV_KEYFILE=/etc/salt/pki/minion/minion.pem |
4656 | 32 | if [ ! -f $PRIV_KEYFILE ]; then | ||
4657 | 33 | # Bionic and later automatically moves /etc/salt/pki/minion/* | ||
4658 | 34 | PRIV_KEYFILE=/var/lib/salt/pki/minion/minion.pem | ||
4659 | 35 | fi | ||
4660 | 36 | cat $PRIV_KEYFILE | ||
4661 | 32 | minion.pub: | | 37 | minion.pub: | |
4662 | 33 | #!/bin/bash | 38 | #!/bin/bash |
4664 | 34 | cat /etc/salt/pki/minion/minion.pub | 39 | PUB_KEYFILE=/etc/salt/pki/minion/minion.pub |
4665 | 40 | if [ ! -f $PUB_KEYFILE ]; then | ||
4666 | 41 | # Bionic and later automatically moves /etc/salt/pki/minion/* | ||
4667 | 42 | PUB_KEYFILE=/var/lib/salt/pki/minion/minion.pub | ||
4668 | 43 | fi | ||
4669 | 44 | cat $PUB_KEYFILE | ||
4670 | 35 | grains: | | 45 | grains: | |
4671 | 36 | #!/bin/bash | 46 | #!/bin/bash |
4672 | 37 | cat /etc/salt/grains | 47 | cat /etc/salt/grains |
4673 | 38 | minion_installed: | | ||
4674 | 39 | #!/bin/bash | ||
4675 | 40 | dpkg -l | grep salt-minion | grep ii | wc -l | ||
4676 | 41 | 48 | ||
4677 | 42 | # vi: ts=4 expandtab | 49 | # vi: ts=4 expandtab |
4678 | diff --git a/tests/cloud_tests/verify.py b/tests/cloud_tests/verify.py | |||
4679 | index 5a68a48..bfb2744 100644 | |||
4680 | --- a/tests/cloud_tests/verify.py | |||
4681 | +++ b/tests/cloud_tests/verify.py | |||
4682 | @@ -56,6 +56,51 @@ def verify_data(data_dir, platform, os_name, tests): | |||
4683 | 56 | return res | 56 | return res |
4684 | 57 | 57 | ||
4685 | 58 | 58 | ||
4686 | 59 | def format_test_failures(test_result): | ||
4687 | 60 | """Return a human-readable printable format of test failures.""" | ||
4688 | 61 | if not test_result['failures']: | ||
4689 | 62 | return '' | ||
4690 | 63 | failure_hdr = ' test failures:' | ||
4691 | 64 | failure_fmt = ' * {module}.{class}.{function}\n {error}' | ||
4692 | 65 | output = [] | ||
4693 | 66 | for failure in test_result['failures']: | ||
4694 | 67 | if not output: | ||
4695 | 68 | output = [failure_hdr] | ||
4696 | 69 | output.append(failure_fmt.format(**failure)) | ||
4697 | 70 | return '\n'.join(output) | ||
4698 | 71 | |||
4699 | 72 | |||
4700 | 73 | def format_results(res): | ||
4701 | 74 | """Return human-readable results as a string""" | ||
4702 | 75 | platform_hdr = 'Platform: {platform}' | ||
4703 | 76 | distro_hdr = ' Distro: {distro}' | ||
4704 | 77 | distro_summary_fmt = ( | ||
4705 | 78 | ' test modules passed:{passed} tests failed:{failed}') | ||
4706 | 79 | output = [''] | ||
4707 | 80 | counts = {} | ||
4708 | 81 | for platform, platform_data in res.items(): | ||
4709 | 82 | output.append(platform_hdr.format(platform=platform)) | ||
4710 | 83 | counts[platform] = {} | ||
4711 | 84 | for distro, distro_data in platform_data.items(): | ||
4712 | 85 | distro_failure_output = [] | ||
4713 | 86 | output.append(distro_hdr.format(distro=distro)) | ||
4714 | 87 | counts[platform][distro] = {'passed': 0, 'failed': 0} | ||
4715 | 88 | for _, test_result in distro_data.items(): | ||
4716 | 89 | if test_result['passed']: | ||
4717 | 90 | counts[platform][distro]['passed'] += 1 | ||
4718 | 91 | else: | ||
4719 | 92 | counts[platform][distro]['failed'] += len( | ||
4720 | 93 | test_result['failures']) | ||
4721 | 94 | failure_output = format_test_failures(test_result) | ||
4722 | 95 | if failure_output: | ||
4723 | 96 | distro_failure_output.append(failure_output) | ||
4724 | 97 | output.append( | ||
4725 | 98 | distro_summary_fmt.format(**counts[platform][distro])) | ||
4726 | 99 | if distro_failure_output: | ||
4727 | 100 | output.extend(distro_failure_output) | ||
4728 | 101 | return '\n'.join(output) | ||
4729 | 102 | |||
4730 | 103 | |||
4731 | 59 | def verify(args): | 104 | def verify(args): |
4732 | 60 | """Verify test data. | 105 | """Verify test data. |
4733 | 61 | 106 | ||
4734 | @@ -90,7 +135,7 @@ def verify(args): | |||
4735 | 90 | failed += len(fail_list) | 135 | failed += len(fail_list) |
4736 | 91 | 136 | ||
4737 | 92 | # dump results | 137 | # dump results |
4739 | 93 | LOG.debug('verify results: %s', res) | 138 | LOG.debug('\n---- Verify summarized results:\n%s', format_results(res)) |
4740 | 94 | if args.result: | 139 | if args.result: |
4741 | 95 | util.merge_results({'verify': res}, args.result) | 140 | util.merge_results({'verify': res}, args.result) |
4742 | 96 | 141 | ||
4743 | diff --git a/tests/data/netinfo/netdev-formatted-output-down b/tests/data/netinfo/netdev-formatted-output-down | |||
4744 | 97 | new file mode 100644 | 142 | new file mode 100644 |
4745 | index 0000000..038dfb4 | |||
4746 | --- /dev/null | |||
4747 | +++ b/tests/data/netinfo/netdev-formatted-output-down | |||
4748 | @@ -0,0 +1,8 @@ | |||
4749 | 1 | +++++++++++++++++++++++++++Net device info++++++++++++++++++++++++++++ | ||
4750 | 2 | +--------+-------+-----------+-----------+-------+-------------------+ | ||
4751 | 3 | | Device | Up | Address | Mask | Scope | Hw-Address | | ||
4752 | 4 | +--------+-------+-----------+-----------+-------+-------------------+ | ||
4753 | 5 | | eth0 | False | . | . | . | 00:16:3e:de:51:a6 | | ||
4754 | 6 | | lo | True | 127.0.0.1 | 255.0.0.0 | host | . | | ||
4755 | 7 | | lo | True | ::1/128 | . | host | . | | ||
4756 | 8 | +--------+-------+-----------+-----------+-------+-------------------+ | ||
4757 | diff --git a/tests/data/netinfo/new-ifconfig-output-down b/tests/data/netinfo/new-ifconfig-output-down | |||
4758 | 0 | new file mode 100644 | 9 | new file mode 100644 |
4759 | index 0000000..5d12e35 | |||
4760 | --- /dev/null | |||
4761 | +++ b/tests/data/netinfo/new-ifconfig-output-down | |||
4762 | @@ -0,0 +1,15 @@ | |||
4763 | 1 | eth0: flags=4098<BROADCAST,MULTICAST> mtu 1500 | ||
4764 | 2 | ether 00:16:3e:de:51:a6 txqueuelen 1000 (Ethernet) | ||
4765 | 3 | RX packets 126229 bytes 158139342 (158.1 MB) | ||
4766 | 4 | RX errors 0 dropped 0 overruns 0 frame 0 | ||
4767 | 5 | TX packets 59317 bytes 4839008 (4.8 MB) | ||
4768 | 6 | TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0 | ||
4769 | 7 | |||
4770 | 8 | lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536 | ||
4771 | 9 | inet 127.0.0.1 netmask 255.0.0.0 | ||
4772 | 10 | inet6 ::1 prefixlen 128 scopeid 0x10<host> | ||
4773 | 11 | loop txqueuelen 1000 (Local Loopback) | ||
4774 | 12 | RX packets 260 bytes 20092 (20.0 KB) | ||
4775 | 13 | RX errors 0 dropped 0 overruns 0 frame 0 | ||
4776 | 14 | TX packets 260 bytes 20092 (20.0 KB) | ||
4777 | 15 | TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0 | ||
4778 | diff --git a/tests/data/netinfo/sample-ipaddrshow-output-down b/tests/data/netinfo/sample-ipaddrshow-output-down | |||
4779 | 0 | new file mode 100644 | 16 | new file mode 100644 |
4780 | index 0000000..cb516d6 | |||
4781 | --- /dev/null | |||
4782 | +++ b/tests/data/netinfo/sample-ipaddrshow-output-down | |||
4783 | @@ -0,0 +1,8 @@ | |||
4784 | 1 | 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 | ||
4785 | 2 | link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 | ||
4786 | 3 | inet 127.0.0.1/8 scope host lo | ||
4787 | 4 | valid_lft forever preferred_lft forever | ||
4788 | 5 | inet6 ::1/128 scope host | ||
4789 | 6 | valid_lft forever preferred_lft forever | ||
4790 | 7 | 44: eth0@if45: <BROADCAST,MULTICAST> mtu 1500 qdisc noqueue state DOWN group default qlen 1000 | ||
4791 | 8 | link/ether 00:16:3e:de:51:a6 brd ff:ff:ff:ff:ff:ff link-netnsid 0 | ||
4792 | diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py | |||
4793 | index f1ab02e..739bbeb 100644 | |||
4794 | --- a/tests/unittests/test__init__.py | |||
4795 | +++ b/tests/unittests/test__init__.py | |||
4796 | @@ -182,7 +182,7 @@ class TestCmdlineUrl(CiTestCase): | |||
4797 | 182 | self.assertEqual( | 182 | self.assertEqual( |
4798 | 183 | ('url', 'http://example.com'), main.parse_cmdline_url(cmdline)) | 183 | ('url', 'http://example.com'), main.parse_cmdline_url(cmdline)) |
4799 | 184 | 184 | ||
4801 | 185 | @mock.patch('cloudinit.cmd.main.util.read_file_or_url') | 185 | @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url') |
4802 | 186 | def test_invalid_content(self, m_read): | 186 | def test_invalid_content(self, m_read): |
4803 | 187 | key = "cloud-config-url" | 187 | key = "cloud-config-url" |
4804 | 188 | url = 'http://example.com/foo' | 188 | url = 'http://example.com/foo' |
4805 | @@ -196,7 +196,7 @@ class TestCmdlineUrl(CiTestCase): | |||
4806 | 196 | self.assertIn(url, msg) | 196 | self.assertIn(url, msg) |
4807 | 197 | self.assertFalse(os.path.exists(fpath)) | 197 | self.assertFalse(os.path.exists(fpath)) |
4808 | 198 | 198 | ||
4810 | 199 | @mock.patch('cloudinit.cmd.main.util.read_file_or_url') | 199 | @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url') |
4811 | 200 | def test_valid_content(self, m_read): | 200 | def test_valid_content(self, m_read): |
4812 | 201 | url = "http://example.com/foo" | 201 | url = "http://example.com/foo" |
4813 | 202 | payload = b"#cloud-config\nmydata: foo\nbar: wark\n" | 202 | payload = b"#cloud-config\nmydata: foo\nbar: wark\n" |
4814 | @@ -210,7 +210,7 @@ class TestCmdlineUrl(CiTestCase): | |||
4815 | 210 | self.assertEqual(logging.INFO, lvl) | 210 | self.assertEqual(logging.INFO, lvl) |
4816 | 211 | self.assertIn(url, msg) | 211 | self.assertIn(url, msg) |
4817 | 212 | 212 | ||
4819 | 213 | @mock.patch('cloudinit.cmd.main.util.read_file_or_url') | 213 | @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url') |
4820 | 214 | def test_no_key_found(self, m_read): | 214 | def test_no_key_found(self, m_read): |
4821 | 215 | cmdline = "ro mykey=http://example.com/foo root=foo" | 215 | cmdline = "ro mykey=http://example.com/foo root=foo" |
4822 | 216 | fpath = self.tmp_path("ccpath") | 216 | fpath = self.tmp_path("ccpath") |
4823 | @@ -221,7 +221,7 @@ class TestCmdlineUrl(CiTestCase): | |||
4824 | 221 | self.assertFalse(os.path.exists(fpath)) | 221 | self.assertFalse(os.path.exists(fpath)) |
4825 | 222 | self.assertEqual(logging.DEBUG, lvl) | 222 | self.assertEqual(logging.DEBUG, lvl) |
4826 | 223 | 223 | ||
4828 | 224 | @mock.patch('cloudinit.cmd.main.util.read_file_or_url') | 224 | @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url') |
4829 | 225 | def test_exception_warns(self, m_read): | 225 | def test_exception_warns(self, m_read): |
4830 | 226 | url = "http://example.com/foo" | 226 | url = "http://example.com/foo" |
4831 | 227 | cmdline = "ro cloud-config-url=%s root=LABEL=bar" % url | 227 | cmdline = "ro cloud-config-url=%s root=LABEL=bar" % url |
4832 | diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py | |||
4833 | index 275b16d..3efe7ad 100644 | |||
4834 | --- a/tests/unittests/test_data.py | |||
4835 | +++ b/tests/unittests/test_data.py | |||
4836 | @@ -524,7 +524,17 @@ c: 4 | |||
4837 | 524 | self.assertEqual(cfg.get('password'), 'gocubs') | 524 | self.assertEqual(cfg.get('password'), 'gocubs') |
4838 | 525 | self.assertEqual(cfg.get('locale'), 'chicago') | 525 | self.assertEqual(cfg.get('locale'), 'chicago') |
4839 | 526 | 526 | ||
4841 | 527 | @httpretty.activate | 527 | |
4842 | 528 | class TestConsumeUserDataHttp(TestConsumeUserData, helpers.HttprettyTestCase): | ||
4843 | 529 | |||
4844 | 530 | def setUp(self): | ||
4845 | 531 | TestConsumeUserData.setUp(self) | ||
4846 | 532 | helpers.HttprettyTestCase.setUp(self) | ||
4847 | 533 | |||
4848 | 534 | def tearDown(self): | ||
4849 | 535 | TestConsumeUserData.tearDown(self) | ||
4850 | 536 | helpers.HttprettyTestCase.tearDown(self) | ||
4851 | 537 | |||
4852 | 528 | @mock.patch('cloudinit.url_helper.time.sleep') | 538 | @mock.patch('cloudinit.url_helper.time.sleep') |
4853 | 529 | def test_include(self, mock_sleep): | 539 | def test_include(self, mock_sleep): |
4854 | 530 | """Test #include.""" | 540 | """Test #include.""" |
4855 | @@ -543,7 +553,6 @@ c: 4 | |||
4856 | 543 | cc = util.load_yaml(cc_contents) | 553 | cc = util.load_yaml(cc_contents) |
4857 | 544 | self.assertTrue(cc.get('included')) | 554 | self.assertTrue(cc.get('included')) |
4858 | 545 | 555 | ||
4859 | 546 | @httpretty.activate | ||
4860 | 547 | @mock.patch('cloudinit.url_helper.time.sleep') | 556 | @mock.patch('cloudinit.url_helper.time.sleep') |
4861 | 548 | def test_include_bad_url(self, mock_sleep): | 557 | def test_include_bad_url(self, mock_sleep): |
4862 | 549 | """Test #include with a bad URL.""" | 558 | """Test #include with a bad URL.""" |
4863 | @@ -597,8 +606,10 @@ class TestUDProcess(helpers.ResourceUsingTestCase): | |||
4864 | 597 | 606 | ||
4865 | 598 | 607 | ||
4866 | 599 | class TestConvertString(helpers.TestCase): | 608 | class TestConvertString(helpers.TestCase): |
4867 | 609 | |||
4868 | 600 | def test_handles_binary_non_utf8_decodable(self): | 610 | def test_handles_binary_non_utf8_decodable(self): |
4870 | 601 | blob = b'\x32\x99' | 611 | """Printable unicode (not utf8-decodable) is safely converted.""" |
4871 | 612 | blob = b'#!/bin/bash\necho \xc3\x84\n' | ||
4872 | 602 | msg = ud.convert_string(blob) | 613 | msg = ud.convert_string(blob) |
4873 | 603 | self.assertEqual(blob, msg.get_payload(decode=True)) | 614 | self.assertEqual(blob, msg.get_payload(decode=True)) |
4874 | 604 | 615 | ||
4875 | @@ -612,6 +623,13 @@ class TestConvertString(helpers.TestCase): | |||
4876 | 612 | msg = ud.convert_string(text) | 623 | msg = ud.convert_string(text) |
4877 | 613 | self.assertEqual(text, msg.get_payload(decode=False)) | 624 | self.assertEqual(text, msg.get_payload(decode=False)) |
4878 | 614 | 625 | ||
4879 | 626 | def test_handle_mime_parts(self): | ||
4880 | 627 | """Mime parts are properly returned as a mime message.""" | ||
4881 | 628 | message = MIMEBase("text", "plain") | ||
4882 | 629 | message.set_payload("Just text") | ||
4883 | 630 | msg = ud.convert_string(str(message)) | ||
4884 | 631 | self.assertEqual("Just text", msg.get_payload(decode=False)) | ||
4885 | 632 | |||
4886 | 615 | 633 | ||
4887 | 616 | class TestFetchBaseConfig(helpers.TestCase): | 634 | class TestFetchBaseConfig(helpers.TestCase): |
4888 | 617 | def test_only_builtin_gets_builtin(self): | 635 | def test_only_builtin_gets_builtin(self): |
4889 | diff --git a/tests/unittests/test_datasource/test_aliyun.py b/tests/unittests/test_datasource/test_aliyun.py | |||
4890 | index 4fa9616..1e77842 100644 | |||
4891 | --- a/tests/unittests/test_datasource/test_aliyun.py | |||
4892 | +++ b/tests/unittests/test_datasource/test_aliyun.py | |||
4893 | @@ -130,7 +130,6 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase): | |||
4894 | 130 | self.ds.get_hostname()) | 130 | self.ds.get_hostname()) |
4895 | 131 | 131 | ||
4896 | 132 | @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun") | 132 | @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun") |
4897 | 133 | @httpretty.activate | ||
4898 | 134 | def test_with_mock_server(self, m_is_aliyun): | 133 | def test_with_mock_server(self, m_is_aliyun): |
4899 | 135 | m_is_aliyun.return_value = True | 134 | m_is_aliyun.return_value = True |
4900 | 136 | self.regist_default_server() | 135 | self.regist_default_server() |
4901 | @@ -143,7 +142,6 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase): | |||
4902 | 143 | self._test_host_name() | 142 | self._test_host_name() |
4903 | 144 | 143 | ||
4904 | 145 | @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun") | 144 | @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun") |
4905 | 146 | @httpretty.activate | ||
4906 | 147 | def test_returns_false_when_not_on_aliyun(self, m_is_aliyun): | 145 | def test_returns_false_when_not_on_aliyun(self, m_is_aliyun): |
4907 | 148 | """If is_aliyun returns false, then get_data should return False.""" | 146 | """If is_aliyun returns false, then get_data should return False.""" |
4908 | 149 | m_is_aliyun.return_value = False | 147 | m_is_aliyun.return_value = False |
4909 | diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py | |||
4910 | index 88fe76c..e82716e 100644 | |||
4911 | --- a/tests/unittests/test_datasource/test_azure.py | |||
4912 | +++ b/tests/unittests/test_datasource/test_azure.py | |||
4913 | @@ -1,10 +1,10 @@ | |||
4914 | 1 | # This file is part of cloud-init. See LICENSE file for license information. | 1 | # This file is part of cloud-init. See LICENSE file for license information. |
4915 | 2 | 2 | ||
4916 | 3 | from cloudinit import helpers | 3 | from cloudinit import helpers |
4917 | 4 | from cloudinit.util import b64e, decode_binary, load_file, write_file | ||
4918 | 5 | from cloudinit.sources import DataSourceAzure as dsaz | 4 | from cloudinit.sources import DataSourceAzure as dsaz |
4921 | 6 | from cloudinit.util import find_freebsd_part | 5 | from cloudinit.util import (b64e, decode_binary, load_file, write_file, |
4922 | 7 | from cloudinit.util import get_path_dev_freebsd | 6 | find_freebsd_part, get_path_dev_freebsd, |
4923 | 7 | MountFailedError) | ||
4924 | 8 | from cloudinit.version import version_string as vs | 8 | from cloudinit.version import version_string as vs |
4925 | 9 | from cloudinit.tests.helpers import (CiTestCase, TestCase, populate_dir, mock, | 9 | from cloudinit.tests.helpers import (CiTestCase, TestCase, populate_dir, mock, |
4926 | 10 | ExitStack, PY26, SkipTest) | 10 | ExitStack, PY26, SkipTest) |
4927 | @@ -95,6 +95,8 @@ class TestAzureDataSource(CiTestCase): | |||
4928 | 95 | self.patches = ExitStack() | 95 | self.patches = ExitStack() |
4929 | 96 | self.addCleanup(self.patches.close) | 96 | self.addCleanup(self.patches.close) |
4930 | 97 | 97 | ||
4931 | 98 | self.patches.enter_context(mock.patch.object(dsaz, '_get_random_seed')) | ||
4932 | 99 | |||
4933 | 98 | super(TestAzureDataSource, self).setUp() | 100 | super(TestAzureDataSource, self).setUp() |
4934 | 99 | 101 | ||
4935 | 100 | def apply_patches(self, patches): | 102 | def apply_patches(self, patches): |
4936 | @@ -335,6 +337,18 @@ fdescfs /dev/fd fdescfs rw 0 0 | |||
4937 | 335 | self.assertTrue(ret) | 337 | self.assertTrue(ret) |
4938 | 336 | self.assertEqual(data['agent_invoked'], '_COMMAND') | 338 | self.assertEqual(data['agent_invoked'], '_COMMAND') |
4939 | 337 | 339 | ||
4940 | 340 | def test_sys_cfg_set_never_destroy_ntfs(self): | ||
4941 | 341 | sys_cfg = {'datasource': {'Azure': { | ||
4942 | 342 | 'never_destroy_ntfs': 'user-supplied-value'}}} | ||
4943 | 343 | data = {'ovfcontent': construct_valid_ovf_env(data={}), | ||
4944 | 344 | 'sys_cfg': sys_cfg} | ||
4945 | 345 | |||
4946 | 346 | dsrc = self._get_ds(data) | ||
4947 | 347 | ret = self._get_and_setup(dsrc) | ||
4948 | 348 | self.assertTrue(ret) | ||
4949 | 349 | self.assertEqual(dsrc.ds_cfg.get(dsaz.DS_CFG_KEY_PRESERVE_NTFS), | ||
4950 | 350 | 'user-supplied-value') | ||
4951 | 351 | |||
4952 | 338 | def test_username_used(self): | 352 | def test_username_used(self): |
4953 | 339 | odata = {'HostName': "myhost", 'UserName': "myuser"} | 353 | odata = {'HostName': "myhost", 'UserName': "myuser"} |
4954 | 340 | data = {'ovfcontent': construct_valid_ovf_env(data=odata)} | 354 | data = {'ovfcontent': construct_valid_ovf_env(data=odata)} |
4955 | @@ -676,6 +690,8 @@ class TestAzureBounce(CiTestCase): | |||
4956 | 676 | mock.MagicMock(return_value={}))) | 690 | mock.MagicMock(return_value={}))) |
4957 | 677 | self.patches.enter_context( | 691 | self.patches.enter_context( |
4958 | 678 | mock.patch.object(dsaz.util, 'which', lambda x: True)) | 692 | mock.patch.object(dsaz.util, 'which', lambda x: True)) |
4959 | 693 | self.patches.enter_context( | ||
4960 | 694 | mock.patch.object(dsaz, '_get_random_seed')) | ||
4961 | 679 | 695 | ||
4962 | 680 | def _dmi_mocks(key): | 696 | def _dmi_mocks(key): |
4963 | 681 | if key == 'system-uuid': | 697 | if key == 'system-uuid': |
4964 | @@ -957,7 +973,9 @@ class TestCanDevBeReformatted(CiTestCase): | |||
4965 | 957 | # return sorted by partition number | 973 | # return sorted by partition number |
4966 | 958 | return sorted(ret, key=lambda d: d[0]) | 974 | return sorted(ret, key=lambda d: d[0]) |
4967 | 959 | 975 | ||
4969 | 960 | def mount_cb(device, callback): | 976 | def mount_cb(device, callback, mtype, update_env_for_mount): |
4970 | 977 | self.assertEqual('ntfs', mtype) | ||
4971 | 978 | self.assertEqual('C', update_env_for_mount.get('LANG')) | ||
4972 | 961 | p = self.tmp_dir() | 979 | p = self.tmp_dir() |
4973 | 962 | for f in bypath.get(device).get('files', []): | 980 | for f in bypath.get(device).get('files', []): |
4974 | 963 | write_file(os.path.join(p, f), content=f) | 981 | write_file(os.path.join(p, f), content=f) |
4975 | @@ -988,14 +1006,16 @@ class TestCanDevBeReformatted(CiTestCase): | |||
4976 | 988 | '/dev/sda2': {'num': 2}, | 1006 | '/dev/sda2': {'num': 2}, |
4977 | 989 | '/dev/sda3': {'num': 3}, | 1007 | '/dev/sda3': {'num': 3}, |
4978 | 990 | }}}) | 1008 | }}}) |
4980 | 991 | value, msg = dsaz.can_dev_be_reformatted("/dev/sda") | 1009 | value, msg = dsaz.can_dev_be_reformatted("/dev/sda", |
4981 | 1010 | preserve_ntfs=False) | ||
4982 | 992 | self.assertFalse(value) | 1011 | self.assertFalse(value) |
4983 | 993 | self.assertIn("3 or more", msg.lower()) | 1012 | self.assertIn("3 or more", msg.lower()) |
4984 | 994 | 1013 | ||
4985 | 995 | def test_no_partitions_is_false(self): | 1014 | def test_no_partitions_is_false(self): |
4986 | 996 | """A disk with no partitions can not be formatted.""" | 1015 | """A disk with no partitions can not be formatted.""" |
4987 | 997 | self.patchup({'/dev/sda': {}}) | 1016 | self.patchup({'/dev/sda': {}}) |
4989 | 998 | value, msg = dsaz.can_dev_be_reformatted("/dev/sda") | 1017 | value, msg = dsaz.can_dev_be_reformatted("/dev/sda", |
4990 | 1018 | preserve_ntfs=False) | ||
4991 | 999 | self.assertFalse(value) | 1019 | self.assertFalse(value) |
4992 | 1000 | self.assertIn("not partitioned", msg.lower()) | 1020 | self.assertIn("not partitioned", msg.lower()) |
4993 | 1001 | 1021 | ||
4994 | @@ -1007,7 +1027,8 @@ class TestCanDevBeReformatted(CiTestCase): | |||
4995 | 1007 | '/dev/sda1': {'num': 1}, | 1027 | '/dev/sda1': {'num': 1}, |
4996 | 1008 | '/dev/sda2': {'num': 2, 'fs': 'ext4', 'files': []}, | 1028 | '/dev/sda2': {'num': 2, 'fs': 'ext4', 'files': []}, |
4997 | 1009 | }}}) | 1029 | }}}) |
4999 | 1010 | value, msg = dsaz.can_dev_be_reformatted("/dev/sda") | 1030 | value, msg = dsaz.can_dev_be_reformatted("/dev/sda", |
5000 | 1031 | preserve_ntfs=False) |
The diff has been truncated for viewing.
PASSED: Continuous integration, rev:d39e24e74c4 f0486ceb9aa4a1d b77c7a537db996 /jenkins. ubuntu. com/server/ job/cloud- init-ci/ 120/
https:/
Executed test runs:
SUCCESS: Checkout
SUCCESS: Unit & Style Tests
SUCCESS: Ubuntu LTS: Build
SUCCESS: Ubuntu LTS: Integration
SUCCESS: MAAS Compatability Testing
IN_PROGRESS: Declarative: Post Actions
Click here to trigger a rebuild: /jenkins. ubuntu. com/server/ job/cloud- init-ci/ 120/rebuild
https:/