Merge ~chad.smith/cloud-init:ubuntu/artful into cloud-init:ubuntu/artful

Proposed by Chad Smith
Status: Merged
Merged at revision: 2022cd6ee06582153a55e51db5e5ae0b5398ba2e
Proposed branch: ~chad.smith/cloud-init:ubuntu/artful
Merge into: cloud-init:ubuntu/artful
Diff against target: 15370 lines (+7268/-1952)
200 files modified
.pylintrc (+1/-1)
ChangeLog (+226/-0)
MANIFEST.in (+1/-0)
bash_completion/cloud-init (+77/-0)
cloudinit/analyze/__main__.py (+1/-1)
cloudinit/analyze/dump.py (+1/-1)
cloudinit/apport.py (+23/-4)
cloudinit/cmd/devel/logs.py (+48/-11)
cloudinit/cmd/devel/tests/test_logs.py (+18/-3)
cloudinit/cmd/main.py (+1/-1)
cloudinit/cmd/tests/test_main.py (+3/-3)
cloudinit/config/cc_apt_configure.py (+2/-2)
cloudinit/config/cc_bootcmd.py (+0/-1)
cloudinit/config/cc_disable_ec2_metadata.py (+12/-2)
cloudinit/config/cc_disk_setup.py (+4/-8)
cloudinit/config/cc_emit_upstart.py (+1/-1)
cloudinit/config/cc_lxd.py (+56/-8)
cloudinit/config/cc_mounts.py (+45/-30)
cloudinit/config/cc_ntp.py (+407/-78)
cloudinit/config/cc_phone_home.py (+4/-3)
cloudinit/config/cc_power_state_change.py (+1/-1)
cloudinit/config/cc_resizefs.py (+4/-6)
cloudinit/config/cc_rh_subscription.py (+8/-10)
cloudinit/config/cc_rsyslog.py (+2/-2)
cloudinit/config/cc_runcmd.py (+0/-1)
cloudinit/config/cc_set_passwords.py (+45/-60)
cloudinit/config/cc_snap.py (+2/-3)
cloudinit/config/cc_snappy.py (+2/-2)
cloudinit/config/cc_ubuntu_advantage.py (+2/-3)
cloudinit/config/cc_users_groups.py (+6/-2)
cloudinit/config/schema.py (+48/-20)
cloudinit/config/tests/test_disable_ec2_metadata.py (+50/-0)
cloudinit/config/tests/test_set_passwords.py (+71/-0)
cloudinit/config/tests/test_snap.py (+27/-2)
cloudinit/config/tests/test_ubuntu_advantage.py (+28/-2)
cloudinit/distros/__init__.py (+13/-1)
cloudinit/distros/freebsd.py (+5/-5)
cloudinit/distros/opensuse.py (+24/-0)
cloudinit/distros/ubuntu.py (+19/-0)
cloudinit/ec2_utils.py (+6/-8)
cloudinit/handlers/upstart_job.py (+1/-1)
cloudinit/net/__init__.py (+33/-3)
cloudinit/net/cmdline.py (+1/-1)
cloudinit/net/dhcp.py (+1/-1)
cloudinit/net/eni.py (+17/-3)
cloudinit/net/netplan.py (+14/-8)
cloudinit/net/network_state.py (+5/-6)
cloudinit/net/sysconfig.py (+8/-2)
cloudinit/net/tests/test_init.py (+1/-0)
cloudinit/netinfo.py (+300/-79)
cloudinit/reporting/events.py (+1/-1)
cloudinit/sources/DataSourceAliYun.py (+1/-1)
cloudinit/sources/DataSourceAltCloud.py (+9/-12)
cloudinit/sources/DataSourceAzure.py (+75/-42)
cloudinit/sources/DataSourceCloudStack.py (+10/-21)
cloudinit/sources/DataSourceConfigDrive.py (+10/-5)
cloudinit/sources/DataSourceEc2.py (+15/-33)
cloudinit/sources/DataSourceIBMCloud.py (+92/-14)
cloudinit/sources/DataSourceMAAS.py (+2/-2)
cloudinit/sources/DataSourceNoCloud.py (+2/-2)
cloudinit/sources/DataSourceOVF.py (+1/-1)
cloudinit/sources/DataSourceOpenNebula.py (+1/-1)
cloudinit/sources/DataSourceOpenStack.py (+127/-55)
cloudinit/sources/DataSourceSmartOS.py (+163/-33)
cloudinit/sources/__init__.py (+76/-0)
cloudinit/sources/helpers/azure.py (+3/-2)
cloudinit/sources/helpers/digitalocean.py (+3/-4)
cloudinit/sources/helpers/openstack.py (+1/-1)
cloudinit/sources/helpers/vmware/imc/config_nic.py (+1/-1)
cloudinit/sources/helpers/vmware/imc/config_passwd.py (+2/-2)
cloudinit/sources/helpers/vmware/imc/guestcust_util.py (+2/-2)
cloudinit/sources/tests/test_init.py (+88/-3)
cloudinit/ssh_util.py (+63/-7)
cloudinit/stages.py (+17/-9)
cloudinit/templater.py (+10/-2)
cloudinit/tests/helpers.py (+56/-30)
cloudinit/tests/test_netinfo.py (+147/-86)
cloudinit/tests/test_url_helper.py (+27/-1)
cloudinit/tests/test_util.py (+127/-2)
cloudinit/tests/test_version.py (+17/-0)
cloudinit/url_helper.py (+29/-2)
cloudinit/user_data.py (+16/-12)
cloudinit/util.py (+171/-68)
cloudinit/version.py (+5/-1)
config/cloud.cfg.tmpl (+2/-0)
debian/changelog (+92/-3)
debian/patches/openstack-no-network-config.patch (+2/-4)
doc/examples/cloud-config-disk-setup.txt (+2/-2)
doc/examples/cloud-config-user-groups.txt (+20/-7)
doc/rtd/topics/datasources.rst (+98/-0)
doc/rtd/topics/datasources/aliyun.rst (+74/-0)
doc/rtd/topics/datasources/cloudstack.rst (+20/-6)
doc/rtd/topics/datasources/ec2.rst (+30/-0)
doc/rtd/topics/datasources/openstack.rst (+21/-2)
doc/rtd/topics/network-config-format-v1.rst (+27/-0)
doc/rtd/topics/network-config-format-v2.rst (+6/-0)
doc/rtd/topics/tests.rst (+6/-1)
integration-requirements.txt (+1/-1)
packages/bddeb (+36/-4)
packages/brpm (+3/-3)
packages/debian/changelog.in (+1/-1)
packages/debian/control.in (+1/-0)
packages/debian/rules.in (+2/-0)
packages/redhat/cloud-init.spec.in (+8/-0)
packages/suse/cloud-init.spec.in (+29/-42)
setup.py (+15/-3)
systemd/cloud-config.service.tmpl (+1/-0)
templates/chrony.conf.debian.tmpl (+39/-0)
templates/chrony.conf.fedora.tmpl (+48/-0)
templates/chrony.conf.opensuse.tmpl (+38/-0)
templates/chrony.conf.rhel.tmpl (+45/-0)
templates/chrony.conf.sles.tmpl (+38/-0)
templates/chrony.conf.ubuntu.tmpl (+42/-0)
tests/cloud_tests/args.py (+3/-0)
tests/cloud_tests/bddeb.py (+1/-1)
tests/cloud_tests/collect.py (+5/-3)
tests/cloud_tests/platforms/instances.py (+30/-11)
tests/cloud_tests/platforms/lxd/instance.py (+5/-7)
tests/cloud_tests/releases.yaml (+16/-0)
tests/cloud_tests/setup_image.py (+5/-6)
tests/cloud_tests/stage.py (+12/-3)
tests/cloud_tests/testcases.yaml (+2/-2)
tests/cloud_tests/testcases/base.py (+28/-6)
tests/cloud_tests/testcases/examples/including_user_groups.py (+1/-1)
tests/cloud_tests/testcases/modules/byobu.py (+1/-2)
tests/cloud_tests/testcases/modules/byobu.yaml (+0/-3)
tests/cloud_tests/testcases/modules/ca_certs.py (+17/-4)
tests/cloud_tests/testcases/modules/ca_certs.yaml (+6/-2)
tests/cloud_tests/testcases/modules/ntp.py (+2/-3)
tests/cloud_tests/testcases/modules/ntp.yaml (+1/-0)
tests/cloud_tests/testcases/modules/ntp_chrony.py (+26/-0)
tests/cloud_tests/testcases/modules/ntp_chrony.yaml (+17/-0)
tests/cloud_tests/testcases/modules/ntp_pools.yaml (+1/-0)
tests/cloud_tests/testcases/modules/ntp_servers.yaml (+1/-0)
tests/cloud_tests/testcases/modules/ntp_timesyncd.py (+15/-0)
tests/cloud_tests/testcases/modules/ntp_timesyncd.yaml (+15/-0)
tests/cloud_tests/testcases/modules/package_update_upgrade_install.py (+6/-8)
tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml (+3/-6)
tests/cloud_tests/testcases/modules/salt_minion.py (+1/-2)
tests/cloud_tests/testcases/modules/salt_minion.yaml (+12/-5)
tests/cloud_tests/testcases/modules/user_groups.py (+1/-1)
tests/cloud_tests/util.py (+1/-1)
tests/cloud_tests/verify.py (+46/-1)
tests/data/netinfo/netdev-formatted-output (+10/-0)
tests/data/netinfo/netdev-formatted-output-down (+8/-0)
tests/data/netinfo/new-ifconfig-output (+18/-0)
tests/data/netinfo/new-ifconfig-output-down (+15/-0)
tests/data/netinfo/old-ifconfig-output (+18/-0)
tests/data/netinfo/route-formatted-output (+22/-0)
tests/data/netinfo/sample-ipaddrshow-output (+13/-0)
tests/data/netinfo/sample-ipaddrshow-output-down (+8/-0)
tests/data/netinfo/sample-iproute-output-v4 (+3/-0)
tests/data/netinfo/sample-iproute-output-v6 (+11/-0)
tests/data/netinfo/sample-route-output-v4 (+5/-0)
tests/data/netinfo/sample-route-output-v6 (+13/-0)
tests/unittests/test__init__.py (+5/-5)
tests/unittests/test_data.py (+21/-3)
tests/unittests/test_datasource/test_aliyun.py (+0/-2)
tests/unittests/test_datasource/test_azure.py (+209/-70)
tests/unittests/test_datasource/test_azure_helper.py (+1/-1)
tests/unittests/test_datasource/test_common.py (+1/-0)
tests/unittests/test_datasource/test_ec2.py (+0/-12)
tests/unittests/test_datasource/test_gce.py (+0/-1)
tests/unittests/test_datasource/test_ibmcloud.py (+50/-0)
tests/unittests/test_datasource/test_maas.py (+2/-2)
tests/unittests/test_datasource/test_nocloud.py (+0/-3)
tests/unittests/test_datasource/test_openstack.py (+215/-20)
tests/unittests/test_datasource/test_scaleway.py (+0/-3)
tests/unittests/test_datasource/test_smartos.py (+245/-5)
tests/unittests/test_distros/test_create_users.py (+8/-0)
tests/unittests/test_distros/test_netconfig.py (+6/-0)
tests/unittests/test_distros/test_user_data_normalize.py (+6/-0)
tests/unittests/test_ds_identify.py (+205/-18)
tests/unittests/test_ec2_util.py (+0/-9)
tests/unittests/test_filters/test_launch_index.py (+5/-5)
tests/unittests/test_handler/test_handler_apt_conf_v1.py (+6/-10)
tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py (+0/-7)
tests/unittests/test_handler/test_handler_apt_source_v1.py (+10/-17)
tests/unittests/test_handler/test_handler_apt_source_v3.py (+11/-18)
tests/unittests/test_handler/test_handler_bootcmd.py (+26/-8)
tests/unittests/test_handler/test_handler_chef.py (+12/-4)
tests/unittests/test_handler/test_handler_lxd.py (+64/-16)
tests/unittests/test_handler/test_handler_mounts.py (+100/-4)
tests/unittests/test_handler/test_handler_ntp.py (+571/-305)
tests/unittests/test_handler/test_handler_resizefs.py (+1/-1)
tests/unittests/test_handler/test_handler_runcmd.py (+26/-7)
tests/unittests/test_handler/test_schema.py (+33/-6)
tests/unittests/test_merging.py (+1/-1)
tests/unittests/test_net.py (+189/-12)
tests/unittests/test_runs/test_merge_run.py (+1/-1)
tests/unittests/test_runs/test_simple_run.py (+30/-2)
tests/unittests/test_sshutil.py (+94/-3)
tests/unittests/test_templating.py (+42/-3)
tests/unittests/test_util.py (+126/-13)
tools/ds-identify (+83/-28)
tools/make-tarball (+12/-3)
tools/read-dependencies (+6/-2)
tools/run-centos (+30/-310)
tools/run-container (+590/-0)
tox.ini (+9/-7)
Reviewer                Review Type               Date Requested   Status
Server Team CI bot      continuous-integration                     Approve
cloud-init Commiters                                               Pending
Review via email: mp+348360@code.launchpad.net

Commit message

Cloud-init 18.3 new-upstream-snapshot for release into artful.

Revision history for this message
Server Team CI bot (server-team-bot) wrote:

FAILED: Continuous integration, rev:0bb961d0481e0158c0ef115f718a0f4ebcafdcea
https://jenkins.ubuntu.com/server/job/cloud-init-ci/122/
Executed test runs:
    SUCCESS: Checkout
    SUCCESS: Unit & Style Tests
    FAILED: Ubuntu LTS: Build

Click here to trigger a rebuild:
https://jenkins.ubuntu.com/server/job/cloud-init-ci/122/rebuild

review: Needs Fixing (continuous-integration)
Revision history for this message
Server Team CI bot (server-team-bot) wrote:

PASSED: Continuous integration, rev:2022cd6ee06582153a55e51db5e5ae0b5398ba2e
https://jenkins.ubuntu.com/server/job/cloud-init-ci/123/
Executed test runs:
    SUCCESS: Checkout
    SUCCESS: Unit & Style Tests
    SUCCESS: Ubuntu LTS: Build
    SUCCESS: Ubuntu LTS: Integration
    SUCCESS: MAAS Compatability Testing
    IN_PROGRESS: Declarative: Post Actions

Click here to trigger a rebuild:
https://jenkins.ubuntu.com/server/job/cloud-init-ci/123/rebuild

review: Approve (continuous-integration)

Preview Diff

diff --git a/.pylintrc b/.pylintrc
index 0bdfa59..3bfa0c8 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -28,7 +28,7 @@ jobs=4
 # W0703(broad-except)
 # W1401(anomalous-backslash-in-string)
 
-disable=C, F, I, R, W0105, W0107, W0201, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0612, W0613, W0621, W0622, W0631, W0703, W1401
+disable=C, F, I, R, W0105, W0107, W0201, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0613, W0621, W0622, W0631, W0703, W1401
 
 
 [REPORTS]
diff --git a/ChangeLog b/ChangeLog
index daa7ccf..72c5287 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,229 @@
+18.3:
+ - docs: represent sudo:false in docs for user_groups config module
+ - Explicitly prevent `sudo` access for user module
+   [Jacob Bednarz] (LP: #1771468)
+ - lxd: Delete default network and detach device if lxd-init created them.
+   (LP: #1776958)
+ - openstack: avoid unneeded metadata probe on non-openstack platforms
+   (LP: #1776701)
+ - stages: fix tracebacks if a module stage is undefined or empty
+   [Robert Schweikert] (LP: #1770462)
+ - Be more safe on string/bytes when writing multipart user-data to disk.
+   (LP: #1768600)
+ - Fix get_proc_env for pids that have non-utf8 content in environment.
+   (LP: #1775371)
+ - tests: fix salt_minion integration test on bionic and later
+ - tests: provide human-readable integration test summary when --verbose
+ - tests: skip chrony integration tests on lxd running artful or older
+ - test: add optional --preserve-instance arg to integraiton tests
+ - netplan: fix mtu if provided by network config for all rendered types
+   (LP: #1774666)
+ - tests: remove pip install workarounds for pylxd, take upstream fix.
+ - subp: support combine_capture argument.
+ - tests: ordered tox dependencies for pylxd install
+ - util: add get_linux_distro function to replace platform.dist
+   [Robert Schweikert] (LP: #1745235)
+ - pyflakes: fix unused variable references identified by pyflakes 2.0.0.
+ - - Do not use the systemd_prefix macro, not available in this environment
+   [Robert Schweikert]
+ - doc: Add config info to ec2, openstack and cloudstack datasource docs
+ - Enable SmartOS network metadata to work with netplan via per-subnet
+   routes [Dan McDonald] (LP: #1763512)
+ - openstack: Allow discovery in init-local using dhclient in a sandbox.
+   (LP: #1749717)
+ - tests: Avoid using https in httpretty, improve HttPretty test case.
+   (LP: #1771659)
+ - yaml_load/schema: Add invalid line and column nums to error message
+ - Azure: Ignore NTFS mount errors when checking ephemeral drive
+   [Paul Meyer]
+ - packages/brpm: Get proper dependencies for cmdline distro.
+ - packages: Make rpm spec files patch in package version like in debs.
+ - tools/run-container: replace tools/run-centos with more generic.
+ - Update version.version_string to contain packaged version. (LP: #1770712)
+ - cc_mounts: Do not add devices to fstab that are already present.
+   [Lars Kellogg-Stedman]
+ - ds-identify: ensure that we have certain tokens in PATH. (LP: #1771382)
+ - tests: enable Ubuntu Cosmic in integration tests [Joshua Powers]
+ - read_file_or_url: move to url_helper, fix bug in its FileResponse.
+ - cloud_tests: help pylint [Ryan Harper]
+ - flake8: fix flake8 errors in previous commit.
+ - typos: Fix spelling mistakes in cc_mounts.py log messages [Stephen Ford]
+ - tests: restructure SSH and initial connections [Joshua Powers]
+ - ds-identify: recognize container-other as a container, test SmartOS.
+ - cloud-config.service: run After snap.seeded.service. (LP: #1767131)
+ - tests: do not rely on host /proc/cmdline in test_net.py
+   [Lars Kellogg-Stedman] (LP: #1769952)
+ - ds-identify: Remove dupe call to is_ds_enabled, improve debug message.
+ - SmartOS: fix get_interfaces for nics that do not have addr_assign_type.
+ - tests: fix package and ca_cert cloud_tests on bionic
+   (LP: #1769985)
+ - ds-identify: make shellcheck 0.4.6 happy with ds-identify.
+ - pycodestyle: Fix deprecated string literals, move away from flake8.
+ - azure: Add reported ready marker file. [Joshua Chan] (LP: #1765214)
+ - tools: Support adding a release suffix through packages/bddeb.
+ - FreeBSD: Invoke growfs on ufs filesystems such that it does not prompt.
+   [Harm Weites] (LP: #1404745)
+ - tools: Re-use the orig tarball in packages/bddeb if it is around.
+ - netinfo: fix netdev_pformat when a nic does not have an address
+   assigned. (LP: #1766302)
+ - collect-logs: add -v flag, write to stderr, limit journal to single
+   boot. (LP: #1766335)
+ - IBMCloud: Disable config-drive and nocloud only if IBMCloud is enabled.
+   (LP: #1766401)
+ - Add reporting events and log_time around early source of blocking time
+   [Ryan Harper]
+ - IBMCloud: recognize provisioning environment during debug boots.
+   (LP: #1767166)
+ - net: detect unstable network names and trigger a settle if needed
+   [Ryan Harper] (LP: #1766287)
+ - IBMCloud: improve documentation in datasource.
+ - sysconfig: dhcp6 subnet type should not imply dhcpv4 [Vitaly Kuznetsov]
+ - packages/debian/control.in: add missing dependency on iproute2.
+   (LP: #1766711)
+ - DataSourceSmartOS: add locking of serial device.
+   [Mike Gerdts] (LP: #1746605)
+ - DataSourceSmartOS: sdc:hostname is ignored [Mike Gerdts] (LP: #1765085)
+ - DataSourceSmartOS: list() should always return a list
+   [Mike Gerdts] (LP: #1763480)
+ - schema: in validation, raise ImportError if strict but no jsonschema.
+ - set_passwords: Add newline to end of sshd config, only restart if
+   updated. (LP: #1677205)
+ - pylint: pay attention to unused variable warnings.
+ - doc: Add documentation for AliYun datasource. [Junjie Wang]
+ - Schema: do not warn on duplicate items in commands. (LP: #1764264)
+ - net: Depend on iproute2's ip instead of net-tools ifconfig or route
+ - DataSourceSmartOS: fix hang when metadata service is down
+   [Mike Gerdts] (LP: #1667735)
+ - DataSourceSmartOS: change default fs on ephemeral disk from ext3 to
+   ext4. [Mike Gerdts] (LP: #1763511)
+ - pycodestyle: Fix invalid escape sequences in string literals.
+ - Implement bash completion script for cloud-init command line
+   [Ryan Harper]
+ - tools: Fix make-tarball cli tool usage for development
+ - renderer: support unicode in render_from_file.
+ - Implement ntp client spec with auto support for distro selection
+   [Ryan Harper] (LP: #1749722)
+ - Apport: add Brightbox, IBM, LXD, and OpenTelekomCloud to list of clouds.
+ - tests: fix ec2 integration network metadata validation
+ - tests: fix integration tests to support lxd 3.0 release
+ - correct documentation to match correct attribute name usage.
+   [Dominic Schlegel] (LP: #1420018)
+ - cc_resizefs, util: handle no /dev/zfs [Ryan Harper]
+ - doc: Fix links in OpenStack datasource documentation.
+   [Dominic Schlegel] (LP: #1721660)
+ - docs: represent sudo:false in docs for user_groups config module
+ - Explicitly prevent `sudo` access for user module
+   [Jacob Bednarz] (LP: #1771468)
+ - lxd: Delete default network and detach device if lxd-init created them.
+   (LP: #1776958)
+ - openstack: avoid unneeded metadata probe on non-openstack platforms
+   (LP: #1776701)
+ - stages: fix tracebacks if a module stage is undefined or empty
+   [Robert Schweikert] (LP: #1770462)
+ - Be more safe on string/bytes when writing multipart user-data to disk.
+   (LP: #1768600)
+ - Fix get_proc_env for pids that have non-utf8 content in environment.
+   (LP: #1775371)
+ - tests: fix salt_minion integration test on bionic and later
+ - tests: provide human-readable integration test summary when --verbose
+ - tests: skip chrony integration tests on lxd running artful or older
+ - test: add optional --preserve-instance arg to integraiton tests
+ - netplan: fix mtu if provided by network config for all rendered types
+   (LP: #1774666)
+ - tests: remove pip install workarounds for pylxd, take upstream fix.
+ - subp: support combine_capture argument.
+ - tests: ordered tox dependencies for pylxd install
+ - util: add get_linux_distro function to replace platform.dist
+   [Robert Schweikert] (LP: #1745235)
+ - pyflakes: fix unused variable references identified by pyflakes 2.0.0.
+ - - Do not use the systemd_prefix macro, not available in this environment
+   [Robert Schweikert]
+ - doc: Add config info to ec2, openstack and cloudstack datasource docs
+ - Enable SmartOS network metadata to work with netplan via per-subnet
+   routes [Dan McDonald] (LP: #1763512)
+ - openstack: Allow discovery in init-local using dhclient in a sandbox.
+   (LP: #1749717)
+ - tests: Avoid using https in httpretty, improve HttPretty test case.
+   (LP: #1771659)
+ - yaml_load/schema: Add invalid line and column nums to error message
+ - Azure: Ignore NTFS mount errors when checking ephemeral drive
+   [Paul Meyer]
+ - packages/brpm: Get proper dependencies for cmdline distro.
+ - packages: Make rpm spec files patch in package version like in debs.
+ - tools/run-container: replace tools/run-centos with more generic.
+ - Update version.version_string to contain packaged version. (LP: #1770712)
+ - cc_mounts: Do not add devices to fstab that are already present.
+   [Lars Kellogg-Stedman]
+ - ds-identify: ensure that we have certain tokens in PATH. (LP: #1771382)
+ - tests: enable Ubuntu Cosmic in integration tests [Joshua Powers]
+ - read_file_or_url: move to url_helper, fix bug in its FileResponse.
+ - cloud_tests: help pylint [Ryan Harper]
+ - flake8: fix flake8 errors in previous commit.
+ - typos: Fix spelling mistakes in cc_mounts.py log messages [Stephen Ford]
+ - tests: restructure SSH and initial connections [Joshua Powers]
+ - ds-identify: recognize container-other as a container, test SmartOS.
+ - cloud-config.service: run After snap.seeded.service. (LP: #1767131)
+ - tests: do not rely on host /proc/cmdline in test_net.py
+   [Lars Kellogg-Stedman] (LP: #1769952)
+ - ds-identify: Remove dupe call to is_ds_enabled, improve debug message.
+ - SmartOS: fix get_interfaces for nics that do not have addr_assign_type.
+ - tests: fix package and ca_cert cloud_tests on bionic
+   (LP: #1769985)
+ - ds-identify: make shellcheck 0.4.6 happy with ds-identify.
+ - pycodestyle: Fix deprecated string literals, move away from flake8.
+ - azure: Add reported ready marker file. [Joshua Chan] (LP: #1765214)
+ - tools: Support adding a release suffix through packages/bddeb.
+ - FreeBSD: Invoke growfs on ufs filesystems such that it does not prompt.
+   [Harm Weites] (LP: #1404745)
+ - tools: Re-use the orig tarball in packages/bddeb if it is around.
+ - netinfo: fix netdev_pformat when a nic does not have an address
+   assigned. (LP: #1766302)
+ - collect-logs: add -v flag, write to stderr, limit journal to single
+   boot. (LP: #1766335)
+ - IBMCloud: Disable config-drive and nocloud only if IBMCloud is enabled.
+   (LP: #1766401)
+ - Add reporting events and log_time around early source of blocking time
+   [Ryan Harper]
+ - IBMCloud: recognize provisioning environment during debug boots.
+   (LP: #1767166)
+ - net: detect unstable network names and trigger a settle if needed
+   [Ryan Harper] (LP: #1766287)
+ - IBMCloud: improve documentation in datasource.
+ - sysconfig: dhcp6 subnet type should not imply dhcpv4 [Vitaly Kuznetsov]
+ - packages/debian/control.in: add missing dependency on iproute2.
+   (LP: #1766711)
+ - DataSourceSmartOS: add locking of serial device.
+   [Mike Gerdts] (LP: #1746605)
+ - DataSourceSmartOS: sdc:hostname is ignored [Mike Gerdts] (LP: #1765085)
+ - DataSourceSmartOS: list() should always return a list
+   [Mike Gerdts] (LP: #1763480)
+ - schema: in validation, raise ImportError if strict but no jsonschema.
+ - set_passwords: Add newline to end of sshd config, only restart if
+   updated. (LP: #1677205)
+ - pylint: pay attention to unused variable warnings.
+ - doc: Add documentation for AliYun datasource. [Junjie Wang]
+ - Schema: do not warn on duplicate items in commands. (LP: #1764264)
+ - net: Depend on iproute2's ip instead of net-tools ifconfig or route
+ - DataSourceSmartOS: fix hang when metadata service is down
+   [Mike Gerdts] (LP: #1667735)
+ - DataSourceSmartOS: change default fs on ephemeral disk from ext3 to
+   ext4. [Mike Gerdts] (LP: #1763511)
+ - pycodestyle: Fix invalid escape sequences in string literals.
+ - Implement bash completion script for cloud-init command line
+   [Ryan Harper]
+ - tools: Fix make-tarball cli tool usage for development
+ - renderer: support unicode in render_from_file.
+ - Implement ntp client spec with auto support for distro selection
+   [Ryan Harper] (LP: #1749722)
+ - Apport: add Brightbox, IBM, LXD, and OpenTelekomCloud to list of clouds.
+ - tests: fix ec2 integration network metadata validation
+ - tests: fix integration tests to support lxd 3.0 release
+ - correct documentation to match correct attribute name usage.
+   [Dominic Schlegel] (LP: #1420018)
+ - cc_resizefs, util: handle no /dev/zfs [Ryan Harper]
+ - doc: Fix links in OpenStack datasource documentation.
+   [Dominic Schlegel] (LP: #1721660)
+
 18.2:
  - Hetzner: Exit early if dmi system-manufacturer is not Hetzner.
  - Add missing dependency on isc-dhcp-client to trunk ubuntu packaging.
diff --git a/MANIFEST.in b/MANIFEST.in
index 1a4d771..57a85ea 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,5 +1,6 @@
 include *.py MANIFEST.in LICENSE* ChangeLog
 global-include *.txt *.rst *.ini *.in *.conf *.cfg *.sh
+graft bash_completion
 graft config
 graft doc
 graft packages
diff --git a/bash_completion/cloud-init b/bash_completion/cloud-init
new file mode 100644
index 0000000..581432c
--- /dev/null
+++ b/bash_completion/cloud-init
@@ -0,0 +1,77 @@
+# Copyright (C) 2018 Canonical Ltd.
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+# bash completion for cloud-init cli
+_cloudinit_complete()
+{
+
+    local cur_word prev_word
+    cur_word="${COMP_WORDS[COMP_CWORD]}"
+    prev_word="${COMP_WORDS[COMP_CWORD-1]}"
+
+    subcmds="analyze clean collect-logs devel dhclient-hook features init modules single status"
+    base_params="--help --file --version --debug --force"
+    case ${COMP_CWORD} in
+        1)
+            COMPREPLY=($(compgen -W "$base_params $subcmds" -- $cur_word))
+            ;;
+        2)
+            case ${prev_word} in
+                analyze)
+                    COMPREPLY=($(compgen -W "--help blame dump show" -- $cur_word))
+                    ;;
+                clean)
+                    COMPREPLY=($(compgen -W "--help --logs --reboot --seed" -- $cur_word))
+                    ;;
+                collect-logs)
+                    COMPREPLY=($(compgen -W "--help --tarfile --include-userdata" -- $cur_word))
+                    ;;
+                devel)
+                    COMPREPLY=($(compgen -W "--help schema" -- $cur_word))
+                    ;;
+                dhclient-hook|features)
+                    COMPREPLY=($(compgen -W "--help" -- $cur_word))
+                    ;;
+                init)
+                    COMPREPLY=($(compgen -W "--help --local" -- $cur_word))
+                    ;;
+                modules)
+                    COMPREPLY=($(compgen -W "--help --mode" -- $cur_word))
+                    ;;
+
+                single)
+                    COMPREPLY=($(compgen -W "--help --name --frequency --report" -- $cur_word))
+                    ;;
+                status)
+                    COMPREPLY=($(compgen -W "--help --long --wait" -- $cur_word))
+                    ;;
+            esac
+            ;;
+        3)
+            case ${prev_word} in
+                blame|dump)
+                    COMPREPLY=($(compgen -W "--help --infile --outfile" -- $cur_word))
+                    ;;
+                --mode)
+                    COMPREPLY=($(compgen -W "--help init config final" -- $cur_word))
+                    ;;
+                --frequency)
+                    COMPREPLY=($(compgen -W "--help instance always once" -- $cur_word))
+                    ;;
+                schema)
+                    COMPREPLY=($(compgen -W "--help --config-file --doc --annotate" -- $cur_word))
+                    ;;
+                show)
+                    COMPREPLY=($(compgen -W "--help --format --infile --outfile" -- $cur_word))
+                    ;;
+            esac
+            ;;
+        *)
+            COMPREPLY=()
+            ;;
+    esac
+}
+complete -F _cloudinit_complete cloud-init
+
+# vi: syntax=bash expandtab
diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py
index 3ba5903..f861365 100644
--- a/cloudinit/analyze/__main__.py
+++ b/cloudinit/analyze/__main__.py
@@ -69,7 +69,7 @@ def analyze_blame(name, args):
69 """69 """
70 (infh, outfh) = configure_io(args)70 (infh, outfh) = configure_io(args)
71 blame_format = ' %ds (%n)'71 blame_format = ' %ds (%n)'
72 r = re.compile('(^\s+\d+\.\d+)', re.MULTILINE)72 r = re.compile(r'(^\s+\d+\.\d+)', re.MULTILINE)
73 for idx, record in enumerate(show.show_events(_get_events(infh),73 for idx, record in enumerate(show.show_events(_get_events(infh),
74 blame_format)):74 blame_format)):
75 srecs = sorted(filter(r.match, record), reverse=True)75 srecs = sorted(filter(r.match, record), reverse=True)
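
Note on the hunk above: the only change is the r'' prefix. Python 3.6+
deprecates unrecognized backslash escapes such as \s and \d in ordinary
string literals, so regex patterns belong in raw strings. A quick standalone
illustration (the sample input is made up):

    import re

    # As a raw string, '\s' and '\d' reach the regex engine untouched, and
    # no DeprecationWarning is emitted under `python -W error`.
    r = re.compile(r'(^\s+\d+\.\d+)', re.MULTILINE)
    print(bool(r.match('  1.23')))  # True
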
diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py
index b071aa1..1f3060d 100644
--- a/cloudinit/analyze/dump.py
+++ b/cloudinit/analyze/dump.py
@@ -112,7 +112,7 @@ def parse_ci_logline(line):
             return None
         event_description = stage_to_description[event_name]
     else:
-        (pymodloglvl, event_type, event_name) = eventstr.split()[0:3]
+        (_pymodloglvl, event_type, event_name) = eventstr.split()[0:3]
         event_description = eventstr.split(event_name)[1].strip()
 
     event = {
diff --git a/cloudinit/apport.py b/cloudinit/apport.py
index 618b016..130ff26 100644
--- a/cloudinit/apport.py
+++ b/cloudinit/apport.py
@@ -13,10 +13,29 @@ except ImportError:
 
 
 KNOWN_CLOUD_NAMES = [
-    'Amazon - Ec2', 'AliYun', 'AltCloud', 'Azure', 'Bigstep', 'CloudSigma',
-    'CloudStack', 'DigitalOcean', 'GCE - Google Compute Engine',
-    'Hetzner Cloud', 'MAAS', 'NoCloud', 'OpenNebula', 'OpenStack', 'OVF',
-    'Scaleway', 'SmartOS', 'VMware', 'Other']
+    'AliYun',
+    'AltCloud',
+    'Amazon - Ec2',
+    'Azure',
+    'Bigstep',
+    'Brightbox',
+    'CloudSigma',
+    'CloudStack',
+    'DigitalOcean',
+    'GCE - Google Compute Engine',
+    'Hetzner Cloud',
+    'IBM - (aka SoftLayer or BlueMix)',
+    'LXD',
+    'MAAS',
+    'NoCloud',
+    'OpenNebula',
+    'OpenStack',
+    'OVF',
+    'OpenTelekomCloud',
+    'Scaleway',
+    'SmartOS',
+    'VMware',
+    'Other']
 
 # Potentially clear text collected logs
 CLOUDINIT_LOG = '/var/log/cloud-init.log'
diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py
index 35ca478..df72520 100644
--- a/cloudinit/cmd/devel/logs.py
+++ b/cloudinit/cmd/devel/logs.py
@@ -11,6 +11,7 @@ from cloudinit.temp_utils import tempdir
 from datetime import datetime
 import os
 import shutil
+import sys
 
 
 CLOUDINIT_LOGS = ['/var/log/cloud-init.log', '/var/log/cloud-init-output.log']
@@ -31,6 +32,8 @@ def get_parser(parser=None):
         parser = argparse.ArgumentParser(
             prog='collect-logs',
             description='Collect and tar all cloud-init debug info')
+    parser.add_argument('--verbose', '-v', action='count', default=0,
+                        dest='verbosity', help="Be more verbose.")
     parser.add_argument(
         "--tarfile", '-t', default='cloud-init.tar.gz',
         help=('The tarfile to create containing all collected logs.'
@@ -43,17 +46,33 @@ def get_parser(parser=None):
     return parser
 
 
-def _write_command_output_to_file(cmd, filename):
+def _write_command_output_to_file(cmd, filename, msg, verbosity):
     """Helper which runs a command and writes output or error to filename."""
     try:
         out, _ = subp(cmd)
     except ProcessExecutionError as e:
         write_file(filename, str(e))
+        _debug("collecting %s failed.\n" % msg, 1, verbosity)
     else:
         write_file(filename, out)
+        _debug("collected %s\n" % msg, 1, verbosity)
+    return out
 
 
-def collect_logs(tarfile, include_userdata):
+def _debug(msg, level, verbosity):
+    if level <= verbosity:
+        sys.stderr.write(msg)
+
+
+def _collect_file(path, out_dir, verbosity):
+    if os.path.isfile(path):
+        copy(path, out_dir)
+        _debug("collected file: %s\n" % path, 1, verbosity)
+    else:
+        _debug("file %s did not exist\n" % path, 2, verbosity)
+
+
+def collect_logs(tarfile, include_userdata, verbosity=0):
     """Collect all cloud-init logs and tar them up into the provided tarfile.
 
     @param tarfile: The path of the tar-gzipped file to create.
@@ -64,28 +83,46 @@ def collect_logs(tarfile, include_userdata):
     log_dir = 'cloud-init-logs-{0}'.format(date)
     with tempdir(dir='/tmp') as tmp_dir:
         log_dir = os.path.join(tmp_dir, log_dir)
-        _write_command_output_to_file(
+        version = _write_command_output_to_file(
+            ['cloud-init', '--version'],
+            os.path.join(log_dir, 'version'),
+            "cloud-init --version", verbosity)
+        dpkg_ver = _write_command_output_to_file(
             ['dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'],
-            os.path.join(log_dir, 'version'))
+            os.path.join(log_dir, 'dpkg-version'),
+            "dpkg version", verbosity)
+        if not version:
+            version = dpkg_ver if dpkg_ver else "not-available"
+        _debug("collected cloud-init version: %s\n" % version, 1, verbosity)
         _write_command_output_to_file(
-            ['dmesg'], os.path.join(log_dir, 'dmesg.txt'))
+            ['dmesg'], os.path.join(log_dir, 'dmesg.txt'),
+            "dmesg output", verbosity)
         _write_command_output_to_file(
-            ['journalctl', '-o', 'short-precise'],
-            os.path.join(log_dir, 'journal.txt'))
+            ['journalctl', '--boot=0', '-o', 'short-precise'],
+            os.path.join(log_dir, 'journal.txt'),
+            "systemd journal of current boot", verbosity)
+
         for log in CLOUDINIT_LOGS:
-            copy(log, log_dir)
+            _collect_file(log, log_dir, verbosity)
         if include_userdata:
-            copy(USER_DATA_FILE, log_dir)
+            _collect_file(USER_DATA_FILE, log_dir, verbosity)
         run_dir = os.path.join(log_dir, 'run')
         ensure_dir(run_dir)
-        shutil.copytree(CLOUDINIT_RUN_DIR, os.path.join(run_dir, 'cloud-init'))
+        if os.path.exists(CLOUDINIT_RUN_DIR):
+            shutil.copytree(CLOUDINIT_RUN_DIR,
+                            os.path.join(run_dir, 'cloud-init'))
+            _debug("collected dir %s\n" % CLOUDINIT_RUN_DIR, 1, verbosity)
+        else:
+            _debug("directory '%s' did not exist\n" % CLOUDINIT_RUN_DIR, 1,
+                   verbosity)
         with chdir(tmp_dir):
             subp(['tar', 'czvf', tarfile, log_dir.replace(tmp_dir + '/', '')])
+        sys.stderr.write("Wrote %s\n" % tarfile)
 
 
 def handle_collect_logs_args(name, args):
     """Handle calls to 'cloud-init collect-logs' as a subcommand."""
-    collect_logs(args.tarfile, args.userdata)
+    collect_logs(args.tarfile, args.userdata, args.verbosity)
 
 
 def main():
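
Note on collect-logs above: the new _debug/_collect_file helpers gate progress
messages on how many -v flags were passed. A minimal standalone sketch of the
same pattern (the demo_* names are illustrative, not cloud-init's API):

    import os
    import shutil
    import sys

    def demo_debug(msg, level, verbosity):
        # Emit only when the user asked for at least `level` -v flags.
        if level <= verbosity:
            sys.stderr.write(msg)

    def demo_collect_file(path, out_dir, verbosity):
        # Copy a file if present; mention misses only at -vv and above.
        if os.path.isfile(path):
            shutil.copy(path, out_dir)
            demo_debug("collected file: %s\n" % path, 1, verbosity)
        else:
            demo_debug("file %s did not exist\n" % path, 2, verbosity)

    demo_collect_file('/var/log/cloud-init.log', '.', 2)
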
diff --git a/cloudinit/cmd/devel/tests/test_logs.py b/cloudinit/cmd/devel/tests/test_logs.py
index dc4947c..98b4756 100644
--- a/cloudinit/cmd/devel/tests/test_logs.py
+++ b/cloudinit/cmd/devel/tests/test_logs.py
@@ -4,6 +4,7 @@ from cloudinit.cmd.devel import logs
 from cloudinit.util import ensure_dir, load_file, subp, write_file
 from cloudinit.tests.helpers import FilesystemMockingTestCase, wrap_and_call
 from datetime import datetime
+import mock
 import os
 
 
@@ -27,11 +28,13 @@ class TestCollectLogs(FilesystemMockingTestCase):
         date = datetime.utcnow().date().strftime('%Y-%m-%d')
         date_logdir = 'cloud-init-logs-{0}'.format(date)
 
+        version_out = '/usr/bin/cloud-init 18.2fake\n'
         expected_subp = {
             ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'):
                 '0.7fake\n',
+            ('cloud-init', '--version'): version_out,
             ('dmesg',): 'dmesg-out\n',
-            ('journalctl', '-o', 'short-precise'): 'journal-out\n',
+            ('journalctl', '--boot=0', '-o', 'short-precise'): 'journal-out\n',
             ('tar', 'czvf', output_tarfile, date_logdir): ''
         }
 
@@ -44,9 +47,12 @@ class TestCollectLogs(FilesystemMockingTestCase):
             subp(cmd)  # Pass through tar cmd so we can check output
             return expected_subp[cmd_tuple], ''
 
+        fake_stderr = mock.MagicMock()
+
         wrap_and_call(
             'cloudinit.cmd.devel.logs',
             {'subp': {'side_effect': fake_subp},
+             'sys.stderr': {'new': fake_stderr},
              'CLOUDINIT_LOGS': {'new': [log1, log2]},
              'CLOUDINIT_RUN_DIR': {'new': self.run_dir}},
             logs.collect_logs, output_tarfile, include_userdata=False)
@@ -55,7 +61,9 @@ class TestCollectLogs(FilesystemMockingTestCase):
         out_logdir = self.tmp_path(date_logdir, self.new_root)
         self.assertEqual(
             '0.7fake\n',
-            load_file(os.path.join(out_logdir, 'version')))
+            load_file(os.path.join(out_logdir, 'dpkg-version')))
+        self.assertEqual(version_out,
+                         load_file(os.path.join(out_logdir, 'version')))
         self.assertEqual(
             'cloud-init-log',
             load_file(os.path.join(out_logdir, 'cloud-init.log')))
@@ -72,6 +80,7 @@ class TestCollectLogs(FilesystemMockingTestCase):
             'results',
             load_file(
                 os.path.join(out_logdir, 'run', 'cloud-init', 'results.json')))
+        fake_stderr.write.assert_any_call('Wrote %s\n' % output_tarfile)
 
     def test_collect_logs_includes_optional_userdata(self):
         """collect-logs include userdata when --include-userdata is set."""
@@ -88,11 +97,13 @@ class TestCollectLogs(FilesystemMockingTestCase):
         date = datetime.utcnow().date().strftime('%Y-%m-%d')
         date_logdir = 'cloud-init-logs-{0}'.format(date)
 
+        version_out = '/usr/bin/cloud-init 18.2fake\n'
         expected_subp = {
             ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'):
                 '0.7fake',
+            ('cloud-init', '--version'): version_out,
             ('dmesg',): 'dmesg-out\n',
-            ('journalctl', '-o', 'short-precise'): 'journal-out\n',
+            ('journalctl', '--boot=0', '-o', 'short-precise'): 'journal-out\n',
             ('tar', 'czvf', output_tarfile, date_logdir): ''
         }
 
@@ -105,9 +116,12 @@ class TestCollectLogs(FilesystemMockingTestCase):
             subp(cmd)  # Pass through tar cmd so we can check output
             return expected_subp[cmd_tuple], ''
 
+        fake_stderr = mock.MagicMock()
+
         wrap_and_call(
             'cloudinit.cmd.devel.logs',
             {'subp': {'side_effect': fake_subp},
+             'sys.stderr': {'new': fake_stderr},
              'CLOUDINIT_LOGS': {'new': [log1, log2]},
              'CLOUDINIT_RUN_DIR': {'new': self.run_dir},
              'USER_DATA_FILE': {'new': userdata}},
@@ -118,3 +132,4 @@ class TestCollectLogs(FilesystemMockingTestCase):
         self.assertEqual(
             'user-data',
             load_file(os.path.join(out_logdir, 'user-data.txt')))
+        fake_stderr.write.assert_any_call('Wrote %s\n' % output_tarfile)
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index 3f2dbb9..d6ba90f 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -187,7 +187,7 @@ def attempt_cmdline_url(path, network=True, cmdline=None):
     data = None
     header = b'#cloud-config'
     try:
-        resp = util.read_file_or_url(**kwargs)
+        resp = url_helper.read_file_or_url(**kwargs)
         if resp.ok():
             data = resp.contents
             if not resp.contents.startswith(header):
diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py
index dbe421c..e2c54ae 100644
--- a/cloudinit/cmd/tests/test_main.py
+++ b/cloudinit/cmd/tests/test_main.py
@@ -56,7 +56,7 @@ class TestMain(FilesystemMockingTestCase):
         cmdargs = myargs(
             debug=False, files=None, force=False, local=False, reporter=None,
             subcommand='init')
-        (item1, item2) = wrap_and_call(
+        (_item1, item2) = wrap_and_call(
             'cloudinit.cmd.main',
             {'util.close_stdin': True,
              'netinfo.debug_info': 'my net debug info',
@@ -85,7 +85,7 @@ class TestMain(FilesystemMockingTestCase):
         cmdargs = myargs(
             debug=False, files=None, force=False, local=False, reporter=None,
             subcommand='init')
-        (item1, item2) = wrap_and_call(
+        (_item1, item2) = wrap_and_call(
             'cloudinit.cmd.main',
             {'util.close_stdin': True,
              'netinfo.debug_info': 'my net debug info',
@@ -133,7 +133,7 @@ class TestMain(FilesystemMockingTestCase):
             self.assertEqual(main.LOG, log)
             self.assertIsNone(args)
 
-        (item1, item2) = wrap_and_call(
+        (_item1, item2) = wrap_and_call(
             'cloudinit.cmd.main',
             {'util.close_stdin': True,
              'netinfo.debug_info': 'my net debug info',
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index 5b9cbca..e18944e 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -121,7 +121,7 @@ and https protocols respectively. The ``proxy`` key also exists as an alias for
 All source entries in ``apt-sources`` that match regex in
 ``add_apt_repo_match`` will be added to the system using
 ``add-apt-repository``. If ``add_apt_repo_match`` is not specified, it defaults
-to ``^[\w-]+:\w``
+to ``^[\\w-]+:\\w``
 
 **Add source list entries:**
 
@@ -378,7 +378,7 @@ def apply_debconf_selections(cfg, target=None):
 
     # get a complete list of packages listed in input
     pkgs_cfgd = set()
-    for key, content in selsets.items():
+    for _key, content in selsets.items():
         for line in content.splitlines():
             if line.startswith("#"):
                 continue
diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
index 233da1e..db64f0a 100644
--- a/cloudinit/config/cc_bootcmd.py
+++ b/cloudinit/config/cc_bootcmd.py
@@ -63,7 +63,6 @@ schema = {
             'additionalProperties': False,
             'minItems': 1,
             'required': [],
-            'uniqueItems': True
         }
     }
 }
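
Note on the schema change above: duplicate bootcmd entries are legitimate, yet
'uniqueItems' makes jsonschema flag them (LP: #1764264). A small demonstration
with the jsonschema package, assuming it is installed (not cloud-init code):

    from jsonschema import Draft4Validator

    schema = {'type': 'array', 'items': {'type': 'string'},
              'uniqueItems': True}
    config = ['echo hi', 'echo hi']  # a perfectly valid repeated command
    for error in Draft4Validator(schema).iter_errors(config):
        print(error.message)  # "... has non-unique elements"
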
diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py
index c56319b..885b313 100644
--- a/cloudinit/config/cc_disable_ec2_metadata.py
+++ b/cloudinit/config/cc_disable_ec2_metadata.py
@@ -32,13 +32,23 @@ from cloudinit.settings import PER_ALWAYS
 
 frequency = PER_ALWAYS
 
-REJECT_CMD = ['route', 'add', '-host', '169.254.169.254', 'reject']
+REJECT_CMD_IF = ['route', 'add', '-host', '169.254.169.254', 'reject']
+REJECT_CMD_IP = ['ip', 'route', 'add', 'prohibit', '169.254.169.254']
 
 
 def handle(name, cfg, _cloud, log, _args):
     disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False)
     if disabled:
-        util.subp(REJECT_CMD, capture=False)
+        reject_cmd = None
+        if util.which('ip'):
+            reject_cmd = REJECT_CMD_IP
+        elif util.which('ifconfig'):
+            reject_cmd = REJECT_CMD_IF
+        else:
+            log.error(('Neither "route" nor "ip" command found, unable to '
+                       'manipulate routing table'))
+            return
+        util.subp(reject_cmd, capture=False)
     else:
         log.debug(("Skipping module named %s,"
                    " disabling the ec2 route not enabled"), name)
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index c3e8c48..943089e 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -680,13 +680,13 @@ def read_parttbl(device):
     reliable way to probe the partition table.
     """
     blkdev_cmd = [BLKDEV_CMD, '--rereadpt', device]
-    udevadm_settle()
+    util.udevadm_settle()
     try:
         util.subp(blkdev_cmd)
     except Exception as e:
         util.logexc(LOG, "Failed reading the partition table %s" % e)
 
-    udevadm_settle()
+    util.udevadm_settle()
 
 
 def exec_mkpart_mbr(device, layout):
@@ -737,14 +737,10 @@ def exec_mkpart(table_type, device, layout):
     return get_dyn_func("exec_mkpart_%s", table_type, device, layout)
 
 
-def udevadm_settle():
-    util.subp(['udevadm', 'settle'])
-
-
 def assert_and_settle_device(device):
     """Assert that device exists and settle so it is fully recognized."""
     if not os.path.exists(device):
-        udevadm_settle()
+        util.udevadm_settle()
         if not os.path.exists(device):
             raise RuntimeError("Device %s did not exist and was not created "
                                "with a udevamd settle." % device)
@@ -752,7 +748,7 @@ def assert_and_settle_device(device):
 
     # Whether or not the device existed above, it is possible that udev
     # events that would populate udev database (for reading by lsdname) have
     # not yet finished. So settle again.
-    udevadm_settle()
+    util.udevadm_settle()
 
 
 def mkpart(device, definition):
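
Note on assert_and_settle_device above: it follows a check / settle / re-check
shape, settling once more at the end so pending udev events are flushed. A
hedged sketch of that flow with plain subprocess in place of util.subp
(Linux-only; the error text is illustrative):

    import os
    import subprocess

    def settle():
        # Block until the udev event queue is empty.
        subprocess.check_call(['udevadm', 'settle'])

    def assert_and_settle(device):
        if not os.path.exists(device):
            settle()  # the node may simply not have been created yet
            if not os.path.exists(device):
                raise RuntimeError("Device %s did not appear" % device)
        # An existing node can still have unprocessed udev events; settle
        # once more so the udev database is current.
        settle()
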
diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py
index 69dc2d5..eb9fbe6 100644
--- a/cloudinit/config/cc_emit_upstart.py
+++ b/cloudinit/config/cc_emit_upstart.py
@@ -43,7 +43,7 @@ def is_upstart_system():
         del myenv['UPSTART_SESSION']
     check_cmd = ['initctl', 'version']
     try:
-        (out, err) = util.subp(check_cmd, env=myenv)
+        (out, _err) = util.subp(check_cmd, env=myenv)
         return 'upstart' in out
     except util.ProcessExecutionError as e:
         LOG.debug("'%s' returned '%s', not using upstart",
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index 09374d2..ac72ac4 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -47,11 +47,16 @@ lxd-bridge will be configured accordingly.
         domain: <domain>
 """
 
+from cloudinit import log as logging
 from cloudinit import util
 import os
 
 distros = ['ubuntu']
 
+LOG = logging.getLogger(__name__)
+
+_DEFAULT_NETWORK_NAME = "lxdbr0"
+
 
 def handle(name, cfg, cloud, log, args):
     # Get config
@@ -109,6 +114,7 @@ def handle(name, cfg, cloud, log, args):
     # Set up lxd-bridge if bridge config is given
     dconf_comm = "debconf-communicate"
     if bridge_cfg:
+        net_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME)
         if os.path.exists("/etc/default/lxd-bridge") \
                 and util.which(dconf_comm):
             # Bridge configured through packaging
@@ -135,15 +141,18 @@ def handle(name, cfg, cloud, log, args):
         else:
             # Built-in LXD bridge support
             cmd_create, cmd_attach = bridge_to_cmd(bridge_cfg)
+            maybe_cleanup_default(
+                net_name=net_name, did_init=bool(init_cfg),
+                create=bool(cmd_create), attach=bool(cmd_attach))
             if cmd_create:
                 log.debug("Creating lxd bridge: %s" %
                           " ".join(cmd_create))
-                util.subp(cmd_create)
+                _lxc(cmd_create)
 
             if cmd_attach:
                 log.debug("Setting up default lxd bridge: %s" %
                           " ".join(cmd_create))
-                util.subp(cmd_attach)
+                _lxc(cmd_attach)
 
     elif bridge_cfg:
         raise RuntimeError(
@@ -204,10 +213,10 @@ def bridge_to_cmd(bridge_cfg):
     if bridge_cfg.get("mode") == "none":
         return None, None
 
-    bridge_name = bridge_cfg.get("name", "lxdbr0")
+    bridge_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME)
     cmd_create = []
-    cmd_attach = ["lxc", "network", "attach-profile", bridge_name,
-                  "default", "eth0", "--force-local"]
+    cmd_attach = ["network", "attach-profile", bridge_name,
+                  "default", "eth0"]
 
     if bridge_cfg.get("mode") == "existing":
         return None, cmd_attach
@@ -215,7 +224,7 @@ def bridge_to_cmd(bridge_cfg):
     if bridge_cfg.get("mode") != "new":
         raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode"))
 
-    cmd_create = ["lxc", "network", "create", bridge_name]
+    cmd_create = ["network", "create", bridge_name]
 
     if bridge_cfg.get("ipv4_address") and bridge_cfg.get("ipv4_netmask"):
         cmd_create.append("ipv4.address=%s/%s" %
@@ -247,8 +256,47 @@ def bridge_to_cmd(bridge_cfg):
     if bridge_cfg.get("domain"):
         cmd_create.append("dns.domain=%s" % bridge_cfg.get("domain"))
 
-    cmd_create.append("--force-local")
-
     return cmd_create, cmd_attach
 
 
+def _lxc(cmd):
+    env = {'LC_ALL': 'C'}
+    util.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env)
+
+
+def maybe_cleanup_default(net_name, did_init, create, attach,
+                          profile="default", nic_name="eth0"):
+    """Newer versions of lxc (3.0.1+) create a lxdbr0 network when
+    'lxd init --auto' is run. Older versions did not.
+
+    By removing ay that lxd-init created, we simply leave the add/attach
+    code in-tact.
+
+    https://github.com/lxc/lxd/issues/4649"""
+    if net_name != _DEFAULT_NETWORK_NAME or not did_init:
+        return
+
+    fail_assume_enoent = " failed. Assuming it did not exist."
+    succeeded = " succeeded."
+    if create:
+        msg = "Deletion of lxd network '%s'" % net_name
+        try:
+            _lxc(["network", "delete", net_name])
+            LOG.debug(msg + succeeded)
+        except util.ProcessExecutionError as e:
+            if e.exit_code != 1:
+                raise e
+            LOG.debug(msg + fail_assume_enoent)
+
+    if attach:
+        msg = "Removal of device '%s' from profile '%s'" % (nic_name, profile)
+        try:
+            _lxc(["profile", "device", "remove", profile, nic_name])
+            LOG.debug(msg + succeeded)
+        except util.ProcessExecutionError as e:
+            if e.exit_code != 1:
+                raise e
+            LOG.debug(msg + fail_assume_enoent)
+
+
 # vi: ts=4 expandtab
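
Note on maybe_cleanup_default above: exit status 1 from `lxc network delete`
is treated as "the network did not exist" and anything else is re-raised. The
same tolerant-cleanup shape with subprocess (the lxc invocation mirrors the
module, but this sketch is not cloud-init code):

    import subprocess

    def delete_default_network(net_name='lxdbr0'):
        cmd = ['lxc', 'network', 'delete', net_name, '--force-local']
        try:
            subprocess.run(cmd, check=True)
            print("Deletion of lxd network '%s' succeeded." % net_name)
        except subprocess.CalledProcessError as e:
            if e.returncode != 1:
                raise  # a real failure, not just a missing network
            print("Deletion of lxd network '%s' failed. Assuming it did "
                  "not exist." % net_name)
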
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index f14a4fc..339baba 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -76,6 +76,7 @@ DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$"
76DEVICE_NAME_RE = re.compile(DEVICE_NAME_FILTER)76DEVICE_NAME_RE = re.compile(DEVICE_NAME_FILTER)
77WS = re.compile("[%s]+" % (whitespace))77WS = re.compile("[%s]+" % (whitespace))
78FSTAB_PATH = "/etc/fstab"78FSTAB_PATH = "/etc/fstab"
79MNT_COMMENT = "comment=cloudconfig"
7980
80LOG = logging.getLogger(__name__)81LOG = logging.getLogger(__name__)
8182
@@ -232,8 +233,8 @@ def setup_swapfile(fname, size=None, maxsize=None):
232 if str(size).lower() == "auto":233 if str(size).lower() == "auto":
233 try:234 try:
234 memsize = util.read_meminfo()['total']235 memsize = util.read_meminfo()['total']
235 except IOError as e:236 except IOError:
236 LOG.debug("Not creating swap. failed to read meminfo")237 LOG.debug("Not creating swap: failed to read meminfo")
237 return238 return
238239
239 util.ensure_dir(tdir)240 util.ensure_dir(tdir)
@@ -280,17 +281,17 @@ def handle_swapcfg(swapcfg):
280281
281 if os.path.exists(fname):282 if os.path.exists(fname):
282 if not os.path.exists("/proc/swaps"):283 if not os.path.exists("/proc/swaps"):
283 LOG.debug("swap file %s existed. no /proc/swaps. Being safe.",284 LOG.debug("swap file %s exists, but no /proc/swaps exists, "
284 fname)285 "being safe", fname)
285 return fname286 return fname
286 try:287 try:
287 for line in util.load_file("/proc/swaps").splitlines():288 for line in util.load_file("/proc/swaps").splitlines():
288 if line.startswith(fname + " "):289 if line.startswith(fname + " "):
289 LOG.debug("swap file %s already in use.", fname)290 LOG.debug("swap file %s already in use", fname)
290 return fname291 return fname
291 LOG.debug("swap file %s existed, but not in /proc/swaps", fname)292 LOG.debug("swap file %s exists, but not in /proc/swaps", fname)
292 except Exception:293 except Exception:
293 LOG.warning("swap file %s existed. Error reading /proc/swaps",294 LOG.warning("swap file %s exists. Error reading /proc/swaps",
294 fname)295 fname)
295 return fname296 return fname
296297
@@ -327,6 +328,22 @@ def handle(_name, cfg, cloud, log, _args):
 
     LOG.debug("mounts configuration is %s", cfgmnt)
 
+    fstab_lines = []
+    fstab_devs = {}
+    fstab_removed = []
+
+    for line in util.load_file(FSTAB_PATH).splitlines():
+        if MNT_COMMENT in line:
+            fstab_removed.append(line)
+            continue
+
+        try:
+            toks = WS.split(line)
+        except Exception:
+            pass
+        fstab_devs[toks[0]] = line
+        fstab_lines.append(line)
+
     for i in range(len(cfgmnt)):
         # skip something that wasn't a list
         if not isinstance(cfgmnt[i], list):
@@ -336,12 +353,17 @@ def handle(_name, cfg, cloud, log, _args):
 
         start = str(cfgmnt[i][0])
         sanitized = sanitize_devname(start, cloud.device_name_to_device, log)
+        if sanitized != start:
+            log.debug("changed %s => %s" % (start, sanitized))
+
         if sanitized is None:
-            log.debug("Ignorming nonexistant named mount %s", start)
+            log.debug("Ignoring nonexistent named mount %s", start)
+            continue
+        elif sanitized in fstab_devs:
+            log.info("Device %s already defined in fstab: %s",
+                     sanitized, fstab_devs[sanitized])
             continue
 
-        if sanitized != start:
-            log.debug("changed %s => %s" % (start, sanitized))
         cfgmnt[i][0] = sanitized
 
         # in case the user did not quote a field (likely fs-freq, fs_passno)
@@ -373,11 +395,17 @@ def handle(_name, cfg, cloud, log, _args):
     for defmnt in defmnts:
         start = defmnt[0]
         sanitized = sanitize_devname(start, cloud.device_name_to_device, log)
-        if sanitized is None:
-            log.debug("Ignoring nonexistant default named mount %s", start)
-            continue
         if sanitized != start:
             log.debug("changed default device %s => %s" % (start, sanitized))
+
+        if sanitized is None:
+            log.debug("Ignoring nonexistent default named mount %s", start)
+            continue
+        elif sanitized in fstab_devs:
+            log.debug("Device %s already defined in fstab: %s",
+                      sanitized, fstab_devs[sanitized])
+            continue
+
         defmnt[0] = sanitized
 
         cfgmnt_has = False
@@ -397,7 +425,7 @@ def handle(_name, cfg, cloud, log, _args):
     actlist = []
     for x in cfgmnt:
         if x[1] is None:
-            log.debug("Skipping non-existent device named %s", x[0])
+            log.debug("Skipping nonexistent device named %s", x[0])
         else:
             actlist.append(x)
 
@@ -406,34 +434,21 @@ def handle(_name, cfg, cloud, log, _args):
         actlist.append([swapret, "none", "swap", "sw", "0", "0"])
 
     if len(actlist) == 0:
-        log.debug("No modifications to fstab needed.")
+        log.debug("No modifications to fstab needed")
         return
 
-    comment = "comment=cloudconfig"
     cc_lines = []
     needswap = False
     dirs = []
     for line in actlist:
         # write 'comment' in the fs_mntops, entry,  claiming this
-        line[3] = "%s,%s" % (line[3], comment)
+        line[3] = "%s,%s" % (line[3], MNT_COMMENT)
         if line[2] == "swap":
             needswap = True
         if line[1].startswith("/"):
             dirs.append(line[1])
         cc_lines.append('\t'.join(line))
 
-    fstab_lines = []
-    removed = []
-    for line in util.load_file(FSTAB_PATH).splitlines():
-        try:
-            toks = WS.split(line)
-            if toks[3].find(comment) != -1:
-                removed.append(line)
-                continue
-        except Exception:
-            pass
-        fstab_lines.append(line)
-
     for d in dirs:
         try:
@@ -441,7 +456,7 @@ def handle(_name, cfg, cloud, log, _args):
             util.logexc(log, "Failed to make '%s' config-mount", d)
 
     sadds = [WS.sub(" ", n) for n in cc_lines]
-    sdrops = [WS.sub(" ", n) for n in removed]
+    sdrops = [WS.sub(" ", n) for n in fstab_removed]
 
     sops = (["- " + drop for drop in sdrops if drop not in sadds] +
             ["+ " + add for add in sadds if add not in sdrops])
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index cbd0237..9e074bd 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -10,20 +10,95 @@ from cloudinit.config.schema import (
     get_schema_doc, validate_cloudconfig_schema)
 from cloudinit import log as logging
 from cloudinit.settings import PER_INSTANCE
+from cloudinit import temp_utils
 from cloudinit import templater
 from cloudinit import type_utils
 from cloudinit import util
 
+import copy
 import os
+import six
 from textwrap import dedent
 
 LOG = logging.getLogger(__name__)
 
 frequency = PER_INSTANCE
 NTP_CONF = '/etc/ntp.conf'
-TIMESYNCD_CONF = '/etc/systemd/timesyncd.conf.d/cloud-init.conf'
 NR_POOL_SERVERS = 4
-distros = ['centos', 'debian', 'fedora', 'opensuse', 'sles', 'ubuntu']
+distros = ['centos', 'debian', 'fedora', 'opensuse', 'rhel', 'sles', 'ubuntu']
+
+NTP_CLIENT_CONFIG = {
+    'chrony': {
+        'check_exe': 'chronyd',
+        'confpath': '/etc/chrony.conf',
+        'packages': ['chrony'],
+        'service_name': 'chrony',
+        'template_name': 'chrony.conf.{distro}',
+        'template': None,
+    },
+    'ntp': {
+        'check_exe': 'ntpd',
+        'confpath': NTP_CONF,
+        'packages': ['ntp'],
+        'service_name': 'ntp',
+        'template_name': 'ntp.conf.{distro}',
+        'template': None,
+    },
+    'ntpdate': {
+        'check_exe': 'ntpdate',
+        'confpath': NTP_CONF,
+        'packages': ['ntpdate'],
+        'service_name': 'ntpdate',
+        'template_name': 'ntp.conf.{distro}',
+        'template': None,
+    },
+    'systemd-timesyncd': {
+        'check_exe': '/lib/systemd/systemd-timesyncd',
+        'confpath': '/etc/systemd/timesyncd.conf.d/cloud-init.conf',
+        'packages': [],
+        'service_name': 'systemd-timesyncd',
+        'template_name': 'timesyncd.conf',
+        'template': None,
+    },
+}
+
+# This is Distro-specific configuration overrides of the base config
+DISTRO_CLIENT_CONFIG = {
+    'debian': {
+        'chrony': {
+            'confpath': '/etc/chrony/chrony.conf',
+        },
+    },
+    'opensuse': {
+        'chrony': {
+            'service_name': 'chronyd',
+        },
+        'ntp': {
+            'confpath': '/etc/ntp.conf',
+            'service_name': 'ntpd',
+        },
+        'systemd-timesyncd': {
+            'check_exe': '/usr/lib/systemd/systemd-timesyncd',
+        },
+    },
+    'sles': {
+        'chrony': {
+            'service_name': 'chronyd',
+        },
+        'ntp': {
+            'confpath': '/etc/ntp.conf',
+            'service_name': 'ntpd',
+        },
+        'systemd-timesyncd': {
+            'check_exe': '/usr/lib/systemd/systemd-timesyncd',
+        },
+    },
+    'ubuntu': {
+        'chrony': {
+            'confpath': '/etc/chrony/chrony.conf',
+        },
+    },
+}
 
 
 # The schema definition for each cloud-config module is a strict contract for
@@ -48,7 +123,34 @@ schema = {
     'distros': distros,
     'examples': [
         dedent("""\
+        # Override ntp with chrony configuration on Ubuntu
+        ntp:
+          enabled: true
+          ntp_client: chrony  # Uses cloud-init default chrony configuration
+        """),
+        dedent("""\
+        # Provide a custom ntp client configuration
         ntp:
+          enabled: true
+          ntp_client: myntpclient
+          config:
+             confpath: /etc/myntpclient/myntpclient.conf
+             check_exe: myntpclientd
+             packages:
+               - myntpclient
+             service_name: myntpclient
+             template: |
+                 ## template:jinja
+                 # My NTP Client config
+                 {% if pools -%}# pools{% endif %}
+                 {% for pool in pools -%}
+                 pool {{pool}} iburst
+                 {% endfor %}
+                 {%- if servers %}# servers
+                 {% endif %}
+                 {% for server in servers -%}
+                 server {{server}} iburst
+                 {% endfor %}
           pools: [0.int.pool.ntp.org, 1.int.pool.ntp.org, ntp.myorg.org]
           servers:
             - ntp.server.local
@@ -83,79 +185,159 @@ schema = {
                         List of ntp servers. If both pools and servers are
                         empty, 4 default pool servers will be provided with
                         the format ``{0-3}.{distro}.pool.ntp.org``.""")
-                }
+                },
+                'ntp_client': {
+                    'type': 'string',
+                    'default': 'auto',
+                    'description': dedent("""\
+                        Name of an NTP client to use to configure system NTP.
+                        When unprovided or 'auto' the default client preferred
+                        by the distribution will be used. The following
+                        built-in client names can be used to override existing
+                        configuration defaults: chrony, ntp, ntpdate,
+                        systemd-timesyncd."""),
+                },
+                'enabled': {
+                    'type': 'boolean',
+                    'default': True,
+                    'description': dedent("""\
+                        Attempt to enable ntp clients if set to True.  If set
+                        to False, ntp client will not be configured or
+                        installed"""),
+                },
+                'config': {
+                    'description': dedent("""\
+                        Configuration settings or overrides for the
+                        ``ntp_client`` specified."""),
+                    'type': ['object'],
+                    'properties': {
+                        'confpath': {
+                            'type': 'string',
+                            'description': dedent("""\
+                                The path to where the ``ntp_client``
+                                configuration is written."""),
+                        },
+                        'check_exe': {
+                            'type': 'string',
+                            'description': dedent("""\
+                                The executable name for the ``ntp_client``.
+                                For example, ntp service ``check_exe`` is
+                                'ntpd' because it runs the ntpd binary."""),
+                        },
+                        'packages': {
+                            'type': 'array',
+                            'items': {
+                                'type': 'string',
+                            },
+                            'uniqueItems': True,
+                            'description': dedent("""\
+                                List of packages needed to be installed for
+                                the selected ``ntp_client``."""),
+                        },
+                        'service_name': {
+                            'type': 'string',
+                            'description': dedent("""\
+                                The systemd or sysvinit service name used to
+                                start and stop the ``ntp_client``
+                                service."""),
+                        },
+                        'template': {
+                            'type': 'string',
+                            'description': dedent("""\
+                                Inline template allowing users to define their
+                                own ``ntp_client`` configuration template.
+                                The value must start with '## template:jinja'
+                                to enable use of templating support.
+                                """),
+                        },
+                    },
+                    # Don't use REQUIRED_NTP_CONFIG_KEYS to allow for override
+                    # of builtin client values.
+                    'required': [],
+                    'minProperties': 1,  # If we have config, define something
+                    'additionalProperties': False
+                },
             },
             'required': [],
             'additionalProperties': False
         }
     }
 }
-
-__doc__ = get_schema_doc(schema)  # Supplement python help()
-
-
-def handle(name, cfg, cloud, log, _args):
-    """Enable and configure ntp."""
-    if 'ntp' not in cfg:
-        LOG.debug(
-            "Skipping module named %s, not present or disabled by cfg", name)
-        return
-    ntp_cfg = cfg['ntp']
-    if ntp_cfg is None:
-        ntp_cfg = {}  # Allow empty config which will install the package
-
-    # TODO drop this when validate_cloudconfig_schema is strict=True
-    if not isinstance(ntp_cfg, (dict)):
-        raise RuntimeError(
-            "'ntp' key existed in config, but not a dictionary type,"
-            " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg)))
-
-    validate_cloudconfig_schema(cfg, schema)
-    if ntp_installable():
-        service_name = 'ntp'
-        confpath = NTP_CONF
-        template_name = None
-        packages = ['ntp']
-        check_exe = 'ntpd'
-    else:
-        service_name = 'systemd-timesyncd'
-        confpath = TIMESYNCD_CONF
-        template_name = 'timesyncd.conf'
-        packages = []
-        check_exe = '/lib/systemd/systemd-timesyncd'
-
-    rename_ntp_conf()
-    # ensure when ntp is installed it has a configuration file
-    # to use instead of starting up with packaged defaults
-    write_ntp_config_template(ntp_cfg, cloud, confpath, template=template_name)
-    install_ntp(cloud.distro.install_packages, packages=packages,
-                check_exe=check_exe)
-
-    try:
-        reload_ntp(service_name, systemd=cloud.distro.uses_systemd())
-    except util.ProcessExecutionError as e:
-        LOG.exception("Failed to reload/start ntp service: %s", e)
-        raise
-
-
-def ntp_installable():
-    """Check if we can install ntp package
-
-    Ubuntu-Core systems do not have an ntp package available, so
-    we always return False.  Other systems require package managers to install
-    the ntp package If we fail to find one of the package managers, then we
-    cannot install ntp.
-    """
-    if util.system_is_snappy():
-        return False
-
-    if any(map(util.which, ['apt-get', 'dnf', 'yum', 'zypper'])):
-        return True
-
-    return False
-
-
-def install_ntp(install_func, packages=None, check_exe="ntpd"):
+REQUIRED_NTP_CONFIG_KEYS = frozenset([
+    'check_exe', 'confpath', 'packages', 'service_name'])
+
+
+__doc__ = get_schema_doc(schema)  # Supplement python help()
+
+
+def distro_ntp_client_configs(distro):
+    """Construct a distro-specific ntp client config dictionary by merging
+       distro specific changes into base config.
+
+    @param distro: String providing the distro class name.
+    @returns: Dict of distro configurations for ntp clients.
+    """
+    dcfg = DISTRO_CLIENT_CONFIG
+    cfg = copy.copy(NTP_CLIENT_CONFIG)
+    if distro in dcfg:
+        cfg = util.mergemanydict([cfg, dcfg[distro]], reverse=True)
+    return cfg
+
+
+def select_ntp_client(ntp_client, distro):
+    """Determine which ntp client is to be used, consulting the distro
+       for its preference.
+
+    @param ntp_client: String name of the ntp client to use.
+    @param distro: Distro class instance.
+    @returns: Dict of the selected ntp client or {} if none selected.
+    """
+
+    # construct distro-specific ntp_client_config dict
+    distro_cfg = distro_ntp_client_configs(distro.name)
+
+    # user specified client, return its config
+    if ntp_client and ntp_client != 'auto':
+        LOG.debug('Selected NTP client "%s" via user-data configuration',
+                  ntp_client)
+        return distro_cfg.get(ntp_client, {})
+
+    # default to auto if unset in distro
+    distro_ntp_client = distro.get_option('ntp_client', 'auto')
+
+    clientcfg = {}
+    if distro_ntp_client == "auto":
+        for client in distro.preferred_ntp_clients:
+            cfg = distro_cfg.get(client)
+            if util.which(cfg.get('check_exe')):
+                LOG.debug('Selected NTP client "%s", already installed',
+                          client)
+                clientcfg = cfg
+                break
+
+        if not clientcfg:
+            client = distro.preferred_ntp_clients[0]
+            LOG.debug(
+                'Selected distro preferred NTP client "%s", not yet installed',
+                client)
+            clientcfg = distro_cfg.get(client)
+    else:
+        LOG.debug('Selected NTP client "%s" via distro system config',
+                  distro_ntp_client)
+        clientcfg = distro_cfg.get(distro_ntp_client, {})
+
+    return clientcfg
+
+
+def install_ntp_client(install_func, packages=None, check_exe="ntpd"):
+    """Install ntp client package if not already installed.
+
+    @param install_func: function.  This parameter is invoked with the
+    contents of the packages parameter.
+    @param packages: list.  This parameter defaults to ['ntp'].
+    @param check_exe: string.  The name of a binary that indicates whether
+    the specified package is already installed.
+    """
     if util.which(check_exe):
         return
     if packages is None:
@@ -164,15 +346,23 @@ def install_ntp(install_func, packages=None, check_exe="ntpd"):
     install_func(packages)
 
 
-def rename_ntp_conf(config=None):
-    """Rename any existing ntp.conf file"""
-    if config is None:  # For testing
-        config = NTP_CONF
-    if os.path.exists(config):
-        util.rename(config, config + ".dist")
+def rename_ntp_conf(confpath=None):
+    """Rename any existing ntp client config file
+
+    @param confpath: string. Specify a path to an existing ntp client
+    configuration file.
+    """
+    if os.path.exists(confpath):
+        util.rename(confpath, confpath + ".dist")
 
 
 def generate_server_names(distro):
+    """Generate a list of server names to populate an ntp client configuration
+    file.
+
+    @param distro: string.  Specify the distro name
+    @returns: list: A list of strings representing ntp servers for this distro.
+    """
     names = []
     pool_distro = distro
     # For legal reasons x.pool.sles.ntp.org does not exist,
@@ -185,34 +375,60 @@ def generate_server_names(distro):
     return names
 
 
-def write_ntp_config_template(cfg, cloud, path, template=None):
-    servers = cfg.get('servers', [])
-    pools = cfg.get('pools', [])
+def write_ntp_config_template(distro_name, servers=None, pools=None,
+                              path=None, template_fn=None, template=None):
+    """Render a ntp client configuration for the specified client.
+
+    @param distro_name: string.  The distro class name.
+    @param servers: A list of strings specifying ntp servers.  Defaults to
+    empty list.
+    @param pools: A list of strings specifying ntp pools.  Defaults to empty
+    list.
+    @param path: A string to specify where to write the rendered template.
+    @param template_fn: A string to specify the template source file.
+    @param template: A string specifying the contents of the template.  This
+    content will be written to a temporary file before being used to render
+    the configuration file.
+
+    @raises: ValueError when path is None.
+    @raises: ValueError when template_fn is None and template is None.
+    """
+    if not servers:
+        servers = []
+    if not pools:
+        pools = []
 
     if len(servers) == 0 and len(pools) == 0:
-        pools = generate_server_names(cloud.distro.name)
+        pools = generate_server_names(distro_name)
         LOG.debug(
             'Adding distro default ntp pool servers: %s', ','.join(pools))
 
-    params = {
-        'servers': servers,
-        'pools': pools,
-    }
-
-    if template is None:
-        template = 'ntp.conf.%s' % cloud.distro.name
-
-    template_fn = cloud.get_template_filename(template)
-    if not template_fn:
-        template_fn = cloud.get_template_filename('ntp.conf')
-        if not template_fn:
-            raise RuntimeError(
-                'No template found, not rendering {path}'.format(path=path))
+    if not path:
+        raise ValueError('Invalid value for path parameter')
+
+    if not template_fn and not template:
+        raise ValueError('Not template_fn or template provided')
+
+    params = {'servers': servers, 'pools': pools}
+    if template:
+        tfile = temp_utils.mkstemp(prefix='template_name-', suffix=".tmpl")
+        template_fn = tfile[1]  # filepath is second item in tuple
+        util.write_file(template_fn, content=template)
 
     templater.render_to_file(template_fn, path, params)
+    # clean up temporary template
+    if template:
+        util.del_file(template_fn)
 
 
 def reload_ntp(service, systemd=False):
+    """Restart or reload an ntp system service.
+
+    @param service: A string specifying the name of the service to be affected.
+    @param systemd: A boolean indicating if the distro uses systemd, defaults
+    to False.
+    @returns: A tuple of stdout, stderr results from executing the action.
+    """
     if systemd:
         cmd = ['systemctl', 'reload-or-restart', service]
     else:
@@ -220,4 +436,117 @@ def reload_ntp(service, systemd=False):
     util.subp(cmd, capture=True)
 
 
+def supplemental_schema_validation(ntp_config):
+    """Validate user-provided ntp:config option values.
+
+    This function supplements flexible jsonschema validation with specific
+    value checks to aid in triage of invalid user-provided configuration.
+
+    @param ntp_config: Dictionary of configuration value under 'ntp'.
+
+    @raises: ValueError describing invalid values provided.
+    """
+    errors = []
+    missing = REQUIRED_NTP_CONFIG_KEYS.difference(set(ntp_config.keys()))
+    if missing:
+        keys = ', '.join(sorted(missing))
+        errors.append(
+            'Missing required ntp:config keys: {keys}'.format(keys=keys))
+    elif not any([ntp_config.get('template'),
+                  ntp_config.get('template_name')]):
+        errors.append(
+            'Either ntp:config:template or ntp:config:template_name values'
+            ' are required')
+    for key, value in sorted(ntp_config.items()):
+        keypath = 'ntp:config:' + key
+        if key == 'confpath':
+            if not all([value, isinstance(value, six.string_types)]):
+                errors.append(
+                    'Expected a config file path {keypath}.'
+                    ' Found ({value})'.format(keypath=keypath, value=value))
+        elif key == 'packages':
+            if not isinstance(value, list):
+                errors.append(
+                    'Expected a list of required package names for {keypath}.'
+                    ' Found ({value})'.format(keypath=keypath, value=value))
+        elif key in ('template', 'template_name'):
+            if value is None:  # Either template or template_name can be none
+                continue
+            if not isinstance(value, six.string_types):
+                errors.append(
+                    'Expected a string type for {keypath}.'
+                    ' Found ({value})'.format(keypath=keypath, value=value))
+        elif not isinstance(value, six.string_types):
+            errors.append(
+                'Expected a string type for {keypath}.'
+                ' Found ({value})'.format(keypath=keypath, value=value))
+
+    if errors:
+        raise ValueError(r'Invalid ntp configuration:\n{errors}'.format(
+            errors='\n'.join(errors)))
+
+
+def handle(name, cfg, cloud, log, _args):
+    """Enable and configure ntp."""
+    if 'ntp' not in cfg:
+        LOG.debug(
+            "Skipping module named %s, not present or disabled by cfg", name)
+        return
+    ntp_cfg = cfg['ntp']
+    if ntp_cfg is None:
+        ntp_cfg = {}  # Allow empty config which will install the package
+
+    # TODO drop this when validate_cloudconfig_schema is strict=True
+    if not isinstance(ntp_cfg, (dict)):
+        raise RuntimeError(
+            "'ntp' key existed in config, but not a dictionary type,"
+            " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg)))
+
+    validate_cloudconfig_schema(cfg, schema)
+
+    # Allow users to explicitly enable/disable
+    enabled = ntp_cfg.get('enabled', True)
+    if util.is_false(enabled):
+        LOG.debug("Skipping module named %s, disabled by cfg", name)
+        return
+
+    # Select which client is going to be used and get the configuration
+    ntp_client_config = select_ntp_client(ntp_cfg.get('ntp_client'),
+                                          cloud.distro)
+
+    # Allow user ntp config to override distro configurations
+    ntp_client_config = util.mergemanydict(
+        [ntp_client_config, ntp_cfg.get('config', {})], reverse=True)
+
+    supplemental_schema_validation(ntp_client_config)
+    rename_ntp_conf(confpath=ntp_client_config.get('confpath'))
+
+    template_fn = None
+    if not ntp_client_config.get('template'):
+        template_name = (
+            ntp_client_config.get('template_name').replace('{distro}',
+                                                           cloud.distro.name))
+        template_fn = cloud.get_template_filename(template_name)
+        if not template_fn:
+            msg = ('No template found, not rendering %s' %
+                   ntp_client_config.get('template_name'))
+            raise RuntimeError(msg)
+
+    write_ntp_config_template(cloud.distro.name,
+                              servers=ntp_cfg.get('servers', []),
+                              pools=ntp_cfg.get('pools', []),
+                              path=ntp_client_config.get('confpath'),
+                              template_fn=template_fn,
+                              template=ntp_client_config.get('template'))
+
+    install_ntp_client(cloud.distro.install_packages,
+                       packages=ntp_client_config['packages'],
+                       check_exe=ntp_client_config['check_exe'])
+    try:
+        reload_ntp(ntp_client_config['service_name'],
+                   systemd=cloud.distro.uses_systemd())
+    except util.ProcessExecutionError as e:
+        LOG.exception("Failed to reload/start ntp service: %s", e)
+        raise
+
 # vi: ts=4 expandtab
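
The new distro_ntp_client_configs() above overlays DISTRO_CLIENT_CONFIG onto NTP_CLIENT_CONFIG via util.mergemanydict. A self-contained sketch of the same overlay with a simplified recursive merge (deep_merge here is an illustrative stand-in, not cloud-init's implementation):

    import copy

    BASE = {'chrony': {'confpath': '/etc/chrony.conf',
                       'service_name': 'chrony'}}
    OVERRIDES = {'ubuntu': {'chrony': {'confpath': '/etc/chrony/chrony.conf'}}}

    def deep_merge(base, override):
        """Recursively overlay override onto base; later values win."""
        merged = copy.deepcopy(base)
        for key, value in override.items():
            if isinstance(value, dict) and isinstance(merged.get(key), dict):
                merged[key] = deep_merge(merged[key], value)
            else:
                merged[key] = value
        return merged

    cfg = deep_merge(BASE, OVERRIDES.get('ubuntu', {}))
    assert cfg['chrony']['confpath'] == '/etc/chrony/chrony.conf'
    assert cfg['chrony']['service_name'] == 'chrony'  # base value preserved
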
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index 878069b..3be0d1c 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -41,6 +41,7 @@ keys to post. Available keys are:
41"""41"""
4242
43from cloudinit import templater43from cloudinit import templater
44from cloudinit import url_helper
44from cloudinit import util45from cloudinit import util
4546
46from cloudinit.settings import PER_INSTANCE47from cloudinit.settings import PER_INSTANCE
@@ -136,9 +137,9 @@ def handle(name, cfg, cloud, log, args):
     }
     url = templater.render_string(url, url_params)
     try:
-        util.read_file_or_url(url, data=real_submit_keys,
-                              retries=tries, sec_between=3,
-                              ssl_details=util.fetch_ssl_details(cloud.paths))
+        url_helper.read_file_or_url(
+            url, data=real_submit_keys, retries=tries, sec_between=3,
+            ssl_details=util.fetch_ssl_details(cloud.paths))
     except Exception:
         util.logexc(log, "Failed to post phone home data to %s in %s tries",
                     url, tries)
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index 4da3a58..50b3747 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -74,7 +74,7 @@ def givecmdline(pid):
     if util.is_FreeBSD():
         (output, _err) = util.subp(['procstat', '-c', str(pid)])
         line = output.splitlines()[1]
-        m = re.search('\d+ (\w|\.|-)+\s+(/\w.+)', line)
+        m = re.search(r'\d+ (\w|\.|-)+\s+(/\w.+)', line)
         return m.group(2)
     else:
         return util.load_file("/proc/%s/cmdline" % pid)
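
The r'' prefix added above (and in cc_rsyslog.py below) matters because '\d' and '\w' are not valid escape sequences in ordinary string literals; Python 3.6+ flags them with a DeprecationWarning, while raw strings pass the backslashes through unchanged. The pattern text is identical either way, as this snippet shows (the sample line only approximates 'procstat -c' output):

    import re

    # Approximation of one line of FreeBSD 'procstat -c <pid>' output.
    line = "  615 sh               /usr/local/sbin/sshd -D"

    # Both literals currently produce the same pattern text; only the raw
    # form is future-proof, since '\d' in a plain string is an invalid
    # escape that newer Pythons warn about (and may eventually reject).
    assert '\d+ (\w|\.|-)+\s+(/\w.+)' == r'\d+ (\w|\.|-)+\s+(/\w.+)'

    m = re.search(r'\d+ (\w|\.|-)+\s+(/\w.+)', line)
    assert m and m.group(2).startswith('/usr')
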
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 013e69b..2edddd0 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -81,7 +81,7 @@ def _resize_xfs(mount_point, devpth):
 
 
 def _resize_ufs(mount_point, devpth):
-    return ('growfs', devpth)
+    return ('growfs', '-y', devpth)
 
 
 def _resize_zfs(mount_point, devpth):
@@ -89,13 +89,11 @@ def _resize_zfs(mount_point, devpth):
 
 
 def _get_dumpfs_output(mount_point):
-    dumpfs_res, err = util.subp(['dumpfs', '-m', mount_point])
-    return dumpfs_res
+    return util.subp(['dumpfs', '-m', mount_point])[0]
 
 
 def _get_gpart_output(part):
-    gpart_res, err = util.subp(['gpart', 'show', part])
-    return gpart_res
+    return util.subp(['gpart', 'show', part])[0]
 
 
 def _can_skip_resize_ufs(mount_point, devpth):
@@ -113,7 +111,7 @@ def _can_skip_resize_ufs(mount_point, devpth):
         if not line.startswith('#'):
             newfs_cmd = shlex.split(line)
             opt_value = 'O:Ua:s:b:d:e:f:g:h:i:jk:m:o:'
-            optlist, args = getopt.getopt(newfs_cmd[1:], opt_value)
+            optlist, _args = getopt.getopt(newfs_cmd[1:], opt_value)
             for o, a in optlist:
                 if o == "-s":
                     cur_fs_sz = int(a)
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index 530808c..1c67943 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -209,8 +209,7 @@ class SubscriptionManager(object):
             cmd.append("--serverurl={0}".format(self.server_hostname))
 
         try:
-            return_out, return_err = self._sub_man_cli(cmd,
-                                                       logstring_val=True)
+            return_out = self._sub_man_cli(cmd, logstring_val=True)[0]
         except util.ProcessExecutionError as e:
             if e.stdout == "":
                 self.log_warn("Registration failed due "
@@ -233,8 +232,7 @@ class SubscriptionManager(object):
 
         # Attempting to register the system only
         try:
-            return_out, return_err = self._sub_man_cli(cmd,
-                                                       logstring_val=True)
+            return_out = self._sub_man_cli(cmd, logstring_val=True)[0]
         except util.ProcessExecutionError as e:
             if e.stdout == "":
                 self.log_warn("Registration failed due "
@@ -257,7 +255,7 @@ class SubscriptionManager(object):
                    .format(self.servicelevel)]
 
         try:
-            return_out, return_err = self._sub_man_cli(cmd)
+            return_out = self._sub_man_cli(cmd)[0]
         except util.ProcessExecutionError as e:
             if e.stdout.rstrip() != '':
                 for line in e.stdout.split("\n"):
@@ -275,7 +273,7 @@ class SubscriptionManager(object):
     def _set_auto_attach(self):
         cmd = ['attach', '--auto']
         try:
-            return_out, return_err = self._sub_man_cli(cmd)
+            return_out = self._sub_man_cli(cmd)[0]
         except util.ProcessExecutionError as e:
             self.log_warn("Auto-attach failed with: {0}".format(e))
             return False
@@ -294,12 +292,12 @@ class SubscriptionManager(object):
 
         # Get all available pools
         cmd = ['list', '--available', '--pool-only']
-        results, errors = self._sub_man_cli(cmd)
+        results = self._sub_man_cli(cmd)[0]
         available = (results.rstrip()).split("\n")
 
         # Get all consumed pools
         cmd = ['list', '--consumed', '--pool-only']
-        results, errors = self._sub_man_cli(cmd)
+        results = self._sub_man_cli(cmd)[0]
         consumed = (results.rstrip()).split("\n")
 
         return available, consumed
@@ -311,14 +309,14 @@ class SubscriptionManager(object):
         '''
 
         cmd = ['repos', '--list-enabled']
-        return_out, return_err = self._sub_man_cli(cmd)
+        return_out = self._sub_man_cli(cmd)[0]
         active_repos = []
         for repo in return_out.split("\n"):
             if "Repo ID:" in repo:
                 active_repos.append((repo.split(':')[1]).strip())
 
         cmd = ['repos', '--list-disabled']
-        return_out, return_err = self._sub_man_cli(cmd)
+        return_out = self._sub_man_cli(cmd)[0]
 
         inactive_repos = []
         for repo in return_out.split("\n"):
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index af08788..27d2366 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -203,8 +203,8 @@ LOG = logging.getLogger(__name__)
 COMMENT_RE = re.compile(r'[ ]*[#]+[ ]*')
 HOST_PORT_RE = re.compile(
     r'^(?P<proto>[@]{0,2})'
-    '(([[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))'
-    '([:](?P<port>[0-9]+))?$')
+    r'(([[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))'
+    r'([:](?P<port>[0-9]+))?$')
 
 
 def reload_syslog(command=DEF_RELOAD, systemd=False):
diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py
index 539cbd5..b6f6c80 100644
--- a/cloudinit/config/cc_runcmd.py
+++ b/cloudinit/config/cc_runcmd.py
@@ -66,7 +66,6 @@ schema = {
             'additionalProperties': False,
             'minItems': 1,
             'required': [],
-            'uniqueItems': True
         }
     }
 }
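
Dropping 'uniqueItems' here (and in cc_snap.py and cc_ubuntu_advantage.py below) is deliberate: with that keyword set, jsonschema rejects arrays containing identical members, so user-data that legitimately repeats a command would fail validation. A small demonstration using the jsonschema package (assumed available, since cloud-init's schema validation already depends on it when installed):

    from jsonschema import Draft4Validator, ValidationError

    schema_with_unique = {'type': 'array', 'uniqueItems': True}
    schema_without = {'type': 'array'}

    cmds = ['echo bye', 'echo bye']  # intentionally duplicated commands

    try:
        Draft4Validator(schema_with_unique).validate(cmds)
    except ValidationError as e:
        print('rejected:', e.message)  # non-unique array items

    Draft4Validator(schema_without).validate(cmds)  # passes
    print('duplicates accepted once uniqueItems is dropped')
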
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index bb24d57..5ef9737 100755
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -68,16 +68,57 @@ import re
 import sys
 
 from cloudinit.distros import ug_util
-from cloudinit import ssh_util
+from cloudinit import log as logging
+from cloudinit.ssh_util import update_ssh_config
 from cloudinit import util
 
 from string import ascii_letters, digits
 
+LOG = logging.getLogger(__name__)
+
 # We are removing certain 'painful' letters/numbers
 PW_SET = (''.join([x for x in ascii_letters + digits
                    if x not in 'loLOI01']))
 
 
+def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"):
+    """Apply sshd PasswordAuthentication changes.
+
+    @param pw_auth: config setting from 'pw_auth'.
+                    Best given as True, False, or "unchanged".
+    @param service_cmd: The service command list (['service'])
+    @param service_name: The name of the sshd service for the system.
+
+    @return: None"""
+    cfg_name = "PasswordAuthentication"
+    if service_cmd is None:
+        service_cmd = ["service"]
+
+    if util.is_true(pw_auth):
+        cfg_val = 'yes'
+    elif util.is_false(pw_auth):
+        cfg_val = 'no'
+    else:
+        bmsg = "Leaving ssh config '%s' unchanged." % cfg_name
+        if pw_auth is None or pw_auth.lower() == 'unchanged':
+            LOG.debug("%s ssh_pwauth=%s", bmsg, pw_auth)
+        else:
+            LOG.warning("%s Unrecognized value: ssh_pwauth=%s", bmsg, pw_auth)
+        return
+
+    updated = update_ssh_config({cfg_name: cfg_val})
+    if not updated:
+        LOG.debug("No need to restart ssh service, %s not updated.", cfg_name)
+        return
+
+    if 'systemctl' in service_cmd:
+        cmd = list(service_cmd) + ["restart", service_name]
+    else:
+        cmd = list(service_cmd) + [service_name, "restart"]
+    util.subp(cmd)
+    LOG.debug("Restarted the ssh daemon.")
+
+
 def handle(_name, cfg, cloud, log, args):
     if len(args) != 0:
         # if run from command line, and give args, wipe the chpasswd['list']
@@ -170,65 +211,9 @@ def handle(_name, cfg, cloud, log, args):
     if expired_users:
         log.debug("Expired passwords for: %s users", expired_users)
 
-    change_pwauth = False
-    pw_auth = None
-    if 'ssh_pwauth' in cfg:
-        if util.is_true(cfg['ssh_pwauth']):
-            change_pwauth = True
-            pw_auth = 'yes'
-        elif util.is_false(cfg['ssh_pwauth']):
-            change_pwauth = True
-            pw_auth = 'no'
-        elif str(cfg['ssh_pwauth']).lower() == 'unchanged':
-            log.debug('Leaving auth line unchanged')
-            change_pwauth = False
-        elif not str(cfg['ssh_pwauth']).strip():
-            log.debug('Leaving auth line unchanged')
-            change_pwauth = False
-        elif not cfg['ssh_pwauth']:
-            log.debug('Leaving auth line unchanged')
-            change_pwauth = False
-        else:
-            msg = 'Unrecognized value %s for ssh_pwauth' % cfg['ssh_pwauth']
-            util.logexc(log, msg)
-
-    if change_pwauth:
-        replaced_auth = False
-
-        # See: man sshd_config
-        old_lines = ssh_util.parse_ssh_config(ssh_util.DEF_SSHD_CFG)
-        new_lines = []
-        i = 0
-        for (i, line) in enumerate(old_lines):
-            # Keywords are case-insensitive and arguments are case-sensitive
-            if line.key == 'passwordauthentication':
-                log.debug("Replacing auth line %s with %s", i + 1, pw_auth)
-                replaced_auth = True
-                line.value = pw_auth
-            new_lines.append(line)
-
-        if not replaced_auth:
-            log.debug("Adding new auth line %s", i + 1)
-            replaced_auth = True
-            new_lines.append(ssh_util.SshdConfigLine('',
-                                                     'PasswordAuthentication',
-                                                     pw_auth))
-
-        lines = [str(l) for l in new_lines]
-        util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines),
-                        copy_mode=True)
-
-        try:
-            cmd = cloud.distro.init_cmd  # Default service
-            cmd.append(cloud.distro.get_option('ssh_svcname', 'ssh'))
-            cmd.append('restart')
-            if 'systemctl' in cmd:  # Switch action ordering
-                cmd[1], cmd[2] = cmd[2], cmd[1]
-            cmd = filter(None, cmd)  # Remove empty arguments
-            util.subp(cmd)
-            log.debug("Restarted the ssh daemon")
-        except Exception:
-            util.logexc(log, "Restarting of the ssh daemon failed")
+    handle_ssh_pwauth(
+        cfg.get('ssh_pwauth'), service_cmd=cloud.distro.init_cmd,
+        service_name=cloud.distro.get_option('ssh_svcname', 'ssh'))
 
     if len(errors):
         log.debug("%s errors occured, re-raising the last one", len(errors))
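
Because the sshd handling now lives in a standalone handle_ssh_pwauth(), its three outcomes can be exercised directly with mocks, mirroring the unit tests added later in this diff. A usage sketch (assumes cloud-init is importable; the patch targets follow the module's own imports):

    from unittest import mock

    from cloudinit.config import cc_set_passwords as setpass

    # "unchanged" (or None) leaves sshd_config alone and restarts nothing.
    with mock.patch.object(setpass, 'update_ssh_config') as m_update, \
            mock.patch.object(setpass.util, 'subp') as m_subp:
        setpass.handle_ssh_pwauth('unchanged', service_cmd=['systemctl'])
        m_update.assert_not_called()
        m_subp.assert_not_called()

    # True writes PasswordAuthentication=yes; the service restarts only if
    # update_ssh_config reports that the file actually changed.
    with mock.patch.object(setpass, 'update_ssh_config',
                           return_value=True) as m_update, \
            mock.patch.object(setpass.util, 'subp') as m_subp:
        setpass.handle_ssh_pwauth(True, service_cmd=['systemctl'])
        m_update.assert_called_with({'PasswordAuthentication': 'yes'})
        m_subp.assert_called_with(['systemctl', 'restart', 'ssh'])
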
diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py
index 34a53fd..90724b8 100644
--- a/cloudinit/config/cc_snap.py
+++ b/cloudinit/config/cc_snap.py
@@ -110,7 +110,6 @@ schema = {
                     'additionalItems': False,  # Reject non-string & non-list
                     'minItems': 1,
                     'minProperties': 1,
-                    'uniqueItems': True
                 },
                 'squashfuse_in_container': {
                     'type': 'boolean'
@@ -204,12 +203,12 @@ def maybe_install_squashfuse(cloud):
         return
     try:
         cloud.distro.update_package_sources()
-    except Exception as e:
+    except Exception:
         util.logexc(LOG, "Package update failed")
         raise
     try:
         cloud.distro.install_packages(['squashfuse'])
-    except Exception as e:
+    except Exception:
         util.logexc(LOG, "Failed to install squashfuse")
         raise
 
diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index bab80bb..15bee2d 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -213,7 +213,7 @@ def render_snap_op(op, name, path=None, cfgfile=None, config=None):
 
 def read_installed_packages():
     ret = []
-    for (name, date, version, dev) in read_pkg_data():
+    for (name, _date, _version, dev) in read_pkg_data():
         if dev:
             ret.append(NAMESPACE_DELIM.join([name, dev]))
         else:
@@ -222,7 +222,7 @@ def read_installed_packages():
 
 
 def read_pkg_data():
-    out, err = util.subp([SNAPPY_CMD, "list"])
+    out, _err = util.subp([SNAPPY_CMD, "list"])
     pkg_data = []
     for line in out.splitlines()[1:]:
         toks = line.split(sep=None, maxsplit=3)
diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py
index 16b1868..5e082bd 100644
--- a/cloudinit/config/cc_ubuntu_advantage.py
+++ b/cloudinit/config/cc_ubuntu_advantage.py
@@ -87,7 +87,6 @@ schema = {
                     'additionalItems': False,  # Reject non-string & non-list
                     'minItems': 1,
                     'minProperties': 1,
-                    'uniqueItems': True
                 }
             },
             'additionalProperties': False,  # Reject keys not in schema
@@ -149,12 +148,12 @@ def maybe_install_ua_tools(cloud):
         return
     try:
         cloud.distro.update_package_sources()
-    except Exception as e:
+    except Exception:
         util.logexc(LOG, "Package update failed")
         raise
     try:
         cloud.distro.install_packages(['ubuntu-advantage-tools'])
-    except Exception as e:
+    except Exception:
         util.logexc(LOG, "Failed to install ubuntu-advantage-tools")
         raise
 
diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py
index b215e95..c95bdaa 100644
--- a/cloudinit/config/cc_users_groups.py
+++ b/cloudinit/config/cc_users_groups.py
@@ -54,8 +54,9 @@ config keys for an entry in ``users`` are as follows:
     - ``ssh_authorized_keys``: Optional. List of ssh keys to add to user's
       authkeys file. Default: none
     - ``ssh_import_id``: Optional. SSH id to import for user. Default: none
-    - ``sudo``: Optional. Sudo rule to use, or list of sudo rules to use.
-      Default: none.
+    - ``sudo``: Optional. Sudo rule to use, list of sudo rules to use or False.
+      Default: none. An absence of sudo key, or a value of none or false
+      will result in no sudo rules being written for the user.
     - ``system``: Optional. Create user as system user with no home directory.
       Default: false
     - ``uid``: Optional. The user's ID. Default: The next available value.
@@ -82,6 +83,9 @@ config keys for an entry in ``users`` are as follows:
 
     users:
     - default
+    # User explicitly omitted from sudo permission; also default behavior.
+    - name: <some_restricted_user>
+      sudo: false
     - name: <username>
       expiredate: <date>
       gecos: <comment>
diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py
index ca7d0d5..080a6d0 100644
--- a/cloudinit/config/schema.py
+++ b/cloudinit/config/schema.py
@@ -4,7 +4,7 @@
 from __future__ import print_function
 
 from cloudinit import importer
-from cloudinit.util import find_modules, read_file_or_url
+from cloudinit.util import find_modules, load_file
 
 import argparse
 from collections import defaultdict
@@ -93,20 +93,33 @@ def validate_cloudconfig_schema(config, schema, strict=False):
 def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
     """Return contents of the cloud-config file annotated with schema errors.
 
-    @param cloudconfig: YAML-loaded object from the original_content.
+    @param cloudconfig: YAML-loaded dict from the original_content or empty
+        dict if unparseable.
     @param original_content: The contents of a cloud-config file
     @param schema_errors: List of tuples from a JSONSchemaValidationError. The
         tuples consist of (schemapath, error_message).
     """
     if not schema_errors:
         return original_content
-    schemapaths = _schemapath_for_cloudconfig(cloudconfig, original_content)
+    schemapaths = {}
+    if cloudconfig:
+        schemapaths = _schemapath_for_cloudconfig(
+            cloudconfig, original_content)
     errors_by_line = defaultdict(list)
     error_count = 1
     error_footer = []
     annotated_content = []
     for path, msg in schema_errors:
-        errors_by_line[schemapaths[path]].append(msg)
+        match = re.match(r'format-l(?P<line>\d+)\.c(?P<col>\d+).*', path)
+        if match:
+            line, col = match.groups()
+            errors_by_line[int(line)].append(msg)
+        else:
+            col = None
+            errors_by_line[schemapaths[path]].append(msg)
+        if col is not None:
+            msg = 'Line {line} column {col}: {msg}'.format(
+                line=line, col=col, msg=msg)
         error_footer.append('# E{0}: {1}'.format(error_count, msg))
         error_count += 1
     lines = original_content.decode().split('\n')
@@ -139,21 +152,34 @@ def validate_cloudconfig_file(config_path, schema, annotate=False):
139 """152 """
140 if not os.path.exists(config_path):153 if not os.path.exists(config_path):
141 raise RuntimeError('Configfile {0} does not exist'.format(config_path))154 raise RuntimeError('Configfile {0} does not exist'.format(config_path))
142 content = read_file_or_url('file://{0}'.format(config_path)).contents155 content = load_file(config_path, decode=False)
143 if not content.startswith(CLOUD_CONFIG_HEADER):156 if not content.startswith(CLOUD_CONFIG_HEADER):
144 errors = (157 errors = (
145 ('header', 'File {0} needs to begin with "{1}"'.format(158 ('format-l1.c1', 'File {0} needs to begin with "{1}"'.format(
146 config_path, CLOUD_CONFIG_HEADER.decode())),)159 config_path, CLOUD_CONFIG_HEADER.decode())),)
147 raise SchemaValidationError(errors)160 error = SchemaValidationError(errors)
148161 if annotate:
162 print(annotated_cloudconfig_file({}, content, error.schema_errors))
163 raise error
149 try:164 try:
150 cloudconfig = yaml.safe_load(content)165 cloudconfig = yaml.safe_load(content)
151 except yaml.parser.ParserError as e:166 except (yaml.YAMLError) as e:
152 errors = (167 line = column = 1
153 ('format', 'File {0} is not valid yaml. {1}'.format(168 mark = None
154 config_path, str(e))),)169 if hasattr(e, 'context_mark') and getattr(e, 'context_mark'):
155 raise SchemaValidationError(errors)170 mark = getattr(e, 'context_mark')
156171 elif hasattr(e, 'problem_mark') and getattr(e, 'problem_mark'):
172 mark = getattr(e, 'problem_mark')
173 if mark:
174 line = mark.line + 1
175 column = mark.column + 1
176 errors = (('format-l{line}.c{col}'.format(line=line, col=column),
177 'File {0} is not valid yaml. {1}'.format(
178 config_path, str(e))),)
179 error = SchemaValidationError(errors)
180 if annotate:
181 print(annotated_cloudconfig_file({}, content, error.schema_errors))
182 raise error
157 try:183 try:
158 validate_cloudconfig_schema(184 validate_cloudconfig_schema(
159 cloudconfig, schema, strict=True)185 cloudconfig, schema, strict=True)
@@ -176,7 +202,7 @@ def _schemapath_for_cloudconfig(config, original_content):
     list_index = 0
     RE_YAML_INDENT = r'^(\s*)'
     scopes = []
-    for line_number, line in enumerate(content_lines):
+    for line_number, line in enumerate(content_lines, 1):
         indent_depth = len(re.match(RE_YAML_INDENT, line).groups()[0])
         line = line.strip()
         if not line or line.startswith('#'):
@@ -208,8 +234,8 @@ def _schemapath_for_cloudconfig(config, original_content):
                 scopes.append((indent_depth + 2, key + '.0'))
                 for inner_list_index in range(0, len(yaml.safe_load(value))):
                     list_key = key + '.' + str(inner_list_index)
-                    schema_line_numbers[list_key] = line_number + 1
-        schema_line_numbers[key] = line_number + 1
+                    schema_line_numbers[list_key] = line_number
+        schema_line_numbers[key] = line_number
     return schema_line_numbers
 
 
@@ -297,8 +323,8 @@ def get_schema():
 
     configs_dir = os.path.dirname(os.path.abspath(__file__))
     potential_handlers = find_modules(configs_dir)
-    for (fname, mod_name) in potential_handlers.items():
-        mod_locs, looked_locs = importer.find_module(
+    for (_fname, mod_name) in potential_handlers.items():
+        mod_locs, _looked_locs = importer.find_module(
             mod_name, ['cloudinit.config'], ['schema'])
         if mod_locs:
             mod = importer.import_module(mod_locs[0])
@@ -337,9 +363,11 @@ def handle_schema_args(name, args):
     try:
         validate_cloudconfig_file(
             args.config_file, full_schema, args.annotate)
-    except (SchemaValidationError, RuntimeError) as e:
+    except SchemaValidationError as e:
         if not args.annotate:
             error(str(e))
+    except RuntimeError as e:
+        error(str(e))
     else:
         print("Valid cloud-config file {0}".format(args.config_file))
     if args.doc:
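
With this change, non-schema failures (missing #cloud-config header, unparseable YAML) are reported under synthetic paths of the form format-l<LINE>.c<COL>, which annotated_cloudconfig_file() recognizes with the regex shown above instead of consulting _schemapath_for_cloudconfig. A quick illustration of the convention (the paths here are made up for the demo):

    import re

    MARKER_RE = re.compile(r'format-l(?P<line>\d+)\.c(?P<col>\d+).*')

    for path in ('format-l1.c1', 'format-l8.c3', 'runcmd.0'):
        match = MARKER_RE.match(path)
        if match:
            line, col = (int(v) for v in match.groups())
            print('annotate line %d, column %d' % (line, col))
        else:
            print('schema path %r: resolve via parsed document' % path)
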
diff --git a/cloudinit/config/tests/test_disable_ec2_metadata.py b/cloudinit/config/tests/test_disable_ec2_metadata.py
new file mode 100644
index 0000000..67646b0
--- /dev/null
+++ b/cloudinit/config/tests/test_disable_ec2_metadata.py
@@ -0,0 +1,50 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Tests cc_disable_ec2_metadata handler"""
+
+import cloudinit.config.cc_disable_ec2_metadata as ec2_meta
+
+from cloudinit.tests.helpers import CiTestCase, mock
+
+import logging
+
+LOG = logging.getLogger(__name__)
+
+DISABLE_CFG = {'disable_ec2_metadata': 'true'}
+
+
+class TestEC2MetadataRoute(CiTestCase):
+
+    with_logs = True
+
+    @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which')
+    @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp')
+    def test_disable_ifconfig(self, m_subp, m_which):
+        """Set the route if ifconfig command is available"""
+        m_which.side_effect = lambda x: x if x == 'ifconfig' else None
+        ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None)
+        m_subp.assert_called_with(
+            ['route', 'add', '-host', '169.254.169.254', 'reject'],
+            capture=False)
+
+    @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which')
+    @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp')
+    def test_disable_ip(self, m_subp, m_which):
+        """Set the route if ip command is available"""
+        m_which.side_effect = lambda x: x if x == 'ip' else None
+        ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None)
+        m_subp.assert_called_with(
+            ['ip', 'route', 'add', 'prohibit', '169.254.169.254'],
+            capture=False)
+
+    @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which')
+    @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp')
+    def test_disable_no_tool(self, m_subp, m_which):
+        """Log error when neither route nor ip commands are available"""
+        m_which.return_value = None  # Find neither ifconfig nor ip
+        ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None)
+        self.assertEqual(
+            [mock.call('ip'), mock.call('ifconfig')], m_which.call_args_list)
+        m_subp.assert_not_called()
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py
new file mode 100644
index 0000000..b051ec8
--- /dev/null
+++ b/cloudinit/config/tests/test_set_passwords.py
@@ -0,0 +1,71 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import mock
+
+from cloudinit.config import cc_set_passwords as setpass
+from cloudinit.tests.helpers import CiTestCase
+from cloudinit import util
+
+MODPATH = "cloudinit.config.cc_set_passwords."
+
+
+class TestHandleSshPwauth(CiTestCase):
+    """Test cc_set_passwords handling of ssh_pwauth in handle_ssh_pwauth."""
+
+    with_logs = True
+
+    @mock.patch(MODPATH + "util.subp")
+    def test_unknown_value_logs_warning(self, m_subp):
+        setpass.handle_ssh_pwauth("floo")
+        self.assertIn("Unrecognized value: ssh_pwauth=floo",
+                      self.logs.getvalue())
+        m_subp.assert_not_called()
+
+    @mock.patch(MODPATH + "update_ssh_config", return_value=True)
+    @mock.patch(MODPATH + "util.subp")
+    def test_systemctl_as_service_cmd(self, m_subp, m_update_ssh_config):
+        """If systemctl in service cmd: systemctl restart name."""
+        setpass.handle_ssh_pwauth(
+            True, service_cmd=["systemctl"], service_name="myssh")
+        self.assertEqual(mock.call(["systemctl", "restart", "myssh"]),
+                         m_subp.call_args)
+
+    @mock.patch(MODPATH + "update_ssh_config", return_value=True)
+    @mock.patch(MODPATH + "util.subp")
+    def test_service_as_service_cmd(self, m_subp, m_update_ssh_config):
+        """If 'service' is the service cmd: service name restart."""
+        setpass.handle_ssh_pwauth(
+            True, service_cmd=["service"], service_name="myssh")
+        self.assertEqual(mock.call(["service", "myssh", "restart"]),
+                         m_subp.call_args)
+
+    @mock.patch(MODPATH + "update_ssh_config", return_value=False)
+    @mock.patch(MODPATH + "util.subp")
+    def test_not_restarted_if_not_updated(self, m_subp, m_update_ssh_config):
+        """If config is not updated, then no system restart should be done."""
+        setpass.handle_ssh_pwauth(True)
+        m_subp.assert_not_called()
+        self.assertIn("No need to restart ssh", self.logs.getvalue())
+
+    @mock.patch(MODPATH + "update_ssh_config", return_value=True)
+    @mock.patch(MODPATH + "util.subp")
+    def test_unchanged_does_nothing(self, m_subp, m_update_ssh_config):
+        """If 'unchanged', then no updates to config and no restart."""
+        setpass.handle_ssh_pwauth(
+            "unchanged", service_cmd=["systemctl"], service_name="myssh")
+        m_update_ssh_config.assert_not_called()
+        m_subp.assert_not_called()
+
+    @mock.patch(MODPATH + "util.subp")
+    def test_valid_change_values(self, m_subp):
+        """If value is a valid change value, then update should be called."""
+        upname = MODPATH + "update_ssh_config"
+        optname = "PasswordAuthentication"
+        for value in util.FALSE_STRINGS + util.TRUE_STRINGS:
+            optval = "yes" if value in util.TRUE_STRINGS else "no"
+            with mock.patch(upname, return_value=False) as m_update:
+                setpass.handle_ssh_pwauth(value)
+                m_update.assert_called_with({optname: optval})
+        m_subp.assert_not_called()
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/config/tests/test_snap.py b/cloudinit/config/tests/test_snap.py
index c5b4a9d..34c80f1 100644
--- a/cloudinit/config/tests/test_snap.py
+++ b/cloudinit/config/tests/test_snap.py
@@ -9,7 +9,7 @@ from cloudinit.config.cc_snap import (
 from cloudinit.config.schema import validate_cloudconfig_schema
 from cloudinit import util
 from cloudinit.tests.helpers import (
-    CiTestCase, mock, wrap_and_call, skipUnlessJsonSchema)
+    CiTestCase, SchemaTestCaseMixin, mock, wrap_and_call, skipUnlessJsonSchema)
 
 
 SYSTEM_USER_ASSERTION = """\
@@ -245,9 +245,10 @@ class TestRunCommands(CiTestCase):
245245
246246
247@skipUnlessJsonSchema()247@skipUnlessJsonSchema()
248class TestSchema(CiTestCase):248class TestSchema(CiTestCase, SchemaTestCaseMixin):
249249
250 with_logs = True250 with_logs = True
251 schema = schema
251252
252 def test_schema_warns_on_snap_not_as_dict(self):253 def test_schema_warns_on_snap_not_as_dict(self):
253 """If the snap configuration is not a dict, emit a warning."""254 """If the snap configuration is not a dict, emit a warning."""
@@ -340,6 +341,30 @@ class TestSchema(CiTestCase):
340 {'snap': {'assertions': {'01': 'also valid'}}}, schema)341 {'snap': {'assertions': {'01': 'also valid'}}}, schema)
341 self.assertEqual('', self.logs.getvalue())342 self.assertEqual('', self.logs.getvalue())
342343
344 def test_duplicates_are_fine_array_array(self):
345 """Duplicated commands array/array entries are allowed."""
346 self.assertSchemaValid(
347 {'commands': [["echo", "bye"], ["echo" "bye"]]},
348 "command entries can be duplicate.")
349
350 def test_duplicates_are_fine_array_string(self):
351 """Duplicated commands array/string entries are allowed."""
352 self.assertSchemaValid(
353 {'commands': ["echo bye", "echo bye"]},
354 "command entries can be duplicate.")
355
356 def test_duplicates_are_fine_dict_array(self):
357 """Duplicated commands dict/array entries are allowed."""
358 self.assertSchemaValid(
359 {'commands': {'00': ["echo", "bye"], '01': ["echo", "bye"]}},
360 "command entries can be duplicate.")
361
362 def test_duplicates_are_fine_dict_string(self):
363 """Duplicated commands dict/string entries are allowed."""
364 self.assertSchemaValid(
365 {'commands': {'00': "echo bye", '01': "echo bye"}},
366 "command entries can be duplicate.")
367
343368
344class TestHandle(CiTestCase):369class TestHandle(CiTestCase):
345370
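
These duplicate-command tests pass bare {'commands': ...} fragments rather than full {'snap': {...}} configs; that works because SchemaTestCaseMixin (added to cloudinit/tests/helpers.py elsewhere in this diff) nests the fragment under the schema's single top-level property before validating. A rough sketch of the helper, hedged since the authoritative version is in this branch's helpers.py:

    # Approximate shape of the new mixin; see cloudinit/tests/helpers.py in
    # this branch for the real implementation.
    from cloudinit.config.schema import (
        SchemaValidationError, validate_cloudconfig_schema)

    class SchemaTestCaseMixin(object):

        def assertSchemaValid(self, cfg, msg="Valid cloud-config failed"):
            """Fail the test if cfg does not validate against self.schema.

            If the schema has exactly one top-level property (e.g. 'snap'),
            nest cfg under it so tests can pass bare fragments."""
            props = list(self.schema.get('properties'))
            if len(props) == 1:
                cfg = {props[0]: cfg}
            try:
                validate_cloudconfig_schema(cfg, self.schema, strict=True)
            except SchemaValidationError:
                self.fail(msg)
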
diff --git a/cloudinit/config/tests/test_ubuntu_advantage.py b/cloudinit/config/tests/test_ubuntu_advantage.py
index f2a59fa..f1beeff 100644
--- a/cloudinit/config/tests/test_ubuntu_advantage.py
+++ b/cloudinit/config/tests/test_ubuntu_advantage.py
@@ -7,7 +7,8 @@ from cloudinit.config.cc_ubuntu_advantage import (
     handle, maybe_install_ua_tools, run_commands, schema)
 from cloudinit.config.schema import validate_cloudconfig_schema
 from cloudinit import util
-from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJsonSchema
+from cloudinit.tests.helpers import (
+    CiTestCase, mock, SchemaTestCaseMixin, skipUnlessJsonSchema)
 
 
 # Module path used in mocks
@@ -105,9 +106,10 @@ class TestRunCommands(CiTestCase):
 
 
 @skipUnlessJsonSchema()
-class TestSchema(CiTestCase):
+class TestSchema(CiTestCase, SchemaTestCaseMixin):
 
     with_logs = True
+    schema = schema
 
    def test_schema_warns_on_ubuntu_advantage_not_as_dict(self):
         """If ubuntu-advantage configuration is not a dict, emit a warning."""
@@ -169,6 +171,30 @@ class TestSchema(CiTestCase):
             {'ubuntu-advantage': {'commands': {'01': 'also valid'}}}, schema)
         self.assertEqual('', self.logs.getvalue())
 
+    def test_duplicates_are_fine_array_array(self):
+        """Duplicated commands array/array entries are allowed."""
+        self.assertSchemaValid(
+            {'commands': [["echo", "bye"], ["echo", "bye"]]},
+            "command entries can be duplicate.")
+
+    def test_duplicates_are_fine_array_string(self):
+        """Duplicated commands array/string entries are allowed."""
+        self.assertSchemaValid(
+            {'commands': ["echo bye", "echo bye"]},
+            "command entries can be duplicate.")
+
+    def test_duplicates_are_fine_dict_array(self):
+        """Duplicated commands dict/array entries are allowed."""
+        self.assertSchemaValid(
+            {'commands': {'00': ["echo", "bye"], '01': ["echo", "bye"]}},
+            "command entries can be duplicate.")
+
+    def test_duplicates_are_fine_dict_string(self):
+        """Duplicated commands dict/string entries are allowed."""
+        self.assertSchemaValid(
+            {'commands': {'00': "echo bye", '01': "echo bye"}},
+            "command entries can be duplicate.")
+
 
 class TestHandle(CiTestCase):
 
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 55260ea..ab0b077 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -49,6 +49,9 @@ LOG = logging.getLogger(__name__)
 # It could break when Amazon adds new regions and new AZs.
 _EC2_AZ_RE = re.compile('^[a-z][a-z]-(?:[a-z]+-)+[0-9][a-z]$')
 
+# Default NTP Client Configurations
+PREFERRED_NTP_CLIENTS = ['chrony', 'systemd-timesyncd', 'ntp', 'ntpdate']
+
 
 @six.add_metaclass(abc.ABCMeta)
 class Distro(object):
@@ -60,6 +63,7 @@ class Distro(object):
     tz_zone_dir = "/usr/share/zoneinfo"
     init_cmd = ['service']  # systemctl, service etc
     renderer_configs = {}
+    _preferred_ntp_clients = None
 
     def __init__(self, name, cfg, paths):
         self._paths = paths
@@ -339,6 +343,14 @@ class Distro(object):
             contents.write("%s\n" % (eh))
         util.write_file(self.hosts_fn, contents.getvalue(), mode=0o644)
 
+    @property
+    def preferred_ntp_clients(self):
+        """Allow distro to determine the preferred ntp client list"""
+        if not self._preferred_ntp_clients:
+            self._preferred_ntp_clients = list(PREFERRED_NTP_CLIENTS)
+
+        return self._preferred_ntp_clients
+
     def _bring_up_interface(self, device_name):
         cmd = ['ifup', device_name]
         LOG.debug("Attempting to run bring up interface %s using command %s",
@@ -519,7 +531,7 @@ class Distro(object):
             self.lock_passwd(name)
 
         # Configure sudo access
-        if 'sudo' in kwargs:
+        if 'sudo' in kwargs and kwargs['sudo'] is not False:
             self.write_sudo_rules(name, kwargs['sudo'])
 
         # Import SSH keys
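
The new guard on kwargs['sudo'] means a user definition can carry an explicit sudo value of false without write_sudo_rules being invoked on the boolean. In cloud-config terms (illustrative user names):

    #cloud-config
    users:
      - name: alice
        sudo: ['ALL=(ALL) NOPASSWD:ALL']  # rules still written as before
      - name: bob
        sudo: false                       # key present but False: skipped
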
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index 754d3df..ff22d56 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -110,15 +110,15 @@ class Distro(distros.Distro):
         if dev.startswith('lo'):
             return dev
 
-        n = re.search('\d+$', dev)
+        n = re.search(r'\d+$', dev)
         index = n.group(0)
 
-        (out, err) = util.subp(['ifconfig', '-a'])
+        (out, _err) = util.subp(['ifconfig', '-a'])
         ifconfigoutput = [x for x in (out.strip()).splitlines()
                           if len(x.split()) > 0]
         bsddev = 'NOT_FOUND'
         for line in ifconfigoutput:
-            m = re.match('^\w+', line)
+            m = re.match(r'^\w+', line)
             if m:
                 if m.group(0).startswith('lo'):
                     continue
@@ -128,7 +128,7 @@ class Distro(distros.Distro):
                 break
 
         # Replace the index with the one we're after.
-        bsddev = re.sub('\d+$', index, bsddev)
+        bsddev = re.sub(r'\d+$', index, bsddev)
         LOG.debug("Using network interface %s", bsddev)
         return bsddev
 
@@ -266,7 +266,7 @@ class Distro(distros.Distro):
             self.lock_passwd(name)
 
         # Configure sudo access
-        if 'sudo' in kwargs:
+        if 'sudo' in kwargs and kwargs['sudo'] is not False:
             self.write_sudo_rules(name, kwargs['sudo'])
 
         # Import SSH keys
diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py
index 162dfa0..9f90e95 100644
--- a/cloudinit/distros/opensuse.py
+++ b/cloudinit/distros/opensuse.py
@@ -208,4 +208,28 @@ class Distro(distros.Distro):
                                   nameservers, searchservers)
         return dev_names
 
+    @property
+    def preferred_ntp_clients(self):
+        """The preferred ntp client is dependent on the version."""
+
+        # Allow distro to determine the preferred ntp client list
+        if not self._preferred_ntp_clients:
+            distro_info = util.system_info()['dist']
+            name = distro_info[0]
+            major_ver = int(distro_info[1].split('.')[0])
+
+            # This is horribly complicated because of a case of
+            # "we do not care if versions should be increasing syndrome"
+            if (
+                    (major_ver >= 15 and 'openSUSE' not in name) or
+                    (major_ver >= 15 and 'openSUSE' in name and
+                     major_ver != 42)
+            ):
+                self._preferred_ntp_clients = ['chrony',
+                                               'systemd-timesyncd', 'ntp']
+            else:
+                self._preferred_ntp_clients = ['ntp',
+                                               'systemd-timesyncd', 'chrony']
+
+        return self._preferred_ntp_clients
+
 # vi: ts=4 expandtab
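
To unpack that condition: every distro at major version 15 or later prefers chrony, except openSUSE Leap's 42.x series, which numerically exceeds 15 but predates Leap 15 and so stays on the old ntp-first list. An illustrative restatement with a few checks:

    # Illustrative only: re-stating the version gate from the property above.
    def prefers_chrony(name, major_ver):
        return ((major_ver >= 15 and 'openSUSE' not in name) or
                (major_ver >= 15 and 'openSUSE' in name and major_ver != 42))

    assert prefers_chrony('SLES', 15)          # SLE 15: chrony first
    assert prefers_chrony('openSUSE', 15)      # Leap 15: chrony first
    assert not prefers_chrony('openSUSE', 42)  # Leap 42.3: ntp first
    assert not prefers_chrony('SLES', 12)      # SLE 12: ntp first
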
diff --git a/cloudinit/distros/ubuntu.py b/cloudinit/distros/ubuntu.py
index 82ca34f..6815410 100644
--- a/cloudinit/distros/ubuntu.py
+++ b/cloudinit/distros/ubuntu.py
@@ -10,12 +10,31 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 
 from cloudinit.distros import debian
+from cloudinit.distros import PREFERRED_NTP_CLIENTS
 from cloudinit import log as logging
+from cloudinit import util
+
+import copy
 
 LOG = logging.getLogger(__name__)
 
 
 class Distro(debian.Distro):
+
+    @property
+    def preferred_ntp_clients(self):
+        """The preferred ntp client is dependent on the version."""
+        if not self._preferred_ntp_clients:
+            (_name, _version, codename) = util.system_info()['dist']
+            # Xenial cloud-init only installed ntp, UbuntuCore has timesyncd.
+            if codename == "xenial" and not util.system_is_snappy():
+                self._preferred_ntp_clients = ['ntp']
+            else:
+                self._preferred_ntp_clients = (
+                    copy.deepcopy(PREFERRED_NTP_CLIENTS))
+        return self._preferred_ntp_clients
+
     pass
 
+
 # vi: ts=4 expandtab
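
The distro property only supplies an ordered preference; the consumer (cc_ntp.py, reworked elsewhere in this diff) walks the list and picks the first client actually installed. Roughly, and hypothetically, since the authoritative selection logic is in cc_ntp:

    # Hypothetical selection loop in the spirit of the reworked cc_ntp module.
    from cloudinit import util

    def pick_ntp_client(distro):
        for client in distro.preferred_ntp_clients:
            if util.which(client):  # first installed client wins
                return client
        return None  # fall back to cc_ntp's default handling
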
diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
index dc3f0fc..3b7b17f 100644
--- a/cloudinit/ec2_utils.py
+++ b/cloudinit/ec2_utils.py
@@ -150,11 +150,9 @@ def get_instance_userdata(api_version='latest',
         # NOT_FOUND occurs) and just in that case returning an empty string.
         exception_cb = functools.partial(_skip_retry_on_codes,
                                          SKIP_USERDATA_CODES)
-        response = util.read_file_or_url(ud_url,
-                                         ssl_details=ssl_details,
-                                         timeout=timeout,
-                                         retries=retries,
-                                         exception_cb=exception_cb)
+        response = url_helper.read_file_or_url(
+            ud_url, ssl_details=ssl_details, timeout=timeout,
+            retries=retries, exception_cb=exception_cb)
         user_data = response.contents
     except url_helper.UrlError as e:
         if e.code not in SKIP_USERDATA_CODES:
@@ -169,9 +167,9 @@ def _get_instance_metadata(tree, api_version='latest',
                            ssl_details=None, timeout=5, retries=5,
                            leaf_decoder=None):
     md_url = url_helper.combine_url(metadata_address, api_version, tree)
-    caller = functools.partial(util.read_file_or_url,
-                               ssl_details=ssl_details, timeout=timeout,
-                               retries=retries)
+    caller = functools.partial(
+        url_helper.read_file_or_url, ssl_details=ssl_details,
+        timeout=timeout, retries=retries)
 
     def mcaller(url):
         return caller(url).contents
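
The refactor only swaps util.read_file_or_url for url_helper.read_file_or_url; the functools.partial pattern is unchanged: bake the ssl/timeout/retry keywords into a caller once, then hit many metadata URLs with it. A minimal illustration (the URL is the standard EC2 metadata endpoint):

    # Minimal illustration of the partial-application pattern kept by this
    # refactor.
    import functools
    from cloudinit import url_helper

    caller = functools.partial(
        url_helper.read_file_or_url, ssl_details=None, timeout=5, retries=5)

    def mcaller(url):
        # every call reuses the baked-in keyword arguments; only the URL varies
        return caller(url).contents

    # mcaller('http://169.254.169.254/latest/meta-data/instance-id')
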
diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py
index 1ca92d4..dc33876 100644
--- a/cloudinit/handlers/upstart_job.py
+++ b/cloudinit/handlers/upstart_job.py
@@ -97,7 +97,7 @@ def _has_suitable_upstart():
         else:
             util.logexc(LOG, "dpkg --compare-versions failed [%s]",
                         e.exit_code)
-    except Exception as e:
+    except Exception:
         util.logexc(LOG, "dpkg --compare-versions failed")
         return False
     else:
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index f69c0ef..3ffde52 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -107,6 +107,21 @@ def is_bond(devname):
     return os.path.exists(sys_dev_path(devname, "bonding"))
 
 
+def is_renamed(devname):
+    """
+    /* interface name assignment types (sysfs name_assign_type attribute) */
+    #define NET_NAME_UNKNOWN      0  /* unknown origin (not exposed to user) */
+    #define NET_NAME_ENUM         1  /* enumerated by kernel */
+    #define NET_NAME_PREDICTABLE  2  /* predictably named by the kernel */
+    #define NET_NAME_USER         3  /* provided by user-space */
+    #define NET_NAME_RENAMED      4  /* renamed by user-space */
+    """
+    name_assign_type = read_sys_net_safe(devname, 'name_assign_type')
+    if name_assign_type and name_assign_type in ['3', '4']:
+        return True
+    return False
+
+
 def is_vlan(devname):
     uevent = str(read_sys_net_safe(devname, "uevent"))
     return 'DEVTYPE=vlan' in uevent.splitlines()
@@ -180,6 +195,17 @@ def find_fallback_nic(blacklist_drivers=None):
     if not blacklist_drivers:
         blacklist_drivers = []
 
+    if 'net.ifnames=0' in util.get_cmdline():
+        LOG.debug('Stable ifnames disabled by net.ifnames=0 in /proc/cmdline')
+    else:
+        unstable = [device for device in get_devicelist()
+                    if device != 'lo' and not is_renamed(device)]
+        if len(unstable):
+            LOG.debug('Found unstable nic names: %s; calling udevadm settle',
+                      unstable)
+            msg = 'Waiting for udev events to settle'
+            util.log_time(LOG.debug, msg, func=util.udevadm_settle)
+
     # get list of interfaces that could have connections
     invalid_interfaces = set(['lo'])
     potential_interfaces = set([device for device in get_devicelist()
@@ -295,7 +321,7 @@ def apply_network_config_names(netcfg, strict_present=True, strict_busy=True):
 
     def _version_2(netcfg):
         renames = []
-        for key, ent in netcfg.get('ethernets', {}).items():
+        for ent in netcfg.get('ethernets', {}).values():
             # only rename if configured to do so
             name = ent.get('set-name')
             if not name:
@@ -333,8 +359,12 @@ def interface_has_own_mac(ifname, strict=False):
     1: randomly generated 3: set using dev_set_mac_address"""
 
     assign_type = read_sys_net_int(ifname, "addr_assign_type")
-    if strict and assign_type is None:
-        raise ValueError("%s had no addr_assign_type.")
+    if assign_type is None:
+        # None is returned if this nic had no 'addr_assign_type' entry.
+        # if strict, raise an error, if not return True.
+        if strict:
+            raise ValueError("%s had no addr_assign_type.")
+        return True
     return assign_type in (0, 1, 3)
 
 
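
Read together, is_renamed() and the settle block mean fallback-NIC detection now waits for udev whenever any non-lo device still carries a kernel-assigned name (name_assign_type 0, 1 or 2). Restated compactly:

    # Condensed restatement of the new find_fallback_nic() preamble above.
    from cloudinit import util
    from cloudinit.net import get_devicelist, is_renamed

    def settle_if_unstable():
        if 'net.ifnames=0' in util.get_cmdline():
            return  # stable ifnames disabled on the kernel cmdline
        unstable = [dev for dev in get_devicelist()
                    if dev != 'lo' and not is_renamed(dev)]
        if unstable:
            # block until pending udev rename events have been processed
            util.udevadm_settle()
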
diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py
index 9e9fe0f..f89a0f7 100755
--- a/cloudinit/net/cmdline.py
+++ b/cloudinit/net/cmdline.py
@@ -65,7 +65,7 @@ def _klibc_to_config_entry(content, mac_addrs=None):
         iface['mac_address'] = mac_addrs[name]
 
     # Handle both IPv4 and IPv6 values
-    for v, pre in (('ipv4', 'IPV4'), ('ipv6', 'IPV6')):
+    for pre in ('IPV4', 'IPV6'):
         # if no IPV4ADDR or IPV6ADDR, then go on.
         if pre + "ADDR" not in data:
             continue
diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py
index 087c0c0..12cf509 100644
--- a/cloudinit/net/dhcp.py
+++ b/cloudinit/net/dhcp.py
@@ -216,7 +216,7 @@ def networkd_get_option_from_leases(keyname, leases_d=None):
     if leases_d is None:
         leases_d = NETWORKD_LEASES_DIR
     leases = networkd_load_leases(leases_d=leases_d)
-    for ifindex, data in sorted(leases.items()):
+    for _ifindex, data in sorted(leases.items()):
         if data.get(keyname):
             return data[keyname]
     return None
diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
index c6a71d1..bd20a36 100644
--- a/cloudinit/net/eni.py
+++ b/cloudinit/net/eni.py
@@ -10,9 +10,12 @@ from . import ParserError
 from . import renderer
 from .network_state import subnet_is_ipv6
 
+from cloudinit import log as logging
 from cloudinit import util
 
 
+LOG = logging.getLogger(__name__)
+
 NET_CONFIG_COMMANDS = [
     "pre-up", "up", "post-up", "down", "pre-down", "post-down",
 ]
@@ -61,7 +64,7 @@ def _iface_add_subnet(iface, subnet):
 
 
 # TODO: switch to valid_map for attrs
-def _iface_add_attrs(iface, index):
+def _iface_add_attrs(iface, index, ipv4_subnet_mtu):
     # If the index is non-zero, this is an alias interface. Alias interfaces
     # represent additional interface addresses, and should not have additional
     # attributes. (extra attributes here are almost always either incorrect,
@@ -100,6 +103,13 @@ def _iface_add_attrs(iface, index):
             value = 'on' if iface[key] else 'off'
         if not value or key in ignore_map:
             continue
+        if key == 'mtu' and ipv4_subnet_mtu:
+            if value != ipv4_subnet_mtu:
+                LOG.warning(
+                    "Network config: ignoring %s device-level mtu:%s because"
+                    " ipv4 subnet-level mtu:%s provided.",
+                    iface['name'], value, ipv4_subnet_mtu)
+            continue
         if key in multiline_keys:
             for v in value:
                 content.append("    {0} {1}".format(renames.get(key, key), v))
@@ -377,12 +387,15 @@ class Renderer(renderer.Renderer):
             subnets = iface.get('subnets', {})
             if subnets:
                 for index, subnet in enumerate(subnets):
+                    ipv4_subnet_mtu = None
                     iface['index'] = index
                     iface['mode'] = subnet['type']
                     iface['control'] = subnet.get('control', 'auto')
                     subnet_inet = 'inet'
                     if subnet_is_ipv6(subnet):
                         subnet_inet += '6'
+                    else:
+                        ipv4_subnet_mtu = subnet.get('mtu')
                     iface['inet'] = subnet_inet
                     if subnet['type'].startswith('dhcp'):
                         iface['mode'] = 'dhcp'
@@ -397,7 +410,7 @@ class Renderer(renderer.Renderer):
                         _iface_start_entry(
                             iface, index, render_hwaddress=render_hwaddress) +
                         _iface_add_subnet(iface, subnet) +
-                        _iface_add_attrs(iface, index)
+                        _iface_add_attrs(iface, index, ipv4_subnet_mtu)
                     )
                     for route in subnet.get('routes', []):
                         lines.extend(self._render_route(route, indent="    "))
@@ -409,7 +422,8 @@ class Renderer(renderer.Renderer):
                 if 'bond-master' in iface or 'bond-slaves' in iface:
                     lines.append("auto {name}".format(**iface))
                 lines.append("iface {name} {inet} {mode}".format(**iface))
-                lines.extend(_iface_add_attrs(iface, index=0))
+                lines.extend(
+                    _iface_add_attrs(iface, index=0, ipv4_subnet_mtu=None))
                 sections.append(lines)
         return sections
 
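
The practical effect of threading ipv4_subnet_mtu through _iface_add_attrs: when a v1 network config sets mtu at both the device and the ipv4 subnet level, eni rendering keeps the subnet value and drops the device value with the warning above. An illustrative v1 fragment:

    version: 1
    config:
      - type: physical
        name: eth0
        mtu: 1500        # device level: ignored (with a warning) on conflict
        subnets:
          - type: static
            address: 192.168.14.2/24
            mtu: 9000    # ipv4 subnet level: this value is rendered
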
diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py
index 6344348..4014363 100644
--- a/cloudinit/net/netplan.py
+++ b/cloudinit/net/netplan.py
@@ -34,7 +34,7 @@ def _get_params_dict_by_match(config, match):
                 if key.startswith(match))
 
 
-def _extract_addresses(config, entry):
+def _extract_addresses(config, entry, ifname):
     """This method parse a cloudinit.net.network_state dictionary (config) and
     maps netstate keys/values into a dictionary (entry) to represent
     netplan yaml.
@@ -124,6 +124,15 @@ def _extract_addresses(config, entry):
 
         addresses.append(addr)
 
+    if 'mtu' in config:
+        entry_mtu = entry.get('mtu')
+        if entry_mtu and config['mtu'] != entry_mtu:
+            LOG.warning(
+                "Network config: ignoring %s device-level mtu:%s because"
+                " ipv4 subnet-level mtu:%s provided.",
+                ifname, config['mtu'], entry_mtu)
+        else:
+            entry['mtu'] = config['mtu']
     if len(addresses) > 0:
         entry.update({'addresses': addresses})
     if len(routes) > 0:
@@ -262,10 +271,7 @@ class Renderer(renderer.Renderer):
             else:
                 del eth['match']
                 del eth['set-name']
-            if 'mtu' in ifcfg:
-                eth['mtu'] = ifcfg.get('mtu')
-
-            _extract_addresses(ifcfg, eth)
+            _extract_addresses(ifcfg, eth, ifname)
             ethernets.update({ifname: eth})
 
         elif if_type == 'bond':
@@ -288,7 +294,7 @@ class Renderer(renderer.Renderer):
             slave_interfaces = ifcfg.get('bond-slaves')
             if slave_interfaces == 'none':
                 _extract_bond_slaves_by_name(interfaces, bond, ifname)
-            _extract_addresses(ifcfg, bond)
+            _extract_addresses(ifcfg, bond, ifname)
             bonds.update({ifname: bond})
 
         elif if_type == 'bridge':
@@ -321,7 +327,7 @@ class Renderer(renderer.Renderer):
 
             if len(br_config) > 0:
                 bridge.update({'parameters': br_config})
-            _extract_addresses(ifcfg, bridge)
+            _extract_addresses(ifcfg, bridge, ifname)
             bridges.update({ifname: bridge})
 
         elif if_type == 'vlan':
@@ -333,7 +339,7 @@ class Renderer(renderer.Renderer):
             macaddr = ifcfg.get('mac_address', None)
             if macaddr is not None:
                 vlan['macaddress'] = macaddr.lower()
-            _extract_addresses(ifcfg, vlan)
+            _extract_addresses(ifcfg, vlan, ifname)
             vlans.update({ifname: vlan})
 
         # inject global nameserver values under each all interface which
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index 6d63e5c..72c803e 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -7,6 +7,8 @@
 import copy
 import functools
 import logging
+import socket
+import struct
 
 import six
 
@@ -886,12 +888,9 @@ def net_prefix_to_ipv4_mask(prefix):
     This is the inverse of ipv4_mask_to_net_prefix.
     24 -> "255.255.255.0"
     Also supports input as a string."""
-
-    mask = [0, 0, 0, 0]
-    for i in list(range(0, int(prefix))):
-        idx = int(i / 8)
-        mask[idx] = mask[idx] + (1 << (7 - i % 8))
-    return ".".join([str(x) for x in mask])
+    mask = socket.inet_ntoa(
+        struct.pack(">I", (0xffffffff << (32 - int(prefix)) & 0xffffffff)))
+    return mask
 
 
 def ipv4_mask_to_net_prefix(mask):
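
The replacement computes the mask in one step: shift 0xffffffff left by (32 - prefix), truncate back to 32 bits, pack big-endian, and let inet_ntoa render the dotted quad. Worked through for a few prefixes:

    # Worked check of the arithmetic used by the replacement code above.
    import socket
    import struct

    def net_prefix_to_ipv4_mask(prefix):
        return socket.inet_ntoa(
            struct.pack(">I", (0xffffffff << (32 - int(prefix)) & 0xffffffff)))

    assert net_prefix_to_ipv4_mask(24) == "255.255.255.0"     # 0xffffff00
    assert net_prefix_to_ipv4_mask(20) == "255.255.240.0"     # 0xfffff000
    assert net_prefix_to_ipv4_mask("32") == "255.255.255.255" # 0xffffffff
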
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index 39d89c4..3d71923 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -287,7 +287,6 @@ class Renderer(renderer.Renderer):
                 if subnet_type == 'dhcp6':
                     iface_cfg['IPV6INIT'] = True
                     iface_cfg['DHCPV6C'] = True
-                    iface_cfg['BOOTPROTO'] = 'dhcp'
                 elif subnet_type in ['dhcp4', 'dhcp']:
                     iface_cfg['BOOTPROTO'] = 'dhcp'
                 elif subnet_type == 'static':
@@ -305,6 +304,13 @@ class Renderer(renderer.Renderer):
                         mtu_key = 'IPV6_MTU'
                         iface_cfg['IPV6INIT'] = True
                     if 'mtu' in subnet:
+                        mtu_mismatch = bool(mtu_key in iface_cfg and
+                                            subnet['mtu'] != iface_cfg[mtu_key])
+                        if mtu_mismatch:
+                            LOG.warning(
+                                'Network config: ignoring %s device-level mtu:%s'
+                                ' because ipv4 subnet-level mtu:%s provided.',
+                                iface_cfg.name, iface_cfg[mtu_key], subnet['mtu'])
                         iface_cfg[mtu_key] = subnet['mtu']
                 elif subnet_type == 'manual':
                     # If the subnet has an MTU setting, then ONBOOT=True
@@ -364,7 +370,7 @@ class Renderer(renderer.Renderer):
 
     @classmethod
     def _render_subnet_routes(cls, iface_cfg, route_cfg, subnets):
-        for i, subnet in enumerate(subnets, start=len(iface_cfg.children)):
+        for _, subnet in enumerate(subnets, start=len(iface_cfg.children)):
             for route in subnet.get('routes', []):
                 is_ipv6 = subnet.get('ipv6') or is_ipv6_addr(route['gateway'])
 
diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py
index 276556e..5c017d1 100644
--- a/cloudinit/net/tests/test_init.py
+++ b/cloudinit/net/tests/test_init.py
@@ -199,6 +199,7 @@ class TestGenerateFallbackConfig(CiTestCase):
         self.sysdir = self.tmp_dir() + '/'
         self.m_sys_path.return_value = self.sysdir
         self.addCleanup(sys_mock.stop)
+        self.add_patch('cloudinit.net.util.udevadm_settle', 'm_settle')
 
     def test_generate_fallback_finds_connected_eth_with_mac(self):
         """generate_fallback_config finds any connected device with a mac."""
diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
index 993b26c..9ff929c 100644
--- a/cloudinit/netinfo.py
+++ b/cloudinit/netinfo.py
@@ -8,9 +8,11 @@
 #
 # This file is part of cloud-init. See LICENSE file for license information.
 
+from copy import copy, deepcopy
 import re
 
 from cloudinit import log as logging
+from cloudinit.net.network_state import net_prefix_to_ipv4_mask
 from cloudinit import util
 
 from cloudinit.simpletable import SimpleTable
@@ -18,18 +20,90 @@ from cloudinit.simpletable import SimpleTable
 LOG = logging.getLogger()
 
 
-def netdev_info(empty=""):
-    fields = ("hwaddr", "addr", "bcast", "mask")
-    (ifcfg_out, _err) = util.subp(["ifconfig", "-a"], rcs=[0, 1])
+DEFAULT_NETDEV_INFO = {
+    "ipv4": [],
+    "ipv6": [],
+    "hwaddr": "",
+    "up": False
+}
+
+
+def _netdev_info_iproute(ipaddr_out):
+    """
+    Get network device dicts from ip route and ip link info.
+
+    @param ipaddr_out: Output string from 'ip addr show' command.
+
+    @returns: A dict of device info keyed by network device name containing
+              device configuration values.
+    @raise: TypeError if ipaddr_out isn't a string.
+    """
+    devs = {}
+    dev_name = None
+    for num, line in enumerate(ipaddr_out.splitlines()):
+        m = re.match(r'^\d+:\s(?P<dev>[^:]+):\s+<(?P<flags>\S+)>\s+.*', line)
+        if m:
+            dev_name = m.group('dev').lower().split('@')[0]
+            flags = m.group('flags').split(',')
+            devs[dev_name] = {
+                'ipv4': [], 'ipv6': [], 'hwaddr': '',
+                'up': bool('UP' in flags and 'LOWER_UP' in flags),
+            }
+        elif 'inet6' in line:
+            m = re.match(
+                r'\s+inet6\s(?P<ip>\S+)\sscope\s(?P<scope6>\S+).*', line)
+            if not m:
+                LOG.warning(
+                    'Could not parse ip addr show: (line:%d) %s', num, line)
+                continue
+            devs[dev_name]['ipv6'].append(m.groupdict())
+        elif 'inet' in line:
+            m = re.match(
+                r'\s+inet\s(?P<cidr4>\S+)(\sbrd\s(?P<bcast>\S+))?\sscope\s'
+                r'(?P<scope>\S+).*', line)
+            if not m:
+                LOG.warning(
+                    'Could not parse ip addr show: (line:%d) %s', num, line)
+                continue
+            match = m.groupdict()
+            cidr4 = match.pop('cidr4')
+            addr, _, prefix = cidr4.partition('/')
+            if not prefix:
+                prefix = '32'
+            devs[dev_name]['ipv4'].append({
+                'ip': addr,
+                'bcast': match['bcast'] if match['bcast'] else '',
+                'mask': net_prefix_to_ipv4_mask(prefix),
+                'scope': match['scope']})
+        elif 'link' in line:
+            m = re.match(
+                r'\s+link/(?P<link_type>\S+)\s(?P<hwaddr>\S+).*', line)
+            if not m:
+                LOG.warning(
+                    'Could not parse ip addr show: (line:%d) %s', num, line)
+                continue
+            if m.group('link_type') == 'ether':
+                devs[dev_name]['hwaddr'] = m.group('hwaddr')
+            else:
+                devs[dev_name]['hwaddr'] = ''
+        else:
+            continue
+    return devs
+
+
+def _netdev_info_ifconfig(ifconfig_data):
+    # fields that need to be returned in devs for each dev
     devs = {}
-    for line in str(ifcfg_out).splitlines():
+    for line in ifconfig_data.splitlines():
         if len(line) == 0:
             continue
         if line[0] not in ("\t", " "):
             curdev = line.split()[0]
-            devs[curdev] = {"up": False}
-            for field in fields:
-                devs[curdev][field] = ""
+            # current ifconfig pops a ':' on the end of the device
+            if curdev.endswith(':'):
+                curdev = curdev[:-1]
+            if curdev not in devs:
+                devs[curdev] = deepcopy(DEFAULT_NETDEV_INFO)
         toks = line.lower().strip().split()
         if toks[0] == "up":
             devs[curdev]['up'] = True
@@ -39,59 +113,164 @@ def netdev_info(empty=""):
         if re.search(r"flags=\d+<up,", toks[1]):
             devs[curdev]['up'] = True
 
-        fieldpost = ""
-        if toks[0] == "inet6":
-            fieldpost = "6"
-
         for i in range(len(toks)):
-            # older net-tools (ubuntu) show 'inet addr:xx.yy',
-            # newer (freebsd and fedora) show 'inet xx.yy'
-            # just skip this 'inet' entry. (LP: #1285185)
-            try:
-                if ((toks[i] in ("inet", "inet6") and
-                     toks[i + 1].startswith("addr:"))):
-                    continue
-            except IndexError:
-                pass
-
-            # Couple the different items we're interested in with the correct
-            # field since FreeBSD/CentOS/Fedora differ in the output.
-            ifconfigfields = {
-                "addr:": "addr", "inet": "addr",
-                "bcast:": "bcast", "broadcast": "bcast",
-                "mask:": "mask", "netmask": "mask",
-                "hwaddr": "hwaddr", "ether": "hwaddr",
-                "scope": "scope",
-            }
-            for origfield, field in ifconfigfields.items():
-                target = "%s%s" % (field, fieldpost)
-                if devs[curdev].get(target, ""):
-                    continue
-                if toks[i] == "%s" % origfield:
-                    try:
-                        devs[curdev][target] = toks[i + 1]
-                    except IndexError:
-                        pass
-                elif toks[i].startswith("%s" % origfield):
-                    devs[curdev][target] = toks[i][len(field) + 1:]
-
-    if empty != "":
-        for (_devname, dev) in devs.items():
-            for field in dev:
-                if dev[field] == "":
-                    dev[field] = empty
+            if toks[i] == "inet":  # Create new ipv4 addr entry
+                devs[curdev]['ipv4'].append(
+                    {'ip': toks[i + 1].lstrip("addr:")})
+            elif toks[i].startswith("bcast:"):
+                devs[curdev]['ipv4'][-1]['bcast'] = toks[i].lstrip("bcast:")
+            elif toks[i] == "broadcast":
+                devs[curdev]['ipv4'][-1]['bcast'] = toks[i + 1]
+            elif toks[i].startswith("mask:"):
+                devs[curdev]['ipv4'][-1]['mask'] = toks[i].lstrip("mask:")
+            elif toks[i] == "netmask":
+                devs[curdev]['ipv4'][-1]['mask'] = toks[i + 1]
+            elif toks[i] == "hwaddr" or toks[i] == "ether":
+                devs[curdev]['hwaddr'] = toks[i + 1]
+            elif toks[i] == "inet6":
+                if toks[i + 1] == "addr:":
+                    devs[curdev]['ipv6'].append({'ip': toks[i + 2]})
+                else:
+                    devs[curdev]['ipv6'].append({'ip': toks[i + 1]})
+            elif toks[i] == "prefixlen":  # Add prefix to current ipv6 value
+                addr6 = devs[curdev]['ipv6'][-1]['ip'] + "/" + toks[i + 1]
+                devs[curdev]['ipv6'][-1]['ip'] = addr6
+            elif toks[i].startswith("scope:"):
+                devs[curdev]['ipv6'][-1]['scope6'] = toks[i].lstrip("scope:")
+            elif toks[i] == "scopeid":
+                res = re.match(r'.*<(\S+)>', toks[i + 1])
+                if res:
+                    devs[curdev]['ipv6'][-1]['scope6'] = res.group(1)
+    return devs
+
+
+def netdev_info(empty=""):
+    devs = {}
+    if util.which('ip'):
+        # Try iproute first of all
+        (ipaddr_out, _err) = util.subp(["ip", "addr", "show"])
+        devs = _netdev_info_iproute(ipaddr_out)
+    elif util.which('ifconfig'):
+        # Fall back to net-tools if iproute2 is not present
+        (ifcfg_out, _err) = util.subp(["ifconfig", "-a"], rcs=[0, 1])
+        devs = _netdev_info_ifconfig(ifcfg_out)
+    else:
+        LOG.warning(
+            "Could not print networks: missing 'ip' and 'ifconfig' commands")
 
+    if empty == "":
+        return devs
+
+    recurse_types = (dict, tuple, list)
+
+    def fill(data, new_val="", empty_vals=("", b"")):
+        """Recursively replace 'empty_vals' in data (dict, tuple, list)
+        with new_val"""
+        if isinstance(data, dict):
+            myiter = data.items()
+        elif isinstance(data, (tuple, list)):
+            myiter = enumerate(data)
+        else:
+            raise TypeError("Unexpected input to fill")
+
+        for key, val in myiter:
+            if val in empty_vals:
+                data[key] = new_val
+            elif isinstance(val, recurse_types):
+                fill(val, new_val)
+
+    fill(devs, new_val=empty)
     return devs
 
 
-def route_info():
-    (route_out, _err) = util.subp(["netstat", "-rn"], rcs=[0, 1])
+def _netdev_route_info_iproute(iproute_data):
+    """
+    Get network route dicts from ip route info.
+
+    @param iproute_data: Output string from ip route command.
+
+    @returns: A dict containing ipv4 and ipv6 route entries as lists. Each
+        item in the list is a route dictionary representing destination,
+        gateway, flags, genmask and interface information.
+    """
+
+    routes = {}
+    routes['ipv4'] = []
+    routes['ipv6'] = []
+    entries = iproute_data.splitlines()
+    default_route_entry = {
+        'destination': '', 'flags': '', 'gateway': '', 'genmask': '',
+        'iface': '', 'metric': ''}
+    for line in entries:
+        entry = copy(default_route_entry)
+        if not line:
+            continue
+        toks = line.split()
+        flags = ['U']
+        if toks[0] == "default":
+            entry['destination'] = "0.0.0.0"
+            entry['genmask'] = "0.0.0.0"
+        else:
+            if '/' in toks[0]:
+                (addr, cidr) = toks[0].split("/")
+            else:
+                addr = toks[0]
+                cidr = '32'
+                flags.append("H")
+                entry['genmask'] = net_prefix_to_ipv4_mask(cidr)
+            entry['destination'] = addr
+            entry['genmask'] = net_prefix_to_ipv4_mask(cidr)
+            entry['gateway'] = "0.0.0.0"
+        for i in range(len(toks)):
+            if toks[i] == "via":
+                entry['gateway'] = toks[i + 1]
+                flags.insert(1, "G")
+            if toks[i] == "dev":
+                entry["iface"] = toks[i + 1]
+            if toks[i] == "metric":
+                entry['metric'] = toks[i + 1]
+        entry['flags'] = ''.join(flags)
+        routes['ipv4'].append(entry)
+    try:
+        (iproute_data6, _err6) = util.subp(
+            ["ip", "--oneline", "-6", "route", "list", "table", "all"],
+            rcs=[0, 1])
+    except util.ProcessExecutionError:
+        pass
+    else:
+        entries6 = iproute_data6.splitlines()
+        for line in entries6:
+            entry = {}
+            if not line:
+                continue
+            toks = line.split()
+            if toks[0] == "default":
+                entry['destination'] = "::/0"
+                entry['flags'] = "UG"
+            else:
+                entry['destination'] = toks[0]
+                entry['gateway'] = "::"
+                entry['flags'] = "U"
+            for i in range(len(toks)):
+                if toks[i] == "via":
+                    entry['gateway'] = toks[i + 1]
+                    entry['flags'] = "UG"
+                if toks[i] == "dev":
+                    entry["iface"] = toks[i + 1]
+                if toks[i] == "metric":
+                    entry['metric'] = toks[i + 1]
+                if toks[i] == "expires":
+                    entry['flags'] = entry['flags'] + 'e'
+            routes['ipv6'].append(entry)
+    return routes
+
 
+def _netdev_route_info_netstat(route_data):
     routes = {}
     routes['ipv4'] = []
     routes['ipv6'] = []
 
-    entries = route_out.splitlines()[1:]
+    entries = route_data.splitlines()
     for line in entries:
         if not line:
             continue
@@ -101,8 +280,8 @@ def route_info():
         # default         10.65.0.1          UGS        0      34920 vtnet0
         #
         # Linux netstat shows 2 more:
-        # Destination   Gateway    Genmask   Flags  MSS Window irtt Iface
-        # 0.0.0.0       10.65.0.1  0.0.0.0   UG     0   0      0    eth0
+        # Destination   Gateway    Genmask   Flags Metric Ref  Use Iface
+        # 0.0.0.0       10.65.0.1  0.0.0.0   UG    0      0    0   eth0
         if (len(toks) < 6 or toks[0] == "Kernel" or
                 toks[0] == "Destination" or toks[0] == "Internet" or
                 toks[0] == "Internet6" or toks[0] == "Routing"):
@@ -125,31 +304,57 @@ def route_info():
         routes['ipv4'].append(entry)
 
     try:
-        (route_out6, _err6) = util.subp(["netstat", "-A", "inet6", "-n"],
-                                        rcs=[0, 1])
+        (route_data6, _err6) = util.subp(
+            ["netstat", "-A", "inet6", "--route", "--numeric"], rcs=[0, 1])
    except util.ProcessExecutionError:
         pass
     else:
-        entries6 = route_out6.splitlines()[1:]
+        entries6 = route_data6.splitlines()
         for line in entries6:
             if not line:
                 continue
             toks = line.split()
-            if (len(toks) < 6 or toks[0] == "Kernel" or
+            if (len(toks) < 7 or toks[0] == "Kernel" or
+                    toks[0] == "Destination" or toks[0] == "Internet" or
                     toks[0] == "Proto" or toks[0] == "Active"):
                 continue
             entry = {
-                'proto': toks[0],
-                'recv-q': toks[1],
-                'send-q': toks[2],
-                'local address': toks[3],
-                'foreign address': toks[4],
-                'state': toks[5],
+                'destination': toks[0],
+                'gateway': toks[1],
+                'flags': toks[2],
+                'metric': toks[3],
+                'ref': toks[4],
+                'use': toks[5],
+                'iface': toks[6],
             }
+            # skip lo interface on ipv6
+            if entry['iface'] == "lo":
+                continue
+            # strip /128 from address if it's included
+            if entry['destination'].endswith('/128'):
+                entry['destination'] = re.sub(
+                    r'\/128$', '', entry['destination'])
             routes['ipv6'].append(entry)
     return routes
 
 
+def route_info():
+    routes = {}
+    if util.which('ip'):
+        # Try iproute first of all
+        (iproute_out, _err) = util.subp(["ip", "-o", "route", "list"])
+        routes = _netdev_route_info_iproute(iproute_out)
+    elif util.which('netstat'):
+        # Fall back to net-tools if iproute2 is not present
+        (route_out, _err) = util.subp(
+            ["netstat", "--route", "--numeric", "--extend"], rcs=[0, 1])
+        routes = _netdev_route_info_netstat(route_out)
+    else:
+        LOG.warning(
+            "Could not print routes: missing 'ip' and 'netstat' commands")
+    return routes
+
+
 def getgateway():
     try:
         routes = route_info()
@@ -164,23 +369,36 @@ def getgateway():
 
 def netdev_pformat():
     lines = []
+    empty = "."
     try:
-        netdev = netdev_info(empty=".")
-    except Exception:
-        lines.append(util.center("Net device info failed", '!', 80))
+        netdev = netdev_info(empty=empty)
+    except Exception as e:
+        lines.append(
+            util.center(
+                "Net device info failed ({error})".format(error=str(e)),
+                '!', 80))
     else:
+        if not netdev:
+            return '\n'
         fields = ['Device', 'Up', 'Address', 'Mask', 'Scope', 'Hw-Address']
         tbl = SimpleTable(fields)
-        for (dev, d) in sorted(netdev.items()):
-            tbl.add_row([dev, d["up"], d["addr"], d["mask"], ".", d["hwaddr"]])
-            if d.get('addr6'):
-                tbl.add_row([dev, d["up"],
-                             d["addr6"], ".", d.get("scope6"), d["hwaddr"]])
+        for (dev, data) in sorted(netdev.items()):
+            for addr in data.get('ipv4'):
+                tbl.add_row(
+                    (dev, data["up"], addr["ip"], addr["mask"],
+                     addr.get('scope', empty), data["hwaddr"]))
+            for addr in data.get('ipv6'):
+                tbl.add_row(
+                    (dev, data["up"], addr["ip"], empty, addr["scope6"],
+                     data["hwaddr"]))
+            if len(data.get('ipv6')) + len(data.get('ipv4')) == 0:
+                tbl.add_row((dev, data["up"], empty, empty, empty,
+                             data["hwaddr"]))
         netdev_s = tbl.get_string()
         max_len = len(max(netdev_s.splitlines(), key=len))
         header = util.center("Net device info", "+", max_len)
         lines.extend([header, netdev_s])
-    return "\n".join(lines)
+    return "\n".join(lines) + "\n"
 
 
 def route_pformat():
@@ -188,7 +406,10 @@ def route_pformat():
     try:
         routes = route_info()
     except Exception as e:
-        lines.append(util.center('Route info failed', '!', 80))
+        lines.append(
+            util.center(
+                'Route info failed ({error})'.format(error=str(e)),
+                '!', 80))
         util.logexc(LOG, "Route info failed: %s" % e)
     else:
         if routes.get('ipv4'):
@@ -205,20 +426,20 @@ def route_pformat():
             header = util.center("Route IPv4 info", "+", max_len)
             lines.extend([header, route_s])
         if routes.get('ipv6'):
-            fields_v6 = ['Route', 'Proto', 'Recv-Q', 'Send-Q',
-                         'Local Address', 'Foreign Address', 'State']
+            fields_v6 = ['Route', 'Destination', 'Gateway', 'Interface',
+                         'Flags']
             tbl_v6 = SimpleTable(fields_v6)
             for (n, r) in enumerate(routes.get('ipv6')):
                 route_id = str(n)
-                tbl_v6.add_row([route_id, r['proto'],
-                                r['recv-q'], r['send-q'],
-                                r['local address'], r['foreign address'],
-                                r['state']])
+                if r['iface'] == 'lo':
+                    continue
+                tbl_v6.add_row([route_id, r['destination'],
+                                r['gateway'], r['iface'], r['flags']])
             route_s = tbl_v6.get_string()
             max_len = len(max(route_s.splitlines(), key=len))
             header = util.center("Route IPv6 info", "+", max_len)
             lines.extend([header, route_s])
-    return "\n".join(lines)
+    return "\n".join(lines) + "\n"
 
 
 def debug_info(prefix='ci-info: '):
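
For a feel of the new parser, here is an abbreviated, made-up 'ip addr show' sample and, in comments, roughly what _netdev_info_iproute() derives from it; the mask comes from net_prefix_to_ipv4_mask applied to the CIDR prefix:

    # Hypothetical sample input for the parser above (abbreviated).
    SAMPLE_IP_ADDR = """\
    1: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 state UP
        link/ether 00:16:3e:13:ae:10 brd ff:ff:ff:ff:ff:ff
        inet 10.85.130.116/24 brd 10.85.130.255 scope global eth0
        inet6 fe80::216:3eff:fe13:ae10/64 scope link
    """
    # _netdev_info_iproute(SAMPLE_IP_ADDR) would yield roughly:
    # {'eth0': {'up': True, 'hwaddr': '00:16:3e:13:ae:10',
    #           'ipv4': [{'ip': '10.85.130.116', 'bcast': '10.85.130.255',
    #                     'mask': '255.255.255.0', 'scope': 'global'}],
    #           'ipv6': [{'ip': 'fe80::216:3eff:fe13:ae10/64',
    #                     'scope6': 'link'}]}}
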
diff --git a/cloudinit/reporting/events.py b/cloudinit/reporting/events.py
index 4f62d2f..e5dfab3 100644
--- a/cloudinit/reporting/events.py
+++ b/cloudinit/reporting/events.py
@@ -192,7 +192,7 @@ class ReportEventStack(object):
 
     def _childrens_finish_info(self):
         for cand_result in (status.FAIL, status.WARN):
-            for name, (value, msg) in self.children.items():
+            for _name, (value, _msg) in self.children.items():
                 if value == cand_result:
                     return (value, self.message)
         return (self.result, self.message)
diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py
index 22279d0..858e082 100644
--- a/cloudinit/sources/DataSourceAliYun.py
+++ b/cloudinit/sources/DataSourceAliYun.py
@@ -45,7 +45,7 @@ def _is_aliyun():
 
 def parse_public_keys(public_keys):
     keys = []
-    for key_id, key_body in public_keys.items():
+    for _key_id, key_body in public_keys.items():
         if isinstance(key_body, str):
             keys.append(key_body.strip())
         elif isinstance(key_body, list):
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index e1d0055..24fd65f 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -29,7 +29,6 @@ CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info'
 
 # Shell command lists
 CMD_PROBE_FLOPPY = ['modprobe', 'floppy']
-CMD_UDEVADM_SETTLE = ['udevadm', 'settle', '--timeout=5']
 
 META_DATA_NOT_SUPPORTED = {
     'block-device-mapping': {},
@@ -185,26 +184,24 @@ class DataSourceAltCloud(sources.DataSource):
             cmd = CMD_PROBE_FLOPPY
             (cmd_out, _err) = util.subp(cmd)
             LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out)
-        except ProcessExecutionError as _err:
-            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
+        except ProcessExecutionError as e:
+            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
             return False
-        except OSError as _err:
-            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
+        except OSError as e:
+            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
             return False
 
         floppy_dev = '/dev/fd0'
 
         # udevadm settle for floppy device
         try:
-            cmd = CMD_UDEVADM_SETTLE
-            cmd.append('--exit-if-exists=' + floppy_dev)
-            (cmd_out, _err) = util.subp(cmd)
+            (cmd_out, _err) = util.udevadm_settle(exists=floppy_dev, timeout=5)
             LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out)
-        except ProcessExecutionError as _err:
-            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
+        except ProcessExecutionError as e:
+            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
             return False
-        except OSError as _err:
-            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
+        except OSError as e:
+            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
             return False
 
         try:
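
util.udevadm_settle() is a new helper added elsewhere in this diff; presumably it assembles the same command line the removed code built by hand, along the lines of the sketch below. The refactor also removes a subtle bug: the old code did cmd = CMD_UDEVADM_SETTLE and then cmd.append(...), mutating the shared module-level list on every call.

    # Hedged sketch of what util.udevadm_settle presumably runs; the real
    # helper lives in cloudinit/util.py in this branch.
    from cloudinit import util

    def udevadm_settle_sketch(exists=None, timeout=None):
        settle_cmd = ["udevadm", "settle"]
        if exists:
            settle_cmd.append("--exit-if-exists=%s" % exists)
        if timeout:
            settle_cmd.append("--timeout=%s" % timeout)
        return util.subp(settle_cmd)
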
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 0ee622e..7007d9e 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -48,6 +48,7 @@ DEFAULT_FS = 'ext4'
 # DMI chassis-asset-tag is set static for all azure instances
 AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77'
 REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds"
+REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready"
 IMDS_URL = "http://169.254.169.254/metadata/reprovisiondata"
 
 
@@ -107,31 +108,24 @@ def find_dev_from_busdev(camcontrol_out, busdev):
     return None
 
 
-def get_dev_storvsc_sysctl():
+def execute_or_debug(cmd, fail_ret=None):
     try:
-        sysctl_out, err = util.subp(['sysctl', 'dev.storvsc'])
+        return util.subp(cmd)[0]
     except util.ProcessExecutionError:
-        LOG.debug("Fail to execute sysctl dev.storvsc")
-        sysctl_out = ""
-    return sysctl_out
+        LOG.debug("Failed to execute: %s", ' '.join(cmd))
+        return fail_ret
+
+
+def get_dev_storvsc_sysctl():
+    return execute_or_debug(["sysctl", "dev.storvsc"], fail_ret="")
 
 
 def get_camcontrol_dev_bus():
-    try:
-        camcontrol_b_out, err = util.subp(['camcontrol', 'devlist', '-b'])
-    except util.ProcessExecutionError:
-        LOG.debug("Fail to execute camcontrol devlist -b")
-        return None
-    return camcontrol_b_out
+    return execute_or_debug(['camcontrol', 'devlist', '-b'])
 
 
 def get_camcontrol_dev():
-    try:
-        camcontrol_out, err = util.subp(['camcontrol', 'devlist'])
-    except util.ProcessExecutionError:
-        LOG.debug("Fail to execute camcontrol devlist")
-        return None
-    return camcontrol_out
+    return execute_or_debug(['camcontrol', 'devlist'])
 
 
 def get_resource_disk_on_freebsd(port_id):
@@ -214,6 +208,7 @@ BUILTIN_CLOUD_CONFIG = {
 }
 
 DS_CFG_PATH = ['datasource', DS_NAME]
+DS_CFG_KEY_PRESERVE_NTFS = 'never_destroy_ntfs'
 DEF_EPHEMERAL_LABEL = 'Temporary Storage'
 
 # The redacted password fails to meet password complexity requirements
@@ -400,14 +395,9 @@ class DataSourceAzure(sources.DataSource):
         if found == ddir:
             LOG.debug("using files cached in %s", ddir)
 
-        # azure / hyper-v provides random data here
-        # TODO. find the seed on FreeBSD platform
-        # now update ds_cfg to reflect contents pass in config
-        if not util.is_FreeBSD():
-            seed = util.load_file("/sys/firmware/acpi/tables/OEM0",
-                                  quiet=True, decode=False)
-            if seed:
-                self.metadata['random_seed'] = seed
+        seed = _get_random_seed()
+        if seed:
+            self.metadata['random_seed'] = seed
 
         user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
         self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
@@ -443,11 +433,12 @@ class DataSourceAzure(sources.DataSource):
             LOG.debug("negotiating already done for %s",
                       self.get_instance_id())
 
-    def _poll_imds(self, report_ready=True):
+    def _poll_imds(self):
         """Poll IMDS for the new provisioning data until we get a valid
         response. Then return the returned JSON object."""
         url = IMDS_URL + "?api-version=2017-04-02"
         headers = {"Metadata": "true"}
+        report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE))
         LOG.debug("Start polling IMDS")
 
         def exc_cb(msg, exception):
@@ -457,13 +448,17 @@ class DataSourceAzure(sources.DataSource):
             # call DHCP and setup the ephemeral network to acquire the new IP.
             return False
 
-        need_report = report_ready
         while True:
             try:
                 with EphemeralDHCPv4() as lease:
-                    if need_report:
+                    if report_ready:
+                        path = REPORTED_READY_MARKER_FILE
+                        LOG.info(
+                            "Creating a marker file to report ready: %s", path)
+                        util.write_file(path, "{pid}: {time}\n".format(
+                            pid=os.getpid(), time=time()))
                         self._report_ready(lease=lease)
-                        need_report = False
+                        report_ready = False
                     return readurl(url, timeout=1, headers=headers,
                                    exception_cb=exc_cb, infinite=True).contents
             except UrlError:
@@ -474,7 +469,7 @@ class DataSourceAzure(sources.DataSource):
         before we go into our polling loop."""
         try:
             get_metadata_from_fabric(None, lease['unknown-245'])
-        except Exception as exc:
+        except Exception:
             LOG.warning(
                 "Error communicating with Azure fabric; You may experience."
                 "connectivity issues.", exc_info=True)
@@ -492,13 +487,15 @@ class DataSourceAzure(sources.DataSource):
         jump back into the polling loop in order to retrieve the ovf_env."""
         if not ret:
             return False
-        (md, self.userdata_raw, cfg, files) = ret
+        (_md, self.userdata_raw, cfg, _files) = ret
         path = REPROVISION_MARKER_FILE
         if (cfg.get('PreprovisionedVm') is True or
                 os.path.isfile(path)):
             if not os.path.isfile(path):
-                LOG.info("Creating a marker file to poll imds")
-                util.write_file(path, "%s: %s\n" % (os.getpid(), time()))
+                LOG.info("Creating a marker file to poll imds: %s",
+                         path)
+                util.write_file(path, "{pid}: {time}\n".format(
+                    pid=os.getpid(), time=time()))
             return True
         return False
 
@@ -528,16 +525,19 @@ class DataSourceAzure(sources.DataSource):
                 self.ds_cfg['agent_command'])
         try:
             fabric_data = metadata_func()
-        except Exception as exc:
+        except Exception:
             LOG.warning(
                 "Error communicating with Azure fabric; You may experience."
                 "connectivity issues.", exc_info=True)
             return False
+        util.del_file(REPORTED_READY_MARKER_FILE)
         util.del_file(REPROVISION_MARKER_FILE)
         return fabric_data
 
     def activate(self, cfg, is_new_instance):
-        address_ephemeral_resize(is_new_instance=is_new_instance)
+        address_ephemeral_resize(is_new_instance=is_new_instance,
+                                 preserve_ntfs=self.ds_cfg.get(
+                                     DS_CFG_KEY_PRESERVE_NTFS, False))
         return
 
     @property
@@ -581,17 +581,29 @@ def _has_ntfs_filesystem(devpath):
     return os.path.realpath(devpath) in ntfs_devices
 
 
-def can_dev_be_reformatted(devpath):
-    """Determine if block device devpath is newly formatted ephemeral.
+def can_dev_be_reformatted(devpath, preserve_ntfs):
+    """Determine if the ephemeral drive at devpath should be reformatted.
 
-    A newly formatted disk will:
+    A fresh ephemeral disk is formatted by Azure and will:
     a.) have a partition table (dos or gpt)
     b.) have 1 partition that is ntfs formatted, or
         have 2 partitions with the second partition ntfs formatted.
         (larger instances with >2TB ephemeral disk have gpt, and will
         have a microsoft reserved partition as part 1.  LP: #1686514)
     c.) the ntfs partition will have no files other than possibly
-        'dataloss_warning_readme.txt'"""
+        'dataloss_warning_readme.txt'
+
+    User can indicate that NTFS should never be destroyed by setting
+    DS_CFG_KEY_PRESERVE_NTFS in dscfg.
+    If data is found on NTFS, user is warned to set DS_CFG_KEY_PRESERVE_NTFS
599 to make sure cloud-init does not accidentally wipe their data.
600 If cloud-init cannot mount the disk to check for data, destruction
601 will be allowed, unless the dscfg key is set."""
602 if preserve_ntfs:
603 msg = ('config says to never destroy NTFS (%s.%s), skipping checks' %
604 (".".join(DS_CFG_PATH), DS_CFG_KEY_PRESERVE_NTFS))
605 return False, msg
606
595 if not os.path.exists(devpath):607 if not os.path.exists(devpath):
596 return False, 'device %s does not exist' % devpath608 return False, 'device %s does not exist' % devpath
597609
@@ -624,18 +636,27 @@ def can_dev_be_reformatted(devpath):
624 bmsg = ('partition %s (%s) on device %s was ntfs formatted' %636 bmsg = ('partition %s (%s) on device %s was ntfs formatted' %
625 (cand_part, cand_path, devpath))637 (cand_part, cand_path, devpath))
626 try:638 try:
627 file_count = util.mount_cb(cand_path, count_files)639 file_count = util.mount_cb(cand_path, count_files, mtype="ntfs",
640 update_env_for_mount={'LANG': 'C'})
628 except util.MountFailedError as e:641 except util.MountFailedError as e:
642 if "mount: unknown filesystem type 'ntfs'" in str(e):
643 return True, (bmsg + ' but this system cannot mount NTFS,'
644 ' assuming there are no important files.'
645 ' Formatting allowed.')
629 return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e)646 return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e)
630647
631 if file_count != 0:648 if file_count != 0:
649 LOG.warning("it looks like you're using NTFS on the ephemeral disk, "
650 'to ensure that filesystem does not get wiped, set '
651 '%s.%s in config', '.'.join(DS_CFG_PATH),
652 DS_CFG_KEY_PRESERVE_NTFS)
632 return False, bmsg + ' but had %d files on it.' % file_count653 return False, bmsg + ' but had %d files on it.' % file_count
633654
634 return True, bmsg + ' and had no important files. Safe for reformatting.'655 return True, bmsg + ' and had no important files. Safe for reformatting.'
635656
636657
637def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,658def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
638 is_new_instance=False):659 is_new_instance=False, preserve_ntfs=False):
639 # wait for ephemeral disk to come up660 # wait for ephemeral disk to come up
640 naplen = .2661 naplen = .2
641 missing = util.wait_for_files([devpath], maxwait=maxwait, naplen=naplen,662 missing = util.wait_for_files([devpath], maxwait=maxwait, naplen=naplen,
@@ -651,7 +672,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
651 if is_new_instance:672 if is_new_instance:
652 result, msg = (True, "First instance boot.")673 result, msg = (True, "First instance boot.")
653 else:674 else:
654 result, msg = can_dev_be_reformatted(devpath)675 result, msg = can_dev_be_reformatted(devpath, preserve_ntfs)
655676
656 LOG.debug("reformattable=%s: %s", result, msg)677 LOG.debug("reformattable=%s: %s", result, msg)
657 if not result:678 if not result:
@@ -965,6 +986,18 @@ def _check_freebsd_cdrom(cdrom_dev):
965 return False986 return False
966987
967988
+def _get_random_seed():
+    """Return content of the random seed file if available, otherwise
+    return None."""
+    # azure / hyper-v provides random data here
+    # TODO. find the seed on FreeBSD platform
+    # now update ds_cfg to reflect contents pass in config
+    if util.is_FreeBSD():
+        return None
+    return util.load_file("/sys/firmware/acpi/tables/OEM0",
+                          quiet=True, decode=False)
+
+
 def list_possible_azure_ds_devs():
     devlist = []
     if util.is_FreeBSD():
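
The Azure hunks above thread a new preserve_ntfs flag from datasource config down into can_dev_be_reformatted(). A rough sketch of how the 'never_destroy_ntfs' value travels from user config to that check: the sys_cfg fragment below is hypothetical, and get_cfg_by_path() here is a simplified stand-in for cloudinit.util.get_cfg_by_path, not the real implementation.

    DS_CFG_PATH = ('datasource', 'Azure')
    DS_CFG_KEY_PRESERVE_NTFS = 'never_destroy_ntfs'

    def get_cfg_by_path(cfg, path, default=None):
        # simplified stand-in for cloudinit.util.get_cfg_by_path
        for key in path:
            if not isinstance(cfg, dict) or key not in cfg:
                return default
            cfg = cfg[key]
        return cfg

    # hypothetical /etc/cloud/cloud.cfg.d fragment, as a dict
    sys_cfg = {'datasource': {'Azure': {'never_destroy_ntfs': True}}}
    ds_cfg = get_cfg_by_path(sys_cfg, DS_CFG_PATH, {})
    preserve = ds_cfg.get(DS_CFG_KEY_PRESERVE_NTFS, False)
    print(preserve)  # True -> can_dev_be_reformatted() refuses to reformat
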
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 0df545f..d4b758f 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -68,6 +68,10 @@ class DataSourceCloudStack(sources.DataSource):
 
     dsname = 'CloudStack'
 
+    # Setup read_url parameters per get_url_params.
+    url_max_wait = 120
+    url_timeout = 50
+
     def __init__(self, sys_cfg, distro, paths):
         sources.DataSource.__init__(self, sys_cfg, distro, paths)
         self.seed_dir = os.path.join(paths.seed_dir, 'cs')
@@ -80,33 +84,18 @@ class DataSourceCloudStack(sources.DataSource):
         self.metadata_address = "http://%s/" % (self.vr_addr,)
         self.cfg = {}
 
-    def _get_url_settings(self):
-        mcfg = self.ds_cfg
-        max_wait = 120
-        try:
-            max_wait = int(mcfg.get("max_wait", max_wait))
-        except Exception:
-            util.logexc(LOG, "Failed to get max wait. using %s", max_wait)
+    def wait_for_metadata_service(self):
+        url_params = self.get_url_params()
 
-        if max_wait == 0:
+        if url_params.max_wait_seconds <= 0:
             return False
 
-        timeout = 50
-        try:
-            timeout = int(mcfg.get("timeout", timeout))
-        except Exception:
-            util.logexc(LOG, "Failed to get timeout, using %s", timeout)
-
-        return (max_wait, timeout)
-
-    def wait_for_metadata_service(self):
-        (max_wait, timeout) = self._get_url_settings()
-
         urls = [uhelp.combine_url(self.metadata_address,
                                   'latest/meta-data/instance-id')]
         start_time = time.time()
-        url = uhelp.wait_for_url(urls=urls, max_wait=max_wait,
-                                 timeout=timeout, status_cb=LOG.warn)
+        url = uhelp.wait_for_url(
+            urls=urls, max_wait=url_params.max_wait_seconds,
+            timeout=url_params.timeout_seconds, status_cb=LOG.warn)
 
         if url:
             LOG.debug("Using metadata source: '%s'", url)
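
This file (like DataSourceEc2.py and DataSourceOpenStack.py below) drops its private _get_url_settings() in favor of get_url_params() on the DataSource base class, driven by the class-level url_max_wait/url_timeout defaults added here and the URLParams namedtuple added in cloudinit/sources/__init__.py at the end of this diff. A minimal sketch of that consolidation, assuming the base class resolves ds_cfg overrides roughly like this (simplified, not the exact base-class code):

    from collections import namedtuple

    URLParams = namedtuple(
        'URLParams', ['max_wait_seconds', 'timeout_seconds', 'num_retries'])

    def get_url_params(ds_cfg, url_max_wait=120, url_timeout=50, retries=5):
        def to_int(key, default):
            # fall back to the class default on bad user config
            try:
                return int(ds_cfg.get(key, default))
            except (TypeError, ValueError):
                return default
        return URLParams(to_int('max_wait', url_max_wait),
                         to_int('timeout', url_timeout),
                         to_int('retries', retries))

    params = get_url_params({'timeout': '10'})
    print(params.max_wait_seconds, params.timeout_seconds)  # 120 10
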
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index c7b5fe5..4cb2897 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -43,7 +43,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
         self.version = None
         self.ec2_metadata = None
         self._network_config = None
-        self.network_json = None
+        self.network_json = sources.UNSET
         self.network_eni = None
         self.known_macs = None
         self.files = {}
@@ -69,7 +69,8 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
                 util.logexc(LOG, "Failed reading config drive from %s", sdir)
 
         if not found:
-            for dev in find_candidate_devs():
+            dslist = self.sys_cfg.get('datasource_list')
+            for dev in find_candidate_devs(dslist=dslist):
                 try:
                     # Set mtype if freebsd and turn off sync
                     if dev.startswith("/dev/cd"):
@@ -148,7 +149,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
     @property
     def network_config(self):
         if self._network_config is None:
-            if self.network_json is not None:
+            if self.network_json not in (None, sources.UNSET):
                 LOG.debug("network config provided via network_json")
                 self._network_config = openstack.convert_net_json(
                     self.network_json, known_macs=self.known_macs)
@@ -211,7 +212,7 @@ def write_injected_files(files):
             util.logexc(LOG, "Failed writing file: %s", filename)
 
 
-def find_candidate_devs(probe_optical=True):
+def find_candidate_devs(probe_optical=True, dslist=None):
     """Return a list of devices that may contain the config drive.
 
     The returned list is sorted by search order where the first item has
@@ -227,6 +228,9 @@ def find_candidate_devs(probe_optical=True):
     * either vfat or iso9660 formated
     * labeled with 'config-2' or 'CONFIG-2'
     """
+    if dslist is None:
+        dslist = []
+
     # query optical drive to get it in blkid cache for 2.6 kernels
     if probe_optical:
         for device in OPTICAL_DEVICES:
@@ -257,7 +261,8 @@ def find_candidate_devs(probe_optical=True):
     devices = [d for d in candidates
               if d in by_label or not util.is_partition(d)]
 
-    if devices:
+    LOG.debug("devices=%s dslist=%s", devices, dslist)
+    if devices and "IBMCloud" in dslist:
         # IBMCloud uses config-2 label, but limited to a single UUID.
         ibm_platform, ibm_path = get_ibm_platform()
         if ibm_path in devices:
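
The effect of the new dslist gate in find_candidate_devs(): the IBMCloud disambiguation only runs when "IBMCloud" appears in the configured datasource_list. A toy stand-in for that branch (maybe_filter_ibm and the device names are hypothetical; the real code consults get_ibm_platform()):

    def maybe_filter_ibm(devices, dslist, ibm_path='/dev/xvdb'):
        # stand-in for the "IBMCloud" branch in find_candidate_devs()
        if devices and "IBMCloud" in (dslist or []):
            if ibm_path in devices:
                devices.remove(ibm_path)
        return devices

    print(maybe_filter_ibm(['/dev/sr0', '/dev/xvdb'], ['ConfigDrive']))
    # ['/dev/sr0', '/dev/xvdb'] -- untouched when IBMCloud is not configured
    print(maybe_filter_ibm(['/dev/sr0', '/dev/xvdb'], ['IBMCloud']))
    # ['/dev/sr0'] -- the IBMCloud config disk is excluded
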
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 21e9ef8..968ab3f 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -27,8 +27,6 @@ SKIP_METADATA_URL_CODES = frozenset([uhelp.NOT_FOUND])
 STRICT_ID_PATH = ("datasource", "Ec2", "strict_id")
 STRICT_ID_DEFAULT = "warn"
 
-_unset = "_unset"
-
 
 class Platforms(object):
     # TODO Rename and move to cloudinit.cloud.CloudNames
@@ -59,15 +57,16 @@ class DataSourceEc2(sources.DataSource):
     # for extended metadata content. IPv6 support comes in 2016-09-02
     extended_metadata_versions = ['2016-09-02']
 
+    # Setup read_url parameters per get_url_params.
+    url_max_wait = 120
+    url_timeout = 50
+
     _cloud_platform = None
 
-    _network_config = _unset  # Used for caching calculated network config v1
+    _network_config = sources.UNSET  # Used to cache calculated network cfg v1
 
     # Whether we want to get network configuration from the metadata service.
-    get_network_metadata = False
-
-    # Track the discovered fallback nic for use in configuration generation.
-    _fallback_interface = None
+    perform_dhcp_setup = False
 
     def __init__(self, sys_cfg, distro, paths):
         super(DataSourceEc2, self).__init__(sys_cfg, distro, paths)
@@ -98,7 +97,7 @@ class DataSourceEc2(sources.DataSource):
         elif self.cloud_platform == Platforms.NO_EC2_METADATA:
             return False
 
-        if self.get_network_metadata:  # Setup networking in init-local stage.
+        if self.perform_dhcp_setup:  # Setup networking in init-local stage.
             if util.is_FreeBSD():
                 LOG.debug("FreeBSD doesn't support running dhclient with -sf")
                 return False
@@ -158,27 +157,11 @@ class DataSourceEc2(sources.DataSource):
         else:
             return self.metadata['instance-id']
 
-    def _get_url_settings(self):
-        mcfg = self.ds_cfg
-        max_wait = 120
-        try:
-            max_wait = int(mcfg.get("max_wait", max_wait))
-        except Exception:
-            util.logexc(LOG, "Failed to get max wait. using %s", max_wait)
-
-        timeout = 50
-        try:
-            timeout = max(0, int(mcfg.get("timeout", timeout)))
-        except Exception:
-            util.logexc(LOG, "Failed to get timeout, using %s", timeout)
-
-        return (max_wait, timeout)
-
     def wait_for_metadata_service(self):
         mcfg = self.ds_cfg
 
-        (max_wait, timeout) = self._get_url_settings()
-        if max_wait <= 0:
+        url_params = self.get_url_params()
+        if url_params.max_wait_seconds <= 0:
             return False
 
         # Remove addresses from the list that wont resolve.
@@ -205,7 +188,8 @@ class DataSourceEc2(sources.DataSource):
 
         start_time = time.time()
         url = uhelp.wait_for_url(
-            urls=urls, max_wait=max_wait, timeout=timeout, status_cb=LOG.warn)
+            urls=urls, max_wait=url_params.max_wait_seconds,
+            timeout=url_params.timeout_seconds, status_cb=LOG.warn)
 
         if url:
             self.metadata_address = url2base[url]
@@ -310,11 +294,11 @@ class DataSourceEc2(sources.DataSource):
     @property
     def network_config(self):
         """Return a network config dict for rendering ENI or netplan files."""
-        if self._network_config != _unset:
+        if self._network_config != sources.UNSET:
             return self._network_config
 
         if self.metadata is None:
-            # this would happen if get_data hadn't been called. leave as _unset
+            # this would happen if get_data hadn't been called. leave as UNSET
             LOG.warning(
                 "Unexpected call to network_config when metadata is None.")
             return None
@@ -353,9 +337,7 @@ class DataSourceEc2(sources.DataSource):
                 self._fallback_interface = _legacy_fbnic
                 self.fallback_nic = None
             else:
-                self._fallback_interface = net.find_fallback_nic()
-                if self._fallback_interface is None:
-                    LOG.warning("Did not find a fallback interface on EC2.")
+                return super(DataSourceEc2, self).fallback_interface
         return self._fallback_interface
 
     def _crawl_metadata(self):
@@ -390,7 +372,7 @@ class DataSourceEc2Local(DataSourceEc2):
     metadata service. If the metadata service provides network configuration
     then render the network configuration for that instance based on metadata.
     """
-    get_network_metadata = True  # Get metadata network config if present
+    perform_dhcp_setup = True  # Use dhcp before querying metadata
 
     def get_data(self):
         supported_platforms = (Platforms.AWS,)
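
The get_network_metadata -> perform_dhcp_setup rename makes the init-local split explicit: DataSourceEc2Local differs from DataSourceEc2 only in that one class attribute. A toy illustration of the pattern (class names here are illustrative only):

    class Base(object):
        perform_dhcp_setup = False  # network is already up when we run

        def mode(self):
            if self.perform_dhcp_setup:
                return 'init-local (bring up ephemeral dhcp first)'
            return 'init (network already configured)'

    class Local(Base):
        perform_dhcp_setup = True

    print(Base().mode())
    print(Local().mode())
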
diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py
index 02b3d56..01106ec 100644
--- a/cloudinit/sources/DataSourceIBMCloud.py
+++ b/cloudinit/sources/DataSourceIBMCloud.py
@@ -8,17 +8,11 @@ There are 2 different api exposed launch methods.
  * template: This is the legacy method of launching instances.
    When booting from an image template, the system boots first into
    a "provisioning" mode. There, host <-> guest mechanisms are utilized
-   to execute code in the guest and provision it.
+   to execute code in the guest and configure it. The configuration
+   includes configuring the system network and possibly installing
+   packages and other software stack.
 
-   Cloud-init will disable itself when it detects that it is in the
-   provisioning mode. It detects this by the presence of
-   a file '/root/provisioningConfiguration.cfg'.
-
-   When provided with user-data, the "first boot" will contain a
-   ConfigDrive-like disk labeled with 'METADATA'. If there is no user-data
-   provided, then there is no data-source.
-
-   Cloud-init never does any network configuration in this mode.
+   After the provisioning is finished, the system reboots.
 
  * os_code: Essentially "launch by OS Code" (Operating System Code).
    This is a more modern approach. There is no specific "provisioning" boot.
@@ -30,11 +24,73 @@ There are 2 different api exposed launch methods.
    mean that 1 in 8^16 (~4 billion) Xen ConfigDrive systems will be
    incorrectly identified as IBMCloud.
 
+The combination of these 2 launch methods, with or without user-data,
+creates 6 boot scenarios.
+ A. os_code with user-data
+ B. os_code without user-data
+    Cloud-init is fully operational in this mode.
+
+    There is a block device attached with label 'config-2'.
+    As it differs from OpenStack's config-2, we have to differentiate.
+    We do so by requiring the UUID on the filesystem to be "9796-932E".
+
+    This disk will have the following files. Specifically note, there
+    is no versioned path to the meta-data, only 'latest':
+      openstack/latest/meta_data.json
+      openstack/latest/network_data.json
+      openstack/latest/user_data [optional]
+      openstack/latest/vendor_data.json
+
+    vendor_data.json as of 2018-04 looks like this:
+      {"cloud-init":"#!/bin/bash\necho 'root:$6$<snip>' | chpasswd -e"}
+
+    The only difference between A and B in this mode is the presence
+    of user_data on the config disk.
+
+ C. template, provisioning boot with user-data
+ D. template, provisioning boot without user-data.
+    With ds-identify cloud-init is fully disabled in this mode.
+    Without ds-identify, cloud-init None datasource will be used.
+
+    This is currently identified by the presence of
+    /root/provisioningConfiguration.cfg . That file is placed into the
+    system before it is booted.
+
+    The difference between C and D is the presence of the METADATA disk
+    as described in E below. There is no METADATA disk attached unless
+    user-data is provided.
+
+ E. template, post-provisioning boot with user-data.
+    Cloud-init is fully operational in this mode.
+
+    This is identified by a block device with filesystem label "METADATA".
+    It looks similar to a version-1 OpenStack config drive. It will
+    have the following files:
+
+       openstack/latest/user_data
+       openstack/latest/meta_data.json
+       openstack/content/interfaces
+       meta.js
+
+    meta.js contains something similar to user_data. cloud-init ignores it.
+    cloud-init ignores the 'interfaces' style file here.
+    In this mode, cloud-init has networking code disabled. It relies
+    on the provisioning boot to have configured networking.
+
+ F. template, post-provisioning boot without user-data.
+    With ds-identify, cloud-init will be fully disabled.
+    Without ds-identify, cloud-init None datasource will be used.
+
+    There is no information available to identify this scenario.
+
+ The user will be able to ssh in as root with their public keys that
+ have been installed into /root/ssh/.authorized_keys
+ during the provisioning stage.
+
 TODO:
  * is uuid (/sys/hypervisor/uuid) stable for life of an instance?
    it seems it is not the same as data's uuid in the os_code case
    but is in the template case.
-
 """
 import base64
 import json
@@ -138,8 +194,30 @@ def _is_xen():
     return os.path.exists("/proc/xen")
 
 
-def _is_ibm_provisioning():
-    return os.path.exists("/root/provisioningConfiguration.cfg")
+def _is_ibm_provisioning(
+        prov_cfg="/root/provisioningConfiguration.cfg",
+        inst_log="/root/swinstall.log",
+        boot_ref="/proc/1/environ"):
+    """Return boolean indicating if this boot is ibm provisioning boot."""
+    if os.path.exists(prov_cfg):
+        msg = "config '%s' exists." % prov_cfg
+        result = True
+        if os.path.exists(inst_log):
+            if os.path.exists(boot_ref):
+                result = (os.stat(inst_log).st_mtime >
+                          os.stat(boot_ref).st_mtime)
+                msg += (" log '%s' from %s boot." %
+                        (inst_log, "current" if result else "previous"))
+            else:
+                msg += (" log '%s' existed, but no reference file '%s'." %
+                        (inst_log, boot_ref))
+                result = False
+        else:
+            msg += " log '%s' did not exist." % inst_log
+    else:
+        result, msg = (False, "config '%s' did not exist." % prov_cfg)
+    LOG.debug("ibm_provisioning=%s: %s", result, msg)
+    return result
 
 
 def get_ibm_platform():
@@ -189,7 +267,7 @@ def get_ibm_platform():
         else:
             return (Platforms.TEMPLATE_LIVE_METADATA, metadata_path)
     elif _is_ibm_provisioning():
-            return (Platforms.TEMPLATE_PROVISIONING_NODATA, None)
+        return (Platforms.TEMPLATE_PROVISIONING_NODATA, None)
     return not_found
 
 
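
The rewritten _is_ibm_provisioning() decides whether /root/swinstall.log belongs to the current boot by comparing its mtime against /proc/1/environ, which is created when PID 1 starts. A standalone sketch of just that comparison (paths are the defaults from this diff):

    import os

    def written_this_boot(path, boot_ref="/proc/1/environ"):
        # anything modified after PID 1 started was written during this boot
        if not (os.path.exists(path) and os.path.exists(boot_ref)):
            return False
        return os.stat(path).st_mtime > os.stat(boot_ref).st_mtime

    # e.g. written_this_boot("/root/swinstall.log") mirrors the new check
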
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 6ac8863..bcb3854 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -198,13 +198,13 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
     If version is None, then <version>/ will not be used.
     """
     if read_file_or_url is None:
-        read_file_or_url = util.read_file_or_url
+        read_file_or_url = url_helper.read_file_or_url
 
     if seed_url.endswith("/"):
         seed_url = seed_url[:-1]
 
     md = {}
-    for path, dictname, binary, optional in DS_FIELDS:
+    for path, _dictname, binary, optional in DS_FIELDS:
         if version is None:
             url = "%s/%s" % (seed_url, path)
         else:
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index 5d3a8dd..2daea59 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -78,7 +78,7 @@ class DataSourceNoCloud(sources.DataSource):
                 LOG.debug("Using seeded data from %s", path)
                 mydata = _merge_new_seed(mydata, seeded)
                 break
-            except ValueError as e:
+            except ValueError:
                 pass
 
         # If the datasource config had a 'seedfrom' entry, then that takes
@@ -117,7 +117,7 @@ class DataSourceNoCloud(sources.DataSource):
                 try:
                     seeded = util.mount_cb(dev, _pp2d_callback,
                                            pp2d_kwargs)
-                except ValueError as e:
+                except ValueError:
                     if dev in label_list:
                         LOG.warning("device %s with label=%s not a"
                                     "valid seed.", dev, label)
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index dc914a7..178ccb0 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -556,7 +556,7 @@ def search_file(dirpath, filename):
     if not dirpath or not filename:
         return None
 
-    for root, dirs, files in os.walk(dirpath):
+    for root, _dirs, files in os.walk(dirpath):
         if filename in files:
             return os.path.join(root, filename)
 
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index d4a4111..16c1078 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -378,7 +378,7 @@ def read_context_disk_dir(source_dir, asuser=None):
     if asuser is not None:
         try:
             pwd.getpwnam(asuser)
-        except KeyError as e:
+        except KeyError:
             raise BrokenContextDiskDir(
                 "configured user '{user}' does not exist".format(
                     user=asuser))
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index e55a763..365af96 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -7,6 +7,7 @@
 import time
 
 from cloudinit import log as logging
+from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
 from cloudinit import sources
 from cloudinit import url_helper
 from cloudinit import util
@@ -22,51 +23,37 @@ DEFAULT_METADATA = {
     "instance-id": DEFAULT_IID,
 }
 
+# OpenStack DMI constants
+DMI_PRODUCT_NOVA = 'OpenStack Nova'
+DMI_PRODUCT_COMPUTE = 'OpenStack Compute'
+VALID_DMI_PRODUCT_NAMES = [DMI_PRODUCT_NOVA, DMI_PRODUCT_COMPUTE]
+DMI_ASSET_TAG_OPENTELEKOM = 'OpenTelekomCloud'
+VALID_DMI_ASSET_TAGS = [DMI_ASSET_TAG_OPENTELEKOM]
+
 
 class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
 
     dsname = "OpenStack"
 
+    _network_config = sources.UNSET  # Used to cache calculated network cfg v1
+
+    # Whether we want to get network configuration from the metadata service.
+    perform_dhcp_setup = False
+
     def __init__(self, sys_cfg, distro, paths):
         super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths)
         self.metadata_address = None
         self.ssl_details = util.fetch_ssl_details(self.paths)
         self.version = None
         self.files = {}
-        self.ec2_metadata = None
+        self.ec2_metadata = sources.UNSET
+        self.network_json = sources.UNSET
 
     def __str__(self):
         root = sources.DataSource.__str__(self)
         mstr = "%s [%s,ver=%s]" % (root, self.dsmode, self.version)
         return mstr
 
-    def _get_url_settings(self):
-        # TODO(harlowja): this is shared with ec2 datasource, we should just
-        # move it to a shared location instead...
-        # Note: the defaults here are different though.
-
-        # max_wait < 0 indicates do not wait
-        max_wait = -1
-        timeout = 10
-        retries = 5
-
-        try:
-            max_wait = int(self.ds_cfg.get("max_wait", max_wait))
-        except Exception:
-            util.logexc(LOG, "Failed to get max wait. using %s", max_wait)
-
-        try:
-            timeout = max(0, int(self.ds_cfg.get("timeout", timeout)))
-        except Exception:
-            util.logexc(LOG, "Failed to get timeout, using %s", timeout)
-
-        try:
-            retries = int(self.ds_cfg.get("retries", retries))
-        except Exception:
-            util.logexc(LOG, "Failed to get retries. using %s", retries)
-
-        return (max_wait, timeout, retries)
-
     def wait_for_metadata_service(self):
         urls = self.ds_cfg.get("metadata_urls", [DEF_MD_URL])
         filtered = [x for x in urls if util.is_resolvable_url(x)]
@@ -86,10 +73,11 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
             md_urls.append(md_url)
             url2base[md_url] = url
 
-        (max_wait, timeout, retries) = self._get_url_settings()
+        url_params = self.get_url_params()
         start_time = time.time()
-        avail_url = url_helper.wait_for_url(urls=md_urls, max_wait=max_wait,
-                                            timeout=timeout)
+        avail_url = url_helper.wait_for_url(
+            urls=md_urls, max_wait=url_params.max_wait_seconds,
+            timeout=url_params.timeout_seconds)
         if avail_url:
             LOG.debug("Using metadata source: '%s'", url2base[avail_url])
         else:
@@ -99,38 +87,66 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
         self.metadata_address = url2base.get(avail_url)
         return bool(avail_url)
 
-    def _get_data(self):
-        try:
-            if not self.wait_for_metadata_service():
-                return False
-        except IOError:
-            return False
+    def check_instance_id(self, sys_cfg):
+        # quickly (local check only) if self.instance_id is still valid
+        return sources.instance_id_matches_system_uuid(self.get_instance_id())
 
-        (max_wait, timeout, retries) = self._get_url_settings()
+    @property
+    def network_config(self):
+        """Return a network config dict for rendering ENI or netplan files."""
+        if self._network_config != sources.UNSET:
+            return self._network_config
+
+        # RELEASE_BLOCKER: SRU to Xenial and Artful SRU should not provide
+        # network_config by default unless configured in /etc/cloud/cloud.cfg*.
+        # Patch Xenial and Artful before release to default to False.
+        if util.is_false(self.ds_cfg.get('apply_network_config', True)):
+            self._network_config = None
+            return self._network_config
+        if self.network_json == sources.UNSET:
+            # this would happen if get_data hadn't been called. leave as UNSET
+            LOG.warning(
+                'Unexpected call to network_config when network_json is None.')
+            return None
+
+        LOG.debug('network config provided via network_json')
+        self._network_config = openstack.convert_net_json(
+            self.network_json, known_macs=None)
+        return self._network_config
 
-        try:
-            results = util.log_time(LOG.debug,
-                                    'Crawl of openstack metadata service',
-                                    read_metadata_service,
-                                    args=[self.metadata_address],
-                                    kwargs={'ssl_details': self.ssl_details,
-                                            'retries': retries,
-                                            'timeout': timeout})
-        except openstack.NonReadable:
-            return False
-        except (openstack.BrokenMetadata, IOError):
-            util.logexc(LOG, "Broken metadata address %s",
-                        self.metadata_address)
+    def _get_data(self):
+        """Crawl metadata, parse and persist that data for this instance.
+
+        @return: True when metadata discovered indicates OpenStack datasource.
+            False when unable to contact metadata service or when metadata
+            format is invalid or disabled.
+        """
+        if not detect_openstack():
             return False
+        if self.perform_dhcp_setup:  # Setup networking in init-local stage.
+            try:
+                with EphemeralDHCPv4(self.fallback_interface):
+                    results = util.log_time(
+                        logfunc=LOG.debug, msg='Crawl of metadata service',
+                        func=self._crawl_metadata)
+            except (NoDHCPLeaseError, sources.InvalidMetaDataException) as e:
+                util.logexc(LOG, str(e))
+                return False
+        else:
+            try:
+                results = self._crawl_metadata()
+            except sources.InvalidMetaDataException as e:
+                util.logexc(LOG, str(e))
+                return False
 
         self.dsmode = self._determine_dsmode([results.get('dsmode')])
         if self.dsmode == sources.DSMODE_DISABLED:
             return False
-
         md = results.get('metadata', {})
         md = util.mergemanydict([md, DEFAULT_METADATA])
         self.metadata = md
         self.ec2_metadata = results.get('ec2-metadata')
+        self.network_json = results.get('networkdata')
         self.userdata_raw = results.get('userdata')
         self.version = results['version']
         self.files.update(results.get('files', {}))
@@ -145,9 +161,50 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
 
         return True
 
-    def check_instance_id(self, sys_cfg):
-        # quickly (local check only) if self.instance_id is still valid
-        return sources.instance_id_matches_system_uuid(self.get_instance_id())
+    def _crawl_metadata(self):
+        """Crawl metadata service when available.
+
+        @returns: Dictionary with all metadata discovered for this datasource.
+        @raise: InvalidMetaDataException on unreadable or broken
+            metadata.
+        """
+        try:
+            if not self.wait_for_metadata_service():
+                raise sources.InvalidMetaDataException(
+                    'No active metadata service found')
+        except IOError as e:
+            raise sources.InvalidMetaDataException(
+                'IOError contacting metadata service: {error}'.format(
+                    error=str(e)))
+
+        url_params = self.get_url_params()
+
+        try:
+            result = util.log_time(
+                LOG.debug, 'Crawl of openstack metadata service',
+                read_metadata_service, args=[self.metadata_address],
+                kwargs={'ssl_details': self.ssl_details,
+                        'retries': url_params.num_retries,
+                        'timeout': url_params.timeout_seconds})
+        except openstack.NonReadable as e:
+            raise sources.InvalidMetaDataException(str(e))
+        except (openstack.BrokenMetadata, IOError):
+            msg = 'Broken metadata address {addr}'.format(
+                addr=self.metadata_address)
+            raise sources.InvalidMetaDataException(msg)
+        return result
+
+
+class DataSourceOpenStackLocal(DataSourceOpenStack):
+    """Run in init-local using a dhcp discovery prior to metadata crawl.
+
+    In init-local, no network is available. This subclass sets up minimal
+    networking with dhclient on a viable nic so that it can talk to the
+    metadata service. If the metadata service provides network configuration
+    then render the network configuration for that instance based on metadata.
+    """
+
+    perform_dhcp_setup = True  # Get metadata network config if present
 
 
 def read_metadata_service(base_url, ssl_details=None,
@@ -157,8 +214,23 @@ def read_metadata_service(base_url, ssl_details=None,
     return reader.read_v2()
 
 
+def detect_openstack():
+    """Return True when a potential OpenStack platform is detected."""
+    if not util.is_x86():
+        return True  # Non-Intel cpus don't properly report dmi product names
+    product_name = util.read_dmi_data('system-product-name')
+    if product_name in VALID_DMI_PRODUCT_NAMES:
+        return True
+    elif util.read_dmi_data('chassis-asset-tag') in VALID_DMI_ASSET_TAGS:
+        return True
+    elif util.get_proc_env(1).get('product_name') == DMI_PRODUCT_NOVA:
+        return True
+    return False
+
+
 # Used to match classes to dependencies
 datasources = [
+    (DataSourceOpenStackLocal, (sources.DEP_FILESYSTEM,)),
     (DataSourceOpenStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
 ]
 
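
DataSourceOpenStackLocal reuses the EC2 approach: hold an ephemeral DHCP lease only while the metadata service is crawled. A condensed sketch of that control flow, assuming cloud-init is importable; crawl() is a hypothetical stand-in for self._crawl_metadata:

    from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError

    def local_get_data(crawl, perform_dhcp_setup=True):
        if perform_dhcp_setup:  # init-local: no networking yet
            try:
                with EphemeralDHCPv4():
                    return crawl()
            except NoDHCPLeaseError:
                return None  # no lease, no metadata
        return crawl()  # network stage: networking is already up
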
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 86bfa5d..f92e8b5 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -1,4 +1,5 @@
1# Copyright (C) 2013 Canonical Ltd.1# Copyright (C) 2013 Canonical Ltd.
2# Copyright (c) 2018, Joyent, Inc.
2#3#
3# Author: Ben Howard <ben.howard@canonical.com>4# Author: Ben Howard <ben.howard@canonical.com>
4#5#
@@ -10,17 +11,19 @@
10# SmartOS hosts use a serial console (/dev/ttyS1) on KVM Linux Guests11# SmartOS hosts use a serial console (/dev/ttyS1) on KVM Linux Guests
11# The meta-data is transmitted via key/value pairs made by12# The meta-data is transmitted via key/value pairs made by
12# requests on the console. For example, to get the hostname, you13# requests on the console. For example, to get the hostname, you
13# would send "GET hostname" on /dev/ttyS1.14# would send "GET sdc:hostname" on /dev/ttyS1.
14# For Linux Guests running in LX-Brand Zones on SmartOS hosts15# For Linux Guests running in LX-Brand Zones on SmartOS hosts
15# a socket (/native/.zonecontrol/metadata.sock) is used instead16# a socket (/native/.zonecontrol/metadata.sock) is used instead
16# of a serial console.17# of a serial console.
17#18#
18# Certain behavior is defined by the DataDictionary19# Certain behavior is defined by the DataDictionary
19# http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html20# https://eng.joyent.com/mdata/datadict.html
20# Comments with "@datadictionary" are snippets of the definition21# Comments with "@datadictionary" are snippets of the definition
2122
22import base6423import base64
23import binascii24import binascii
25import errno
26import fcntl
24import json27import json
25import os28import os
26import random29import random
@@ -108,7 +111,7 @@ BUILTIN_CLOUD_CONFIG = {
108 'overwrite': False}111 'overwrite': False}
109 },112 },
110 'fs_setup': [{'label': 'ephemeral0',113 'fs_setup': [{'label': 'ephemeral0',
111 'filesystem': 'ext3',114 'filesystem': 'ext4',
112 'device': 'ephemeral0'}],115 'device': 'ephemeral0'}],
113}116}
114117
@@ -162,9 +165,8 @@ class DataSourceSmartOS(sources.DataSource):
162165
163 dsname = "Joyent"166 dsname = "Joyent"
164167
165 _unset = "_unset"168 smartos_type = sources.UNSET
166 smartos_type = _unset169 md_client = sources.UNSET
167 md_client = _unset
168170
169 def __init__(self, sys_cfg, distro, paths):171 def __init__(self, sys_cfg, distro, paths):
170 sources.DataSource.__init__(self, sys_cfg, distro, paths)172 sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -186,12 +188,12 @@ class DataSourceSmartOS(sources.DataSource):
186 return "%s [client=%s]" % (root, self.md_client)188 return "%s [client=%s]" % (root, self.md_client)
187189
188 def _init(self):190 def _init(self):
189 if self.smartos_type == self._unset:191 if self.smartos_type == sources.UNSET:
190 self.smartos_type = get_smartos_environ()192 self.smartos_type = get_smartos_environ()
191 if self.smartos_type is None:193 if self.smartos_type is None:
192 self.md_client = None194 self.md_client = None
193195
194 if self.md_client == self._unset:196 if self.md_client == sources.UNSET:
195 self.md_client = jmc_client_factory(197 self.md_client = jmc_client_factory(
196 smartos_type=self.smartos_type,198 smartos_type=self.smartos_type,
197 metadata_sockfile=self.ds_cfg['metadata_sockfile'],199 metadata_sockfile=self.ds_cfg['metadata_sockfile'],
@@ -229,6 +231,9 @@ class DataSourceSmartOS(sources.DataSource):
229 self.md_client)231 self.md_client)
230 return False232 return False
231233
234 # Open once for many requests, rather than once for each request
235 self.md_client.open_transport()
236
232 for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items():237 for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items():
233 smartos_noun, strip = attribute238 smartos_noun, strip = attribute
234 md[ci_noun] = self.md_client.get(smartos_noun, strip=strip)239 md[ci_noun] = self.md_client.get(smartos_noun, strip=strip)
@@ -236,6 +241,8 @@ class DataSourceSmartOS(sources.DataSource):
236 for ci_noun, smartos_noun in SMARTOS_ATTRIB_JSON.items():241 for ci_noun, smartos_noun in SMARTOS_ATTRIB_JSON.items():
237 md[ci_noun] = self.md_client.get_json(smartos_noun)242 md[ci_noun] = self.md_client.get_json(smartos_noun)
238243
244 self.md_client.close_transport()
245
239 # @datadictionary: This key may contain a program that is written246 # @datadictionary: This key may contain a program that is written
240 # to a file in the filesystem of the guest on each boot and then247 # to a file in the filesystem of the guest on each boot and then
241 # executed. It may be of any format that would be considered248 # executed. It may be of any format that would be considered
@@ -266,8 +273,14 @@ class DataSourceSmartOS(sources.DataSource):
266 write_boot_content(u_data, u_data_f)273 write_boot_content(u_data, u_data_f)
267274
268 # Handle the cloud-init regular meta275 # Handle the cloud-init regular meta
276
277 # The hostname may or may not be qualified with the local domain name.
278 # This follows section 3.14 of RFC 2132.
269 if not md['local-hostname']:279 if not md['local-hostname']:
270 md['local-hostname'] = md['instance-id']280 if md['hostname']:
281 md['local-hostname'] = md['hostname']
282 else:
283 md['local-hostname'] = md['instance-id']
271284
272 ud = None285 ud = None
273 if md['user-data']:286 if md['user-data']:
@@ -285,6 +298,7 @@ class DataSourceSmartOS(sources.DataSource):
285 self.userdata_raw = ud298 self.userdata_raw = ud
286 self.vendordata_raw = md['vendor-data']299 self.vendordata_raw = md['vendor-data']
287 self.network_data = md['network-data']300 self.network_data = md['network-data']
301 self.routes_data = md['routes']
288302
289 self._set_provisioned()303 self._set_provisioned()
290 return True304 return True
@@ -308,7 +322,8 @@ class DataSourceSmartOS(sources.DataSource):
308 convert_smartos_network_data(322 convert_smartos_network_data(
309 network_data=self.network_data,323 network_data=self.network_data,
310 dns_servers=self.metadata['dns_servers'],324 dns_servers=self.metadata['dns_servers'],
311 dns_domain=self.metadata['dns_domain']))325 dns_domain=self.metadata['dns_domain'],
326 routes=self.routes_data))
312 return self._network_config327 return self._network_config
313328
314329
@@ -316,6 +331,10 @@ class JoyentMetadataFetchException(Exception):
316 pass331 pass
317332
318333
334class JoyentMetadataTimeoutException(JoyentMetadataFetchException):
335 pass
336
337
319class JoyentMetadataClient(object):338class JoyentMetadataClient(object):
320 """339 """
321 A client implementing v2 of the Joyent Metadata Protocol Specification.340 A client implementing v2 of the Joyent Metadata Protocol Specification.
@@ -360,6 +379,47 @@ class JoyentMetadataClient(object):
360 LOG.debug('Value "%s" found.', value)379 LOG.debug('Value "%s" found.', value)
361 return value380 return value
362381
382 def _readline(self):
383 """
384 Reads a line a byte at a time until \n is encountered. Returns an
385 ascii string with the trailing newline removed.
386
387 If a timeout (per-byte) is set and it expires, a
388 JoyentMetadataFetchException will be thrown.
389 """
390 response = []
391
392 def as_ascii():
393 return b''.join(response).decode('ascii')
394
395 msg = "Partial response: '%s'"
396 while True:
397 try:
398 byte = self.fp.read(1)
399 if len(byte) == 0:
400 raise JoyentMetadataTimeoutException(msg % as_ascii())
401 if byte == b'\n':
402 return as_ascii()
403 response.append(byte)
404 except OSError as exc:
405 if exc.errno == errno.EAGAIN:
406 raise JoyentMetadataTimeoutException(msg % as_ascii())
407 raise
408
409 def _write(self, msg):
410 self.fp.write(msg.encode('ascii'))
411 self.fp.flush()
412
413 def _negotiate(self):
414 LOG.debug('Negotiating protocol V2')
415 self._write('NEGOTIATE V2\n')
416 response = self._readline()
417 LOG.debug('read "%s"', response)
418 if response != 'V2_OK':
419 raise JoyentMetadataFetchException(
420 'Invalid response "%s" to "NEGOTIATE V2"' % response)
421 LOG.debug('Negotiation complete')
422
363 def request(self, rtype, param=None):423 def request(self, rtype, param=None):
364 request_id = '{0:08x}'.format(random.randint(0, 0xffffffff))424 request_id = '{0:08x}'.format(random.randint(0, 0xffffffff))
365 message_body = ' '.join((request_id, rtype,))425 message_body = ' '.join((request_id, rtype,))
@@ -374,18 +434,11 @@ class JoyentMetadataClient(object):
374 self.open_transport()434 self.open_transport()
375 need_close = True435 need_close = True
376436
377 self.fp.write(msg.encode('ascii'))437 self._write(msg)
378 self.fp.flush()438 response = self._readline()
379
380 response = bytearray()
381 response.extend(self.fp.read(1))
382 while response[-1:] != b'\n':
383 response.extend(self.fp.read(1))
384
385 if need_close:439 if need_close:
386 self.close_transport()440 self.close_transport()
387441
388 response = response.rstrip().decode('ascii')
389 LOG.debug('Read "%s" from metadata transport.', response)442 LOG.debug('Read "%s" from metadata transport.', response)
390443
391 if 'SUCCESS' not in response:444 if 'SUCCESS' not in response:
@@ -410,9 +463,9 @@ class JoyentMetadataClient(object):
410463
411 def list(self):464 def list(self):
412 result = self.request(rtype='KEYS')465 result = self.request(rtype='KEYS')
413 if result:466 if not result:
414 result = result.split('\n')467 return []
415 return result468 return result.split('\n')
416469
417 def put(self, key, val):470 def put(self, key, val):
418 param = b' '.join([base64.b64encode(i.encode())471 param = b' '.join([base64.b64encode(i.encode())
@@ -450,6 +503,7 @@ class JoyentMetadataSocketClient(JoyentMetadataClient):
450 sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)503 sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
451 sock.connect(self.socketpath)504 sock.connect(self.socketpath)
452 self.fp = sock.makefile('rwb')505 self.fp = sock.makefile('rwb')
506 self._negotiate()
453507
454 def exists(self):508 def exists(self):
455 return os.path.exists(self.socketpath)509 return os.path.exists(self.socketpath)
@@ -459,8 +513,9 @@ class JoyentMetadataSocketClient(JoyentMetadataClient):
459513
460514
461class JoyentMetadataSerialClient(JoyentMetadataClient):515class JoyentMetadataSerialClient(JoyentMetadataClient):
462 def __init__(self, device, timeout=10, smartos_type=SMARTOS_ENV_KVM):516 def __init__(self, device, timeout=10, smartos_type=SMARTOS_ENV_KVM,
463 super(JoyentMetadataSerialClient, self).__init__(smartos_type)517 fp=None):
518 super(JoyentMetadataSerialClient, self).__init__(smartos_type, fp)
464 self.device = device519 self.device = device
465 self.timeout = timeout520 self.timeout = timeout
466521
@@ -468,10 +523,51 @@ class JoyentMetadataSerialClient(JoyentMetadataClient):
468 return os.path.exists(self.device)523 return os.path.exists(self.device)
469524
470 def open_transport(self):525 def open_transport(self):
471 ser = serial.Serial(self.device, timeout=self.timeout)526 if self.fp is None:
472 if not ser.isOpen():527 ser = serial.Serial(self.device, timeout=self.timeout)
473 raise SystemError("Unable to open %s" % self.device)528 if not ser.isOpen():
474 self.fp = ser529 raise SystemError("Unable to open %s" % self.device)
530 self.fp = ser
531 fcntl.lockf(ser, fcntl.LOCK_EX)
532 self._flush()
533 self._negotiate()
534
535 def _flush(self):
536 LOG.debug('Flushing input')
537 # Read any pending data
538 timeout = self.fp.timeout
539 self.fp.timeout = 0.1
540 while True:
541 try:
542 self._readline()
543 except JoyentMetadataTimeoutException:
544 break
545 LOG.debug('Input empty')
546
547 # Send a newline and expect "invalid command". Keep trying until
548 # successful. Retry rather frequently so that the "Is the host
549 # metadata service running" appears on the console soon after someone
550 # attaches in an effort to debug.
551 if timeout > 5:
552 self.fp.timeout = 5
553 else:
554 self.fp.timeout = timeout
555 while True:
556 LOG.debug('Writing newline, expecting "invalid command"')
557 self._write('\n')
558 try:
559 response = self._readline()
560 if response == 'invalid command':
561 break
562 if response == 'FAILURE':
563 LOG.debug('Got "FAILURE". Retrying.')
564 continue
565 LOG.warning('Unexpected response "%s" during flush', response)
566 except JoyentMetadataTimeoutException:
567 LOG.warning('Timeout while initializing metadata client. ' +
568 'Is the host metadata service running?')
569 LOG.debug('Got "invalid command". Flush complete.')
570 self.fp.timeout = timeout
475571
476 def __repr__(self):572 def __repr__(self):
477 return "%s(device=%s, timeout=%s)" % (573 return "%s(device=%s, timeout=%s)" % (
@@ -650,7 +746,7 @@ def get_smartos_environ(uname_version=None, product_name=None):
650 # report 'BrandZ virtual linux' as the kernel version746 # report 'BrandZ virtual linux' as the kernel version
651 if uname_version is None:747 if uname_version is None:
652 uname_version = uname[3]748 uname_version = uname[3]
653 if uname_version.lower() == 'brandz virtual linux':749 if uname_version == 'BrandZ virtual linux':
654 return SMARTOS_ENV_LX_BRAND750 return SMARTOS_ENV_LX_BRAND
655751
656 if product_name is None:752 if product_name is None:
@@ -658,7 +754,7 @@ def get_smartos_environ(uname_version=None, product_name=None):
658 else:754 else:
659 system_type = product_name755 system_type = product_name
660756
661 if system_type and 'smartdc' in system_type.lower():757 if system_type and system_type.startswith('SmartDC'):
662 return SMARTOS_ENV_KVM758 return SMARTOS_ENV_KVM
663759
664 return None760 return None
@@ -666,7 +762,8 @@ def get_smartos_environ(uname_version=None, product_name=None):
666762
667# Convert SMARTOS 'sdc:nics' data to network_config yaml763# Convert SMARTOS 'sdc:nics' data to network_config yaml
668def convert_smartos_network_data(network_data=None,764def convert_smartos_network_data(network_data=None,
669 dns_servers=None, dns_domain=None):765 dns_servers=None, dns_domain=None,
766 routes=None):
670 """Return a dictionary of network_config by parsing provided767 """Return a dictionary of network_config by parsing provided
671 SMARTOS sdc:nics configuration data768 SMARTOS sdc:nics configuration data
672769
@@ -684,6 +781,10 @@ def convert_smartos_network_data(network_data=None,
684 keys are related to ip configuration. For each ip in the 'ips' list781 keys are related to ip configuration. For each ip in the 'ips' list
685 we create a subnet entry under 'subnets' pairing the ip to a one in782 we create a subnet entry under 'subnets' pairing the ip to a one in
686 the 'gateways' list.783 the 'gateways' list.
784
785 Each route in sdc:routes is mapped to a route on each interface.
786 The sdc:routes properties 'dst' and 'gateway' map to 'network' and
787 'gateway'. The 'linklocal' sdc:routes property is ignored.
687 """788 """
688789
689 valid_keys = {790 valid_keys = {
@@ -706,6 +807,10 @@ def convert_smartos_network_data(network_data=None,
706 'scope',807 'scope',
707 'type',808 'type',
708 ],809 ],
810 'route': [
811 'network',
812 'gateway',
813 ],
709 }814 }
710815
711 if dns_servers:816 if dns_servers:
@@ -720,6 +825,9 @@ def convert_smartos_network_data(network_data=None,
720 else:825 else:
721 dns_domain = []826 dns_domain = []
722827
828 if not routes:
829 routes = []
830
723 def is_valid_ipv4(addr):831 def is_valid_ipv4(addr):
724 return '.' in addr832 return '.' in addr
725833
@@ -746,6 +854,7 @@ def convert_smartos_network_data(network_data=None,
746 if ip == "dhcp":854 if ip == "dhcp":
747 subnet = {'type': 'dhcp4'}855 subnet = {'type': 'dhcp4'}
748 else:856 else:
857 routeents = []
749 subnet = dict((k, v) for k, v in nic.items()858 subnet = dict((k, v) for k, v in nic.items()
750 if k in valid_keys['subnet'])859 if k in valid_keys['subnet'])
751 subnet.update({860 subnet.update({
@@ -767,6 +876,25 @@ def convert_smartos_network_data(network_data=None,
                     pgws[proto]['gw'] = gateways[0]
                     subnet.update({'gateway': pgws[proto]['gw']})
 
+                for route in routes:
+                    rcfg = dict((k, v) for k, v in route.items()
+                                if k in valid_keys['route'])
+                    # Linux uses the value of 'gateway' to determine
+                    # automatically if the route is a forward/next-hop
+                    # (non-local IP for gateway) or an interface/resolver
+                    # (local IP for gateway). So we can ignore the
+                    # 'interface' attribute of sdc:routes, because SDC
+                    # guarantees that the gateway is a local IP for
+                    # "interface=true".
+                    #
+                    # Eventually we should be smart and compare "gateway"
+                    # to see if it's in the prefix. We can then smartly
+                    # add or not-add this route. But for now,
+                    # when in doubt, use brute force! Routes for everyone!
+                    rcfg.update({'network': route['dst']})
+                    routeents.append(rcfg)
+                subnet.update({'routes': routeents})
+
             subnets.append(subnet)
         cfg.update({'subnets': subnets})
     config.append(cfg)
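
A minimal sketch of the sdc:routes mapping described in the docstring and comment above; the nic and route values here are hypothetical, and the expected output assumes the conversion logic added in this hunk:

    from cloudinit.sources.DataSourceSmartOS import convert_smartos_network_data

    nics = [{
        'interface': 'net0',
        'mac': '02:00:00:12:34:56',
        'ips': ['192.168.1.10/24'],
        'gateways': ['192.168.1.1'],
        'primary': True,
    }]
    routes = [
        # 'dst' and 'gateway' carry over as 'network' and 'gateway';
        # 'linklocal' is dropped by the valid_keys['route'] filter.
        {'dst': '10.0.0.0/8', 'gateway': '192.168.1.254', 'linklocal': False},
    ]
    print(convert_smartos_network_data(network_data=nics, routes=routes))
    # Expected shape (values illustrative):
    # [{'type': 'physical', 'name': 'net0', 'mac_address': '02:00:00:12:34:56',
    #   'subnets': [{'type': 'static', 'address': '192.168.1.10/24',
    #                'gateway': '192.168.1.1',
    #                'routes': [{'network': '10.0.0.0/8',
    #                            'gateway': '192.168.1.254'}]}]}]
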
@@ -810,12 +938,14 @@ if __name__ == "__main__":
             keyname = SMARTOS_ATTRIB_JSON[key]
             data[key] = client.get_json(keyname)
         elif key == "network_config":
-            for depkey in ('network-data', 'dns_servers', 'dns_domain'):
+            for depkey in ('network-data', 'dns_servers', 'dns_domain',
+                           'routes'):
                 load_key(client, depkey, data)
             data[key] = convert_smartos_network_data(
                 network_data=data['network-data'],
                 dns_servers=data['dns_servers'],
-                dns_domain=data['dns_domain'])
+                dns_domain=data['dns_domain'],
+                routes=data['routes'])
         else:
             if key in SMARTOS_ATTRIB_MAP:
                 keyname, strip = SMARTOS_ATTRIB_MAP[key]
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index df0b374..90d7457 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -9,6 +9,7 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 
 import abc
+from collections import namedtuple
 import copy
 import json
 import os
@@ -17,6 +18,7 @@ import six
 from cloudinit.atomic_helper import write_json
 from cloudinit import importer
 from cloudinit import log as logging
+from cloudinit import net
 from cloudinit import type_utils
 from cloudinit import user_data as ud
 from cloudinit import util
@@ -41,6 +43,8 @@ INSTANCE_JSON_FILE = 'instance-data.json'
 # Key which can provide a cloud's official product name to cloud-init
 METADATA_CLOUD_NAME_KEY = 'cloud-name'
 
+UNSET = "_unset"
+
 LOG = logging.getLogger(__name__)
 
 
@@ -48,6 +52,11 @@ class DataSourceNotFoundException(Exception):
     pass
 
 
+class InvalidMetaDataException(Exception):
+    """Raised when metadata is broken, unavailable or disabled."""
+    pass
+
+
 def process_base64_metadata(metadata, key_path=''):
     """Strip ci-b64 prefix and return metadata with base64-encoded-keys set."""
     md_copy = copy.deepcopy(metadata)
@@ -68,6 +77,10 @@ def process_base64_metadata(metadata, key_path=''):
     return md_copy
 
 
+URLParams = namedtuple(
+    'URLParams', ['max_wait_seconds', 'timeout_seconds', 'num_retries'])
+
+
 @six.add_metaclass(abc.ABCMeta)
 class DataSource(object):
 
@@ -81,6 +94,14 @@ class DataSource(object):
     # Cached cloud_name as determined by _get_cloud_name
     _cloud_name = None
 
+    # Track the discovered fallback nic for use in configuration generation.
+    _fallback_interface = None
+
+    # read_url_params
+    url_max_wait = -1   # max_wait < 0 means do not wait
+    url_timeout = 10    # timeout for each metadata url read attempt
+    url_retries = 5     # number of times to retry url upon 404
+
     def __init__(self, sys_cfg, distro, paths, ud_proc=None):
         self.sys_cfg = sys_cfg
         self.distro = distro
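
A short sketch of how a datasource subclass is expected to consume these new class attributes; DataSourceExample and its values are hypothetical, while url_max_wait/url_timeout/url_retries and get_url_params come from this branch:

    from cloudinit.sources import DataSource


    class DataSourceExample(DataSource):
        # Hypothetical datasource: wait up to two minutes for the metadata
        # service instead of not waiting at all (the base default of -1).
        url_max_wait = 120

        def _get_data(self):
            url_params = self.get_url_params()
            # A real datasource would feed these into its url-fetching
            # helper, e.g. max_wait=url_params.max_wait_seconds,
            # timeout=url_params.timeout_seconds,
            # retries=url_params.num_retries.
            return False

Per-datasource config ('max_wait', 'timeout', 'retries' in ds_cfg) still overrides these class defaults, as the get_url_params implementation below shows.
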
@@ -128,6 +149,14 @@ class DataSource(object):
             'meta-data': self.metadata,
             'user-data': self.get_userdata_raw(),
             'vendor-data': self.get_vendordata_raw()}}
+        if hasattr(self, 'network_json'):
+            network_json = getattr(self, 'network_json')
+            if network_json != UNSET:
+                instance_data['ds']['network_json'] = network_json
+        if hasattr(self, 'ec2_metadata'):
+            ec2_metadata = getattr(self, 'ec2_metadata')
+            if ec2_metadata != UNSET:
+                instance_data['ds']['ec2_metadata'] = ec2_metadata
         instance_data.update(
             self._get_standardized_metadata())
         try:
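
The effect on the 'ds' section of the instance-data JSON (INSTANCE_JSON_FILE above) is roughly the following sketch; all values are hypothetical, and the two new keys appear only when the datasource attribute exists and is not the UNSET sentinel:

    # Resulting 'ds' section of instance-data.json (hypothetical values):
    {'ds': {'meta-data': {'instance-id': 'i-abc123'},
            'user-data': '#cloud-config\n',
            'vendor-data': None,
            'network_json': {'links': [], 'networks': [], 'services': []},
            'ec2_metadata': {'instance-id': 'i-abc123'}}}
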
@@ -149,6 +178,42 @@ class DataSource(object):
             'Subclasses of DataSource must implement _get_data which'
             ' sets self.metadata, vendordata_raw and userdata_raw.')
 
+    def get_url_params(self):
+        """Return the Datasource's preferred url_read parameters.
+
+        Subclasses may override url_max_wait, url_timeout, url_retries.
+
+        @return: A URLParams object with max_wait_seconds, timeout_seconds,
+            num_retries.
+        """
+        max_wait = self.url_max_wait
+        try:
+            max_wait = int(self.ds_cfg.get("max_wait", self.url_max_wait))
+        except ValueError:
+            util.logexc(
+                LOG, "Config max_wait '%s' is not an int, using default '%s'",
+                self.ds_cfg.get("max_wait"), max_wait)
+
+        timeout = self.url_timeout
+        try:
+            timeout = max(
+                0, int(self.ds_cfg.get("timeout", self.url_timeout)))
+        except ValueError:
+            timeout = self.url_timeout
+            util.logexc(
+                LOG, "Config timeout '%s' is not an int, using default '%s'",
+                self.ds_cfg.get('timeout'), timeout)
+
+        retries = self.url_retries
+        try:
+            retries = int(self.ds_cfg.get("retries", self.url_retries))
+        except Exception:
+            util.logexc(
+                LOG, "Config retries '%s' is not an int, using default '%s'",
+                self.ds_cfg.get('retries'), retries)
+
+        return URLParams(max_wait, timeout, retries)
+
     def get_userdata(self, apply_filter=False):
         if self.userdata is None:
             self.userdata = self.ud_proc.process(self.get_userdata_raw())
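
A standalone restatement of the parsing rules above, for illustration only (logging dropped; the function name and sample values are hypothetical): bad overrides fall back to the class defaults, and 'timeout' is clamped to be non-negative.

    from collections import namedtuple

    URLParams = namedtuple(
        'URLParams', ['max_wait_seconds', 'timeout_seconds', 'num_retries'])


    def read_url_params(ds_cfg, max_wait=-1, timeout=10, retries=5):
        # Mirrors DataSource.get_url_params: each ds_cfg override is parsed
        # as an int, falling back to the given default when unparseable.
        try:
            max_wait = int(ds_cfg.get('max_wait', max_wait))
        except ValueError:
            pass  # the real method logs the bad value via util.logexc
        try:
            timeout = max(0, int(ds_cfg.get('timeout', timeout)))
        except ValueError:
            pass
        try:
            retries = int(ds_cfg.get('retries', retries))
        except (TypeError, ValueError):
            pass
        return URLParams(max_wait, timeout, retries)


    print(read_url_params({'max_wait': '120', 'timeout': '-3', 'retries': 'ten'}))
    # URLParams(max_wait_seconds=120, timeout_seconds=0, num_retries=5)
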
@@ -162,6 +227,17 @@ class DataSource(object):
The diff has been truncated for viewing.
