Merge ~chad.smith/cloud-init:ubuntu/xenial into cloud-init:ubuntu/xenial

Proposed by Chad Smith
Status: Merged
Merged at revision: 5aece632e5a8439efb324ea2ade9f0e52f8cfd1c
Proposed branch: ~chad.smith/cloud-init:ubuntu/xenial
Merge into: cloud-init:ubuntu/xenial
Diff against target: 15407 lines (+7273/-1955)
202 files modified
.pylintrc (+1/-1)
ChangeLog (+226/-0)
MANIFEST.in (+1/-0)
bash_completion/cloud-init (+77/-0)
cloudinit/analyze/__main__.py (+1/-1)
cloudinit/analyze/dump.py (+1/-1)
cloudinit/apport.py (+23/-4)
cloudinit/cmd/devel/logs.py (+48/-11)
cloudinit/cmd/devel/tests/test_logs.py (+18/-3)
cloudinit/cmd/main.py (+1/-1)
cloudinit/cmd/tests/test_main.py (+3/-3)
cloudinit/config/cc_apt_configure.py (+2/-2)
cloudinit/config/cc_bootcmd.py (+0/-1)
cloudinit/config/cc_disable_ec2_metadata.py (+12/-2)
cloudinit/config/cc_disk_setup.py (+4/-8)
cloudinit/config/cc_emit_upstart.py (+1/-1)
cloudinit/config/cc_lxd.py (+56/-8)
cloudinit/config/cc_mounts.py (+45/-30)
cloudinit/config/cc_ntp.py (+407/-78)
cloudinit/config/cc_phone_home.py (+4/-3)
cloudinit/config/cc_power_state_change.py (+1/-1)
cloudinit/config/cc_resizefs.py (+4/-6)
cloudinit/config/cc_rh_subscription.py (+8/-10)
cloudinit/config/cc_rsyslog.py (+2/-2)
cloudinit/config/cc_runcmd.py (+0/-1)
cloudinit/config/cc_set_passwords.py (+45/-60)
cloudinit/config/cc_snap.py (+2/-3)
cloudinit/config/cc_snappy.py (+2/-2)
cloudinit/config/cc_ubuntu_advantage.py (+2/-3)
cloudinit/config/cc_users_groups.py (+6/-2)
cloudinit/config/schema.py (+48/-20)
cloudinit/config/tests/test_disable_ec2_metadata.py (+50/-0)
cloudinit/config/tests/test_set_passwords.py (+71/-0)
cloudinit/config/tests/test_snap.py (+27/-2)
cloudinit/config/tests/test_ubuntu_advantage.py (+28/-2)
cloudinit/distros/__init__.py (+13/-1)
cloudinit/distros/freebsd.py (+5/-5)
cloudinit/distros/opensuse.py (+24/-0)
cloudinit/distros/ubuntu.py (+19/-0)
cloudinit/ec2_utils.py (+6/-8)
cloudinit/handlers/upstart_job.py (+1/-1)
cloudinit/net/__init__.py (+33/-3)
cloudinit/net/cmdline.py (+1/-1)
cloudinit/net/dhcp.py (+1/-1)
cloudinit/net/eni.py (+17/-3)
cloudinit/net/netplan.py (+14/-8)
cloudinit/net/network_state.py (+5/-6)
cloudinit/net/sysconfig.py (+8/-2)
cloudinit/net/tests/test_init.py (+1/-0)
cloudinit/netinfo.py (+300/-79)
cloudinit/reporting/events.py (+1/-1)
cloudinit/sources/DataSourceAliYun.py (+1/-1)
cloudinit/sources/DataSourceAltCloud.py (+9/-12)
cloudinit/sources/DataSourceAzure.py (+75/-42)
cloudinit/sources/DataSourceCloudStack.py (+10/-21)
cloudinit/sources/DataSourceConfigDrive.py (+10/-5)
cloudinit/sources/DataSourceEc2.py (+15/-33)
cloudinit/sources/DataSourceIBMCloud.py (+92/-14)
cloudinit/sources/DataSourceMAAS.py (+2/-2)
cloudinit/sources/DataSourceNoCloud.py (+2/-2)
cloudinit/sources/DataSourceOVF.py (+1/-1)
cloudinit/sources/DataSourceOpenNebula.py (+1/-1)
cloudinit/sources/DataSourceOpenStack.py (+127/-55)
cloudinit/sources/DataSourceSmartOS.py (+163/-33)
cloudinit/sources/__init__.py (+76/-0)
cloudinit/sources/helpers/azure.py (+3/-2)
cloudinit/sources/helpers/digitalocean.py (+3/-4)
cloudinit/sources/helpers/openstack.py (+1/-1)
cloudinit/sources/helpers/vmware/imc/config_nic.py (+1/-1)
cloudinit/sources/helpers/vmware/imc/config_passwd.py (+2/-2)
cloudinit/sources/helpers/vmware/imc/guestcust_util.py (+2/-2)
cloudinit/sources/tests/test_init.py (+88/-3)
cloudinit/ssh_util.py (+63/-7)
cloudinit/stages.py (+17/-9)
cloudinit/templater.py (+10/-2)
cloudinit/tests/helpers.py (+56/-30)
cloudinit/tests/test_netinfo.py (+147/-86)
cloudinit/tests/test_url_helper.py (+27/-1)
cloudinit/tests/test_util.py (+127/-2)
cloudinit/tests/test_version.py (+17/-0)
cloudinit/url_helper.py (+29/-2)
cloudinit/user_data.py (+16/-12)
cloudinit/util.py (+171/-68)
cloudinit/version.py (+5/-1)
config/cloud.cfg.tmpl (+2/-0)
debian/changelog (+94/-3)
debian/patches/azure-use-walinux-agent.patch (+1/-1)
debian/patches/ds-identify-behavior-xenial.patch (+2/-2)
debian/patches/openstack-no-network-config.patch (+2/-4)
doc/examples/cloud-config-disk-setup.txt (+2/-2)
doc/examples/cloud-config-user-groups.txt (+20/-7)
doc/rtd/topics/datasources.rst (+98/-0)
doc/rtd/topics/datasources/aliyun.rst (+74/-0)
doc/rtd/topics/datasources/cloudstack.rst (+20/-6)
doc/rtd/topics/datasources/ec2.rst (+30/-0)
doc/rtd/topics/datasources/openstack.rst (+21/-2)
doc/rtd/topics/network-config-format-v1.rst (+27/-0)
doc/rtd/topics/network-config-format-v2.rst (+6/-0)
doc/rtd/topics/tests.rst (+6/-1)
integration-requirements.txt (+1/-1)
packages/bddeb (+36/-4)
packages/brpm (+3/-3)
packages/debian/changelog.in (+1/-1)
packages/debian/control.in (+1/-0)
packages/debian/rules.in (+2/-0)
packages/redhat/cloud-init.spec.in (+8/-0)
packages/suse/cloud-init.spec.in (+29/-42)
setup.py (+15/-3)
systemd/cloud-config.service.tmpl (+1/-0)
templates/chrony.conf.debian.tmpl (+39/-0)
templates/chrony.conf.fedora.tmpl (+48/-0)
templates/chrony.conf.opensuse.tmpl (+38/-0)
templates/chrony.conf.rhel.tmpl (+45/-0)
templates/chrony.conf.sles.tmpl (+38/-0)
templates/chrony.conf.ubuntu.tmpl (+42/-0)
tests/cloud_tests/args.py (+3/-0)
tests/cloud_tests/bddeb.py (+1/-1)
tests/cloud_tests/collect.py (+5/-3)
tests/cloud_tests/platforms/instances.py (+30/-11)
tests/cloud_tests/platforms/lxd/instance.py (+5/-7)
tests/cloud_tests/releases.yaml (+16/-0)
tests/cloud_tests/setup_image.py (+5/-6)
tests/cloud_tests/stage.py (+12/-3)
tests/cloud_tests/testcases.yaml (+2/-2)
tests/cloud_tests/testcases/base.py (+28/-6)
tests/cloud_tests/testcases/examples/including_user_groups.py (+1/-1)
tests/cloud_tests/testcases/modules/byobu.py (+1/-2)
tests/cloud_tests/testcases/modules/byobu.yaml (+0/-3)
tests/cloud_tests/testcases/modules/ca_certs.py (+17/-4)
tests/cloud_tests/testcases/modules/ca_certs.yaml (+6/-2)
tests/cloud_tests/testcases/modules/ntp.py (+2/-3)
tests/cloud_tests/testcases/modules/ntp.yaml (+1/-0)
tests/cloud_tests/testcases/modules/ntp_chrony.py (+26/-0)
tests/cloud_tests/testcases/modules/ntp_chrony.yaml (+17/-0)
tests/cloud_tests/testcases/modules/ntp_pools.yaml (+1/-0)
tests/cloud_tests/testcases/modules/ntp_servers.yaml (+1/-0)
tests/cloud_tests/testcases/modules/ntp_timesyncd.py (+15/-0)
tests/cloud_tests/testcases/modules/ntp_timesyncd.yaml (+15/-0)
tests/cloud_tests/testcases/modules/package_update_upgrade_install.py (+6/-8)
tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml (+3/-6)
tests/cloud_tests/testcases/modules/salt_minion.py (+1/-2)
tests/cloud_tests/testcases/modules/salt_minion.yaml (+12/-5)
tests/cloud_tests/testcases/modules/user_groups.py (+1/-1)
tests/cloud_tests/util.py (+1/-1)
tests/cloud_tests/verify.py (+46/-1)
tests/data/netinfo/netdev-formatted-output (+10/-0)
tests/data/netinfo/netdev-formatted-output-down (+8/-0)
tests/data/netinfo/new-ifconfig-output (+18/-0)
tests/data/netinfo/new-ifconfig-output-down (+15/-0)
tests/data/netinfo/old-ifconfig-output (+18/-0)
tests/data/netinfo/route-formatted-output (+22/-0)
tests/data/netinfo/sample-ipaddrshow-output (+13/-0)
tests/data/netinfo/sample-ipaddrshow-output-down (+8/-0)
tests/data/netinfo/sample-iproute-output-v4 (+3/-0)
tests/data/netinfo/sample-iproute-output-v6 (+11/-0)
tests/data/netinfo/sample-route-output-v4 (+5/-0)
tests/data/netinfo/sample-route-output-v6 (+13/-0)
tests/unittests/test__init__.py (+5/-5)
tests/unittests/test_data.py (+21/-3)
tests/unittests/test_datasource/test_aliyun.py (+0/-2)
tests/unittests/test_datasource/test_azure.py (+209/-70)
tests/unittests/test_datasource/test_azure_helper.py (+1/-1)
tests/unittests/test_datasource/test_common.py (+1/-0)
tests/unittests/test_datasource/test_ec2.py (+0/-12)
tests/unittests/test_datasource/test_gce.py (+0/-1)
tests/unittests/test_datasource/test_ibmcloud.py (+50/-0)
tests/unittests/test_datasource/test_maas.py (+2/-2)
tests/unittests/test_datasource/test_nocloud.py (+0/-3)
tests/unittests/test_datasource/test_openstack.py (+215/-20)
tests/unittests/test_datasource/test_scaleway.py (+0/-3)
tests/unittests/test_datasource/test_smartos.py (+245/-5)
tests/unittests/test_distros/test_create_users.py (+8/-0)
tests/unittests/test_distros/test_netconfig.py (+6/-0)
tests/unittests/test_distros/test_user_data_normalize.py (+6/-0)
tests/unittests/test_ds_identify.py (+205/-18)
tests/unittests/test_ec2_util.py (+0/-9)
tests/unittests/test_filters/test_launch_index.py (+5/-5)
tests/unittests/test_handler/test_handler_apt_conf_v1.py (+6/-10)
tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py (+0/-7)
tests/unittests/test_handler/test_handler_apt_source_v1.py (+10/-17)
tests/unittests/test_handler/test_handler_apt_source_v3.py (+11/-18)
tests/unittests/test_handler/test_handler_bootcmd.py (+26/-8)
tests/unittests/test_handler/test_handler_chef.py (+12/-4)
tests/unittests/test_handler/test_handler_lxd.py (+64/-16)
tests/unittests/test_handler/test_handler_mounts.py (+100/-4)
tests/unittests/test_handler/test_handler_ntp.py (+571/-305)
tests/unittests/test_handler/test_handler_resizefs.py (+1/-1)
tests/unittests/test_handler/test_handler_runcmd.py (+26/-7)
tests/unittests/test_handler/test_schema.py (+33/-6)
tests/unittests/test_merging.py (+1/-1)
tests/unittests/test_net.py (+189/-12)
tests/unittests/test_runs/test_merge_run.py (+1/-1)
tests/unittests/test_runs/test_simple_run.py (+30/-2)
tests/unittests/test_sshutil.py (+94/-3)
tests/unittests/test_templating.py (+42/-3)
tests/unittests/test_util.py (+126/-13)
tools/ds-identify (+83/-28)
tools/make-tarball (+12/-3)
tools/read-dependencies (+6/-2)
tools/run-centos (+30/-310)
tools/run-container (+590/-0)
tox.ini (+9/-7)
Reviewer Review Type Date Requested Status
Server Team CI bot continuous-integration Approve
cloud-init Commiters Pending
Review via email: mp+348361@code.launchpad.net

Commit message

cloud-init 18.3 new upstream snapshot for release into Xenial.

To post a comment you must log in.
Revision history for this message
Server Team CI bot (server-team-bot) wrote :

PASSED: Continuous integration, rev:5aece632e5a8439efb324ea2ade9f0e52f8cfd1c
https://jenkins.ubuntu.com/server/job/cloud-init-ci/121/
Executed test runs:
    SUCCESS: Checkout
    SUCCESS: Unit & Style Tests
    SUCCESS: Ubuntu LTS: Build
    SUCCESS: Ubuntu LTS: Integration
    SUCCESS: MAAS Compatibility Testing
    IN_PROGRESS: Declarative: Post Actions

Click here to trigger a rebuild:
https://jenkins.ubuntu.com/server/job/cloud-init-ci/121/rebuild

review: Approve (continuous-integration)
Revision history for this message
Scott Moser (smoser) wrote :

there is no 'New upstream release' comment, nor a bug number for the sru in debian/changelog

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1diff --git a/.pylintrc b/.pylintrc
2index 0bdfa59..3bfa0c8 100644
3--- a/.pylintrc
4+++ b/.pylintrc
5@@ -28,7 +28,7 @@ jobs=4
6 # W0703(broad-except)
7 # W1401(anomalous-backslash-in-string)
8
9-disable=C, F, I, R, W0105, W0107, W0201, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0612, W0613, W0621, W0622, W0631, W0703, W1401
10+disable=C, F, I, R, W0105, W0107, W0201, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0613, W0621, W0622, W0631, W0703, W1401
11
12
13 [REPORTS]
14diff --git a/ChangeLog b/ChangeLog
15index daa7ccf..72c5287 100644
16--- a/ChangeLog
17+++ b/ChangeLog
18@@ -1,3 +1,229 @@
19+18.3:
20+ - docs: represent sudo:false in docs for user_groups config module
21+ - Explicitly prevent `sudo` access for user module
22+ [Jacob Bednarz] (LP: #1771468)
23+ - lxd: Delete default network and detach device if lxd-init created them.
24+ (LP: #1776958)
25+ - openstack: avoid unneeded metadata probe on non-openstack platforms
26+ (LP: #1776701)
27+ - stages: fix tracebacks if a module stage is undefined or empty
28+ [Robert Schweikert] (LP: #1770462)
29+ - Be more safe on string/bytes when writing multipart user-data to disk.
30+ (LP: #1768600)
31+ - Fix get_proc_env for pids that have non-utf8 content in environment.
32+ (LP: #1775371)
33+ - tests: fix salt_minion integration test on bionic and later
34+ - tests: provide human-readable integration test summary when --verbose
35+ - tests: skip chrony integration tests on lxd running artful or older
36+ - test: add optional --preserve-instance arg to integration tests
37+ - netplan: fix mtu if provided by network config for all rendered types
38+ (LP: #1774666)
39+ - tests: remove pip install workarounds for pylxd, take upstream fix.
40+ - subp: support combine_capture argument.
41+ - tests: ordered tox dependencies for pylxd install
42+ - util: add get_linux_distro function to replace platform.dist
43+ [Robert Schweikert] (LP: #1745235)
44+ - pyflakes: fix unused variable references identified by pyflakes 2.0.0.
45+ - - Do not use the systemd_prefix macro, not available in this environment
46+ [Robert Schweikert]
47+ - doc: Add config info to ec2, openstack and cloudstack datasource docs
48+ - Enable SmartOS network metadata to work with netplan via per-subnet
49+ routes [Dan McDonald] (LP: #1763512)
50+ - openstack: Allow discovery in init-local using dhclient in a sandbox.
51+ (LP: #1749717)
52+ - tests: Avoid using https in httpretty, improve HttPretty test case.
53+ (LP: #1771659)
54+ - yaml_load/schema: Add invalid line and column nums to error message
55+ - Azure: Ignore NTFS mount errors when checking ephemeral drive
56+ [Paul Meyer]
57+ - packages/brpm: Get proper dependencies for cmdline distro.
58+ - packages: Make rpm spec files patch in package version like in debs.
59+ - tools/run-container: replace tools/run-centos with more generic.
60+ - Update version.version_string to contain packaged version. (LP: #1770712)
61+ - cc_mounts: Do not add devices to fstab that are already present.
62+ [Lars Kellogg-Stedman]
63+ - ds-identify: ensure that we have certain tokens in PATH. (LP: #1771382)
64+ - tests: enable Ubuntu Cosmic in integration tests [Joshua Powers]
65+ - read_file_or_url: move to url_helper, fix bug in its FileResponse.
66+ - cloud_tests: help pylint [Ryan Harper]
67+ - flake8: fix flake8 errors in previous commit.
68+ - typos: Fix spelling mistakes in cc_mounts.py log messages [Stephen Ford]
69+ - tests: restructure SSH and initial connections [Joshua Powers]
70+ - ds-identify: recognize container-other as a container, test SmartOS.
71+ - cloud-config.service: run After snap.seeded.service. (LP: #1767131)
72+ - tests: do not rely on host /proc/cmdline in test_net.py
73+ [Lars Kellogg-Stedman] (LP: #1769952)
74+ - ds-identify: Remove dupe call to is_ds_enabled, improve debug message.
75+ - SmartOS: fix get_interfaces for nics that do not have addr_assign_type.
76+ - tests: fix package and ca_cert cloud_tests on bionic
77+ (LP: #1769985)
78+ - ds-identify: make shellcheck 0.4.6 happy with ds-identify.
79+ - pycodestyle: Fix deprecated string literals, move away from flake8.
80+ - azure: Add reported ready marker file. [Joshua Chan] (LP: #1765214)
81+ - tools: Support adding a release suffix through packages/bddeb.
82+ - FreeBSD: Invoke growfs on ufs filesystems such that it does not prompt.
83+ [Harm Weites] (LP: #1404745)
84+ - tools: Re-use the orig tarball in packages/bddeb if it is around.
85+ - netinfo: fix netdev_pformat when a nic does not have an address
86+ assigned. (LP: #1766302)
87+ - collect-logs: add -v flag, write to stderr, limit journal to single
88+ boot. (LP: #1766335)
89+ - IBMCloud: Disable config-drive and nocloud only if IBMCloud is enabled.
90+ (LP: #1766401)
91+ - Add reporting events and log_time around early source of blocking time
92+ [Ryan Harper]
93+ - IBMCloud: recognize provisioning environment during debug boots.
94+ (LP: #1767166)
95+ - net: detect unstable network names and trigger a settle if needed
96+ [Ryan Harper] (LP: #1766287)
97+ - IBMCloud: improve documentation in datasource.
98+ - sysconfig: dhcp6 subnet type should not imply dhcpv4 [Vitaly Kuznetsov]
99+ - packages/debian/control.in: add missing dependency on iproute2.
100+ (LP: #1766711)
101+ - DataSourceSmartOS: add locking of serial device.
102+ [Mike Gerdts] (LP: #1746605)
103+ - DataSourceSmartOS: sdc:hostname is ignored [Mike Gerdts] (LP: #1765085)
104+ - DataSourceSmartOS: list() should always return a list
105+ [Mike Gerdts] (LP: #1763480)
106+ - schema: in validation, raise ImportError if strict but no jsonschema.
107+ - set_passwords: Add newline to end of sshd config, only restart if
108+ updated. (LP: #1677205)
109+ - pylint: pay attention to unused variable warnings.
110+ - doc: Add documentation for AliYun datasource. [Junjie Wang]
111+ - Schema: do not warn on duplicate items in commands. (LP: #1764264)
112+ - net: Depend on iproute2's ip instead of net-tools ifconfig or route
113+ - DataSourceSmartOS: fix hang when metadata service is down
114+ [Mike Gerdts] (LP: #1667735)
115+ - DataSourceSmartOS: change default fs on ephemeral disk from ext3 to
116+ ext4. [Mike Gerdts] (LP: #1763511)
117+ - pycodestyle: Fix invalid escape sequences in string literals.
118+ - Implement bash completion script for cloud-init command line
119+ [Ryan Harper]
120+ - tools: Fix make-tarball cli tool usage for development
121+ - renderer: support unicode in render_from_file.
122+ - Implement ntp client spec with auto support for distro selection
123+ [Ryan Harper] (LP: #1749722)
124+ - Apport: add Brightbox, IBM, LXD, and OpenTelekomCloud to list of clouds.
125+ - tests: fix ec2 integration network metadata validation
126+ - tests: fix integration tests to support lxd 3.0 release
127+ - correct documentation to match correct attribute name usage.
128+ [Dominic Schlegel] (LP: #1420018)
129+ - cc_resizefs, util: handle no /dev/zfs [Ryan Harper]
130+ - doc: Fix links in OpenStack datasource documentation.
131+ [Dominic Schlegel] (LP: #1721660)
132+ - docs: represent sudo:false in docs for user_groups config module
133+ - Explicitly prevent `sudo` access for user module
134+ [Jacob Bednarz] (LP: #1771468)
135+ - lxd: Delete default network and detach device if lxd-init created them.
136+ (LP: #1776958)
137+ - openstack: avoid unneeded metadata probe on non-openstack platforms
138+ (LP: #1776701)
139+ - stages: fix tracebacks if a module stage is undefined or empty
140+ [Robert Schweikert] (LP: #1770462)
141+ - Be more safe on string/bytes when writing multipart user-data to disk.
142+ (LP: #1768600)
143+ - Fix get_proc_env for pids that have non-utf8 content in environment.
144+ (LP: #1775371)
145+ - tests: fix salt_minion integration test on bionic and later
146+ - tests: provide human-readable integration test summary when --verbose
147+ - tests: skip chrony integration tests on lxd running artful or older
148+ - test: add optional --preserve-instance arg to integration tests
149+ - netplan: fix mtu if provided by network config for all rendered types
150+ (LP: #1774666)
151+ - tests: remove pip install workarounds for pylxd, take upstream fix.
152+ - subp: support combine_capture argument.
153+ - tests: ordered tox dependencies for pylxd install
154+ - util: add get_linux_distro function to replace platform.dist
155+ [Robert Schweikert] (LP: #1745235)
156+ - pyflakes: fix unused variable references identified by pyflakes 2.0.0.
157+ - - Do not use the systemd_prefix macro, not available in this environment
158+ [Robert Schweikert]
159+ - doc: Add config info to ec2, openstack and cloudstack datasource docs
160+ - Enable SmartOS network metadata to work with netplan via per-subnet
161+ routes [Dan McDonald] (LP: #1763512)
162+ - openstack: Allow discovery in init-local using dhclient in a sandbox.
163+ (LP: #1749717)
164+ - tests: Avoid using https in httpretty, improve HttPretty test case.
165+ (LP: #1771659)
166+ - yaml_load/schema: Add invalid line and column nums to error message
167+ - Azure: Ignore NTFS mount errors when checking ephemeral drive
168+ [Paul Meyer]
169+ - packages/brpm: Get proper dependencies for cmdline distro.
170+ - packages: Make rpm spec files patch in package version like in debs.
171+ - tools/run-container: replace tools/run-centos with more generic.
172+ - Update version.version_string to contain packaged version. (LP: #1770712)
173+ - cc_mounts: Do not add devices to fstab that are already present.
174+ [Lars Kellogg-Stedman]
175+ - ds-identify: ensure that we have certain tokens in PATH. (LP: #1771382)
176+ - tests: enable Ubuntu Cosmic in integration tests [Joshua Powers]
177+ - read_file_or_url: move to url_helper, fix bug in its FileResponse.
178+ - cloud_tests: help pylint [Ryan Harper]
179+ - flake8: fix flake8 errors in previous commit.
180+ - typos: Fix spelling mistakes in cc_mounts.py log messages [Stephen Ford]
181+ - tests: restructure SSH and initial connections [Joshua Powers]
182+ - ds-identify: recognize container-other as a container, test SmartOS.
183+ - cloud-config.service: run After snap.seeded.service. (LP: #1767131)
184+ - tests: do not rely on host /proc/cmdline in test_net.py
185+ [Lars Kellogg-Stedman] (LP: #1769952)
186+ - ds-identify: Remove dupe call to is_ds_enabled, improve debug message.
187+ - SmartOS: fix get_interfaces for nics that do not have addr_assign_type.
188+ - tests: fix package and ca_cert cloud_tests on bionic
189+ (LP: #1769985)
190+ - ds-identify: make shellcheck 0.4.6 happy with ds-identify.
191+ - pycodestyle: Fix deprecated string literals, move away from flake8.
192+ - azure: Add reported ready marker file. [Joshua Chan] (LP: #1765214)
193+ - tools: Support adding a release suffix through packages/bddeb.
194+ - FreeBSD: Invoke growfs on ufs filesystems such that it does not prompt.
195+ [Harm Weites] (LP: #1404745)
196+ - tools: Re-use the orig tarball in packages/bddeb if it is around.
197+ - netinfo: fix netdev_pformat when a nic does not have an address
198+ assigned. (LP: #1766302)
199+ - collect-logs: add -v flag, write to stderr, limit journal to single
200+ boot. (LP: #1766335)
201+ - IBMCloud: Disable config-drive and nocloud only if IBMCloud is enabled.
202+ (LP: #1766401)
203+ - Add reporting events and log_time around early source of blocking time
204+ [Ryan Harper]
205+ - IBMCloud: recognize provisioning environment during debug boots.
206+ (LP: #1767166)
207+ - net: detect unstable network names and trigger a settle if needed
208+ [Ryan Harper] (LP: #1766287)
209+ - IBMCloud: improve documentation in datasource.
210+ - sysconfig: dhcp6 subnet type should not imply dhcpv4 [Vitaly Kuznetsov]
211+ - packages/debian/control.in: add missing dependency on iproute2.
212+ (LP: #1766711)
213+ - DataSourceSmartOS: add locking of serial device.
214+ [Mike Gerdts] (LP: #1746605)
215+ - DataSourceSmartOS: sdc:hostname is ignored [Mike Gerdts] (LP: #1765085)
216+ - DataSourceSmartOS: list() should always return a list
217+ [Mike Gerdts] (LP: #1763480)
218+ - schema: in validation, raise ImportError if strict but no jsonschema.
219+ - set_passwords: Add newline to end of sshd config, only restart if
220+ updated. (LP: #1677205)
221+ - pylint: pay attention to unused variable warnings.
222+ - doc: Add documentation for AliYun datasource. [Junjie Wang]
223+ - Schema: do not warn on duplicate items in commands. (LP: #1764264)
224+ - net: Depend on iproute2's ip instead of net-tools ifconfig or route
225+ - DataSourceSmartOS: fix hang when metadata service is down
226+ [Mike Gerdts] (LP: #1667735)
227+ - DataSourceSmartOS: change default fs on ephemeral disk from ext3 to
228+ ext4. [Mike Gerdts] (LP: #1763511)
229+ - pycodestyle: Fix invalid escape sequences in string literals.
230+ - Implement bash completion script for cloud-init command line
231+ [Ryan Harper]
232+ - tools: Fix make-tarball cli tool usage for development
233+ - renderer: support unicode in render_from_file.
234+ - Implement ntp client spec with auto support for distro selection
235+ [Ryan Harper] (LP: #1749722)
236+ - Apport: add Brightbox, IBM, LXD, and OpenTelekomCloud to list of clouds.
237+ - tests: fix ec2 integration network metadata validation
238+ - tests: fix integration tests to support lxd 3.0 release
239+ - correct documentation to match correct attribute name usage.
240+ [Dominic Schlegel] (LP: #1420018)
241+ - cc_resizefs, util: handle no /dev/zfs [Ryan Harper]
242+ - doc: Fix links in OpenStack datasource documentation.
243+ [Dominic Schlegel] (LP: #1721660)
244+
245 18.2:
246 - Hetzner: Exit early if dmi system-manufacturer is not Hetzner.
247 - Add missing dependency on isc-dhcp-client to trunk ubuntu packaging.
248diff --git a/MANIFEST.in b/MANIFEST.in
249index 1a4d771..57a85ea 100644
250--- a/MANIFEST.in
251+++ b/MANIFEST.in
252@@ -1,5 +1,6 @@
253 include *.py MANIFEST.in LICENSE* ChangeLog
254 global-include *.txt *.rst *.ini *.in *.conf *.cfg *.sh
255+graft bash_completion
256 graft config
257 graft doc
258 graft packages
259diff --git a/bash_completion/cloud-init b/bash_completion/cloud-init
260new file mode 100644
261index 0000000..581432c
262--- /dev/null
263+++ b/bash_completion/cloud-init
264@@ -0,0 +1,77 @@
265+# Copyright (C) 2018 Canonical Ltd.
266+#
267+# This file is part of cloud-init. See LICENSE file for license information.
268+
269+# bash completion for cloud-init cli
270+_cloudinit_complete()
271+{
272+
273+ local cur_word prev_word
274+ cur_word="${COMP_WORDS[COMP_CWORD]}"
275+ prev_word="${COMP_WORDS[COMP_CWORD-1]}"
276+
277+ subcmds="analyze clean collect-logs devel dhclient-hook features init modules single status"
278+ base_params="--help --file --version --debug --force"
279+ case ${COMP_CWORD} in
280+ 1)
281+ COMPREPLY=($(compgen -W "$base_params $subcmds" -- $cur_word))
282+ ;;
283+ 2)
284+ case ${prev_word} in
285+ analyze)
286+ COMPREPLY=($(compgen -W "--help blame dump show" -- $cur_word))
287+ ;;
288+ clean)
289+ COMPREPLY=($(compgen -W "--help --logs --reboot --seed" -- $cur_word))
290+ ;;
291+ collect-logs)
292+ COMPREPLY=($(compgen -W "--help --tarfile --include-userdata" -- $cur_word))
293+ ;;
294+ devel)
295+ COMPREPLY=($(compgen -W "--help schema" -- $cur_word))
296+ ;;
297+ dhclient-hook|features)
298+ COMPREPLY=($(compgen -W "--help" -- $cur_word))
299+ ;;
300+ init)
301+ COMPREPLY=($(compgen -W "--help --local" -- $cur_word))
302+ ;;
303+ modules)
304+ COMPREPLY=($(compgen -W "--help --mode" -- $cur_word))
305+ ;;
306+
307+ single)
308+ COMPREPLY=($(compgen -W "--help --name --frequency --report" -- $cur_word))
309+ ;;
310+ status)
311+ COMPREPLY=($(compgen -W "--help --long --wait" -- $cur_word))
312+ ;;
313+ esac
314+ ;;
315+ 3)
316+ case ${prev_word} in
317+ blame|dump)
318+ COMPREPLY=($(compgen -W "--help --infile --outfile" -- $cur_word))
319+ ;;
320+ --mode)
321+ COMPREPLY=($(compgen -W "--help init config final" -- $cur_word))
322+ ;;
323+ --frequency)
324+ COMPREPLY=($(compgen -W "--help instance always once" -- $cur_word))
325+ ;;
326+ schema)
327+ COMPREPLY=($(compgen -W "--help --config-file --doc --annotate" -- $cur_word))
328+ ;;
329+ show)
330+ COMPREPLY=($(compgen -W "--help --format --infile --outfile" -- $cur_word))
331+ ;;
332+ esac
333+ ;;
334+ *)
335+ COMPREPLY=()
336+ ;;
337+ esac
338+}
339+complete -F _cloudinit_complete cloud-init
340+
341+# vi: syntax=bash expandtab
342diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py
343index 3ba5903..f861365 100644
344--- a/cloudinit/analyze/__main__.py
345+++ b/cloudinit/analyze/__main__.py
346@@ -69,7 +69,7 @@ def analyze_blame(name, args):
347 """
348 (infh, outfh) = configure_io(args)
349 blame_format = ' %ds (%n)'
350- r = re.compile('(^\s+\d+\.\d+)', re.MULTILINE)
351+ r = re.compile(r'(^\s+\d+\.\d+)', re.MULTILINE)
352 for idx, record in enumerate(show.show_events(_get_events(infh),
353 blame_format)):
354 srecs = sorted(filter(r.match, record), reverse=True)
355diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py
356index b071aa1..1f3060d 100644
357--- a/cloudinit/analyze/dump.py
358+++ b/cloudinit/analyze/dump.py
359@@ -112,7 +112,7 @@ def parse_ci_logline(line):
360 return None
361 event_description = stage_to_description[event_name]
362 else:
363- (pymodloglvl, event_type, event_name) = eventstr.split()[0:3]
364+ (_pymodloglvl, event_type, event_name) = eventstr.split()[0:3]
365 event_description = eventstr.split(event_name)[1].strip()
366
367 event = {
368diff --git a/cloudinit/apport.py b/cloudinit/apport.py
369index 618b016..130ff26 100644
370--- a/cloudinit/apport.py
371+++ b/cloudinit/apport.py
372@@ -13,10 +13,29 @@ except ImportError:
373
374
375 KNOWN_CLOUD_NAMES = [
376- 'Amazon - Ec2', 'AliYun', 'AltCloud', 'Azure', 'Bigstep', 'CloudSigma',
377- 'CloudStack', 'DigitalOcean', 'GCE - Google Compute Engine',
378- 'Hetzner Cloud', 'MAAS', 'NoCloud', 'OpenNebula', 'OpenStack', 'OVF',
379- 'Scaleway', 'SmartOS', 'VMware', 'Other']
380+ 'AliYun',
381+ 'AltCloud',
382+ 'Amazon - Ec2',
383+ 'Azure',
384+ 'Bigstep',
385+ 'Brightbox',
386+ 'CloudSigma',
387+ 'CloudStack',
388+ 'DigitalOcean',
389+ 'GCE - Google Compute Engine',
390+ 'Hetzner Cloud',
391+ 'IBM - (aka SoftLayer or BlueMix)',
392+ 'LXD',
393+ 'MAAS',
394+ 'NoCloud',
395+ 'OpenNebula',
396+ 'OpenStack',
397+ 'OVF',
398+ 'OpenTelekomCloud',
399+ 'Scaleway',
400+ 'SmartOS',
401+ 'VMware',
402+ 'Other']
403
404 # Potentially clear text collected logs
405 CLOUDINIT_LOG = '/var/log/cloud-init.log'
406diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py
407index 35ca478..df72520 100644
408--- a/cloudinit/cmd/devel/logs.py
409+++ b/cloudinit/cmd/devel/logs.py
410@@ -11,6 +11,7 @@ from cloudinit.temp_utils import tempdir
411 from datetime import datetime
412 import os
413 import shutil
414+import sys
415
416
417 CLOUDINIT_LOGS = ['/var/log/cloud-init.log', '/var/log/cloud-init-output.log']
418@@ -31,6 +32,8 @@ def get_parser(parser=None):
419 parser = argparse.ArgumentParser(
420 prog='collect-logs',
421 description='Collect and tar all cloud-init debug info')
422+ parser.add_argument('--verbose', '-v', action='count', default=0,
423+ dest='verbosity', help="Be more verbose.")
424 parser.add_argument(
425 "--tarfile", '-t', default='cloud-init.tar.gz',
426 help=('The tarfile to create containing all collected logs.'
427@@ -43,17 +46,33 @@ def get_parser(parser=None):
428 return parser
429
430
431-def _write_command_output_to_file(cmd, filename):
432+def _write_command_output_to_file(cmd, filename, msg, verbosity):
433 """Helper which runs a command and writes output or error to filename."""
434 try:
435 out, _ = subp(cmd)
436 except ProcessExecutionError as e:
437 write_file(filename, str(e))
438+ _debug("collecting %s failed.\n" % msg, 1, verbosity)
439 else:
440 write_file(filename, out)
441+ _debug("collected %s\n" % msg, 1, verbosity)
442+ return out
443
444
445-def collect_logs(tarfile, include_userdata):
446+def _debug(msg, level, verbosity):
447+ if level <= verbosity:
448+ sys.stderr.write(msg)
449+
450+
451+def _collect_file(path, out_dir, verbosity):
452+ if os.path.isfile(path):
453+ copy(path, out_dir)
454+ _debug("collected file: %s\n" % path, 1, verbosity)
455+ else:
456+ _debug("file %s did not exist\n" % path, 2, verbosity)
457+
458+
459+def collect_logs(tarfile, include_userdata, verbosity=0):
460 """Collect all cloud-init logs and tar them up into the provided tarfile.
461
462 @param tarfile: The path of the tar-gzipped file to create.
463@@ -64,28 +83,46 @@ def collect_logs(tarfile, include_userdata):
464 log_dir = 'cloud-init-logs-{0}'.format(date)
465 with tempdir(dir='/tmp') as tmp_dir:
466 log_dir = os.path.join(tmp_dir, log_dir)
467- _write_command_output_to_file(
468+ version = _write_command_output_to_file(
469+ ['cloud-init', '--version'],
470+ os.path.join(log_dir, 'version'),
471+ "cloud-init --version", verbosity)
472+ dpkg_ver = _write_command_output_to_file(
473 ['dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'],
474- os.path.join(log_dir, 'version'))
475+ os.path.join(log_dir, 'dpkg-version'),
476+ "dpkg version", verbosity)
477+ if not version:
478+ version = dpkg_ver if dpkg_ver else "not-available"
479+ _debug("collected cloud-init version: %s\n" % version, 1, verbosity)
480 _write_command_output_to_file(
481- ['dmesg'], os.path.join(log_dir, 'dmesg.txt'))
482+ ['dmesg'], os.path.join(log_dir, 'dmesg.txt'),
483+ "dmesg output", verbosity)
484 _write_command_output_to_file(
485- ['journalctl', '-o', 'short-precise'],
486- os.path.join(log_dir, 'journal.txt'))
487+ ['journalctl', '--boot=0', '-o', 'short-precise'],
488+ os.path.join(log_dir, 'journal.txt'),
489+ "systemd journal of current boot", verbosity)
490+
491 for log in CLOUDINIT_LOGS:
492- copy(log, log_dir)
493+ _collect_file(log, log_dir, verbosity)
494 if include_userdata:
495- copy(USER_DATA_FILE, log_dir)
496+ _collect_file(USER_DATA_FILE, log_dir, verbosity)
497 run_dir = os.path.join(log_dir, 'run')
498 ensure_dir(run_dir)
499- shutil.copytree(CLOUDINIT_RUN_DIR, os.path.join(run_dir, 'cloud-init'))
500+ if os.path.exists(CLOUDINIT_RUN_DIR):
501+ shutil.copytree(CLOUDINIT_RUN_DIR,
502+ os.path.join(run_dir, 'cloud-init'))
503+ _debug("collected dir %s\n" % CLOUDINIT_RUN_DIR, 1, verbosity)
504+ else:
505+ _debug("directory '%s' did not exist\n" % CLOUDINIT_RUN_DIR, 1,
506+ verbosity)
507 with chdir(tmp_dir):
508 subp(['tar', 'czvf', tarfile, log_dir.replace(tmp_dir + '/', '')])
509+ sys.stderr.write("Wrote %s\n" % tarfile)
510
511
512 def handle_collect_logs_args(name, args):
513 """Handle calls to 'cloud-init collect-logs' as a subcommand."""
514- collect_logs(args.tarfile, args.userdata)
515+ collect_logs(args.tarfile, args.userdata, args.verbosity)
516
517
518 def main():
519diff --git a/cloudinit/cmd/devel/tests/test_logs.py b/cloudinit/cmd/devel/tests/test_logs.py
520index dc4947c..98b4756 100644
521--- a/cloudinit/cmd/devel/tests/test_logs.py
522+++ b/cloudinit/cmd/devel/tests/test_logs.py
523@@ -4,6 +4,7 @@ from cloudinit.cmd.devel import logs
524 from cloudinit.util import ensure_dir, load_file, subp, write_file
525 from cloudinit.tests.helpers import FilesystemMockingTestCase, wrap_and_call
526 from datetime import datetime
527+import mock
528 import os
529
530
531@@ -27,11 +28,13 @@ class TestCollectLogs(FilesystemMockingTestCase):
532 date = datetime.utcnow().date().strftime('%Y-%m-%d')
533 date_logdir = 'cloud-init-logs-{0}'.format(date)
534
535+ version_out = '/usr/bin/cloud-init 18.2fake\n'
536 expected_subp = {
537 ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'):
538 '0.7fake\n',
539+ ('cloud-init', '--version'): version_out,
540 ('dmesg',): 'dmesg-out\n',
541- ('journalctl', '-o', 'short-precise'): 'journal-out\n',
542+ ('journalctl', '--boot=0', '-o', 'short-precise'): 'journal-out\n',
543 ('tar', 'czvf', output_tarfile, date_logdir): ''
544 }
545
546@@ -44,9 +47,12 @@ class TestCollectLogs(FilesystemMockingTestCase):
547 subp(cmd) # Pass through tar cmd so we can check output
548 return expected_subp[cmd_tuple], ''
549
550+ fake_stderr = mock.MagicMock()
551+
552 wrap_and_call(
553 'cloudinit.cmd.devel.logs',
554 {'subp': {'side_effect': fake_subp},
555+ 'sys.stderr': {'new': fake_stderr},
556 'CLOUDINIT_LOGS': {'new': [log1, log2]},
557 'CLOUDINIT_RUN_DIR': {'new': self.run_dir}},
558 logs.collect_logs, output_tarfile, include_userdata=False)
559@@ -55,7 +61,9 @@ class TestCollectLogs(FilesystemMockingTestCase):
560 out_logdir = self.tmp_path(date_logdir, self.new_root)
561 self.assertEqual(
562 '0.7fake\n',
563- load_file(os.path.join(out_logdir, 'version')))
564+ load_file(os.path.join(out_logdir, 'dpkg-version')))
565+ self.assertEqual(version_out,
566+ load_file(os.path.join(out_logdir, 'version')))
567 self.assertEqual(
568 'cloud-init-log',
569 load_file(os.path.join(out_logdir, 'cloud-init.log')))
570@@ -72,6 +80,7 @@ class TestCollectLogs(FilesystemMockingTestCase):
571 'results',
572 load_file(
573 os.path.join(out_logdir, 'run', 'cloud-init', 'results.json')))
574+ fake_stderr.write.assert_any_call('Wrote %s\n' % output_tarfile)
575
576 def test_collect_logs_includes_optional_userdata(self):
577 """collect-logs include userdata when --include-userdata is set."""
578@@ -88,11 +97,13 @@ class TestCollectLogs(FilesystemMockingTestCase):
579 date = datetime.utcnow().date().strftime('%Y-%m-%d')
580 date_logdir = 'cloud-init-logs-{0}'.format(date)
581
582+ version_out = '/usr/bin/cloud-init 18.2fake\n'
583 expected_subp = {
584 ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'):
585 '0.7fake',
586+ ('cloud-init', '--version'): version_out,
587 ('dmesg',): 'dmesg-out\n',
588- ('journalctl', '-o', 'short-precise'): 'journal-out\n',
589+ ('journalctl', '--boot=0', '-o', 'short-precise'): 'journal-out\n',
590 ('tar', 'czvf', output_tarfile, date_logdir): ''
591 }
592
593@@ -105,9 +116,12 @@ class TestCollectLogs(FilesystemMockingTestCase):
594 subp(cmd) # Pass through tar cmd so we can check output
595 return expected_subp[cmd_tuple], ''
596
597+ fake_stderr = mock.MagicMock()
598+
599 wrap_and_call(
600 'cloudinit.cmd.devel.logs',
601 {'subp': {'side_effect': fake_subp},
602+ 'sys.stderr': {'new': fake_stderr},
603 'CLOUDINIT_LOGS': {'new': [log1, log2]},
604 'CLOUDINIT_RUN_DIR': {'new': self.run_dir},
605 'USER_DATA_FILE': {'new': userdata}},
606@@ -118,3 +132,4 @@ class TestCollectLogs(FilesystemMockingTestCase):
607 self.assertEqual(
608 'user-data',
609 load_file(os.path.join(out_logdir, 'user-data.txt')))
610+ fake_stderr.write.assert_any_call('Wrote %s\n' % output_tarfile)
611diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
612index 3f2dbb9..d6ba90f 100644
613--- a/cloudinit/cmd/main.py
614+++ b/cloudinit/cmd/main.py
615@@ -187,7 +187,7 @@ def attempt_cmdline_url(path, network=True, cmdline=None):
616 data = None
617 header = b'#cloud-config'
618 try:
619- resp = util.read_file_or_url(**kwargs)
620+ resp = url_helper.read_file_or_url(**kwargs)
621 if resp.ok():
622 data = resp.contents
623 if not resp.contents.startswith(header):
624diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py
625index dbe421c..e2c54ae 100644
626--- a/cloudinit/cmd/tests/test_main.py
627+++ b/cloudinit/cmd/tests/test_main.py
628@@ -56,7 +56,7 @@ class TestMain(FilesystemMockingTestCase):
629 cmdargs = myargs(
630 debug=False, files=None, force=False, local=False, reporter=None,
631 subcommand='init')
632- (item1, item2) = wrap_and_call(
633+ (_item1, item2) = wrap_and_call(
634 'cloudinit.cmd.main',
635 {'util.close_stdin': True,
636 'netinfo.debug_info': 'my net debug info',
637@@ -85,7 +85,7 @@ class TestMain(FilesystemMockingTestCase):
638 cmdargs = myargs(
639 debug=False, files=None, force=False, local=False, reporter=None,
640 subcommand='init')
641- (item1, item2) = wrap_and_call(
642+ (_item1, item2) = wrap_and_call(
643 'cloudinit.cmd.main',
644 {'util.close_stdin': True,
645 'netinfo.debug_info': 'my net debug info',
646@@ -133,7 +133,7 @@ class TestMain(FilesystemMockingTestCase):
647 self.assertEqual(main.LOG, log)
648 self.assertIsNone(args)
649
650- (item1, item2) = wrap_and_call(
651+ (_item1, item2) = wrap_and_call(
652 'cloudinit.cmd.main',
653 {'util.close_stdin': True,
654 'netinfo.debug_info': 'my net debug info',
655diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
656index 5b9cbca..e18944e 100644
657--- a/cloudinit/config/cc_apt_configure.py
658+++ b/cloudinit/config/cc_apt_configure.py
659@@ -121,7 +121,7 @@ and https protocols respectively. The ``proxy`` key also exists as an alias for
660 All source entries in ``apt-sources`` that match regex in
661 ``add_apt_repo_match`` will be added to the system using
662 ``add-apt-repository``. If ``add_apt_repo_match`` is not specified, it defaults
663-to ``^[\w-]+:\w``
664+to ``^[\\w-]+:\\w``
665
666 **Add source list entries:**
667
668@@ -378,7 +378,7 @@ def apply_debconf_selections(cfg, target=None):
669
670 # get a complete list of packages listed in input
671 pkgs_cfgd = set()
672- for key, content in selsets.items():
673+ for _key, content in selsets.items():
674 for line in content.splitlines():
675 if line.startswith("#"):
676 continue
677diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
678index 233da1e..db64f0a 100644
679--- a/cloudinit/config/cc_bootcmd.py
680+++ b/cloudinit/config/cc_bootcmd.py
681@@ -63,7 +63,6 @@ schema = {
682 'additionalProperties': False,
683 'minItems': 1,
684 'required': [],
685- 'uniqueItems': True
686 }
687 }
688 }
689diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py
690index c56319b..885b313 100644
691--- a/cloudinit/config/cc_disable_ec2_metadata.py
692+++ b/cloudinit/config/cc_disable_ec2_metadata.py
693@@ -32,13 +32,23 @@ from cloudinit.settings import PER_ALWAYS
694
695 frequency = PER_ALWAYS
696
697-REJECT_CMD = ['route', 'add', '-host', '169.254.169.254', 'reject']
698+REJECT_CMD_IF = ['route', 'add', '-host', '169.254.169.254', 'reject']
699+REJECT_CMD_IP = ['ip', 'route', 'add', 'prohibit', '169.254.169.254']
700
701
702 def handle(name, cfg, _cloud, log, _args):
703 disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False)
704 if disabled:
705- util.subp(REJECT_CMD, capture=False)
706+ reject_cmd = None
707+ if util.which('ip'):
708+ reject_cmd = REJECT_CMD_IP
709+ elif util.which('ifconfig'):
710+ reject_cmd = REJECT_CMD_IF
711+ else:
712+ log.error(('Neither "route" nor "ip" command found, unable to '
713+ 'manipulate routing table'))
714+ return
715+ util.subp(reject_cmd, capture=False)
716 else:
717 log.debug(("Skipping module named %s,"
718 " disabling the ec2 route not enabled"), name)
719diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
720index c3e8c48..943089e 100644
721--- a/cloudinit/config/cc_disk_setup.py
722+++ b/cloudinit/config/cc_disk_setup.py
723@@ -680,13 +680,13 @@ def read_parttbl(device):
724 reliable way to probe the partition table.
725 """
726 blkdev_cmd = [BLKDEV_CMD, '--rereadpt', device]
727- udevadm_settle()
728+ util.udevadm_settle()
729 try:
730 util.subp(blkdev_cmd)
731 except Exception as e:
732 util.logexc(LOG, "Failed reading the partition table %s" % e)
733
734- udevadm_settle()
735+ util.udevadm_settle()
736
737
738 def exec_mkpart_mbr(device, layout):
739@@ -737,14 +737,10 @@ def exec_mkpart(table_type, device, layout):
740 return get_dyn_func("exec_mkpart_%s", table_type, device, layout)
741
742
743-def udevadm_settle():
744- util.subp(['udevadm', 'settle'])
745-
746-
747 def assert_and_settle_device(device):
748 """Assert that device exists and settle so it is fully recognized."""
749 if not os.path.exists(device):
750- udevadm_settle()
751+ util.udevadm_settle()
752 if not os.path.exists(device):
753 raise RuntimeError("Device %s did not exist and was not created "
754 "with a udevamd settle." % device)
755@@ -752,7 +748,7 @@ def assert_and_settle_device(device):
756 # Whether or not the device existed above, it is possible that udev
757 # events that would populate udev database (for reading by lsdname) have
758 # not yet finished. So settle again.
759- udevadm_settle()
760+ util.udevadm_settle()
761
762
763 def mkpart(device, definition):
764diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py
765index 69dc2d5..eb9fbe6 100644
766--- a/cloudinit/config/cc_emit_upstart.py
767+++ b/cloudinit/config/cc_emit_upstart.py
768@@ -43,7 +43,7 @@ def is_upstart_system():
769 del myenv['UPSTART_SESSION']
770 check_cmd = ['initctl', 'version']
771 try:
772- (out, err) = util.subp(check_cmd, env=myenv)
773+ (out, _err) = util.subp(check_cmd, env=myenv)
774 return 'upstart' in out
775 except util.ProcessExecutionError as e:
776 LOG.debug("'%s' returned '%s', not using upstart",
777diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
778index 09374d2..ac72ac4 100644
779--- a/cloudinit/config/cc_lxd.py
780+++ b/cloudinit/config/cc_lxd.py
781@@ -47,11 +47,16 @@ lxd-bridge will be configured accordingly.
782 domain: <domain>
783 """
784
785+from cloudinit import log as logging
786 from cloudinit import util
787 import os
788
789 distros = ['ubuntu']
790
791+LOG = logging.getLogger(__name__)
792+
793+_DEFAULT_NETWORK_NAME = "lxdbr0"
794+
795
796 def handle(name, cfg, cloud, log, args):
797 # Get config
798@@ -109,6 +114,7 @@ def handle(name, cfg, cloud, log, args):
799 # Set up lxd-bridge if bridge config is given
800 dconf_comm = "debconf-communicate"
801 if bridge_cfg:
802+ net_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME)
803 if os.path.exists("/etc/default/lxd-bridge") \
804 and util.which(dconf_comm):
805 # Bridge configured through packaging
806@@ -135,15 +141,18 @@ def handle(name, cfg, cloud, log, args):
807 else:
808 # Built-in LXD bridge support
809 cmd_create, cmd_attach = bridge_to_cmd(bridge_cfg)
810+ maybe_cleanup_default(
811+ net_name=net_name, did_init=bool(init_cfg),
812+ create=bool(cmd_create), attach=bool(cmd_attach))
813 if cmd_create:
814 log.debug("Creating lxd bridge: %s" %
815 " ".join(cmd_create))
816- util.subp(cmd_create)
817+ _lxc(cmd_create)
818
819 if cmd_attach:
820 log.debug("Setting up default lxd bridge: %s" %
821 " ".join(cmd_create))
822- util.subp(cmd_attach)
823+ _lxc(cmd_attach)
824
825 elif bridge_cfg:
826 raise RuntimeError(
827@@ -204,10 +213,10 @@ def bridge_to_cmd(bridge_cfg):
828 if bridge_cfg.get("mode") == "none":
829 return None, None
830
831- bridge_name = bridge_cfg.get("name", "lxdbr0")
832+ bridge_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME)
833 cmd_create = []
834- cmd_attach = ["lxc", "network", "attach-profile", bridge_name,
835- "default", "eth0", "--force-local"]
836+ cmd_attach = ["network", "attach-profile", bridge_name,
837+ "default", "eth0"]
838
839 if bridge_cfg.get("mode") == "existing":
840 return None, cmd_attach
841@@ -215,7 +224,7 @@ def bridge_to_cmd(bridge_cfg):
842 if bridge_cfg.get("mode") != "new":
843 raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode"))
844
845- cmd_create = ["lxc", "network", "create", bridge_name]
846+ cmd_create = ["network", "create", bridge_name]
847
848 if bridge_cfg.get("ipv4_address") and bridge_cfg.get("ipv4_netmask"):
849 cmd_create.append("ipv4.address=%s/%s" %
850@@ -247,8 +256,47 @@ def bridge_to_cmd(bridge_cfg):
851 if bridge_cfg.get("domain"):
852 cmd_create.append("dns.domain=%s" % bridge_cfg.get("domain"))
853
854- cmd_create.append("--force-local")
855-
856 return cmd_create, cmd_attach
857
858+
859+def _lxc(cmd):
860+ env = {'LC_ALL': 'C'}
861+ util.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env)
862+
863+
864+def maybe_cleanup_default(net_name, did_init, create, attach,
865+ profile="default", nic_name="eth0"):
866+ """Newer versions of lxc (3.0.1+) create a lxdbr0 network when
867+ 'lxd init --auto' is run. Older versions did not.
868+
869+ By removing any network that lxd-init created, we simply leave the
870+ add/attach code intact.
871+
872+ https://github.com/lxc/lxd/issues/4649"""
873+ if net_name != _DEFAULT_NETWORK_NAME or not did_init:
874+ return
875+
876+ fail_assume_enoent = " failed. Assuming it did not exist."
877+ succeeded = " succeeded."
878+ if create:
879+ msg = "Deletion of lxd network '%s'" % net_name
880+ try:
881+ _lxc(["network", "delete", net_name])
882+ LOG.debug(msg + succeeded)
883+ except util.ProcessExecutionError as e:
884+ if e.exit_code != 1:
885+ raise e
886+ LOG.debug(msg + fail_assume_enoent)
887+
888+ if attach:
889+ msg = "Removal of device '%s' from profile '%s'" % (nic_name, profile)
890+ try:
891+ _lxc(["profile", "device", "remove", profile, nic_name])
892+ LOG.debug(msg + succeeded)
893+ except util.ProcessExecutionError as e:
894+ if e.exit_code != 1:
895+ raise e
896+ LOG.debug(msg + fail_assume_enoent)
897+
898+
899 # vi: ts=4 expandtab
900diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
901index f14a4fc..339baba 100644
902--- a/cloudinit/config/cc_mounts.py
903+++ b/cloudinit/config/cc_mounts.py
904@@ -76,6 +76,7 @@ DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$"
905 DEVICE_NAME_RE = re.compile(DEVICE_NAME_FILTER)
906 WS = re.compile("[%s]+" % (whitespace))
907 FSTAB_PATH = "/etc/fstab"
908+MNT_COMMENT = "comment=cloudconfig"
909
910 LOG = logging.getLogger(__name__)
911
912@@ -232,8 +233,8 @@ def setup_swapfile(fname, size=None, maxsize=None):
913 if str(size).lower() == "auto":
914 try:
915 memsize = util.read_meminfo()['total']
916- except IOError as e:
917- LOG.debug("Not creating swap. failed to read meminfo")
918+ except IOError:
919+ LOG.debug("Not creating swap: failed to read meminfo")
920 return
921
922 util.ensure_dir(tdir)
923@@ -280,17 +281,17 @@ def handle_swapcfg(swapcfg):
924
925 if os.path.exists(fname):
926 if not os.path.exists("/proc/swaps"):
927- LOG.debug("swap file %s existed. no /proc/swaps. Being safe.",
928- fname)
929+ LOG.debug("swap file %s exists, but no /proc/swaps exists, "
930+ "being safe", fname)
931 return fname
932 try:
933 for line in util.load_file("/proc/swaps").splitlines():
934 if line.startswith(fname + " "):
935- LOG.debug("swap file %s already in use.", fname)
936+ LOG.debug("swap file %s already in use", fname)
937 return fname
938- LOG.debug("swap file %s existed, but not in /proc/swaps", fname)
939+ LOG.debug("swap file %s exists, but not in /proc/swaps", fname)
940 except Exception:
941- LOG.warning("swap file %s existed. Error reading /proc/swaps",
942+ LOG.warning("swap file %s exists. Error reading /proc/swaps",
943 fname)
944 return fname
945
946@@ -327,6 +328,22 @@ def handle(_name, cfg, cloud, log, _args):
947
948 LOG.debug("mounts configuration is %s", cfgmnt)
949
950+ fstab_lines = []
951+ fstab_devs = {}
952+ fstab_removed = []
953+
954+ for line in util.load_file(FSTAB_PATH).splitlines():
955+ if MNT_COMMENT in line:
956+ fstab_removed.append(line)
957+ continue
958+
959+ try:
960+ toks = WS.split(line)
961+ except Exception:
962+ pass
963+ fstab_devs[toks[0]] = line
964+ fstab_lines.append(line)
965+
966 for i in range(len(cfgmnt)):
967 # skip something that wasn't a list
968 if not isinstance(cfgmnt[i], list):
969@@ -336,12 +353,17 @@ def handle(_name, cfg, cloud, log, _args):
970
971 start = str(cfgmnt[i][0])
972 sanitized = sanitize_devname(start, cloud.device_name_to_device, log)
973+ if sanitized != start:
974+ log.debug("changed %s => %s" % (start, sanitized))
975+
976 if sanitized is None:
977- log.debug("Ignorming nonexistant named mount %s", start)
978+ log.debug("Ignoring nonexistent named mount %s", start)
979+ continue
980+ elif sanitized in fstab_devs:
981+ log.info("Device %s already defined in fstab: %s",
982+ sanitized, fstab_devs[sanitized])
983 continue
984
985- if sanitized != start:
986- log.debug("changed %s => %s" % (start, sanitized))
987 cfgmnt[i][0] = sanitized
988
989 # in case the user did not quote a field (likely fs-freq, fs_passno)
990@@ -373,11 +395,17 @@ def handle(_name, cfg, cloud, log, _args):
991 for defmnt in defmnts:
992 start = defmnt[0]
993 sanitized = sanitize_devname(start, cloud.device_name_to_device, log)
994- if sanitized is None:
995- log.debug("Ignoring nonexistant default named mount %s", start)
996- continue
997 if sanitized != start:
998 log.debug("changed default device %s => %s" % (start, sanitized))
999+
1000+ if sanitized is None:
1001+ log.debug("Ignoring nonexistent default named mount %s", start)
1002+ continue
1003+ elif sanitized in fstab_devs:
1004+ log.debug("Device %s already defined in fstab: %s",
1005+ sanitized, fstab_devs[sanitized])
1006+ continue
1007+
1008 defmnt[0] = sanitized
1009
1010 cfgmnt_has = False
1011@@ -397,7 +425,7 @@ def handle(_name, cfg, cloud, log, _args):
1012 actlist = []
1013 for x in cfgmnt:
1014 if x[1] is None:
1015- log.debug("Skipping non-existent device named %s", x[0])
1016+ log.debug("Skipping nonexistent device named %s", x[0])
1017 else:
1018 actlist.append(x)
1019
1020@@ -406,34 +434,21 @@ def handle(_name, cfg, cloud, log, _args):
1021 actlist.append([swapret, "none", "swap", "sw", "0", "0"])
1022
1023 if len(actlist) == 0:
1024- log.debug("No modifications to fstab needed.")
1025+ log.debug("No modifications to fstab needed")
1026 return
1027
1028- comment = "comment=cloudconfig"
1029 cc_lines = []
1030 needswap = False
1031 dirs = []
1032 for line in actlist:
1033 # write 'comment' in the fs_mntops, entry, claiming this
1034- line[3] = "%s,%s" % (line[3], comment)
1035+ line[3] = "%s,%s" % (line[3], MNT_COMMENT)
1036 if line[2] == "swap":
1037 needswap = True
1038 if line[1].startswith("/"):
1039 dirs.append(line[1])
1040 cc_lines.append('\t'.join(line))
1041
1042- fstab_lines = []
1043- removed = []
1044- for line in util.load_file(FSTAB_PATH).splitlines():
1045- try:
1046- toks = WS.split(line)
1047- if toks[3].find(comment) != -1:
1048- removed.append(line)
1049- continue
1050- except Exception:
1051- pass
1052- fstab_lines.append(line)
1053-
1054 for d in dirs:
1055 try:
1056 util.ensure_dir(d)
1057@@ -441,7 +456,7 @@ def handle(_name, cfg, cloud, log, _args):
1058 util.logexc(log, "Failed to make '%s' config-mount", d)
1059
1060 sadds = [WS.sub(" ", n) for n in cc_lines]
1061- sdrops = [WS.sub(" ", n) for n in removed]
1062+ sdrops = [WS.sub(" ", n) for n in fstab_removed]
1063
1064 sops = (["- " + drop for drop in sdrops if drop not in sadds] +
1065 ["+ " + add for add in sadds if add not in sdrops])
1066diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
1067index cbd0237..9e074bd 100644
1068--- a/cloudinit/config/cc_ntp.py
1069+++ b/cloudinit/config/cc_ntp.py
1070@@ -10,20 +10,95 @@ from cloudinit.config.schema import (
1071 get_schema_doc, validate_cloudconfig_schema)
1072 from cloudinit import log as logging
1073 from cloudinit.settings import PER_INSTANCE
1074+from cloudinit import temp_utils
1075 from cloudinit import templater
1076 from cloudinit import type_utils
1077 from cloudinit import util
1078
1079+import copy
1080 import os
1081+import six
1082 from textwrap import dedent
1083
1084 LOG = logging.getLogger(__name__)
1085
1086 frequency = PER_INSTANCE
1087 NTP_CONF = '/etc/ntp.conf'
1088-TIMESYNCD_CONF = '/etc/systemd/timesyncd.conf.d/cloud-init.conf'
1089 NR_POOL_SERVERS = 4
1090-distros = ['centos', 'debian', 'fedora', 'opensuse', 'sles', 'ubuntu']
1091+distros = ['centos', 'debian', 'fedora', 'opensuse', 'rhel', 'sles', 'ubuntu']
1092+
1093+NTP_CLIENT_CONFIG = {
1094+ 'chrony': {
1095+ 'check_exe': 'chronyd',
1096+ 'confpath': '/etc/chrony.conf',
1097+ 'packages': ['chrony'],
1098+ 'service_name': 'chrony',
1099+ 'template_name': 'chrony.conf.{distro}',
1100+ 'template': None,
1101+ },
1102+ 'ntp': {
1103+ 'check_exe': 'ntpd',
1104+ 'confpath': NTP_CONF,
1105+ 'packages': ['ntp'],
1106+ 'service_name': 'ntp',
1107+ 'template_name': 'ntp.conf.{distro}',
1108+ 'template': None,
1109+ },
1110+ 'ntpdate': {
1111+ 'check_exe': 'ntpdate',
1112+ 'confpath': NTP_CONF,
1113+ 'packages': ['ntpdate'],
1114+ 'service_name': 'ntpdate',
1115+ 'template_name': 'ntp.conf.{distro}',
1116+ 'template': None,
1117+ },
1118+ 'systemd-timesyncd': {
1119+ 'check_exe': '/lib/systemd/systemd-timesyncd',
1120+ 'confpath': '/etc/systemd/timesyncd.conf.d/cloud-init.conf',
1121+ 'packages': [],
1122+ 'service_name': 'systemd-timesyncd',
1123+ 'template_name': 'timesyncd.conf',
1124+ 'template': None,
1125+ },
1126+}
1127+
1128+# This is Distro-specific configuration overrides of the base config
1129+DISTRO_CLIENT_CONFIG = {
1130+ 'debian': {
1131+ 'chrony': {
1132+ 'confpath': '/etc/chrony/chrony.conf',
1133+ },
1134+ },
1135+ 'opensuse': {
1136+ 'chrony': {
1137+ 'service_name': 'chronyd',
1138+ },
1139+ 'ntp': {
1140+ 'confpath': '/etc/ntp.conf',
1141+ 'service_name': 'ntpd',
1142+ },
1143+ 'systemd-timesyncd': {
1144+ 'check_exe': '/usr/lib/systemd/systemd-timesyncd',
1145+ },
1146+ },
1147+ 'sles': {
1148+ 'chrony': {
1149+ 'service_name': 'chronyd',
1150+ },
1151+ 'ntp': {
1152+ 'confpath': '/etc/ntp.conf',
1153+ 'service_name': 'ntpd',
1154+ },
1155+ 'systemd-timesyncd': {
1156+ 'check_exe': '/usr/lib/systemd/systemd-timesyncd',
1157+ },
1158+ },
1159+ 'ubuntu': {
1160+ 'chrony': {
1161+ 'confpath': '/etc/chrony/chrony.conf',
1162+ },
1163+ },
1164+}
1165
1166
1167 # The schema definition for each cloud-config module is a strict contract for
1168@@ -48,7 +123,34 @@ schema = {
1169 'distros': distros,
1170 'examples': [
1171 dedent("""\
1172+ # Override ntp with chrony configuration on Ubuntu
1173+ ntp:
1174+ enabled: true
1175+ ntp_client: chrony # Uses cloud-init default chrony configuration
1176+ """),
1177+ dedent("""\
1178+ # Provide a custom ntp client configuration
1179 ntp:
1180+ enabled: true
1181+ ntp_client: myntpclient
1182+ config:
1183+ confpath: /etc/myntpclient/myntpclient.conf
1184+ check_exe: myntpclientd
1185+ packages:
1186+ - myntpclient
1187+ service_name: myntpclient
1188+ template: |
1189+ ## template:jinja
1190+ # My NTP Client config
1191+ {% if pools -%}# pools{% endif %}
1192+ {% for pool in pools -%}
1193+ pool {{pool}} iburst
1194+ {% endfor %}
1195+ {%- if servers %}# servers
1196+ {% endif %}
1197+ {% for server in servers -%}
1198+ server {{server}} iburst
1199+ {% endfor %}
1200 pools: [0.int.pool.ntp.org, 1.int.pool.ntp.org, ntp.myorg.org]
1201 servers:
1202 - ntp.server.local
1203@@ -83,79 +185,159 @@ schema = {
1204 List of ntp servers. If both pools and servers are
1205 empty, 4 default pool servers will be provided with
1206 the format ``{0-3}.{distro}.pool.ntp.org``.""")
1207- }
1208+ },
1209+ 'ntp_client': {
1210+ 'type': 'string',
1211+ 'default': 'auto',
1212+ 'description': dedent("""\
1213+ Name of an NTP client to use to configure system NTP.
1214+ When unprovided or 'auto' the default client preferred
1215+ by the distribution will be used. The following
1216+ built-in client names can be used to override existing
1217+ configuration defaults: chrony, ntp, ntpdate,
1218+ systemd-timesyncd."""),
1219+ },
1220+ 'enabled': {
1221+ 'type': 'boolean',
1222+ 'default': True,
1223+ 'description': dedent("""\
1224+ Attempt to enable ntp clients if set to True. If set
1225+ to False, ntp client will not be configured or
1226+ installed"""),
1227+ },
1228+ 'config': {
1229+ 'description': dedent("""\
1230+ Configuration settings or overrides for the
1231+ ``ntp_client`` specified."""),
1232+ 'type': ['object'],
1233+ 'properties': {
1234+ 'confpath': {
1235+ 'type': 'string',
1236+ 'description': dedent("""\
1237+ The path to where the ``ntp_client``
1238+ configuration is written."""),
1239+ },
1240+ 'check_exe': {
1241+ 'type': 'string',
1242+ 'description': dedent("""\
1243+ The executable name for the ``ntp_client``.
1244+ For example, ntp service ``check_exe`` is
1245+ 'ntpd' because it runs the ntpd binary."""),
1246+ },
1247+ 'packages': {
1248+ 'type': 'array',
1249+ 'items': {
1250+ 'type': 'string',
1251+ },
1252+ 'uniqueItems': True,
1253+ 'description': dedent("""\
1254+ List of packages needed to be installed for the
1255+ selected ``ntp_client``."""),
1256+ },
1257+ 'service_name': {
1258+ 'type': 'string',
1259+ 'description': dedent("""\
1260+ The systemd or sysvinit service name used to
1261+ start and stop the ``ntp_client``
1262+ service."""),
1263+ },
1264+ 'template': {
1265+ 'type': 'string',
1266+ 'description': dedent("""\
1267+ Inline template allowing users to define their
1268+ own ``ntp_client`` configuration template.
1269+ The value must start with '## template:jinja'
1270+ to enable use of templating support.
1271+ """),
1272+ },
1273+ },
1274+ # Don't use REQUIRED_NTP_CONFIG_KEYS to allow for override
1275+ # of builtin client values.
1276+ 'required': [],
1277+ 'minProperties': 1, # If we have config, define something
1278+ 'additionalProperties': False
1279+ },
1280 },
1281 'required': [],
1282 'additionalProperties': False
1283 }
1284 }
1285 }
1286-
1287-__doc__ = get_schema_doc(schema) # Supplement python help()
1288+REQUIRED_NTP_CONFIG_KEYS = frozenset([
1289+ 'check_exe', 'confpath', 'packages', 'service_name'])
1290
1291
1292-def handle(name, cfg, cloud, log, _args):
1293- """Enable and configure ntp."""
1294- if 'ntp' not in cfg:
1295- LOG.debug(
1296- "Skipping module named %s, not present or disabled by cfg", name)
1297- return
1298- ntp_cfg = cfg['ntp']
1299- if ntp_cfg is None:
1300- ntp_cfg = {} # Allow empty config which will install the package
1301+__doc__ = get_schema_doc(schema) # Supplement python help()
1302
1303- # TODO drop this when validate_cloudconfig_schema is strict=True
1304- if not isinstance(ntp_cfg, (dict)):
1305- raise RuntimeError(
1306- "'ntp' key existed in config, but not a dictionary type,"
1307- " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg)))
1308
1309- validate_cloudconfig_schema(cfg, schema)
1310- if ntp_installable():
1311- service_name = 'ntp'
1312- confpath = NTP_CONF
1313- template_name = None
1314- packages = ['ntp']
1315- check_exe = 'ntpd'
1316- else:
1317- service_name = 'systemd-timesyncd'
1318- confpath = TIMESYNCD_CONF
1319- template_name = 'timesyncd.conf'
1320- packages = []
1321- check_exe = '/lib/systemd/systemd-timesyncd'
1322-
1323- rename_ntp_conf()
1324- # ensure when ntp is installed it has a configuration file
1325- # to use instead of starting up with packaged defaults
1326- write_ntp_config_template(ntp_cfg, cloud, confpath, template=template_name)
1327- install_ntp(cloud.distro.install_packages, packages=packages,
1328- check_exe=check_exe)
1329+def distro_ntp_client_configs(distro):
1330+ """Construct a distro-specific ntp client config dictionary by merging
1331+ distro specific changes into base config.
1332
1333- try:
1334- reload_ntp(service_name, systemd=cloud.distro.uses_systemd())
1335- except util.ProcessExecutionError as e:
1336- LOG.exception("Failed to reload/start ntp service: %s", e)
1337- raise
1338+ @param distro: String providing the distro class name.
1339+ @returns: Dict of distro configurations for ntp clients.
1340+ """
1341+ dcfg = DISTRO_CLIENT_CONFIG
1342+ cfg = copy.copy(NTP_CLIENT_CONFIG)
1343+ if distro in dcfg:
1344+ cfg = util.mergemanydict([cfg, dcfg[distro]], reverse=True)
1345+ return cfg
1346
1347
1348-def ntp_installable():
1349- """Check if we can install ntp package
1350+def select_ntp_client(ntp_client, distro):
1351+ """Determine which ntp client is to be used, consulting the distro
1352+ for its preference.
1353
1354- Ubuntu-Core systems do not have an ntp package available, so
1355- we always return False. Other systems require package managers to install
1356- the ntp package If we fail to find one of the package managers, then we
1357- cannot install ntp.
1358+ @param ntp_client: String name of the ntp client to use.
1359+ @param distro: Distro class instance.
1360+ @returns: Dict of the selected ntp client or {} if none selected.
1361 """
1362- if util.system_is_snappy():
1363- return False
1364
1365- if any(map(util.which, ['apt-get', 'dnf', 'yum', 'zypper'])):
1366- return True
1367+ # construct distro-specific ntp_client_config dict
1368+ distro_cfg = distro_ntp_client_configs(distro.name)
1369+
1370+ # user specified client, return its config
1371+ if ntp_client and ntp_client != 'auto':
1372+ LOG.debug('Selected NTP client "%s" via user-data configuration',
1373+ ntp_client)
1374+ return distro_cfg.get(ntp_client, {})
1375+
1376+ # default to auto if unset in distro
1377+ distro_ntp_client = distro.get_option('ntp_client', 'auto')
1378+
1379+ clientcfg = {}
1380+ if distro_ntp_client == "auto":
1381+ for client in distro.preferred_ntp_clients:
1382+ cfg = distro_cfg.get(client)
1383+ if util.which(cfg.get('check_exe')):
1384+ LOG.debug('Selected NTP client "%s", already installed',
1385+ client)
1386+ clientcfg = cfg
1387+ break
1388+
1389+ if not clientcfg:
1390+ client = distro.preferred_ntp_clients[0]
1391+ LOG.debug(
1392+ 'Selected distro preferred NTP client "%s", not yet installed',
1393+ client)
1394+ clientcfg = distro_cfg.get(client)
1395+ else:
1396+ LOG.debug('Selected NTP client "%s" via distro system config',
1397+ distro_ntp_client)
1398+ clientcfg = distro_cfg.get(distro_ntp_client, {})
1399+
1400+ return clientcfg
1401
1402- return False
1403
1404+def install_ntp_client(install_func, packages=None, check_exe="ntpd"):
1405+ """Install ntp client package if not already installed.
1406
1407-def install_ntp(install_func, packages=None, check_exe="ntpd"):
1408+ @param install_func: function. This parameter is invoked with the contents
1409+ of the packages parameter.
1410+ @param packages: list. This parameter defaults to ['ntp'].
1411+ @param check_exe: string. The name of a binary whose presence indicates
1412+ that the specified package is already installed.
1413+ """
1414 if util.which(check_exe):
1415 return
1416 if packages is None:
1417@@ -164,15 +346,23 @@ def install_ntp(install_func, packages=None, check_exe="ntpd"):
1418 install_func(packages)
1419
1420
1421-def rename_ntp_conf(config=None):
1422- """Rename any existing ntp.conf file"""
1423- if config is None: # For testing
1424- config = NTP_CONF
1425- if os.path.exists(config):
1426- util.rename(config, config + ".dist")
1427+def rename_ntp_conf(confpath=None):
1428+ """Rename any existing ntp client config file
1429+
1430+ @param confpath: string. Specify a path to an existing ntp client
1431+ configuration file.
1432+ """
1433+ if os.path.exists(confpath):
1434+ util.rename(confpath, confpath + ".dist")
1435
1436
1437 def generate_server_names(distro):
1438+ """Generate a list of server names to populate an ntp client configuration
1439+ file.
1440+
1441+ @param distro: string. Specify the distro name
1442+ @returns: list: A list of strings representing ntp servers for this distro.
1443+ """
1444 names = []
1445 pool_distro = distro
1446 # For legal reasons x.pool.sles.ntp.org does not exist,
1447@@ -185,34 +375,60 @@ def generate_server_names(distro):
1448 return names
1449
1450
1451-def write_ntp_config_template(cfg, cloud, path, template=None):
1452- servers = cfg.get('servers', [])
1453- pools = cfg.get('pools', [])
1454+def write_ntp_config_template(distro_name, servers=None, pools=None,
1455+ path=None, template_fn=None, template=None):
1456+ """Render a ntp client configuration for the specified client.
1457+
1458+ @param distro_name: string. The distro class name.
1459+ @param servers: A list of strings specifying ntp servers. Defaults to empty
1460+ list.
1461+ @param pools: A list of strings specifying ntp pools. Defaults to empty
1462+ list.
1463+ @param path: A string to specify where to write the rendered template.
1464+ @param template_fn: A string to specify the template source file.
1465+ @param template: A string specifying the contents of the template. This
1466+ content will be written to a temporary file before being used to render
1467+ the configuration file.
1468+
1469+ @raises: ValueError when path is None.
1470+ @raises: ValueError when template_fn is None and template is None.
1471+ """
1472+ if not servers:
1473+ servers = []
1474+ if not pools:
1475+ pools = []
1476
1477 if len(servers) == 0 and len(pools) == 0:
1478- pools = generate_server_names(cloud.distro.name)
1479+ pools = generate_server_names(distro_name)
1480 LOG.debug(
1481 'Adding distro default ntp pool servers: %s', ','.join(pools))
1482
1483- params = {
1484- 'servers': servers,
1485- 'pools': pools,
1486- }
1487+ if not path:
1488+ raise ValueError('Invalid value for path parameter')
1489
1490- if template is None:
1491- template = 'ntp.conf.%s' % cloud.distro.name
1492+ if not template_fn and not template:
1493+ raise ValueError('Not template_fn or template provided')
1494
1495- template_fn = cloud.get_template_filename(template)
1496- if not template_fn:
1497- template_fn = cloud.get_template_filename('ntp.conf')
1498- if not template_fn:
1499- raise RuntimeError(
1500- 'No template found, not rendering {path}'.format(path=path))
1501+ params = {'servers': servers, 'pools': pools}
1502+ if template:
1503+ tfile = temp_utils.mkstemp(prefix='template_name-', suffix=".tmpl")
1504+ template_fn = tfile[1] # filepath is second item in tuple
1505+ util.write_file(template_fn, content=template)
1506
1507 templater.render_to_file(template_fn, path, params)
1508+ # clean up temporary template
1509+ if template:
1510+ util.del_file(template_fn)
1511
1512
1513 def reload_ntp(service, systemd=False):
1514+ """Restart or reload an ntp system service.
1515+
1516+ @param service: A string specifying the name of the service to be affected.
1517+ @param systemd: A boolean indicating if the distro uses systemd, defaults
1518+ to False.
1519+ @returns: A tuple of stdout, stderr results from executing the action.
1520+ """
1521 if systemd:
1522 cmd = ['systemctl', 'reload-or-restart', service]
1523 else:
1524@@ -220,4 +436,117 @@ def reload_ntp(service, systemd=False):
1525 util.subp(cmd, capture=True)
1526
1527
1528+def supplemental_schema_validation(ntp_config):
1529+ """Validate user-provided ntp:config option values.
1530+
1531+ This function supplements flexible jsonschema validation with specific
1532+ value checks to aid in triage of invalid user-provided configuration.
1533+
1534+ @param ntp_config: Dictionary of configuration values under 'ntp'.
1535+
1536+ @raises: ValueError describing invalid values provided.
1537+ """
1538+ errors = []
1539+ missing = REQUIRED_NTP_CONFIG_KEYS.difference(set(ntp_config.keys()))
1540+ if missing:
1541+ keys = ', '.join(sorted(missing))
1542+ errors.append(
1543+ 'Missing required ntp:config keys: {keys}'.format(keys=keys))
1544+ elif not any([ntp_config.get('template'),
1545+ ntp_config.get('template_name')]):
1546+ errors.append(
1547+ 'Either ntp:config:template or ntp:config:template_name values'
1548+ ' are required')
1549+ for key, value in sorted(ntp_config.items()):
1550+ keypath = 'ntp:config:' + key
1551+ if key == 'confpath':
1552+ if not all([value, isinstance(value, six.string_types)]):
1553+ errors.append(
1554+ 'Expected a config file path {keypath}.'
1555+ ' Found ({value})'.format(keypath=keypath, value=value))
1556+ elif key == 'packages':
1557+ if not isinstance(value, list):
1558+ errors.append(
1559+ 'Expected a list of required package names for {keypath}.'
1560+ ' Found ({value})'.format(keypath=keypath, value=value))
1561+ elif key in ('template', 'template_name'):
1562+ if value is None: # Either template or template_name can be none
1563+ continue
1564+ if not isinstance(value, six.string_types):
1565+ errors.append(
1566+ 'Expected a string type for {keypath}.'
1567+ ' Found ({value})'.format(keypath=keypath, value=value))
1568+ elif not isinstance(value, six.string_types):
1569+ errors.append(
1570+ 'Expected a string type for {keypath}.'
1571+ ' Found ({value})'.format(keypath=keypath, value=value))
1572+
1573+ if errors:
1574+ raise ValueError(r'Invalid ntp configuration:\n{errors}'.format(
1575+ errors='\n'.join(errors)))
1576+
1577+
1578+def handle(name, cfg, cloud, log, _args):
1579+ """Enable and configure ntp."""
1580+ if 'ntp' not in cfg:
1581+ LOG.debug(
1582+ "Skipping module named %s, not present or disabled by cfg", name)
1583+ return
1584+ ntp_cfg = cfg['ntp']
1585+ if ntp_cfg is None:
1586+ ntp_cfg = {} # Allow empty config which will install the package
1587+
1588+ # TODO drop this when validate_cloudconfig_schema is strict=True
1589+ if not isinstance(ntp_cfg, (dict)):
1590+ raise RuntimeError(
1591+ "'ntp' key existed in config, but not a dictionary type,"
1592+ " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg)))
1593+
1594+ validate_cloudconfig_schema(cfg, schema)
1595+
1596+ # Allow users to explicitly enable/disable
1597+ enabled = ntp_cfg.get('enabled', True)
1598+ if util.is_false(enabled):
1599+ LOG.debug("Skipping module named %s, disabled by cfg", name)
1600+ return
1601+
1602+ # Select which client is going to be used and get the configuration
1603+ ntp_client_config = select_ntp_client(ntp_cfg.get('ntp_client'),
1604+ cloud.distro)
1605+
1606+ # Allow user ntp config to override distro configurations
1607+ ntp_client_config = util.mergemanydict(
1608+ [ntp_client_config, ntp_cfg.get('config', {})], reverse=True)
1609+
1610+ supplemental_schema_validation(ntp_client_config)
1611+ rename_ntp_conf(confpath=ntp_client_config.get('confpath'))
1612+
1613+ template_fn = None
1614+ if not ntp_client_config.get('template'):
1615+ template_name = (
1616+ ntp_client_config.get('template_name').replace('{distro}',
1617+ cloud.distro.name))
1618+ template_fn = cloud.get_template_filename(template_name)
1619+ if not template_fn:
1620+ msg = ('No template found, not rendering %s' %
1621+ ntp_client_config.get('template_name'))
1622+ raise RuntimeError(msg)
1623+
1624+ write_ntp_config_template(cloud.distro.name,
1625+ servers=ntp_cfg.get('servers', []),
1626+ pools=ntp_cfg.get('pools', []),
1627+ path=ntp_client_config.get('confpath'),
1628+ template_fn=template_fn,
1629+ template=ntp_client_config.get('template'))
1630+
1631+ install_ntp_client(cloud.distro.install_packages,
1632+ packages=ntp_client_config['packages'],
1633+ check_exe=ntp_client_config['check_exe'])
1634+ try:
1635+ reload_ntp(ntp_client_config['service_name'],
1636+ systemd=cloud.distro.uses_systemd())
1637+ except util.ProcessExecutionError as e:
1638+ LOG.exception("Failed to reload/start ntp service: %s", e)
1639+ raise
1640+
1641 # vi: ts=4 expandtab
1642diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
1643index 878069b..3be0d1c 100644
1644--- a/cloudinit/config/cc_phone_home.py
1645+++ b/cloudinit/config/cc_phone_home.py
1646@@ -41,6 +41,7 @@ keys to post. Available keys are:
1647 """
1648
1649 from cloudinit import templater
1650+from cloudinit import url_helper
1651 from cloudinit import util
1652
1653 from cloudinit.settings import PER_INSTANCE
1654@@ -136,9 +137,9 @@ def handle(name, cfg, cloud, log, args):
1655 }
1656 url = templater.render_string(url, url_params)
1657 try:
1658- util.read_file_or_url(url, data=real_submit_keys,
1659- retries=tries, sec_between=3,
1660- ssl_details=util.fetch_ssl_details(cloud.paths))
1661+ url_helper.read_file_or_url(
1662+ url, data=real_submit_keys, retries=tries, sec_between=3,
1663+ ssl_details=util.fetch_ssl_details(cloud.paths))
1664 except Exception:
1665 util.logexc(log, "Failed to post phone home data to %s in %s tries",
1666 url, tries)
1667diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
1668index 4da3a58..50b3747 100644
1669--- a/cloudinit/config/cc_power_state_change.py
1670+++ b/cloudinit/config/cc_power_state_change.py
1671@@ -74,7 +74,7 @@ def givecmdline(pid):
1672 if util.is_FreeBSD():
1673 (output, _err) = util.subp(['procstat', '-c', str(pid)])
1674 line = output.splitlines()[1]
1675- m = re.search('\d+ (\w|\.|-)+\s+(/\w.+)', line)
1676+ m = re.search(r'\d+ (\w|\.|-)+\s+(/\w.+)', line)
1677 return m.group(2)
1678 else:
1679 return util.load_file("/proc/%s/cmdline" % pid)
1680diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
1681index 013e69b..2edddd0 100644
1682--- a/cloudinit/config/cc_resizefs.py
1683+++ b/cloudinit/config/cc_resizefs.py
1684@@ -81,7 +81,7 @@ def _resize_xfs(mount_point, devpth):
1685
1686
1687 def _resize_ufs(mount_point, devpth):
1688- return ('growfs', devpth)
1689+ return ('growfs', '-y', devpth)
1690
1691
1692 def _resize_zfs(mount_point, devpth):
1693@@ -89,13 +89,11 @@ def _resize_zfs(mount_point, devpth):
1694
1695
1696 def _get_dumpfs_output(mount_point):
1697- dumpfs_res, err = util.subp(['dumpfs', '-m', mount_point])
1698- return dumpfs_res
1699+ return util.subp(['dumpfs', '-m', mount_point])[0]
1700
1701
1702 def _get_gpart_output(part):
1703- gpart_res, err = util.subp(['gpart', 'show', part])
1704- return gpart_res
1705+ return util.subp(['gpart', 'show', part])[0]
1706
1707
1708 def _can_skip_resize_ufs(mount_point, devpth):
1709@@ -113,7 +111,7 @@ def _can_skip_resize_ufs(mount_point, devpth):
1710 if not line.startswith('#'):
1711 newfs_cmd = shlex.split(line)
1712 opt_value = 'O:Ua:s:b:d:e:f:g:h:i:jk:m:o:'
1713- optlist, args = getopt.getopt(newfs_cmd[1:], opt_value)
1714+ optlist, _args = getopt.getopt(newfs_cmd[1:], opt_value)
1715 for o, a in optlist:
1716 if o == "-s":
1717 cur_fs_sz = int(a)
1718diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
1719index 530808c..1c67943 100644
1720--- a/cloudinit/config/cc_rh_subscription.py
1721+++ b/cloudinit/config/cc_rh_subscription.py
1722@@ -209,8 +209,7 @@ class SubscriptionManager(object):
1723 cmd.append("--serverurl={0}".format(self.server_hostname))
1724
1725 try:
1726- return_out, return_err = self._sub_man_cli(cmd,
1727- logstring_val=True)
1728+ return_out = self._sub_man_cli(cmd, logstring_val=True)[0]
1729 except util.ProcessExecutionError as e:
1730 if e.stdout == "":
1731 self.log_warn("Registration failed due "
1732@@ -233,8 +232,7 @@ class SubscriptionManager(object):
1733
1734 # Attempting to register the system only
1735 try:
1736- return_out, return_err = self._sub_man_cli(cmd,
1737- logstring_val=True)
1738+ return_out = self._sub_man_cli(cmd, logstring_val=True)[0]
1739 except util.ProcessExecutionError as e:
1740 if e.stdout == "":
1741 self.log_warn("Registration failed due "
1742@@ -257,7 +255,7 @@ class SubscriptionManager(object):
1743 .format(self.servicelevel)]
1744
1745 try:
1746- return_out, return_err = self._sub_man_cli(cmd)
1747+ return_out = self._sub_man_cli(cmd)[0]
1748 except util.ProcessExecutionError as e:
1749 if e.stdout.rstrip() != '':
1750 for line in e.stdout.split("\n"):
1751@@ -275,7 +273,7 @@ class SubscriptionManager(object):
1752 def _set_auto_attach(self):
1753 cmd = ['attach', '--auto']
1754 try:
1755- return_out, return_err = self._sub_man_cli(cmd)
1756+ return_out = self._sub_man_cli(cmd)[0]
1757 except util.ProcessExecutionError as e:
1758 self.log_warn("Auto-attach failed with: {0}".format(e))
1759 return False
1760@@ -294,12 +292,12 @@ class SubscriptionManager(object):
1761
1762 # Get all available pools
1763 cmd = ['list', '--available', '--pool-only']
1764- results, errors = self._sub_man_cli(cmd)
1765+ results = self._sub_man_cli(cmd)[0]
1766 available = (results.rstrip()).split("\n")
1767
1768 # Get all consumed pools
1769 cmd = ['list', '--consumed', '--pool-only']
1770- results, errors = self._sub_man_cli(cmd)
1771+ results = self._sub_man_cli(cmd)[0]
1772 consumed = (results.rstrip()).split("\n")
1773
1774 return available, consumed
1775@@ -311,14 +309,14 @@ class SubscriptionManager(object):
1776 '''
1777
1778 cmd = ['repos', '--list-enabled']
1779- return_out, return_err = self._sub_man_cli(cmd)
1780+ return_out = self._sub_man_cli(cmd)[0]
1781 active_repos = []
1782 for repo in return_out.split("\n"):
1783 if "Repo ID:" in repo:
1784 active_repos.append((repo.split(':')[1]).strip())
1785
1786 cmd = ['repos', '--list-disabled']
1787- return_out, return_err = self._sub_man_cli(cmd)
1788+ return_out = self._sub_man_cli(cmd)[0]
1789
1790 inactive_repos = []
1791 for repo in return_out.split("\n"):
1792diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
1793index af08788..27d2366 100644
1794--- a/cloudinit/config/cc_rsyslog.py
1795+++ b/cloudinit/config/cc_rsyslog.py
1796@@ -203,8 +203,8 @@ LOG = logging.getLogger(__name__)
1797 COMMENT_RE = re.compile(r'[ ]*[#]+[ ]*')
1798 HOST_PORT_RE = re.compile(
1799 r'^(?P<proto>[@]{0,2})'
1800- '(([[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))'
1801- '([:](?P<port>[0-9]+))?$')
1802+ r'(([[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))'
1803+ r'([:](?P<port>[0-9]+))?$')
1804
1805
1806 def reload_syslog(command=DEF_RELOAD, systemd=False):
1807diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py
1808index 539cbd5..b6f6c80 100644
1809--- a/cloudinit/config/cc_runcmd.py
1810+++ b/cloudinit/config/cc_runcmd.py
1811@@ -66,7 +66,6 @@ schema = {
1812 'additionalProperties': False,
1813 'minItems': 1,
1814 'required': [],
1815- 'uniqueItems': True
1816 }
1817 }
1818 }
1819diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
1820index bb24d57..5ef9737 100755
1821--- a/cloudinit/config/cc_set_passwords.py
1822+++ b/cloudinit/config/cc_set_passwords.py
1823@@ -68,16 +68,57 @@ import re
1824 import sys
1825
1826 from cloudinit.distros import ug_util
1827-from cloudinit import ssh_util
1828+from cloudinit import log as logging
1829+from cloudinit.ssh_util import update_ssh_config
1830 from cloudinit import util
1831
1832 from string import ascii_letters, digits
1833
1834+LOG = logging.getLogger(__name__)
1835+
1836 # We are removing certain 'painful' letters/numbers
1837 PW_SET = (''.join([x for x in ascii_letters + digits
1838 if x not in 'loLOI01']))
1839
1840
1841+def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"):
1842+ """Apply sshd PasswordAuthentication changes.
1843+
1844+ @param pw_auth: config setting from 'ssh_pwauth'.
1845+ Best given as True, False, or "unchanged".
1846+ @param service_cmd: The service command list (['service'])
1847+ @param service_name: The name of the sshd service for the system.
1848+
1849+ @return: None"""
1850+ cfg_name = "PasswordAuthentication"
1851+ if service_cmd is None:
1852+ service_cmd = ["service"]
1853+
1854+ if util.is_true(pw_auth):
1855+ cfg_val = 'yes'
1856+ elif util.is_false(pw_auth):
1857+ cfg_val = 'no'
1858+ else:
1859+ bmsg = "Leaving ssh config '%s' unchanged." % cfg_name
1860+ if pw_auth is None or pw_auth.lower() == 'unchanged':
1861+ LOG.debug("%s ssh_pwauth=%s", bmsg, pw_auth)
1862+ else:
1863+ LOG.warning("%s Unrecognized value: ssh_pwauth=%s", bmsg, pw_auth)
1864+ return
1865+
1866+ updated = update_ssh_config({cfg_name: cfg_val})
1867+ if not updated:
1868+ LOG.debug("No need to restart ssh service, %s not updated.", cfg_name)
1869+ return
1870+
1871+ if 'systemctl' in service_cmd:
1872+ cmd = list(service_cmd) + ["restart", service_name]
1873+ else:
1874+ cmd = list(service_cmd) + [service_name, "restart"]
1875+ util.subp(cmd)
1876+ LOG.debug("Restarted the ssh daemon.")
1877+
1878+
1879 def handle(_name, cfg, cloud, log, args):
1880 if len(args) != 0:
1881 # if run from command line, and give args, wipe the chpasswd['list']
1882@@ -170,65 +211,9 @@ def handle(_name, cfg, cloud, log, args):
1883 if expired_users:
1884 log.debug("Expired passwords for: %s users", expired_users)
1885
1886- change_pwauth = False
1887- pw_auth = None
1888- if 'ssh_pwauth' in cfg:
1889- if util.is_true(cfg['ssh_pwauth']):
1890- change_pwauth = True
1891- pw_auth = 'yes'
1892- elif util.is_false(cfg['ssh_pwauth']):
1893- change_pwauth = True
1894- pw_auth = 'no'
1895- elif str(cfg['ssh_pwauth']).lower() == 'unchanged':
1896- log.debug('Leaving auth line unchanged')
1897- change_pwauth = False
1898- elif not str(cfg['ssh_pwauth']).strip():
1899- log.debug('Leaving auth line unchanged')
1900- change_pwauth = False
1901- elif not cfg['ssh_pwauth']:
1902- log.debug('Leaving auth line unchanged')
1903- change_pwauth = False
1904- else:
1905- msg = 'Unrecognized value %s for ssh_pwauth' % cfg['ssh_pwauth']
1906- util.logexc(log, msg)
1907-
1908- if change_pwauth:
1909- replaced_auth = False
1910-
1911- # See: man sshd_config
1912- old_lines = ssh_util.parse_ssh_config(ssh_util.DEF_SSHD_CFG)
1913- new_lines = []
1914- i = 0
1915- for (i, line) in enumerate(old_lines):
1916- # Keywords are case-insensitive and arguments are case-sensitive
1917- if line.key == 'passwordauthentication':
1918- log.debug("Replacing auth line %s with %s", i + 1, pw_auth)
1919- replaced_auth = True
1920- line.value = pw_auth
1921- new_lines.append(line)
1922-
1923- if not replaced_auth:
1924- log.debug("Adding new auth line %s", i + 1)
1925- replaced_auth = True
1926- new_lines.append(ssh_util.SshdConfigLine('',
1927- 'PasswordAuthentication',
1928- pw_auth))
1929-
1930- lines = [str(l) for l in new_lines]
1931- util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines),
1932- copy_mode=True)
1933-
1934- try:
1935- cmd = cloud.distro.init_cmd # Default service
1936- cmd.append(cloud.distro.get_option('ssh_svcname', 'ssh'))
1937- cmd.append('restart')
1938- if 'systemctl' in cmd: # Switch action ordering
1939- cmd[1], cmd[2] = cmd[2], cmd[1]
1940- cmd = filter(None, cmd) # Remove empty arguments
1941- util.subp(cmd)
1942- log.debug("Restarted the ssh daemon")
1943- except Exception:
1944- util.logexc(log, "Restarting of the ssh daemon failed")
1945+ handle_ssh_pwauth(
1946+ cfg.get('ssh_pwauth'), service_cmd=cloud.distro.init_cmd,
1947+ service_name=cloud.distro.get_option('ssh_svcname', 'ssh'))
1948
1949 if len(errors):
1950 log.debug("%s errors occured, re-raising the last one", len(errors))
1951diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py
1952index 34a53fd..90724b8 100644
1953--- a/cloudinit/config/cc_snap.py
1954+++ b/cloudinit/config/cc_snap.py
1955@@ -110,7 +110,6 @@ schema = {
1956 'additionalItems': False, # Reject non-string & non-list
1957 'minItems': 1,
1958 'minProperties': 1,
1959- 'uniqueItems': True
1960 },
1961 'squashfuse_in_container': {
1962 'type': 'boolean'
1963@@ -204,12 +203,12 @@ def maybe_install_squashfuse(cloud):
1964 return
1965 try:
1966 cloud.distro.update_package_sources()
1967- except Exception as e:
1968+ except Exception:
1969 util.logexc(LOG, "Package update failed")
1970 raise
1971 try:
1972 cloud.distro.install_packages(['squashfuse'])
1973- except Exception as e:
1974+ except Exception:
1975 util.logexc(LOG, "Failed to install squashfuse")
1976 raise
1977
1978diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
1979index bab80bb..15bee2d 100644
1980--- a/cloudinit/config/cc_snappy.py
1981+++ b/cloudinit/config/cc_snappy.py
1982@@ -213,7 +213,7 @@ def render_snap_op(op, name, path=None, cfgfile=None, config=None):
1983
1984 def read_installed_packages():
1985 ret = []
1986- for (name, date, version, dev) in read_pkg_data():
1987+ for (name, _date, _version, dev) in read_pkg_data():
1988 if dev:
1989 ret.append(NAMESPACE_DELIM.join([name, dev]))
1990 else:
1991@@ -222,7 +222,7 @@ def read_installed_packages():
1992
1993
1994 def read_pkg_data():
1995- out, err = util.subp([SNAPPY_CMD, "list"])
1996+ out, _err = util.subp([SNAPPY_CMD, "list"])
1997 pkg_data = []
1998 for line in out.splitlines()[1:]:
1999 toks = line.split(sep=None, maxsplit=3)
2000diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py
2001index 16b1868..5e082bd 100644
2002--- a/cloudinit/config/cc_ubuntu_advantage.py
2003+++ b/cloudinit/config/cc_ubuntu_advantage.py
2004@@ -87,7 +87,6 @@ schema = {
2005 'additionalItems': False, # Reject non-string & non-list
2006 'minItems': 1,
2007 'minProperties': 1,
2008- 'uniqueItems': True
2009 }
2010 },
2011 'additionalProperties': False, # Reject keys not in schema
2012@@ -149,12 +148,12 @@ def maybe_install_ua_tools(cloud):
2013 return
2014 try:
2015 cloud.distro.update_package_sources()
2016- except Exception as e:
2017+ except Exception:
2018 util.logexc(LOG, "Package update failed")
2019 raise
2020 try:
2021 cloud.distro.install_packages(['ubuntu-advantage-tools'])
2022- except Exception as e:
2023+ except Exception:
2024 util.logexc(LOG, "Failed to install ubuntu-advantage-tools")
2025 raise
2026
2027diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py
2028index b215e95..c95bdaa 100644
2029--- a/cloudinit/config/cc_users_groups.py
2030+++ b/cloudinit/config/cc_users_groups.py
2031@@ -54,8 +54,9 @@ config keys for an entry in ``users`` are as follows:
2032 - ``ssh_authorized_keys``: Optional. List of ssh keys to add to user's
2033 authkeys file. Default: none
2034 - ``ssh_import_id``: Optional. SSH id to import for user. Default: none
2035- - ``sudo``: Optional. Sudo rule to use, or list of sudo rules to use.
2036- Default: none.
2037+ - ``sudo``: Optional. Sudo rule to use, list of sudo rules to use or False.
2038+ Default: none. An absence of sudo key, or a value of none or false
2039+ will result in no sudo rules being written for the user.
2040 - ``system``: Optional. Create user as system user with no home directory.
2041 Default: false
2042 - ``uid``: Optional. The user's ID. Default: The next available value.
2043@@ -82,6 +83,9 @@ config keys for an entry in ``users`` are as follows:
2044
2045 users:
2046 - default
2047+ # User explicitly omitted from sudo permission; also default behavior.
2048+ - name: <some_restricted_user>
2049+ sudo: false
2050 - name: <username>
2051 expiredate: <date>
2052 gecos: <comment>
2053diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py
2054index ca7d0d5..080a6d0 100644
2055--- a/cloudinit/config/schema.py
2056+++ b/cloudinit/config/schema.py
2057@@ -4,7 +4,7 @@
2058 from __future__ import print_function
2059
2060 from cloudinit import importer
2061-from cloudinit.util import find_modules, read_file_or_url
2062+from cloudinit.util import find_modules, load_file
2063
2064 import argparse
2065 from collections import defaultdict
2066@@ -93,20 +93,33 @@ def validate_cloudconfig_schema(config, schema, strict=False):
2067 def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
2068 """Return contents of the cloud-config file annotated with schema errors.
2069
2070- @param cloudconfig: YAML-loaded object from the original_content.
2071+ @param cloudconfig: YAML-loaded dict from the original_content or empty
2072+ dict if unparseable.
2073 @param original_content: The contents of a cloud-config file
2074 @param schema_errors: List of tuples from a JSONSchemaValidationError. The
2075 tuples consist of (schemapath, error_message).
2076 """
2077 if not schema_errors:
2078 return original_content
2079- schemapaths = _schemapath_for_cloudconfig(cloudconfig, original_content)
2080+ schemapaths = {}
2081+ if cloudconfig:
2082+ schemapaths = _schemapath_for_cloudconfig(
2083+ cloudconfig, original_content)
2084 errors_by_line = defaultdict(list)
2085 error_count = 1
2086 error_footer = []
2087 annotated_content = []
2088 for path, msg in schema_errors:
2089- errors_by_line[schemapaths[path]].append(msg)
2090+ match = re.match(r'format-l(?P<line>\d+)\.c(?P<col>\d+).*', path)
2091+ if match:
2092+ line, col = match.groups()
2093+ errors_by_line[int(line)].append(msg)
2094+ else:
2095+ col = None
2096+ errors_by_line[schemapaths[path]].append(msg)
2097+ if col is not None:
2098+ msg = 'Line {line} column {col}: {msg}'.format(
2099+ line=line, col=col, msg=msg)
2100 error_footer.append('# E{0}: {1}'.format(error_count, msg))
2101 error_count += 1
2102 lines = original_content.decode().split('\n')
2103@@ -139,21 +152,34 @@ def validate_cloudconfig_file(config_path, schema, annotate=False):
2104 """
2105 if not os.path.exists(config_path):
2106 raise RuntimeError('Configfile {0} does not exist'.format(config_path))
2107- content = read_file_or_url('file://{0}'.format(config_path)).contents
2108+ content = load_file(config_path, decode=False)
2109 if not content.startswith(CLOUD_CONFIG_HEADER):
2110 errors = (
2111- ('header', 'File {0} needs to begin with "{1}"'.format(
2112+ ('format-l1.c1', 'File {0} needs to begin with "{1}"'.format(
2113 config_path, CLOUD_CONFIG_HEADER.decode())),)
2114- raise SchemaValidationError(errors)
2115-
2116+ error = SchemaValidationError(errors)
2117+ if annotate:
2118+ print(annotated_cloudconfig_file({}, content, error.schema_errors))
2119+ raise error
2120 try:
2121 cloudconfig = yaml.safe_load(content)
2122- except yaml.parser.ParserError as e:
2123- errors = (
2124- ('format', 'File {0} is not valid yaml. {1}'.format(
2125- config_path, str(e))),)
2126- raise SchemaValidationError(errors)
2127-
2128+ except (yaml.YAMLError) as e:
2129+ line = column = 1
2130+ mark = None
2131+ if hasattr(e, 'context_mark') and getattr(e, 'context_mark'):
2132+ mark = getattr(e, 'context_mark')
2133+ elif hasattr(e, 'problem_mark') and getattr(e, 'problem_mark'):
2134+ mark = getattr(e, 'problem_mark')
2135+ if mark:
2136+ line = mark.line + 1
2137+ column = mark.column + 1
2138+ errors = (('format-l{line}.c{col}'.format(line=line, col=column),
2139+ 'File {0} is not valid yaml. {1}'.format(
2140+ config_path, str(e))),)
2141+ error = SchemaValidationError(errors)
2142+ if annotate:
2143+ print(annotated_cloudconfig_file({}, content, error.schema_errors))
2144+ raise error
2145 try:
2146 validate_cloudconfig_schema(
2147 cloudconfig, schema, strict=True)
2148@@ -176,7 +202,7 @@ def _schemapath_for_cloudconfig(config, original_content):
2149 list_index = 0
2150 RE_YAML_INDENT = r'^(\s*)'
2151 scopes = []
2152- for line_number, line in enumerate(content_lines):
2153+ for line_number, line in enumerate(content_lines, 1):
2154 indent_depth = len(re.match(RE_YAML_INDENT, line).groups()[0])
2155 line = line.strip()
2156 if not line or line.startswith('#'):
2157@@ -208,8 +234,8 @@ def _schemapath_for_cloudconfig(config, original_content):
2158 scopes.append((indent_depth + 2, key + '.0'))
2159 for inner_list_index in range(0, len(yaml.safe_load(value))):
2160 list_key = key + '.' + str(inner_list_index)
2161- schema_line_numbers[list_key] = line_number + 1
2162- schema_line_numbers[key] = line_number + 1
2163+ schema_line_numbers[list_key] = line_number
2164+ schema_line_numbers[key] = line_number
2165 return schema_line_numbers
2166
2167
2168@@ -297,8 +323,8 @@ def get_schema():
2169
2170 configs_dir = os.path.dirname(os.path.abspath(__file__))
2171 potential_handlers = find_modules(configs_dir)
2172- for (fname, mod_name) in potential_handlers.items():
2173- mod_locs, looked_locs = importer.find_module(
2174+ for (_fname, mod_name) in potential_handlers.items():
2175+ mod_locs, _looked_locs = importer.find_module(
2176 mod_name, ['cloudinit.config'], ['schema'])
2177 if mod_locs:
2178 mod = importer.import_module(mod_locs[0])
2179@@ -337,9 +363,11 @@ def handle_schema_args(name, args):
2180 try:
2181 validate_cloudconfig_file(
2182 args.config_file, full_schema, args.annotate)
2183- except (SchemaValidationError, RuntimeError) as e:
2184+ except SchemaValidationError as e:
2185 if not args.annotate:
2186 error(str(e))
2187+ except RuntimeError as e:
2188+ error(str(e))
2189 else:
2190 print("Valid cloud-config file {0}".format(args.config_file))
2191 if args.doc:
2192diff --git a/cloudinit/config/tests/test_disable_ec2_metadata.py b/cloudinit/config/tests/test_disable_ec2_metadata.py
2193new file mode 100644
2194index 0000000..67646b0
2195--- /dev/null
2196+++ b/cloudinit/config/tests/test_disable_ec2_metadata.py
2197@@ -0,0 +1,50 @@
2198+# This file is part of cloud-init. See LICENSE file for license information.
2199+
2200+"""Tests cc_disable_ec2_metadata handler"""
2201+
2202+import cloudinit.config.cc_disable_ec2_metadata as ec2_meta
2203+
2204+from cloudinit.tests.helpers import CiTestCase, mock
2205+
2206+import logging
2207+
2208+LOG = logging.getLogger(__name__)
2209+
2210+DISABLE_CFG = {'disable_ec2_metadata': 'true'}
2211+
2212+
2213+class TestEC2MetadataRoute(CiTestCase):
2214+
2215+ with_logs = True
2216+
2217+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which')
2218+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp')
2219+ def test_disable_ifconfig(self, m_subp, m_which):
2220+ """Set the route if ifconfig command is available"""
2221+ m_which.side_effect = lambda x: x if x == 'ifconfig' else None
2222+ ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None)
2223+ m_subp.assert_called_with(
2224+ ['route', 'add', '-host', '169.254.169.254', 'reject'],
2225+ capture=False)
2226+
2227+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which')
2228+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp')
2229+ def test_disable_ip(self, m_subp, m_which):
2230+ """Set the route if ip command is available"""
2231+ m_which.side_effect = lambda x: x if x == 'ip' else None
2232+ ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None)
2233+ m_subp.assert_called_with(
2234+ ['ip', 'route', 'add', 'prohibit', '169.254.169.254'],
2235+ capture=False)
2236+
2237+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which')
2238+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp')
2239+ def test_disable_no_tool(self, m_subp, m_which):
2240+ """Log error when neither route nor ip commands are available"""
2241+ m_which.return_value = None # Find neither ifconfig nor ip
2242+ ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None)
2243+ self.assertEqual(
2244+ [mock.call('ip'), mock.call('ifconfig')], m_which.call_args_list)
2245+ m_subp.assert_not_called()
2246+
2247+# vi: ts=4 expandtab
2248diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py
2249new file mode 100644
2250index 0000000..b051ec8
2251--- /dev/null
2252+++ b/cloudinit/config/tests/test_set_passwords.py
2253@@ -0,0 +1,71 @@
2254+# This file is part of cloud-init. See LICENSE file for license information.
2255+
2256+import mock
2257+
2258+from cloudinit.config import cc_set_passwords as setpass
2259+from cloudinit.tests.helpers import CiTestCase
2260+from cloudinit import util
2261+
2262+MODPATH = "cloudinit.config.cc_set_passwords."
2263+
2264+
2265+class TestHandleSshPwauth(CiTestCase):
2266+ """Test cc_set_passwords handling of ssh_pwauth in handle_ssh_pwauth."""
2267+
2268+ with_logs = True
2269+
2270+ @mock.patch(MODPATH + "util.subp")
2271+ def test_unknown_value_logs_warning(self, m_subp):
2272+ setpass.handle_ssh_pwauth("floo")
2273+ self.assertIn("Unrecognized value: ssh_pwauth=floo",
2274+ self.logs.getvalue())
2275+ m_subp.assert_not_called()
2276+
2277+ @mock.patch(MODPATH + "update_ssh_config", return_value=True)
2278+ @mock.patch(MODPATH + "util.subp")
2279+ def test_systemctl_as_service_cmd(self, m_subp, m_update_ssh_config):
2280+ """If systemctl in service cmd: systemctl restart name."""
2281+ setpass.handle_ssh_pwauth(
2282+ True, service_cmd=["systemctl"], service_name="myssh")
2283+ self.assertEqual(mock.call(["systemctl", "restart", "myssh"]),
2284+ m_subp.call_args)
2285+
2286+ @mock.patch(MODPATH + "update_ssh_config", return_value=True)
2287+ @mock.patch(MODPATH + "util.subp")
2288+ def test_service_as_service_cmd(self, m_subp, m_update_ssh_config):
2289+        """If service in service cmd: service name restart."""
2290+ setpass.handle_ssh_pwauth(
2291+ True, service_cmd=["service"], service_name="myssh")
2292+ self.assertEqual(mock.call(["service", "myssh", "restart"]),
2293+ m_subp.call_args)
2294+
2295+ @mock.patch(MODPATH + "update_ssh_config", return_value=False)
2296+ @mock.patch(MODPATH + "util.subp")
2297+ def test_not_restarted_if_not_updated(self, m_subp, m_update_ssh_config):
2298+ """If config is not updated, then no system restart should be done."""
2299+ setpass.handle_ssh_pwauth(True)
2300+ m_subp.assert_not_called()
2301+ self.assertIn("No need to restart ssh", self.logs.getvalue())
2302+
2303+ @mock.patch(MODPATH + "update_ssh_config", return_value=True)
2304+ @mock.patch(MODPATH + "util.subp")
2305+ def test_unchanged_does_nothing(self, m_subp, m_update_ssh_config):
2306+ """If 'unchanged', then no updates to config and no restart."""
2307+ setpass.handle_ssh_pwauth(
2308+ "unchanged", service_cmd=["systemctl"], service_name="myssh")
2309+ m_update_ssh_config.assert_not_called()
2310+ m_subp.assert_not_called()
2311+
2312+ @mock.patch(MODPATH + "util.subp")
2313+ def test_valid_change_values(self, m_subp):
2314+        """If value is a valid change value, then update should be called."""
2315+ upname = MODPATH + "update_ssh_config"
2316+ optname = "PasswordAuthentication"
2317+ for value in util.FALSE_STRINGS + util.TRUE_STRINGS:
2318+ optval = "yes" if value in util.TRUE_STRINGS else "no"
2319+ with mock.patch(upname, return_value=False) as m_update:
2320+ setpass.handle_ssh_pwauth(value)
2321+ m_update.assert_called_with({optname: optval})
2322+ m_subp.assert_not_called()
2323+
2324+# vi: ts=4 expandtab
2325diff --git a/cloudinit/config/tests/test_snap.py b/cloudinit/config/tests/test_snap.py
2326index c5b4a9d..34c80f1 100644
2327--- a/cloudinit/config/tests/test_snap.py
2328+++ b/cloudinit/config/tests/test_snap.py
2329@@ -9,7 +9,7 @@ from cloudinit.config.cc_snap import (
2330 from cloudinit.config.schema import validate_cloudconfig_schema
2331 from cloudinit import util
2332 from cloudinit.tests.helpers import (
2333- CiTestCase, mock, wrap_and_call, skipUnlessJsonSchema)
2334+ CiTestCase, SchemaTestCaseMixin, mock, wrap_and_call, skipUnlessJsonSchema)
2335
2336
2337 SYSTEM_USER_ASSERTION = """\
2338@@ -245,9 +245,10 @@ class TestRunCommands(CiTestCase):
2339
2340
2341 @skipUnlessJsonSchema()
2342-class TestSchema(CiTestCase):
2343+class TestSchema(CiTestCase, SchemaTestCaseMixin):
2344
2345 with_logs = True
2346+ schema = schema
2347
2348 def test_schema_warns_on_snap_not_as_dict(self):
2349 """If the snap configuration is not a dict, emit a warning."""
2350@@ -340,6 +341,30 @@ class TestSchema(CiTestCase):
2351 {'snap': {'assertions': {'01': 'also valid'}}}, schema)
2352 self.assertEqual('', self.logs.getvalue())
2353
2354+ def test_duplicates_are_fine_array_array(self):
2355+ """Duplicated commands array/array entries are allowed."""
2356+ self.assertSchemaValid(
2357+            {'commands': [["echo", "bye"], ["echo", "bye"]]},
2358+ "command entries can be duplicate.")
2359+
2360+ def test_duplicates_are_fine_array_string(self):
2361+ """Duplicated commands array/string entries are allowed."""
2362+ self.assertSchemaValid(
2363+ {'commands': ["echo bye", "echo bye"]},
2364+ "command entries can be duplicate.")
2365+
2366+ def test_duplicates_are_fine_dict_array(self):
2367+ """Duplicated commands dict/array entries are allowed."""
2368+ self.assertSchemaValid(
2369+ {'commands': {'00': ["echo", "bye"], '01': ["echo", "bye"]}},
2370+ "command entries can be duplicate.")
2371+
2372+ def test_duplicates_are_fine_dict_string(self):
2373+ """Duplicated commands dict/string entries are allowed."""
2374+ self.assertSchemaValid(
2375+ {'commands': {'00': "echo bye", '01': "echo bye"}},
2376+ "command entries can be duplicate.")
2377+
2378
2379 class TestHandle(CiTestCase):
2380
2381diff --git a/cloudinit/config/tests/test_ubuntu_advantage.py b/cloudinit/config/tests/test_ubuntu_advantage.py
2382index f2a59fa..f1beeff 100644
2383--- a/cloudinit/config/tests/test_ubuntu_advantage.py
2384+++ b/cloudinit/config/tests/test_ubuntu_advantage.py
2385@@ -7,7 +7,8 @@ from cloudinit.config.cc_ubuntu_advantage import (
2386 handle, maybe_install_ua_tools, run_commands, schema)
2387 from cloudinit.config.schema import validate_cloudconfig_schema
2388 from cloudinit import util
2389-from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJsonSchema
2390+from cloudinit.tests.helpers import (
2391+ CiTestCase, mock, SchemaTestCaseMixin, skipUnlessJsonSchema)
2392
2393
2394 # Module path used in mocks
2395@@ -105,9 +106,10 @@ class TestRunCommands(CiTestCase):
2396
2397
2398 @skipUnlessJsonSchema()
2399-class TestSchema(CiTestCase):
2400+class TestSchema(CiTestCase, SchemaTestCaseMixin):
2401
2402 with_logs = True
2403+ schema = schema
2404
2405 def test_schema_warns_on_ubuntu_advantage_not_as_dict(self):
2406 """If ubuntu-advantage configuration is not a dict, emit a warning."""
2407@@ -169,6 +171,30 @@ class TestSchema(CiTestCase):
2408 {'ubuntu-advantage': {'commands': {'01': 'also valid'}}}, schema)
2409 self.assertEqual('', self.logs.getvalue())
2410
2411+ def test_duplicates_are_fine_array_array(self):
2412+ """Duplicated commands array/array entries are allowed."""
2413+ self.assertSchemaValid(
2414+            {'commands': [["echo", "bye"], ["echo", "bye"]]},
2415+ "command entries can be duplicate.")
2416+
2417+ def test_duplicates_are_fine_array_string(self):
2418+ """Duplicated commands array/string entries are allowed."""
2419+ self.assertSchemaValid(
2420+ {'commands': ["echo bye", "echo bye"]},
2421+ "command entries can be duplicate.")
2422+
2423+ def test_duplicates_are_fine_dict_array(self):
2424+ """Duplicated commands dict/array entries are allowed."""
2425+ self.assertSchemaValid(
2426+ {'commands': {'00': ["echo", "bye"], '01': ["echo", "bye"]}},
2427+ "command entries can be duplicate.")
2428+
2429+ def test_duplicates_are_fine_dict_string(self):
2430+ """Duplicated commands dict/string entries are allowed."""
2431+ self.assertSchemaValid(
2432+ {'commands': {'00': "echo bye", '01': "echo bye"}},
2433+ "command entries can be duplicate.")
2434+
2435
2436 class TestHandle(CiTestCase):
2437
2438diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
2439index 55260ea..ab0b077 100755
2440--- a/cloudinit/distros/__init__.py
2441+++ b/cloudinit/distros/__init__.py
2442@@ -49,6 +49,9 @@ LOG = logging.getLogger(__name__)
2443 # It could break when Amazon adds new regions and new AZs.
2444 _EC2_AZ_RE = re.compile('^[a-z][a-z]-(?:[a-z]+-)+[0-9][a-z]$')
2445
2446+# Default NTP Client Configurations
2447+PREFERRED_NTP_CLIENTS = ['chrony', 'systemd-timesyncd', 'ntp', 'ntpdate']
2448+
2449
2450 @six.add_metaclass(abc.ABCMeta)
2451 class Distro(object):
2452@@ -60,6 +63,7 @@ class Distro(object):
2453 tz_zone_dir = "/usr/share/zoneinfo"
2454 init_cmd = ['service'] # systemctl, service etc
2455 renderer_configs = {}
2456+ _preferred_ntp_clients = None
2457
2458 def __init__(self, name, cfg, paths):
2459 self._paths = paths
2460@@ -339,6 +343,14 @@ class Distro(object):
2461 contents.write("%s\n" % (eh))
2462 util.write_file(self.hosts_fn, contents.getvalue(), mode=0o644)
2463
2464+ @property
2465+ def preferred_ntp_clients(self):
2466+ """Allow distro to determine the preferred ntp client list"""
2467+ if not self._preferred_ntp_clients:
2468+ self._preferred_ntp_clients = list(PREFERRED_NTP_CLIENTS)
2469+
2470+ return self._preferred_ntp_clients
2471+
2472 def _bring_up_interface(self, device_name):
2473 cmd = ['ifup', device_name]
2474 LOG.debug("Attempting to run bring up interface %s using command %s",
2475@@ -519,7 +531,7 @@ class Distro(object):
2476 self.lock_passwd(name)
2477
2478 # Configure sudo access
2479- if 'sudo' in kwargs:
2480+ if 'sudo' in kwargs and kwargs['sudo'] is not False:
2481 self.write_sudo_rules(name, kwargs['sudo'])
2482
2483 # Import SSH keys
2484diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
2485index 754d3df..ff22d56 100644
2486--- a/cloudinit/distros/freebsd.py
2487+++ b/cloudinit/distros/freebsd.py
2488@@ -110,15 +110,15 @@ class Distro(distros.Distro):
2489 if dev.startswith('lo'):
2490 return dev
2491
2492- n = re.search('\d+$', dev)
2493+ n = re.search(r'\d+$', dev)
2494 index = n.group(0)
2495
2496- (out, err) = util.subp(['ifconfig', '-a'])
2497+ (out, _err) = util.subp(['ifconfig', '-a'])
2498 ifconfigoutput = [x for x in (out.strip()).splitlines()
2499 if len(x.split()) > 0]
2500 bsddev = 'NOT_FOUND'
2501 for line in ifconfigoutput:
2502- m = re.match('^\w+', line)
2503+ m = re.match(r'^\w+', line)
2504 if m:
2505 if m.group(0).startswith('lo'):
2506 continue
2507@@ -128,7 +128,7 @@ class Distro(distros.Distro):
2508 break
2509
2510 # Replace the index with the one we're after.
2511- bsddev = re.sub('\d+$', index, bsddev)
2512+ bsddev = re.sub(r'\d+$', index, bsddev)
2513 LOG.debug("Using network interface %s", bsddev)
2514 return bsddev
2515
2516@@ -266,7 +266,7 @@ class Distro(distros.Distro):
2517 self.lock_passwd(name)
2518
2519 # Configure sudo access
2520- if 'sudo' in kwargs:
2521+ if 'sudo' in kwargs and kwargs['sudo'] is not False:
2522 self.write_sudo_rules(name, kwargs['sudo'])
2523
2524 # Import SSH keys
2525diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py
2526index 162dfa0..9f90e95 100644
2527--- a/cloudinit/distros/opensuse.py
2528+++ b/cloudinit/distros/opensuse.py
2529@@ -208,4 +208,28 @@ class Distro(distros.Distro):
2530 nameservers, searchservers)
2531 return dev_names
2532
2533+ @property
2534+ def preferred_ntp_clients(self):
2535+ """The preferred ntp client is dependent on the version."""
2536+
2537+        # Allow the distro to determine the preferred ntp client list.
2538+ if not self._preferred_ntp_clients:
2539+ distro_info = util.system_info()['dist']
2540+ name = distro_info[0]
2541+ major_ver = int(distro_info[1].split('.')[0])
2542+
2543+ # This is horribly complicated because of a case of
2544+ # "we do not care if versions should be increasing syndrome"
2545+ if (
2546+ (major_ver >= 15 and 'openSUSE' not in name) or
2547+ (major_ver >= 15 and 'openSUSE' in name and major_ver != 42)
2548+ ):
2549+ self._preferred_ntp_clients = ['chrony',
2550+ 'systemd-timesyncd', 'ntp']
2551+ else:
2552+ self._preferred_ntp_clients = ['ntp',
2553+ 'systemd-timesyncd', 'chrony']
2554+
2555+ return self._preferred_ntp_clients
2556+
2557 # vi: ts=4 expandtab
2558diff --git a/cloudinit/distros/ubuntu.py b/cloudinit/distros/ubuntu.py
2559index 82ca34f..6815410 100644
2560--- a/cloudinit/distros/ubuntu.py
2561+++ b/cloudinit/distros/ubuntu.py
2562@@ -10,12 +10,31 @@
2563 # This file is part of cloud-init. See LICENSE file for license information.
2564
2565 from cloudinit.distros import debian
2566+from cloudinit.distros import PREFERRED_NTP_CLIENTS
2567 from cloudinit import log as logging
2568+from cloudinit import util
2569+
2570+import copy
2571
2572 LOG = logging.getLogger(__name__)
2573
2574
2575 class Distro(debian.Distro):
2576+
2577+ @property
2578+ def preferred_ntp_clients(self):
2579+ """The preferred ntp client is dependent on the version."""
2580+ if not self._preferred_ntp_clients:
2581+ (_name, _version, codename) = util.system_info()['dist']
2582+ # Xenial cloud-init only installed ntp, UbuntuCore has timesyncd.
2583+ if codename == "xenial" and not util.system_is_snappy():
2584+ self._preferred_ntp_clients = ['ntp']
2585+ else:
2586+ self._preferred_ntp_clients = (
2587+ copy.deepcopy(PREFERRED_NTP_CLIENTS))
2588+ return self._preferred_ntp_clients
2589+
2590 pass
2591
2592+
2593 # vi: ts=4 expandtab
2594diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
2595index dc3f0fc..3b7b17f 100644
2596--- a/cloudinit/ec2_utils.py
2597+++ b/cloudinit/ec2_utils.py
2598@@ -150,11 +150,9 @@ def get_instance_userdata(api_version='latest',
2599 # NOT_FOUND occurs) and just in that case returning an empty string.
2600 exception_cb = functools.partial(_skip_retry_on_codes,
2601 SKIP_USERDATA_CODES)
2602- response = util.read_file_or_url(ud_url,
2603- ssl_details=ssl_details,
2604- timeout=timeout,
2605- retries=retries,
2606- exception_cb=exception_cb)
2607+ response = url_helper.read_file_or_url(
2608+ ud_url, ssl_details=ssl_details, timeout=timeout,
2609+ retries=retries, exception_cb=exception_cb)
2610 user_data = response.contents
2611 except url_helper.UrlError as e:
2612 if e.code not in SKIP_USERDATA_CODES:
2613@@ -169,9 +167,9 @@ def _get_instance_metadata(tree, api_version='latest',
2614 ssl_details=None, timeout=5, retries=5,
2615 leaf_decoder=None):
2616 md_url = url_helper.combine_url(metadata_address, api_version, tree)
2617- caller = functools.partial(util.read_file_or_url,
2618- ssl_details=ssl_details, timeout=timeout,
2619- retries=retries)
2620+ caller = functools.partial(
2621+ url_helper.read_file_or_url, ssl_details=ssl_details,
2622+ timeout=timeout, retries=retries)
2623
2624 def mcaller(url):
2625 return caller(url).contents
2626diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py
2627index 1ca92d4..dc33876 100644
2628--- a/cloudinit/handlers/upstart_job.py
2629+++ b/cloudinit/handlers/upstart_job.py
2630@@ -97,7 +97,7 @@ def _has_suitable_upstart():
2631 else:
2632 util.logexc(LOG, "dpkg --compare-versions failed [%s]",
2633 e.exit_code)
2634- except Exception as e:
2635+ except Exception:
2636 util.logexc(LOG, "dpkg --compare-versions failed")
2637 return False
2638 else:
2639diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
2640index f69c0ef..3ffde52 100644
2641--- a/cloudinit/net/__init__.py
2642+++ b/cloudinit/net/__init__.py
2643@@ -107,6 +107,21 @@ def is_bond(devname):
2644 return os.path.exists(sys_dev_path(devname, "bonding"))
2645
2646
2647+def is_renamed(devname):
2648+ """
2649+ /* interface name assignment types (sysfs name_assign_type attribute) */
2650+ #define NET_NAME_UNKNOWN 0 /* unknown origin (not exposed to user) */
2651+ #define NET_NAME_ENUM 1 /* enumerated by kernel */
2652+ #define NET_NAME_PREDICTABLE 2 /* predictably named by the kernel */
2653+ #define NET_NAME_USER 3 /* provided by user-space */
2654+ #define NET_NAME_RENAMED 4 /* renamed by user-space */
2655+ """
2656+ name_assign_type = read_sys_net_safe(devname, 'name_assign_type')
2657+ if name_assign_type and name_assign_type in ['3', '4']:
2658+ return True
2659+ return False
2660+
2661+
2662 def is_vlan(devname):
2663 uevent = str(read_sys_net_safe(devname, "uevent"))
2664 return 'DEVTYPE=vlan' in uevent.splitlines()
2665@@ -180,6 +195,17 @@ def find_fallback_nic(blacklist_drivers=None):
2666 if not blacklist_drivers:
2667 blacklist_drivers = []
2668
2669+ if 'net.ifnames=0' in util.get_cmdline():
2670+ LOG.debug('Stable ifnames disabled by net.ifnames=0 in /proc/cmdline')
2671+ else:
2672+ unstable = [device for device in get_devicelist()
2673+ if device != 'lo' and not is_renamed(device)]
2674+ if len(unstable):
2675+ LOG.debug('Found unstable nic names: %s; calling udevadm settle',
2676+ unstable)
2677+ msg = 'Waiting for udev events to settle'
2678+ util.log_time(LOG.debug, msg, func=util.udevadm_settle)
2679+
2680 # get list of interfaces that could have connections
2681 invalid_interfaces = set(['lo'])
2682 potential_interfaces = set([device for device in get_devicelist()
2683@@ -295,7 +321,7 @@ def apply_network_config_names(netcfg, strict_present=True, strict_busy=True):
2684
2685 def _version_2(netcfg):
2686 renames = []
2687- for key, ent in netcfg.get('ethernets', {}).items():
2688+ for ent in netcfg.get('ethernets', {}).values():
2689 # only rename if configured to do so
2690 name = ent.get('set-name')
2691 if not name:
2692@@ -333,8 +359,12 @@ def interface_has_own_mac(ifname, strict=False):
2693 1: randomly generated 3: set using dev_set_mac_address"""
2694
2695 assign_type = read_sys_net_int(ifname, "addr_assign_type")
2696- if strict and assign_type is None:
2697- raise ValueError("%s had no addr_assign_type.")
2698+ if assign_type is None:
2699+ # None is returned if this nic had no 'addr_assign_type' entry.
2700+ # if strict, raise an error, if not return True.
2701+ if strict:
2702+ raise ValueError("%s had no addr_assign_type.")
2703+ return True
2704 return assign_type in (0, 1, 3)
2705
2706
2707diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py
2708index 9e9fe0f..f89a0f7 100755
2709--- a/cloudinit/net/cmdline.py
2710+++ b/cloudinit/net/cmdline.py
2711@@ -65,7 +65,7 @@ def _klibc_to_config_entry(content, mac_addrs=None):
2712 iface['mac_address'] = mac_addrs[name]
2713
2714 # Handle both IPv4 and IPv6 values
2715- for v, pre in (('ipv4', 'IPV4'), ('ipv6', 'IPV6')):
2716+ for pre in ('IPV4', 'IPV6'):
2717 # if no IPV4ADDR or IPV6ADDR, then go on.
2718 if pre + "ADDR" not in data:
2719 continue
2720diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py
2721index 087c0c0..12cf509 100644
2722--- a/cloudinit/net/dhcp.py
2723+++ b/cloudinit/net/dhcp.py
2724@@ -216,7 +216,7 @@ def networkd_get_option_from_leases(keyname, leases_d=None):
2725 if leases_d is None:
2726 leases_d = NETWORKD_LEASES_DIR
2727 leases = networkd_load_leases(leases_d=leases_d)
2728- for ifindex, data in sorted(leases.items()):
2729+ for _ifindex, data in sorted(leases.items()):
2730 if data.get(keyname):
2731 return data[keyname]
2732 return None
2733diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
2734index c6a71d1..bd20a36 100644
2735--- a/cloudinit/net/eni.py
2736+++ b/cloudinit/net/eni.py
2737@@ -10,9 +10,12 @@ from . import ParserError
2738 from . import renderer
2739 from .network_state import subnet_is_ipv6
2740
2741+from cloudinit import log as logging
2742 from cloudinit import util
2743
2744
2745+LOG = logging.getLogger(__name__)
2746+
2747 NET_CONFIG_COMMANDS = [
2748 "pre-up", "up", "post-up", "down", "pre-down", "post-down",
2749 ]
2750@@ -61,7 +64,7 @@ def _iface_add_subnet(iface, subnet):
2751
2752
2753 # TODO: switch to valid_map for attrs
2754-def _iface_add_attrs(iface, index):
2755+def _iface_add_attrs(iface, index, ipv4_subnet_mtu):
2756 # If the index is non-zero, this is an alias interface. Alias interfaces
2757 # represent additional interface addresses, and should not have additional
2758 # attributes. (extra attributes here are almost always either incorrect,
2759@@ -100,6 +103,13 @@ def _iface_add_attrs(iface, index):
2760 value = 'on' if iface[key] else 'off'
2761 if not value or key in ignore_map:
2762 continue
2763+ if key == 'mtu' and ipv4_subnet_mtu:
2764+ if value != ipv4_subnet_mtu:
2765+ LOG.warning(
2766+ "Network config: ignoring %s device-level mtu:%s because"
2767+ " ipv4 subnet-level mtu:%s provided.",
2768+ iface['name'], value, ipv4_subnet_mtu)
2769+ continue
2770 if key in multiline_keys:
2771 for v in value:
2772 content.append(" {0} {1}".format(renames.get(key, key), v))
2773@@ -377,12 +387,15 @@ class Renderer(renderer.Renderer):
2774 subnets = iface.get('subnets', {})
2775 if subnets:
2776 for index, subnet in enumerate(subnets):
2777+ ipv4_subnet_mtu = None
2778 iface['index'] = index
2779 iface['mode'] = subnet['type']
2780 iface['control'] = subnet.get('control', 'auto')
2781 subnet_inet = 'inet'
2782 if subnet_is_ipv6(subnet):
2783 subnet_inet += '6'
2784+ else:
2785+ ipv4_subnet_mtu = subnet.get('mtu')
2786 iface['inet'] = subnet_inet
2787 if subnet['type'].startswith('dhcp'):
2788 iface['mode'] = 'dhcp'
2789@@ -397,7 +410,7 @@ class Renderer(renderer.Renderer):
2790 _iface_start_entry(
2791 iface, index, render_hwaddress=render_hwaddress) +
2792 _iface_add_subnet(iface, subnet) +
2793- _iface_add_attrs(iface, index)
2794+ _iface_add_attrs(iface, index, ipv4_subnet_mtu)
2795 )
2796 for route in subnet.get('routes', []):
2797 lines.extend(self._render_route(route, indent=" "))
2798@@ -409,7 +422,8 @@ class Renderer(renderer.Renderer):
2799 if 'bond-master' in iface or 'bond-slaves' in iface:
2800 lines.append("auto {name}".format(**iface))
2801 lines.append("iface {name} {inet} {mode}".format(**iface))
2802- lines.extend(_iface_add_attrs(iface, index=0))
2803+ lines.extend(
2804+ _iface_add_attrs(iface, index=0, ipv4_subnet_mtu=None))
2805 sections.append(lines)
2806 return sections
2807
2808diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py
2809index 6344348..4014363 100644
2810--- a/cloudinit/net/netplan.py
2811+++ b/cloudinit/net/netplan.py
2812@@ -34,7 +34,7 @@ def _get_params_dict_by_match(config, match):
2813 if key.startswith(match))
2814
2815
2816-def _extract_addresses(config, entry):
2817+def _extract_addresses(config, entry, ifname):
2818 """This method parse a cloudinit.net.network_state dictionary (config) and
2819 maps netstate keys/values into a dictionary (entry) to represent
2820 netplan yaml.
2821@@ -124,6 +124,15 @@ def _extract_addresses(config, entry):
2822
2823 addresses.append(addr)
2824
2825+ if 'mtu' in config:
2826+ entry_mtu = entry.get('mtu')
2827+ if entry_mtu and config['mtu'] != entry_mtu:
2828+ LOG.warning(
2829+ "Network config: ignoring %s device-level mtu:%s because"
2830+ " ipv4 subnet-level mtu:%s provided.",
2831+ ifname, config['mtu'], entry_mtu)
2832+ else:
2833+ entry['mtu'] = config['mtu']
2834 if len(addresses) > 0:
2835 entry.update({'addresses': addresses})
2836 if len(routes) > 0:
2837@@ -262,10 +271,7 @@ class Renderer(renderer.Renderer):
2838 else:
2839 del eth['match']
2840 del eth['set-name']
2841- if 'mtu' in ifcfg:
2842- eth['mtu'] = ifcfg.get('mtu')
2843-
2844- _extract_addresses(ifcfg, eth)
2845+ _extract_addresses(ifcfg, eth, ifname)
2846 ethernets.update({ifname: eth})
2847
2848 elif if_type == 'bond':
2849@@ -288,7 +294,7 @@ class Renderer(renderer.Renderer):
2850 slave_interfaces = ifcfg.get('bond-slaves')
2851 if slave_interfaces == 'none':
2852 _extract_bond_slaves_by_name(interfaces, bond, ifname)
2853- _extract_addresses(ifcfg, bond)
2854+ _extract_addresses(ifcfg, bond, ifname)
2855 bonds.update({ifname: bond})
2856
2857 elif if_type == 'bridge':
2858@@ -321,7 +327,7 @@ class Renderer(renderer.Renderer):
2859
2860 if len(br_config) > 0:
2861 bridge.update({'parameters': br_config})
2862- _extract_addresses(ifcfg, bridge)
2863+ _extract_addresses(ifcfg, bridge, ifname)
2864 bridges.update({ifname: bridge})
2865
2866 elif if_type == 'vlan':
2867@@ -333,7 +339,7 @@ class Renderer(renderer.Renderer):
2868 macaddr = ifcfg.get('mac_address', None)
2869 if macaddr is not None:
2870 vlan['macaddress'] = macaddr.lower()
2871- _extract_addresses(ifcfg, vlan)
2872+ _extract_addresses(ifcfg, vlan, ifname)
2873 vlans.update({ifname: vlan})
2874
2875 # inject global nameserver values under each all interface which
2876diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
2877index 6d63e5c..72c803e 100644
2878--- a/cloudinit/net/network_state.py
2879+++ b/cloudinit/net/network_state.py
2880@@ -7,6 +7,8 @@
2881 import copy
2882 import functools
2883 import logging
2884+import socket
2885+import struct
2886
2887 import six
2888
2889@@ -886,12 +888,9 @@ def net_prefix_to_ipv4_mask(prefix):
2890 This is the inverse of ipv4_mask_to_net_prefix.
2891 24 -> "255.255.255.0"
2892 Also supports input as a string."""
2893-
2894- mask = [0, 0, 0, 0]
2895- for i in list(range(0, int(prefix))):
2896- idx = int(i / 8)
2897- mask[idx] = mask[idx] + (1 << (7 - i % 8))
2898- return ".".join([str(x) for x in mask])
2899+ mask = socket.inet_ntoa(
2900+ struct.pack(">I", (0xffffffff << (32 - int(prefix)) & 0xffffffff)))
2901+ return mask
2902
2903
2904 def ipv4_mask_to_net_prefix(mask):
2905diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
2906index 39d89c4..3d71923 100644
2907--- a/cloudinit/net/sysconfig.py
2908+++ b/cloudinit/net/sysconfig.py
2909@@ -287,7 +287,6 @@ class Renderer(renderer.Renderer):
2910 if subnet_type == 'dhcp6':
2911 iface_cfg['IPV6INIT'] = True
2912 iface_cfg['DHCPV6C'] = True
2913- iface_cfg['BOOTPROTO'] = 'dhcp'
2914 elif subnet_type in ['dhcp4', 'dhcp']:
2915 iface_cfg['BOOTPROTO'] = 'dhcp'
2916 elif subnet_type == 'static':
2917@@ -305,6 +304,13 @@ class Renderer(renderer.Renderer):
2918 mtu_key = 'IPV6_MTU'
2919 iface_cfg['IPV6INIT'] = True
2920 if 'mtu' in subnet:
2921+ mtu_mismatch = bool(mtu_key in iface_cfg and
2922+ subnet['mtu'] != iface_cfg[mtu_key])
2923+ if mtu_mismatch:
2924+ LOG.warning(
2925+ 'Network config: ignoring %s device-level mtu:%s'
2926+ ' because ipv4 subnet-level mtu:%s provided.',
2927+ iface_cfg.name, iface_cfg[mtu_key], subnet['mtu'])
2928 iface_cfg[mtu_key] = subnet['mtu']
2929 elif subnet_type == 'manual':
2930 # If the subnet has an MTU setting, then ONBOOT=True
2931@@ -364,7 +370,7 @@ class Renderer(renderer.Renderer):
2932
2933 @classmethod
2934 def _render_subnet_routes(cls, iface_cfg, route_cfg, subnets):
2935- for i, subnet in enumerate(subnets, start=len(iface_cfg.children)):
2936+ for _, subnet in enumerate(subnets, start=len(iface_cfg.children)):
2937 for route in subnet.get('routes', []):
2938 is_ipv6 = subnet.get('ipv6') or is_ipv6_addr(route['gateway'])
2939
2940diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py
2941index 276556e..5c017d1 100644
2942--- a/cloudinit/net/tests/test_init.py
2943+++ b/cloudinit/net/tests/test_init.py
2944@@ -199,6 +199,7 @@ class TestGenerateFallbackConfig(CiTestCase):
2945 self.sysdir = self.tmp_dir() + '/'
2946 self.m_sys_path.return_value = self.sysdir
2947 self.addCleanup(sys_mock.stop)
2948+ self.add_patch('cloudinit.net.util.udevadm_settle', 'm_settle')
2949
2950 def test_generate_fallback_finds_connected_eth_with_mac(self):
2951 """generate_fallback_config finds any connected device with a mac."""
2952diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
2953index 993b26c..9ff929c 100644
2954--- a/cloudinit/netinfo.py
2955+++ b/cloudinit/netinfo.py
2956@@ -8,9 +8,11 @@
2957 #
2958 # This file is part of cloud-init. See LICENSE file for license information.
2959
2960+from copy import copy, deepcopy
2961 import re
2962
2963 from cloudinit import log as logging
2964+from cloudinit.net.network_state import net_prefix_to_ipv4_mask
2965 from cloudinit import util
2966
2967 from cloudinit.simpletable import SimpleTable
2968@@ -18,18 +20,90 @@ from cloudinit.simpletable import SimpleTable
2969 LOG = logging.getLogger()
2970
2971
2972-def netdev_info(empty=""):
2973- fields = ("hwaddr", "addr", "bcast", "mask")
2974- (ifcfg_out, _err) = util.subp(["ifconfig", "-a"], rcs=[0, 1])
2975+DEFAULT_NETDEV_INFO = {
2976+ "ipv4": [],
2977+ "ipv6": [],
2978+ "hwaddr": "",
2979+ "up": False
2980+}
2981+
2982+
2983+def _netdev_info_iproute(ipaddr_out):
2984+ """
2985+ Get network device dicts from ip route and ip link info.
2986+
2987+ @param ipaddr_out: Output string from 'ip addr show' command.
2988+
2989+ @returns: A dict of device info keyed by network device name containing
2990+ device configuration values.
2991+ @raise: TypeError if ipaddr_out isn't a string.
2992+ """
2993+ devs = {}
2994+ dev_name = None
2995+ for num, line in enumerate(ipaddr_out.splitlines()):
2996+ m = re.match(r'^\d+:\s(?P<dev>[^:]+):\s+<(?P<flags>\S+)>\s+.*', line)
2997+ if m:
2998+ dev_name = m.group('dev').lower().split('@')[0]
2999+ flags = m.group('flags').split(',')
3000+ devs[dev_name] = {
3001+ 'ipv4': [], 'ipv6': [], 'hwaddr': '',
3002+ 'up': bool('UP' in flags and 'LOWER_UP' in flags),
3003+ }
3004+ elif 'inet6' in line:
3005+ m = re.match(
3006+ r'\s+inet6\s(?P<ip>\S+)\sscope\s(?P<scope6>\S+).*', line)
3007+ if not m:
3008+ LOG.warning(
3009+ 'Could not parse ip addr show: (line:%d) %s', num, line)
3010+ continue
3011+ devs[dev_name]['ipv6'].append(m.groupdict())
3012+ elif 'inet' in line:
3013+ m = re.match(
3014+ r'\s+inet\s(?P<cidr4>\S+)(\sbrd\s(?P<bcast>\S+))?\sscope\s'
3015+ r'(?P<scope>\S+).*', line)
3016+ if not m:
3017+ LOG.warning(
3018+ 'Could not parse ip addr show: (line:%d) %s', num, line)
3019+ continue
3020+ match = m.groupdict()
3021+ cidr4 = match.pop('cidr4')
3022+ addr, _, prefix = cidr4.partition('/')
3023+ if not prefix:
3024+ prefix = '32'
3025+ devs[dev_name]['ipv4'].append({
3026+ 'ip': addr,
3027+ 'bcast': match['bcast'] if match['bcast'] else '',
3028+ 'mask': net_prefix_to_ipv4_mask(prefix),
3029+ 'scope': match['scope']})
3030+ elif 'link' in line:
3031+ m = re.match(
3032+ r'\s+link/(?P<link_type>\S+)\s(?P<hwaddr>\S+).*', line)
3033+ if not m:
3034+ LOG.warning(
3035+ 'Could not parse ip addr show: (line:%d) %s', num, line)
3036+ continue
3037+ if m.group('link_type') == 'ether':
3038+ devs[dev_name]['hwaddr'] = m.group('hwaddr')
3039+ else:
3040+ devs[dev_name]['hwaddr'] = ''
3041+ else:
3042+ continue
3043+ return devs
3044+
3045+
3046+def _netdev_info_ifconfig(ifconfig_data):
3047+ # fields that need to be returned in devs for each dev
3048 devs = {}
3049- for line in str(ifcfg_out).splitlines():
3050+ for line in ifconfig_data.splitlines():
3051 if len(line) == 0:
3052 continue
3053 if line[0] not in ("\t", " "):
3054 curdev = line.split()[0]
3055- devs[curdev] = {"up": False}
3056- for field in fields:
3057- devs[curdev][field] = ""
3058+ # current ifconfig pops a ':' on the end of the device
3059+ if curdev.endswith(':'):
3060+ curdev = curdev[:-1]
3061+ if curdev not in devs:
3062+ devs[curdev] = deepcopy(DEFAULT_NETDEV_INFO)
3063 toks = line.lower().strip().split()
3064 if toks[0] == "up":
3065 devs[curdev]['up'] = True
3066@@ -39,59 +113,164 @@ def netdev_info(empty=""):
3067 if re.search(r"flags=\d+<up,", toks[1]):
3068 devs[curdev]['up'] = True
3069
3070- fieldpost = ""
3071- if toks[0] == "inet6":
3072- fieldpost = "6"
3073-
3074 for i in range(len(toks)):
3075- # older net-tools (ubuntu) show 'inet addr:xx.yy',
3076- # newer (freebsd and fedora) show 'inet xx.yy'
3077- # just skip this 'inet' entry. (LP: #1285185)
3078- try:
3079- if ((toks[i] in ("inet", "inet6") and
3080- toks[i + 1].startswith("addr:"))):
3081- continue
3082- except IndexError:
3083- pass
3084-
3085- # Couple the different items we're interested in with the correct
3086- # field since FreeBSD/CentOS/Fedora differ in the output.
3087- ifconfigfields = {
3088- "addr:": "addr", "inet": "addr",
3089- "bcast:": "bcast", "broadcast": "bcast",
3090- "mask:": "mask", "netmask": "mask",
3091- "hwaddr": "hwaddr", "ether": "hwaddr",
3092- "scope": "scope",
3093- }
3094- for origfield, field in ifconfigfields.items():
3095- target = "%s%s" % (field, fieldpost)
3096- if devs[curdev].get(target, ""):
3097- continue
3098- if toks[i] == "%s" % origfield:
3099- try:
3100- devs[curdev][target] = toks[i + 1]
3101- except IndexError:
3102- pass
3103- elif toks[i].startswith("%s" % origfield):
3104- devs[curdev][target] = toks[i][len(field) + 1:]
3105-
3106- if empty != "":
3107- for (_devname, dev) in devs.items():
3108- for field in dev:
3109- if dev[field] == "":
3110- dev[field] = empty
3111+ if toks[i] == "inet": # Create new ipv4 addr entry
3112+ devs[curdev]['ipv4'].append(
3113+ {'ip': toks[i + 1].lstrip("addr:")})
3114+ elif toks[i].startswith("bcast:"):
3115+ devs[curdev]['ipv4'][-1]['bcast'] = toks[i].lstrip("bcast:")
3116+ elif toks[i] == "broadcast":
3117+ devs[curdev]['ipv4'][-1]['bcast'] = toks[i + 1]
3118+ elif toks[i].startswith("mask:"):
3119+ devs[curdev]['ipv4'][-1]['mask'] = toks[i].lstrip("mask:")
3120+ elif toks[i] == "netmask":
3121+ devs[curdev]['ipv4'][-1]['mask'] = toks[i + 1]
3122+ elif toks[i] == "hwaddr" or toks[i] == "ether":
3123+ devs[curdev]['hwaddr'] = toks[i + 1]
3124+ elif toks[i] == "inet6":
3125+ if toks[i + 1] == "addr:":
3126+ devs[curdev]['ipv6'].append({'ip': toks[i + 2]})
3127+ else:
3128+ devs[curdev]['ipv6'].append({'ip': toks[i + 1]})
3129+ elif toks[i] == "prefixlen": # Add prefix to current ipv6 value
3130+ addr6 = devs[curdev]['ipv6'][-1]['ip'] + "/" + toks[i + 1]
3131+ devs[curdev]['ipv6'][-1]['ip'] = addr6
3132+ elif toks[i].startswith("scope:"):
3133+ devs[curdev]['ipv6'][-1]['scope6'] = toks[i].lstrip("scope:")
3134+ elif toks[i] == "scopeid":
3135+ res = re.match(r'.*<(\S+)>', toks[i + 1])
3136+ if res:
3137+ devs[curdev]['ipv6'][-1]['scope6'] = res.group(1)
3138+ return devs
3139+
3140+
3141+def netdev_info(empty=""):
3142+ devs = {}
3143+ if util.which('ip'):
3144+ # Try iproute first of all
3145+ (ipaddr_out, _err) = util.subp(["ip", "addr", "show"])
3146+ devs = _netdev_info_iproute(ipaddr_out)
3147+ elif util.which('ifconfig'):
3148+ # Fall back to net-tools if iproute2 is not present
3149+ (ifcfg_out, _err) = util.subp(["ifconfig", "-a"], rcs=[0, 1])
3150+ devs = _netdev_info_ifconfig(ifcfg_out)
3151+ else:
3152+ LOG.warning(
3153+ "Could not print networks: missing 'ip' and 'ifconfig' commands")
3154
3155+ if empty == "":
3156+ return devs
3157+
3158+ recurse_types = (dict, tuple, list)
3159+
3160+ def fill(data, new_val="", empty_vals=("", b"")):
3161+ """Recursively replace 'empty_vals' in data (dict, tuple, list)
3162+ with new_val"""
3163+ if isinstance(data, dict):
3164+ myiter = data.items()
3165+ elif isinstance(data, (tuple, list)):
3166+ myiter = enumerate(data)
3167+ else:
3168+ raise TypeError("Unexpected input to fill")
3169+
3170+ for key, val in myiter:
3171+ if val in empty_vals:
3172+ data[key] = new_val
3173+ elif isinstance(val, recurse_types):
3174+ fill(val, new_val)
3175+
3176+ fill(devs, new_val=empty)
3177 return devs
3178
3179
3180-def route_info():
3181- (route_out, _err) = util.subp(["netstat", "-rn"], rcs=[0, 1])
3182+def _netdev_route_info_iproute(iproute_data):
3183+ """
3184+ Get network route dicts from ip route info.
3185+
3186+ @param iproute_data: Output string from ip route command.
3187+
3188+ @returns: A dict containing ipv4 and ipv6 route entries as lists. Each
3189+ item in the list is a route dictionary representing destination,
3190+ gateway, flags, genmask and interface information.
3191+ """
3192+
3193+ routes = {}
3194+ routes['ipv4'] = []
3195+ routes['ipv6'] = []
3196+ entries = iproute_data.splitlines()
3197+ default_route_entry = {
3198+ 'destination': '', 'flags': '', 'gateway': '', 'genmask': '',
3199+ 'iface': '', 'metric': ''}
3200+ for line in entries:
3201+ entry = copy(default_route_entry)
3202+ if not line:
3203+ continue
3204+ toks = line.split()
3205+ flags = ['U']
3206+ if toks[0] == "default":
3207+ entry['destination'] = "0.0.0.0"
3208+ entry['genmask'] = "0.0.0.0"
3209+ else:
3210+ if '/' in toks[0]:
3211+ (addr, cidr) = toks[0].split("/")
3212+ else:
3213+ addr = toks[0]
3214+ cidr = '32'
3215+ flags.append("H")
3216+ entry['genmask'] = net_prefix_to_ipv4_mask(cidr)
3217+ entry['destination'] = addr
3218+ entry['genmask'] = net_prefix_to_ipv4_mask(cidr)
3219+ entry['gateway'] = "0.0.0.0"
3220+ for i in range(len(toks)):
3221+ if toks[i] == "via":
3222+ entry['gateway'] = toks[i + 1]
3223+ flags.insert(1, "G")
3224+ if toks[i] == "dev":
3225+ entry["iface"] = toks[i + 1]
3226+ if toks[i] == "metric":
3227+ entry['metric'] = toks[i + 1]
3228+ entry['flags'] = ''.join(flags)
3229+ routes['ipv4'].append(entry)
3230+ try:
3231+ (iproute_data6, _err6) = util.subp(
3232+ ["ip", "--oneline", "-6", "route", "list", "table", "all"],
3233+ rcs=[0, 1])
3234+ except util.ProcessExecutionError:
3235+ pass
3236+ else:
3237+ entries6 = iproute_data6.splitlines()
3238+ for line in entries6:
3239+ entry = {}
3240+ if not line:
3241+ continue
3242+ toks = line.split()
3243+ if toks[0] == "default":
3244+ entry['destination'] = "::/0"
3245+ entry['flags'] = "UG"
3246+ else:
3247+ entry['destination'] = toks[0]
3248+ entry['gateway'] = "::"
3249+ entry['flags'] = "U"
3250+ for i in range(len(toks)):
3251+ if toks[i] == "via":
3252+ entry['gateway'] = toks[i + 1]
3253+ entry['flags'] = "UG"
3254+ if toks[i] == "dev":
3255+ entry["iface"] = toks[i + 1]
3256+ if toks[i] == "metric":
3257+ entry['metric'] = toks[i + 1]
3258+ if toks[i] == "expires":
3259+ entry['flags'] = entry['flags'] + 'e'
3260+ routes['ipv6'].append(entry)
3261+ return routes
3262+
3263
3264+def _netdev_route_info_netstat(route_data):
3265 routes = {}
3266 routes['ipv4'] = []
3267 routes['ipv6'] = []
3268
3269- entries = route_out.splitlines()[1:]
3270+ entries = route_data.splitlines()
3271 for line in entries:
3272 if not line:
3273 continue
3274@@ -101,8 +280,8 @@ def route_info():
3275 # default 10.65.0.1 UGS 0 34920 vtnet0
3276 #
3277 # Linux netstat shows 2 more:
3278- # Destination Gateway Genmask Flags MSS Window irtt Iface
3279- # 0.0.0.0 10.65.0.1 0.0.0.0 UG 0 0 0 eth0
3280+ # Destination Gateway Genmask Flags Metric Ref Use Iface
3281+ # 0.0.0.0 10.65.0.1 0.0.0.0 UG 0 0 0 eth0
3282 if (len(toks) < 6 or toks[0] == "Kernel" or
3283 toks[0] == "Destination" or toks[0] == "Internet" or
3284 toks[0] == "Internet6" or toks[0] == "Routing"):
3285@@ -125,31 +304,57 @@ def route_info():
3286 routes['ipv4'].append(entry)
3287
3288 try:
3289- (route_out6, _err6) = util.subp(["netstat", "-A", "inet6", "-n"],
3290- rcs=[0, 1])
3291+ (route_data6, _err6) = util.subp(
3292+ ["netstat", "-A", "inet6", "--route", "--numeric"], rcs=[0, 1])
3293 except util.ProcessExecutionError:
3294 pass
3295 else:
3296- entries6 = route_out6.splitlines()[1:]
3297+ entries6 = route_data6.splitlines()
3298 for line in entries6:
3299 if not line:
3300 continue
3301 toks = line.split()
3302- if (len(toks) < 6 or toks[0] == "Kernel" or
3303+ if (len(toks) < 7 or toks[0] == "Kernel" or
3304+ toks[0] == "Destination" or toks[0] == "Internet" or
3305 toks[0] == "Proto" or toks[0] == "Active"):
3306 continue
3307 entry = {
3308- 'proto': toks[0],
3309- 'recv-q': toks[1],
3310- 'send-q': toks[2],
3311- 'local address': toks[3],
3312- 'foreign address': toks[4],
3313- 'state': toks[5],
3314+ 'destination': toks[0],
3315+ 'gateway': toks[1],
3316+ 'flags': toks[2],
3317+ 'metric': toks[3],
3318+ 'ref': toks[4],
3319+ 'use': toks[5],
3320+ 'iface': toks[6],
3321 }
3322+ # skip lo interface on ipv6
3323+ if entry['iface'] == "lo":
3324+ continue
3325+ # strip /128 from address if it's included
3326+ if entry['destination'].endswith('/128'):
3327+ entry['destination'] = re.sub(
3328+ r'\/128$', '', entry['destination'])
3329 routes['ipv6'].append(entry)
3330 return routes
3331
3332
3333+def route_info():
3334+ routes = {}
3335+ if util.which('ip'):
3336+ # Try iproute first of all
3337+ (iproute_out, _err) = util.subp(["ip", "-o", "route", "list"])
3338+ routes = _netdev_route_info_iproute(iproute_out)
3339+ elif util.which('netstat'):
3340+ # Fall back to net-tools if iproute2 is not present
3341+ (route_out, _err) = util.subp(
3342+ ["netstat", "--route", "--numeric", "--extend"], rcs=[0, 1])
3343+ routes = _netdev_route_info_netstat(route_out)
3344+ else:
3345+ LOG.warning(
3346+ "Could not print routes: missing 'ip' and 'netstat' commands")
3347+ return routes
3348+
3349+
3350 def getgateway():
3351 try:
3352 routes = route_info()
3353@@ -164,23 +369,36 @@ def getgateway():
3354
3355 def netdev_pformat():
3356 lines = []
3357+ empty = "."
3358 try:
3359- netdev = netdev_info(empty=".")
3360- except Exception:
3361- lines.append(util.center("Net device info failed", '!', 80))
3362+ netdev = netdev_info(empty=empty)
3363+ except Exception as e:
3364+ lines.append(
3365+ util.center(
3366+ "Net device info failed ({error})".format(error=str(e)),
3367+ '!', 80))
3368 else:
3369+ if not netdev:
3370+ return '\n'
3371 fields = ['Device', 'Up', 'Address', 'Mask', 'Scope', 'Hw-Address']
3372 tbl = SimpleTable(fields)
3373- for (dev, d) in sorted(netdev.items()):
3374- tbl.add_row([dev, d["up"], d["addr"], d["mask"], ".", d["hwaddr"]])
3375- if d.get('addr6'):
3376- tbl.add_row([dev, d["up"],
3377- d["addr6"], ".", d.get("scope6"), d["hwaddr"]])
3378+ for (dev, data) in sorted(netdev.items()):
3379+ for addr in data.get('ipv4'):
3380+ tbl.add_row(
3381+ (dev, data["up"], addr["ip"], addr["mask"],
3382+ addr.get('scope', empty), data["hwaddr"]))
3383+ for addr in data.get('ipv6'):
3384+ tbl.add_row(
3385+ (dev, data["up"], addr["ip"], empty, addr["scope6"],
3386+ data["hwaddr"]))
3387+ if len(data.get('ipv6')) + len(data.get('ipv4')) == 0:
3388+ tbl.add_row((dev, data["up"], empty, empty, empty,
3389+ data["hwaddr"]))
3390 netdev_s = tbl.get_string()
3391 max_len = len(max(netdev_s.splitlines(), key=len))
3392 header = util.center("Net device info", "+", max_len)
3393 lines.extend([header, netdev_s])
3394- return "\n".join(lines)
3395+ return "\n".join(lines) + "\n"
3396
3397
3398 def route_pformat():
3399@@ -188,7 +406,10 @@ def route_pformat():
3400 try:
3401 routes = route_info()
3402 except Exception as e:
3403- lines.append(util.center('Route info failed', '!', 80))
3404+ lines.append(
3405+ util.center(
3406+ 'Route info failed ({error})'.format(error=str(e)),
3407+ '!', 80))
3408 util.logexc(LOG, "Route info failed: %s" % e)
3409 else:
3410 if routes.get('ipv4'):
3411@@ -205,20 +426,20 @@ def route_pformat():
3412 header = util.center("Route IPv4 info", "+", max_len)
3413 lines.extend([header, route_s])
3414 if routes.get('ipv6'):
3415- fields_v6 = ['Route', 'Proto', 'Recv-Q', 'Send-Q',
3416- 'Local Address', 'Foreign Address', 'State']
3417+ fields_v6 = ['Route', 'Destination', 'Gateway', 'Interface',
3418+ 'Flags']
3419 tbl_v6 = SimpleTable(fields_v6)
3420 for (n, r) in enumerate(routes.get('ipv6')):
3421 route_id = str(n)
3422- tbl_v6.add_row([route_id, r['proto'],
3423- r['recv-q'], r['send-q'],
3424- r['local address'], r['foreign address'],
3425- r['state']])
3426+ if r['iface'] == 'lo':
3427+ continue
3428+ tbl_v6.add_row([route_id, r['destination'],
3429+ r['gateway'], r['iface'], r['flags']])
3430 route_s = tbl_v6.get_string()
3431 max_len = len(max(route_s.splitlines(), key=len))
3432 header = util.center("Route IPv6 info", "+", max_len)
3433 lines.extend([header, route_s])
3434- return "\n".join(lines)
3435+ return "\n".join(lines) + "\n"
3436
3437
3438 def debug_info(prefix='ci-info: '):
3439diff --git a/cloudinit/reporting/events.py b/cloudinit/reporting/events.py
3440index 4f62d2f..e5dfab3 100644
3441--- a/cloudinit/reporting/events.py
3442+++ b/cloudinit/reporting/events.py
3443@@ -192,7 +192,7 @@ class ReportEventStack(object):
3444
3445 def _childrens_finish_info(self):
3446 for cand_result in (status.FAIL, status.WARN):
3447- for name, (value, msg) in self.children.items():
3448+ for _name, (value, _msg) in self.children.items():
3449 if value == cand_result:
3450 return (value, self.message)
3451 return (self.result, self.message)
3452diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py
3453index 22279d0..858e082 100644
3454--- a/cloudinit/sources/DataSourceAliYun.py
3455+++ b/cloudinit/sources/DataSourceAliYun.py
3456@@ -45,7 +45,7 @@ def _is_aliyun():
3457
3458 def parse_public_keys(public_keys):
3459 keys = []
3460- for key_id, key_body in public_keys.items():
3461+ for _key_id, key_body in public_keys.items():
3462 if isinstance(key_body, str):
3463 keys.append(key_body.strip())
3464 elif isinstance(key_body, list):
3465diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
3466index e1d0055..24fd65f 100644
3467--- a/cloudinit/sources/DataSourceAltCloud.py
3468+++ b/cloudinit/sources/DataSourceAltCloud.py
3469@@ -29,7 +29,6 @@ CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info'
3470
3471 # Shell command lists
3472 CMD_PROBE_FLOPPY = ['modprobe', 'floppy']
3473-CMD_UDEVADM_SETTLE = ['udevadm', 'settle', '--timeout=5']
3474
3475 META_DATA_NOT_SUPPORTED = {
3476 'block-device-mapping': {},
3477@@ -185,26 +184,24 @@ class DataSourceAltCloud(sources.DataSource):
3478 cmd = CMD_PROBE_FLOPPY
3479 (cmd_out, _err) = util.subp(cmd)
3480 LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out)
3481- except ProcessExecutionError as _err:
3482- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
3483+ except ProcessExecutionError as e:
3484+ util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
3485 return False
3486- except OSError as _err:
3487- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
3488+ except OSError as e:
3489+ util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
3490 return False
3491
3492 floppy_dev = '/dev/fd0'
3493
3494 # udevadm settle for floppy device
3495 try:
3496- cmd = CMD_UDEVADM_SETTLE
3497- cmd.append('--exit-if-exists=' + floppy_dev)
3498- (cmd_out, _err) = util.subp(cmd)
3499+ (cmd_out, _err) = util.udevadm_settle(exists=floppy_dev, timeout=5)
3500 LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out)
3501- except ProcessExecutionError as _err:
3502- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
3503+ except ProcessExecutionError as e:
3504+ util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
3505 return False
3506- except OSError as _err:
3507- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
3508+ except OSError as e:
3509+ util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
3510 return False
3511
3512 try:
3513diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
3514index 0ee622e..7007d9e 100644
3515--- a/cloudinit/sources/DataSourceAzure.py
3516+++ b/cloudinit/sources/DataSourceAzure.py
3517@@ -48,6 +48,7 @@ DEFAULT_FS = 'ext4'
3518 # DMI chassis-asset-tag is set static for all azure instances
3519 AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77'
3520 REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds"
3521+REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready"
3522 IMDS_URL = "http://169.254.169.254/metadata/reprovisiondata"
3523
3524
3525@@ -107,31 +108,24 @@ def find_dev_from_busdev(camcontrol_out, busdev):
3526 return None
3527
3528
3529-def get_dev_storvsc_sysctl():
3530+def execute_or_debug(cmd, fail_ret=None):
3531 try:
3532- sysctl_out, err = util.subp(['sysctl', 'dev.storvsc'])
3533+ return util.subp(cmd)[0]
3534 except util.ProcessExecutionError:
3535- LOG.debug("Fail to execute sysctl dev.storvsc")
3536- sysctl_out = ""
3537- return sysctl_out
3538+ LOG.debug("Failed to execute: %s", ' '.join(cmd))
3539+ return fail_ret
3540+
3541+
3542+def get_dev_storvsc_sysctl():
3543+ return execute_or_debug(["sysctl", "dev.storvsc"], fail_ret="")
3544
3545
3546 def get_camcontrol_dev_bus():
3547- try:
3548- camcontrol_b_out, err = util.subp(['camcontrol', 'devlist', '-b'])
3549- except util.ProcessExecutionError:
3550- LOG.debug("Fail to execute camcontrol devlist -b")
3551- return None
3552- return camcontrol_b_out
3553+ return execute_or_debug(['camcontrol', 'devlist', '-b'])
3554
3555
3556 def get_camcontrol_dev():
3557- try:
3558- camcontrol_out, err = util.subp(['camcontrol', 'devlist'])
3559- except util.ProcessExecutionError:
3560- LOG.debug("Fail to execute camcontrol devlist")
3561- return None
3562- return camcontrol_out
3563+ return execute_or_debug(['camcontrol', 'devlist'])
3564
3565
3566 def get_resource_disk_on_freebsd(port_id):
3567@@ -214,6 +208,7 @@ BUILTIN_CLOUD_CONFIG = {
3568 }
3569
3570 DS_CFG_PATH = ['datasource', DS_NAME]
3571+DS_CFG_KEY_PRESERVE_NTFS = 'never_destroy_ntfs'
3572 DEF_EPHEMERAL_LABEL = 'Temporary Storage'
3573
3574 # The redacted password fails to meet password complexity requirements
3575@@ -400,14 +395,9 @@ class DataSourceAzure(sources.DataSource):
3576 if found == ddir:
3577 LOG.debug("using files cached in %s", ddir)
3578
3579- # azure / hyper-v provides random data here
3580- # TODO. find the seed on FreeBSD platform
3581- # now update ds_cfg to reflect contents pass in config
3582- if not util.is_FreeBSD():
3583- seed = util.load_file("/sys/firmware/acpi/tables/OEM0",
3584- quiet=True, decode=False)
3585- if seed:
3586- self.metadata['random_seed'] = seed
3587+ seed = _get_random_seed()
3588+ if seed:
3589+ self.metadata['random_seed'] = seed
3590
3591 user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
3592 self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
3593@@ -443,11 +433,12 @@ class DataSourceAzure(sources.DataSource):
3594 LOG.debug("negotiating already done for %s",
3595 self.get_instance_id())
3596
3597- def _poll_imds(self, report_ready=True):
3598+ def _poll_imds(self):
3599 """Poll IMDS for the new provisioning data until we get a valid
3600 response. Then return the returned JSON object."""
3601 url = IMDS_URL + "?api-version=2017-04-02"
3602 headers = {"Metadata": "true"}
3603+ report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE))
3604 LOG.debug("Start polling IMDS")
3605
3606 def exc_cb(msg, exception):
3607@@ -457,13 +448,17 @@ class DataSourceAzure(sources.DataSource):
3608 # call DHCP and setup the ephemeral network to acquire the new IP.
3609 return False
3610
3611- need_report = report_ready
3612 while True:
3613 try:
3614 with EphemeralDHCPv4() as lease:
3615- if need_report:
3616+ if report_ready:
3617+ path = REPORTED_READY_MARKER_FILE
3618+ LOG.info(
3619+ "Creating a marker file to report ready: %s", path)
3620+ util.write_file(path, "{pid}: {time}\n".format(
3621+ pid=os.getpid(), time=time()))
3622 self._report_ready(lease=lease)
3623- need_report = False
3624+ report_ready = False
3625 return readurl(url, timeout=1, headers=headers,
3626 exception_cb=exc_cb, infinite=True).contents
3627 except UrlError:
3628@@ -474,7 +469,7 @@ class DataSourceAzure(sources.DataSource):
3629 before we go into our polling loop."""
3630 try:
3631 get_metadata_from_fabric(None, lease['unknown-245'])
3632- except Exception as exc:
3633+ except Exception:
3634 LOG.warning(
3635 "Error communicating with Azure fabric; You may experience."
3636 "connectivity issues.", exc_info=True)
3637@@ -492,13 +487,15 @@ class DataSourceAzure(sources.DataSource):
3638 jump back into the polling loop in order to retrieve the ovf_env."""
3639 if not ret:
3640 return False
3641- (md, self.userdata_raw, cfg, files) = ret
3642+ (_md, self.userdata_raw, cfg, _files) = ret
3643 path = REPROVISION_MARKER_FILE
3644 if (cfg.get('PreprovisionedVm') is True or
3645 os.path.isfile(path)):
3646 if not os.path.isfile(path):
3647- LOG.info("Creating a marker file to poll imds")
3648- util.write_file(path, "%s: %s\n" % (os.getpid(), time()))
3649+ LOG.info("Creating a marker file to poll imds: %s",
3650+ path)
3651+ util.write_file(path, "{pid}: {time}\n".format(
3652+ pid=os.getpid(), time=time()))
3653 return True
3654 return False
3655
3656@@ -528,16 +525,19 @@ class DataSourceAzure(sources.DataSource):
3657 self.ds_cfg['agent_command'])
3658 try:
3659 fabric_data = metadata_func()
3660- except Exception as exc:
3661+ except Exception:
3662 LOG.warning(
3663 "Error communicating with Azure fabric; You may experience."
3664 "connectivity issues.", exc_info=True)
3665 return False
3666+ util.del_file(REPORTED_READY_MARKER_FILE)
3667 util.del_file(REPROVISION_MARKER_FILE)
3668 return fabric_data
3669
3670 def activate(self, cfg, is_new_instance):
3671- address_ephemeral_resize(is_new_instance=is_new_instance)
3672+ address_ephemeral_resize(is_new_instance=is_new_instance,
3673+ preserve_ntfs=self.ds_cfg.get(
3674+ DS_CFG_KEY_PRESERVE_NTFS, False))
3675 return
3676
3677 @property
3678@@ -581,17 +581,29 @@ def _has_ntfs_filesystem(devpath):
3679 return os.path.realpath(devpath) in ntfs_devices
3680
3681
3682-def can_dev_be_reformatted(devpath):
3683- """Determine if block device devpath is newly formatted ephemeral.
3684+def can_dev_be_reformatted(devpath, preserve_ntfs):
3685+ """Determine if the ephemeral drive at devpath should be reformatted.
3686
3687- A newly formatted disk will:
3688+ A fresh ephemeral disk is formatted by Azure and will:
3689 a.) have a partition table (dos or gpt)
3690 b.) have 1 partition that is ntfs formatted, or
3691 have 2 partitions with the second partition ntfs formatted.
3692 (larger instances with >2TB ephemeral disk have gpt, and will
3693 have a microsoft reserved partition as part 1. LP: #1686514)
3694 c.) the ntfs partition will have no files other than possibly
3695- 'dataloss_warning_readme.txt'"""
3696+ 'dataloss_warning_readme.txt'
3697+
3698+ User can indicate that NTFS should never be destroyed by setting
3699+ DS_CFG_KEY_PRESERVE_NTFS in dscfg.
3700+ If data is found on NTFS, user is warned to set DS_CFG_KEY_PRESERVE_NTFS
3701+ to make sure cloud-init does not accidentally wipe their data.
3702+ If cloud-init cannot mount the disk to check for data, destruction
3703+ will be allowed, unless the dscfg key is set."""
3704+ if preserve_ntfs:
3705+ msg = ('config says to never destroy NTFS (%s.%s), skipping checks' %
3706+ (".".join(DS_CFG_PATH), DS_CFG_KEY_PRESERVE_NTFS))
3707+ return False, msg
3708+
3709 if not os.path.exists(devpath):
3710 return False, 'device %s does not exist' % devpath
3711
3712@@ -624,18 +636,27 @@ def can_dev_be_reformatted(devpath):
3713 bmsg = ('partition %s (%s) on device %s was ntfs formatted' %
3714 (cand_part, cand_path, devpath))
3715 try:
3716- file_count = util.mount_cb(cand_path, count_files)
3717+ file_count = util.mount_cb(cand_path, count_files, mtype="ntfs",
3718+ update_env_for_mount={'LANG': 'C'})
3719 except util.MountFailedError as e:
3720+ if "mount: unknown filesystem type 'ntfs'" in str(e):
3721+ return True, (bmsg + ' but this system cannot mount NTFS,'
3722+ ' assuming there are no important files.'
3723+ ' Formatting allowed.')
3724 return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e)
3725
3726 if file_count != 0:
3727+ LOG.warning("it looks like you're using NTFS on the ephemeral disk, "
3728+ 'to ensure that filesystem does not get wiped, set '
3729+ '%s.%s in config', '.'.join(DS_CFG_PATH),
3730+ DS_CFG_KEY_PRESERVE_NTFS)
3731 return False, bmsg + ' but had %d files on it.' % file_count
3732
3733 return True, bmsg + ' and had no important files. Safe for reformatting.'
3734
3735
3736 def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
3737- is_new_instance=False):
3738+ is_new_instance=False, preserve_ntfs=False):
3739 # wait for ephemeral disk to come up
3740 naplen = .2
3741 missing = util.wait_for_files([devpath], maxwait=maxwait, naplen=naplen,
3742@@ -651,7 +672,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
3743 if is_new_instance:
3744 result, msg = (True, "First instance boot.")
3745 else:
3746- result, msg = can_dev_be_reformatted(devpath)
3747+ result, msg = can_dev_be_reformatted(devpath, preserve_ntfs)
3748
3749 LOG.debug("reformattable=%s: %s", result, msg)
3750 if not result:
3751@@ -965,6 +986,18 @@ def _check_freebsd_cdrom(cdrom_dev):
3752 return False
3753
3754
3755+def _get_random_seed():
3756+ """Return content random seed file if available, otherwise,
3757+ return None."""
3758+ # azure / hyper-v provides random data here
3759+ # TODO. find the seed on FreeBSD platform
3760+ # now update ds_cfg to reflect contents pass in config
3761+ if util.is_FreeBSD():
3762+ return None
3763+ return util.load_file("/sys/firmware/acpi/tables/OEM0",
3764+ quiet=True, decode=False)
3765+
3766+
3767 def list_possible_azure_ds_devs():
3768 devlist = []
3769 if util.is_FreeBSD():
3770diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
3771index 0df545f..d4b758f 100644
3772--- a/cloudinit/sources/DataSourceCloudStack.py
3773+++ b/cloudinit/sources/DataSourceCloudStack.py
3774@@ -68,6 +68,10 @@ class DataSourceCloudStack(sources.DataSource):
3775
3776 dsname = 'CloudStack'
3777
3778+ # Setup read_url parameters per get_url_params.
3779+ url_max_wait = 120
3780+ url_timeout = 50
3781+
3782 def __init__(self, sys_cfg, distro, paths):
3783 sources.DataSource.__init__(self, sys_cfg, distro, paths)
3784 self.seed_dir = os.path.join(paths.seed_dir, 'cs')
3785@@ -80,33 +84,18 @@ class DataSourceCloudStack(sources.DataSource):
3786 self.metadata_address = "http://%s/" % (self.vr_addr,)
3787 self.cfg = {}
3788
3789- def _get_url_settings(self):
3790- mcfg = self.ds_cfg
3791- max_wait = 120
3792- try:
3793- max_wait = int(mcfg.get("max_wait", max_wait))
3794- except Exception:
3795- util.logexc(LOG, "Failed to get max wait. using %s", max_wait)
3796+ def wait_for_metadata_service(self):
3797+ url_params = self.get_url_params()
3798
3799- if max_wait == 0:
3800+ if url_params.max_wait_seconds <= 0:
3801 return False
3802
3803- timeout = 50
3804- try:
3805- timeout = int(mcfg.get("timeout", timeout))
3806- except Exception:
3807- util.logexc(LOG, "Failed to get timeout, using %s", timeout)
3808-
3809- return (max_wait, timeout)
3810-
3811- def wait_for_metadata_service(self):
3812- (max_wait, timeout) = self._get_url_settings()
3813-
3814 urls = [uhelp.combine_url(self.metadata_address,
3815 'latest/meta-data/instance-id')]
3816 start_time = time.time()
3817- url = uhelp.wait_for_url(urls=urls, max_wait=max_wait,
3818- timeout=timeout, status_cb=LOG.warn)
3819+ url = uhelp.wait_for_url(
3820+ urls=urls, max_wait=url_params.max_wait_seconds,
3821+ timeout=url_params.timeout_seconds, status_cb=LOG.warn)
3822
3823 if url:
3824 LOG.debug("Using metadata source: '%s'", url)
3825diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
3826index c7b5fe5..4cb2897 100644
3827--- a/cloudinit/sources/DataSourceConfigDrive.py
3828+++ b/cloudinit/sources/DataSourceConfigDrive.py
3829@@ -43,7 +43,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
3830 self.version = None
3831 self.ec2_metadata = None
3832 self._network_config = None
3833- self.network_json = None
3834+ self.network_json = sources.UNSET
3835 self.network_eni = None
3836 self.known_macs = None
3837 self.files = {}
3838@@ -69,7 +69,8 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
3839 util.logexc(LOG, "Failed reading config drive from %s", sdir)
3840
3841 if not found:
3842- for dev in find_candidate_devs():
3843+ dslist = self.sys_cfg.get('datasource_list')
3844+ for dev in find_candidate_devs(dslist=dslist):
3845 try:
3846 # Set mtype if freebsd and turn off sync
3847 if dev.startswith("/dev/cd"):
3848@@ -148,7 +149,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
3849 @property
3850 def network_config(self):
3851 if self._network_config is None:
3852- if self.network_json is not None:
3853+ if self.network_json not in (None, sources.UNSET):
3854 LOG.debug("network config provided via network_json")
3855 self._network_config = openstack.convert_net_json(
3856 self.network_json, known_macs=self.known_macs)
3857@@ -211,7 +212,7 @@ def write_injected_files(files):
3858 util.logexc(LOG, "Failed writing file: %s", filename)
3859
3860
3861-def find_candidate_devs(probe_optical=True):
3862+def find_candidate_devs(probe_optical=True, dslist=None):
3863 """Return a list of devices that may contain the config drive.
3864
3865 The returned list is sorted by search order where the first item has
3866@@ -227,6 +228,9 @@ def find_candidate_devs(probe_optical=True):
3867 * either vfat or iso9660 formated
3868 * labeled with 'config-2' or 'CONFIG-2'
3869 """
3870+ if dslist is None:
3871+ dslist = []
3872+
3873 # query optical drive to get it in blkid cache for 2.6 kernels
3874 if probe_optical:
3875 for device in OPTICAL_DEVICES:
3876@@ -257,7 +261,8 @@ def find_candidate_devs(probe_optical=True):
3877 devices = [d for d in candidates
3878 if d in by_label or not util.is_partition(d)]
3879
3880- if devices:
3881+ LOG.debug("devices=%s dslist=%s", devices, dslist)
3882+ if devices and "IBMCloud" in dslist:
3883 # IBMCloud uses config-2 label, but limited to a single UUID.
3884 ibm_platform, ibm_path = get_ibm_platform()
3885 if ibm_path in devices:
3886diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
3887index 21e9ef8..968ab3f 100644
3888--- a/cloudinit/sources/DataSourceEc2.py
3889+++ b/cloudinit/sources/DataSourceEc2.py
3890@@ -27,8 +27,6 @@ SKIP_METADATA_URL_CODES = frozenset([uhelp.NOT_FOUND])
3891 STRICT_ID_PATH = ("datasource", "Ec2", "strict_id")
3892 STRICT_ID_DEFAULT = "warn"
3893
3894-_unset = "_unset"
3895-
3896
3897 class Platforms(object):
3898 # TODO Rename and move to cloudinit.cloud.CloudNames
3899@@ -59,15 +57,16 @@ class DataSourceEc2(sources.DataSource):
3900 # for extended metadata content. IPv6 support comes in 2016-09-02
3901 extended_metadata_versions = ['2016-09-02']
3902
3903+ # Setup read_url parameters per get_url_params.
3904+ url_max_wait = 120
3905+ url_timeout = 50
3906+
3907 _cloud_platform = None
3908
3909- _network_config = _unset # Used for caching calculated network config v1
3910+ _network_config = sources.UNSET # Used to cache calculated network cfg v1
3911
3912 # Whether we want to get network configuration from the metadata service.
3913- get_network_metadata = False
3914-
3915- # Track the discovered fallback nic for use in configuration generation.
3916- _fallback_interface = None
3917+ perform_dhcp_setup = False
3918
3919 def __init__(self, sys_cfg, distro, paths):
3920 super(DataSourceEc2, self).__init__(sys_cfg, distro, paths)
3921@@ -98,7 +97,7 @@ class DataSourceEc2(sources.DataSource):
3922 elif self.cloud_platform == Platforms.NO_EC2_METADATA:
3923 return False
3924
3925- if self.get_network_metadata: # Setup networking in init-local stage.
3926+ if self.perform_dhcp_setup: # Setup networking in init-local stage.
3927 if util.is_FreeBSD():
3928 LOG.debug("FreeBSD doesn't support running dhclient with -sf")
3929 return False
3930@@ -158,27 +157,11 @@ class DataSourceEc2(sources.DataSource):
3931 else:
3932 return self.metadata['instance-id']
3933
3934- def _get_url_settings(self):
3935- mcfg = self.ds_cfg
3936- max_wait = 120
3937- try:
3938- max_wait = int(mcfg.get("max_wait", max_wait))
3939- except Exception:
3940- util.logexc(LOG, "Failed to get max wait. using %s", max_wait)
3941-
3942- timeout = 50
3943- try:
3944- timeout = max(0, int(mcfg.get("timeout", timeout)))
3945- except Exception:
3946- util.logexc(LOG, "Failed to get timeout, using %s", timeout)
3947-
3948- return (max_wait, timeout)
3949-
3950 def wait_for_metadata_service(self):
3951 mcfg = self.ds_cfg
3952
3953- (max_wait, timeout) = self._get_url_settings()
3954- if max_wait <= 0:
3955+ url_params = self.get_url_params()
3956+ if url_params.max_wait_seconds <= 0:
3957 return False
3958
3959 # Remove addresses from the list that wont resolve.
3960@@ -205,7 +188,8 @@ class DataSourceEc2(sources.DataSource):
3961
3962 start_time = time.time()
3963 url = uhelp.wait_for_url(
3964- urls=urls, max_wait=max_wait, timeout=timeout, status_cb=LOG.warn)
3965+ urls=urls, max_wait=url_params.max_wait_seconds,
3966+ timeout=url_params.timeout_seconds, status_cb=LOG.warn)
3967
3968 if url:
3969 self.metadata_address = url2base[url]
3970@@ -310,11 +294,11 @@ class DataSourceEc2(sources.DataSource):
3971 @property
3972 def network_config(self):
3973 """Return a network config dict for rendering ENI or netplan files."""
3974- if self._network_config != _unset:
3975+ if self._network_config != sources.UNSET:
3976 return self._network_config
3977
3978 if self.metadata is None:
3979- # this would happen if get_data hadn't been called. leave as _unset
3980+ # this would happen if get_data hadn't been called. leave as UNSET
3981 LOG.warning(
3982 "Unexpected call to network_config when metadata is None.")
3983 return None
3984@@ -353,9 +337,7 @@ class DataSourceEc2(sources.DataSource):
3985 self._fallback_interface = _legacy_fbnic
3986 self.fallback_nic = None
3987 else:
3988- self._fallback_interface = net.find_fallback_nic()
3989- if self._fallback_interface is None:
3990- LOG.warning("Did not find a fallback interface on EC2.")
3991+ return super(DataSourceEc2, self).fallback_interface
3992 return self._fallback_interface
3993
3994 def _crawl_metadata(self):
3995@@ -390,7 +372,7 @@ class DataSourceEc2Local(DataSourceEc2):
3996 metadata service. If the metadata service provides network configuration
3997 then render the network configuration for that instance based on metadata.
3998 """
3999- get_network_metadata = True # Get metadata network config if present
4000+ perform_dhcp_setup = True # Use dhcp before querying metadata
4001
4002 def get_data(self):
4003 supported_platforms = (Platforms.AWS,)
4004diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py
4005index 02b3d56..01106ec 100644
4006--- a/cloudinit/sources/DataSourceIBMCloud.py
4007+++ b/cloudinit/sources/DataSourceIBMCloud.py
4008@@ -8,17 +8,11 @@ There are 2 different api exposed launch methods.
4009 * template: This is the legacy method of launching instances.
4010 When booting from an image template, the system boots first into
4011 a "provisioning" mode. There, host <-> guest mechanisms are utilized
4012- to execute code in the guest and provision it.
4013+ to execute code in the guest and configure it. The configuration
4014+ includes configuring the system network and possibly installing
4015+ packages and other software stack.
4016
4017- Cloud-init will disable itself when it detects that it is in the
4018- provisioning mode. It detects this by the presence of
4019- a file '/root/provisioningConfiguration.cfg'.
4020-
4021- When provided with user-data, the "first boot" will contain a
4022- ConfigDrive-like disk labeled with 'METADATA'. If there is no user-data
4023- provided, then there is no data-source.
4024-
4025- Cloud-init never does any network configuration in this mode.
4026+ After the provisioning is finished, the system reboots.
4027
4028 * os_code: Essentially "launch by OS Code" (Operating System Code).
4029 This is a more modern approach. There is no specific "provisioning" boot.
4030@@ -30,11 +24,73 @@ There are 2 different api exposed launch methods.
4031 mean that 1 in 8^16 (~4 billion) Xen ConfigDrive systems will be
4032 incorrectly identified as IBMCloud.
4033
4034+The combination of these 2 launch methods and with or without user-data
4035+creates 6 boot scenarios.
4036+ A. os_code with user-data
4037+ B. os_code without user-data
4038+ Cloud-init is fully operational in this mode.
4039+
4040+ There is a block device attached with label 'config-2'.
4041+ As it differs from OpenStack's config-2, we have to differentiate.
4042+ We do so by requiring the UUID on the filesystem to be "9796-932E".
4043+
4044+ This disk will have the following files. Specifically note, there
4045+ is no versioned path to the meta-data, only 'latest':
4046+ openstack/latest/meta_data.json
4047+ openstack/latest/network_data.json
4048+ openstack/latest/user_data [optional]
4049+ openstack/latest/vendor_data.json
4050+
4051+ vendor_data.json as of 2018-04 looks like this:
4052+ {"cloud-init":"#!/bin/bash\necho 'root:$6$<snip>' | chpasswd -e"}
4053+
4054+ The only difference between A and B in this mode is the presence
4055+ of user_data on the config disk.
4056+
4057+ C. template, provisioning boot with user-data
4058+ D. template, provisioning boot without user-data.
4059+ With ds-identify cloud-init is fully disabled in this mode.
4060+ Without ds-identify, cloud-init None datasource will be used.
4061+
4062+ This is currently identified by the presence of
4063+ /root/provisioningConfiguration.cfg . That file is placed into the
4064+ system before it is booted.
4065+
4066+ The difference between C and D is the presence of the METADATA disk
4067+ as described in E below. There is no METADATA disk attached unless
4068+ user-data is provided.
4069+
4070+ E. template, post-provisioning boot with user-data.
4071+ Cloud-init is fully operational in this mode.
4072+
4073+ This is identified by a block device with filesystem label "METADATA".
4074+ The looks similar to a version-1 OpenStack config drive. It will
4075+ have the following files:
4076+
4077+ openstack/latest/user_data
4078+ openstack/latest/meta_data.json
4079+ openstack/content/interfaces
4080+ meta.js
4081+
4082+ meta.js contains something similar to user_data. cloud-init ignores it.
4083+ cloud-init ignores the 'interfaces' style file here.
4084+ In this mode, cloud-init has networking code disabled. It relies
4085+ on the provisioning boot to have configured networking.
4086+
4087+ F. template, post-provisioning boot without user-data.
4088+ With ds-identify, cloud-init will be fully disabled.
4089+ Without ds-identify, cloud-init None datasource will be used.
4090+
4091+ There is no information available to identify this scenario.
4092+
4093+ The user will be able to ssh in as as root with their public keys that
4094+ have been installed into /root/ssh/.authorized_keys
4095+ during the provisioning stage.
4096+
4097 TODO:
4098 * is uuid (/sys/hypervisor/uuid) stable for life of an instance?
4099 it seems it is not the same as data's uuid in the os_code case
4100 but is in the template case.
4101-
4102 """
4103 import base64
4104 import json
4105@@ -138,8 +194,30 @@ def _is_xen():
4106 return os.path.exists("/proc/xen")
4107
4108
4109-def _is_ibm_provisioning():
4110- return os.path.exists("/root/provisioningConfiguration.cfg")
4111+def _is_ibm_provisioning(
4112+ prov_cfg="/root/provisioningConfiguration.cfg",
4113+ inst_log="/root/swinstall.log",
4114+ boot_ref="/proc/1/environ"):
4115+ """Return boolean indicating if this boot is ibm provisioning boot."""
4116+ if os.path.exists(prov_cfg):
4117+ msg = "config '%s' exists." % prov_cfg
4118+ result = True
4119+ if os.path.exists(inst_log):
4120+ if os.path.exists(boot_ref):
4121+ result = (os.stat(inst_log).st_mtime >
4122+ os.stat(boot_ref).st_mtime)
4123+ msg += (" log '%s' from %s boot." %
4124+ (inst_log, "current" if result else "previous"))
4125+ else:
4126+ msg += (" log '%s' existed, but no reference file '%s'." %
4127+ (inst_log, boot_ref))
4128+ result = False
4129+ else:
4130+ msg += " log '%s' did not exist." % inst_log
4131+ else:
4132+ result, msg = (False, "config '%s' did not exist." % prov_cfg)
4133+ LOG.debug("ibm_provisioning=%s: %s", result, msg)
4134+ return result
4135
4136
4137 def get_ibm_platform():
4138@@ -189,7 +267,7 @@ def get_ibm_platform():
4139 else:
4140 return (Platforms.TEMPLATE_LIVE_METADATA, metadata_path)
4141 elif _is_ibm_provisioning():
4142- return (Platforms.TEMPLATE_PROVISIONING_NODATA, None)
4143+ return (Platforms.TEMPLATE_PROVISIONING_NODATA, None)
4144 return not_found
4145
4146
4147diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
4148index 6ac8863..bcb3854 100644
4149--- a/cloudinit/sources/DataSourceMAAS.py
4150+++ b/cloudinit/sources/DataSourceMAAS.py
4151@@ -198,13 +198,13 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
4152 If version is None, then <version>/ will not be used.
4153 """
4154 if read_file_or_url is None:
4155- read_file_or_url = util.read_file_or_url
4156+ read_file_or_url = url_helper.read_file_or_url
4157
4158 if seed_url.endswith("/"):
4159 seed_url = seed_url[:-1]
4160
4161 md = {}
4162- for path, dictname, binary, optional in DS_FIELDS:
4163+ for path, _dictname, binary, optional in DS_FIELDS:
4164 if version is None:
4165 url = "%s/%s" % (seed_url, path)
4166 else:
4167diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
4168index 5d3a8dd..2daea59 100644
4169--- a/cloudinit/sources/DataSourceNoCloud.py
4170+++ b/cloudinit/sources/DataSourceNoCloud.py
4171@@ -78,7 +78,7 @@ class DataSourceNoCloud(sources.DataSource):
4172 LOG.debug("Using seeded data from %s", path)
4173 mydata = _merge_new_seed(mydata, seeded)
4174 break
4175- except ValueError as e:
4176+ except ValueError:
4177 pass
4178
4179 # If the datasource config had a 'seedfrom' entry, then that takes
4180@@ -117,7 +117,7 @@ class DataSourceNoCloud(sources.DataSource):
4181 try:
4182 seeded = util.mount_cb(dev, _pp2d_callback,
4183 pp2d_kwargs)
4184- except ValueError as e:
4185+ except ValueError:
4186 if dev in label_list:
4187 LOG.warning("device %s with label=%s not a"
4188 "valid seed.", dev, label)
4189diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
4190index dc914a7..178ccb0 100644
4191--- a/cloudinit/sources/DataSourceOVF.py
4192+++ b/cloudinit/sources/DataSourceOVF.py
4193@@ -556,7 +556,7 @@ def search_file(dirpath, filename):
4194 if not dirpath or not filename:
4195 return None
4196
4197- for root, dirs, files in os.walk(dirpath):
4198+ for root, _dirs, files in os.walk(dirpath):
4199 if filename in files:
4200 return os.path.join(root, filename)
4201
4202diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
4203index d4a4111..16c1078 100644
4204--- a/cloudinit/sources/DataSourceOpenNebula.py
4205+++ b/cloudinit/sources/DataSourceOpenNebula.py
4206@@ -378,7 +378,7 @@ def read_context_disk_dir(source_dir, asuser=None):
4207 if asuser is not None:
4208 try:
4209 pwd.getpwnam(asuser)
4210- except KeyError as e:
4211+ except KeyError:
4212 raise BrokenContextDiskDir(
4213 "configured user '{user}' does not exist".format(
4214 user=asuser))
4215diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
4216index e55a763..365af96 100644
4217--- a/cloudinit/sources/DataSourceOpenStack.py
4218+++ b/cloudinit/sources/DataSourceOpenStack.py
4219@@ -7,6 +7,7 @@
4220 import time
4221
4222 from cloudinit import log as logging
4223+from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
4224 from cloudinit import sources
4225 from cloudinit import url_helper
4226 from cloudinit import util
4227@@ -22,51 +23,37 @@ DEFAULT_METADATA = {
4228 "instance-id": DEFAULT_IID,
4229 }
4230
4231+# OpenStack DMI constants
4232+DMI_PRODUCT_NOVA = 'OpenStack Nova'
4233+DMI_PRODUCT_COMPUTE = 'OpenStack Compute'
4234+VALID_DMI_PRODUCT_NAMES = [DMI_PRODUCT_NOVA, DMI_PRODUCT_COMPUTE]
4235+DMI_ASSET_TAG_OPENTELEKOM = 'OpenTelekomCloud'
4236+VALID_DMI_ASSET_TAGS = [DMI_ASSET_TAG_OPENTELEKOM]
4237+
4238
4239 class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
4240
4241 dsname = "OpenStack"
4242
4243+ _network_config = sources.UNSET # Used to cache calculated network cfg v1
4244+
4245+ # Whether we want to get network configuration from the metadata service.
4246+ perform_dhcp_setup = False
4247+
4248 def __init__(self, sys_cfg, distro, paths):
4249 super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths)
4250 self.metadata_address = None
4251 self.ssl_details = util.fetch_ssl_details(self.paths)
4252 self.version = None
4253 self.files = {}
4254- self.ec2_metadata = None
4255+ self.ec2_metadata = sources.UNSET
4256+ self.network_json = sources.UNSET
4257
4258 def __str__(self):
4259 root = sources.DataSource.__str__(self)
4260 mstr = "%s [%s,ver=%s]" % (root, self.dsmode, self.version)
4261 return mstr
4262
4263- def _get_url_settings(self):
4264- # TODO(harlowja): this is shared with ec2 datasource, we should just
4265- # move it to a shared location instead...
4266- # Note: the defaults here are different though.
4267-
4268- # max_wait < 0 indicates do not wait
4269- max_wait = -1
4270- timeout = 10
4271- retries = 5
4272-
4273- try:
4274- max_wait = int(self.ds_cfg.get("max_wait", max_wait))
4275- except Exception:
4276- util.logexc(LOG, "Failed to get max wait. using %s", max_wait)
4277-
4278- try:
4279- timeout = max(0, int(self.ds_cfg.get("timeout", timeout)))
4280- except Exception:
4281- util.logexc(LOG, "Failed to get timeout, using %s", timeout)
4282-
4283- try:
4284- retries = int(self.ds_cfg.get("retries", retries))
4285- except Exception:
4286- util.logexc(LOG, "Failed to get retries. using %s", retries)
4287-
4288- return (max_wait, timeout, retries)
4289-
4290 def wait_for_metadata_service(self):
4291 urls = self.ds_cfg.get("metadata_urls", [DEF_MD_URL])
4292 filtered = [x for x in urls if util.is_resolvable_url(x)]
4293@@ -86,10 +73,11 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
4294 md_urls.append(md_url)
4295 url2base[md_url] = url
4296
4297- (max_wait, timeout, retries) = self._get_url_settings()
4298+ url_params = self.get_url_params()
4299 start_time = time.time()
4300- avail_url = url_helper.wait_for_url(urls=md_urls, max_wait=max_wait,
4301- timeout=timeout)
4302+ avail_url = url_helper.wait_for_url(
4303+ urls=md_urls, max_wait=url_params.max_wait_seconds,
4304+ timeout=url_params.timeout_seconds)
4305 if avail_url:
4306 LOG.debug("Using metadata source: '%s'", url2base[avail_url])
4307 else:
4308@@ -99,38 +87,66 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
4309 self.metadata_address = url2base.get(avail_url)
4310 return bool(avail_url)
4311
4312- def _get_data(self):
4313- try:
4314- if not self.wait_for_metadata_service():
4315- return False
4316- except IOError:
4317- return False
4318+ def check_instance_id(self, sys_cfg):
4319+ # quickly (local check only) if self.instance_id is still valid
4320+ return sources.instance_id_matches_system_uuid(self.get_instance_id())
4321
4322- (max_wait, timeout, retries) = self._get_url_settings()
4323+ @property
4324+ def network_config(self):
4325+ """Return a network config dict for rendering ENI or netplan files."""
4326+ if self._network_config != sources.UNSET:
4327+ return self._network_config
4328+
4329+ # RELEASE_BLOCKER: SRU to Xenial and Artful SRU should not provide
4330+ # network_config by default unless configured in /etc/cloud/cloud.cfg*.
4331+ # Patch Xenial and Artful before release to default to False.
4332+ if util.is_false(self.ds_cfg.get('apply_network_config', True)):
4333+ self._network_config = None
4334+ return self._network_config
4335+ if self.network_json == sources.UNSET:
4336+ # this would happen if get_data hadn't been called. leave as UNSET
4337+ LOG.warning(
4338+ 'Unexpected call to network_config when network_json is None.')
4339+ return None
4340+
4341+ LOG.debug('network config provided via network_json')
4342+ self._network_config = openstack.convert_net_json(
4343+ self.network_json, known_macs=None)
4344+ return self._network_config
4345
4346- try:
4347- results = util.log_time(LOG.debug,
4348- 'Crawl of openstack metadata service',
4349- read_metadata_service,
4350- args=[self.metadata_address],
4351- kwargs={'ssl_details': self.ssl_details,
4352- 'retries': retries,
4353- 'timeout': timeout})
4354- except openstack.NonReadable:
4355- return False
4356- except (openstack.BrokenMetadata, IOError):
4357- util.logexc(LOG, "Broken metadata address %s",
4358- self.metadata_address)
4359+ def _get_data(self):
4360+ """Crawl metadata, parse and persist that data for this instance.
4361+
4362+ @return: True when metadata discovered indicates OpenStack datasource.
4363+ False when unable to contact metadata service or when metadata
4364+ format is invalid or disabled.
4365+ """
4366+ if not detect_openstack():
4367 return False
4368+ if self.perform_dhcp_setup: # Setup networking in init-local stage.
4369+ try:
4370+ with EphemeralDHCPv4(self.fallback_interface):
4371+ results = util.log_time(
4372+ logfunc=LOG.debug, msg='Crawl of metadata service',
4373+ func=self._crawl_metadata)
4374+ except (NoDHCPLeaseError, sources.InvalidMetaDataException) as e:
4375+ util.logexc(LOG, str(e))
4376+ return False
4377+ else:
4378+ try:
4379+ results = self._crawl_metadata()
4380+ except sources.InvalidMetaDataException as e:
4381+ util.logexc(LOG, str(e))
4382+ return False
4383
4384 self.dsmode = self._determine_dsmode([results.get('dsmode')])
4385 if self.dsmode == sources.DSMODE_DISABLED:
4386 return False
4387-
4388 md = results.get('metadata', {})
4389 md = util.mergemanydict([md, DEFAULT_METADATA])
4390 self.metadata = md
4391 self.ec2_metadata = results.get('ec2-metadata')
4392+ self.network_json = results.get('networkdata')
4393 self.userdata_raw = results.get('userdata')
4394 self.version = results['version']
4395 self.files.update(results.get('files', {}))
4396@@ -145,9 +161,50 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
4397
4398 return True
4399
4400- def check_instance_id(self, sys_cfg):
4401- # quickly (local check only) if self.instance_id is still valid
4402- return sources.instance_id_matches_system_uuid(self.get_instance_id())
4403+ def _crawl_metadata(self):
4404+ """Crawl metadata service when available.
4405+
4406+ @returns: Dictionary with all metadata discovered for this datasource.
4407+ @raise: InvalidMetaDataException on unreadable or broken
4408+ metadata.
4409+ """
4410+ try:
4411+ if not self.wait_for_metadata_service():
4412+ raise sources.InvalidMetaDataException(
4413+ 'No active metadata service found')
4414+ except IOError as e:
4415+ raise sources.InvalidMetaDataException(
4416+ 'IOError contacting metadata service: {error}'.format(
4417+ error=str(e)))
4418+
4419+ url_params = self.get_url_params()
4420+
4421+ try:
4422+ result = util.log_time(
4423+ LOG.debug, 'Crawl of openstack metadata service',
4424+ read_metadata_service, args=[self.metadata_address],
4425+ kwargs={'ssl_details': self.ssl_details,
4426+ 'retries': url_params.num_retries,
4427+ 'timeout': url_params.timeout_seconds})
4428+ except openstack.NonReadable as e:
4429+ raise sources.InvalidMetaDataException(str(e))
4430+ except (openstack.BrokenMetadata, IOError):
4431+ msg = 'Broken metadata address {addr}'.format(
4432+ addr=self.metadata_address)
4433+ raise sources.InvalidMetaDataException(msg)
4434+ return result
4435+
4436+
4437+class DataSourceOpenStackLocal(DataSourceOpenStack):
4438+ """Run in init-local using a dhcp discovery prior to metadata crawl.
4439+
4440+ In init-local, no network is available. This subclass sets up minimal
4441+ networking with dhclient on a viable nic so that it can talk to the
4442+ metadata service. If the metadata service provides network configuration
4443+ then render the network configuration for that instance based on metadata.
4444+ """
4445+
4446+ perform_dhcp_setup = True # Get metadata network config if present
4447
4448
4449 def read_metadata_service(base_url, ssl_details=None,
4450@@ -157,8 +214,23 @@ def read_metadata_service(base_url, ssl_details=None,
4451 return reader.read_v2()
4452
4453
4454+def detect_openstack():
4455+ """Return True when a potential OpenStack platform is detected."""
4456+ if not util.is_x86():
4457+ return True # Non-Intel cpus don't properly report dmi product names
4458+ product_name = util.read_dmi_data('system-product-name')
4459+ if product_name in VALID_DMI_PRODUCT_NAMES:
4460+ return True
4461+ elif util.read_dmi_data('chassis-asset-tag') in VALID_DMI_ASSET_TAGS:
4462+ return True
4463+ elif util.get_proc_env(1).get('product_name') == DMI_PRODUCT_NOVA:
4464+ return True
4465+ return False
4466+
4467+
4468 # Used to match classes to dependencies
4469 datasources = [
4470+ (DataSourceOpenStackLocal, (sources.DEP_FILESYSTEM,)),
4471 (DataSourceOpenStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
4472 ]
4473
4474diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
4475index 86bfa5d..f92e8b5 100644
4476--- a/cloudinit/sources/DataSourceSmartOS.py
4477+++ b/cloudinit/sources/DataSourceSmartOS.py
4478@@ -1,4 +1,5 @@
4479 # Copyright (C) 2013 Canonical Ltd.
4480+# Copyright (c) 2018, Joyent, Inc.
4481 #
4482 # Author: Ben Howard <ben.howard@canonical.com>
4483 #
4484@@ -10,17 +11,19 @@
4485 # SmartOS hosts use a serial console (/dev/ttyS1) on KVM Linux Guests
4486 # The meta-data is transmitted via key/value pairs made by
4487 # requests on the console. For example, to get the hostname, you
4488-# would send "GET hostname" on /dev/ttyS1.
4489+# would send "GET sdc:hostname" on /dev/ttyS1.
4490 # For Linux Guests running in LX-Brand Zones on SmartOS hosts
4491 # a socket (/native/.zonecontrol/metadata.sock) is used instead
4492 # of a serial console.
4493 #
4494 # Certain behavior is defined by the DataDictionary
4495-# http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html
4496+# https://eng.joyent.com/mdata/datadict.html
4497 # Comments with "@datadictionary" are snippets of the definition
4498
4499 import base64
4500 import binascii
4501+import errno
4502+import fcntl
4503 import json
4504 import os
4505 import random
4506@@ -108,7 +111,7 @@ BUILTIN_CLOUD_CONFIG = {
4507 'overwrite': False}
4508 },
4509 'fs_setup': [{'label': 'ephemeral0',
4510- 'filesystem': 'ext3',
4511+ 'filesystem': 'ext4',
4512 'device': 'ephemeral0'}],
4513 }
4514
4515@@ -162,9 +165,8 @@ class DataSourceSmartOS(sources.DataSource):
4516
4517 dsname = "Joyent"
4518
4519- _unset = "_unset"
4520- smartos_type = _unset
4521- md_client = _unset
4522+ smartos_type = sources.UNSET
4523+ md_client = sources.UNSET
4524
4525 def __init__(self, sys_cfg, distro, paths):
4526 sources.DataSource.__init__(self, sys_cfg, distro, paths)
4527@@ -186,12 +188,12 @@ class DataSourceSmartOS(sources.DataSource):
4528 return "%s [client=%s]" % (root, self.md_client)
4529
4530 def _init(self):
4531- if self.smartos_type == self._unset:
4532+ if self.smartos_type == sources.UNSET:
4533 self.smartos_type = get_smartos_environ()
4534 if self.smartos_type is None:
4535 self.md_client = None
4536
4537- if self.md_client == self._unset:
4538+ if self.md_client == sources.UNSET:
4539 self.md_client = jmc_client_factory(
4540 smartos_type=self.smartos_type,
4541 metadata_sockfile=self.ds_cfg['metadata_sockfile'],
4542@@ -229,6 +231,9 @@ class DataSourceSmartOS(sources.DataSource):
4543 self.md_client)
4544 return False
4545
4546+ # Open once for many requests, rather than once for each request
4547+ self.md_client.open_transport()
4548+
4549 for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items():
4550 smartos_noun, strip = attribute
4551 md[ci_noun] = self.md_client.get(smartos_noun, strip=strip)
4552@@ -236,6 +241,8 @@ class DataSourceSmartOS(sources.DataSource):
4553 for ci_noun, smartos_noun in SMARTOS_ATTRIB_JSON.items():
4554 md[ci_noun] = self.md_client.get_json(smartos_noun)
4555
4556+ self.md_client.close_transport()
4557+
4558 # @datadictionary: This key may contain a program that is written
4559 # to a file in the filesystem of the guest on each boot and then
4560 # executed. It may be of any format that would be considered
4561@@ -266,8 +273,14 @@ class DataSourceSmartOS(sources.DataSource):
4562 write_boot_content(u_data, u_data_f)
4563
4564 # Handle the cloud-init regular meta
4565+
4566+ # The hostname may or may not be qualified with the local domain name.
4567+ # This follows section 3.14 of RFC 2132.
4568 if not md['local-hostname']:
4569- md['local-hostname'] = md['instance-id']
4570+ if md['hostname']:
4571+ md['local-hostname'] = md['hostname']
4572+ else:
4573+ md['local-hostname'] = md['instance-id']
4574
4575 ud = None
4576 if md['user-data']:
4577@@ -285,6 +298,7 @@ class DataSourceSmartOS(sources.DataSource):
4578 self.userdata_raw = ud
4579 self.vendordata_raw = md['vendor-data']
4580 self.network_data = md['network-data']
4581+ self.routes_data = md['routes']
4582
4583 self._set_provisioned()
4584 return True
4585@@ -308,7 +322,8 @@ class DataSourceSmartOS(sources.DataSource):
4586 convert_smartos_network_data(
4587 network_data=self.network_data,
4588 dns_servers=self.metadata['dns_servers'],
4589- dns_domain=self.metadata['dns_domain']))
4590+ dns_domain=self.metadata['dns_domain'],
4591+ routes=self.routes_data))
4592 return self._network_config
4593
4594
4595@@ -316,6 +331,10 @@ class JoyentMetadataFetchException(Exception):
4596 pass
4597
4598
4599+class JoyentMetadataTimeoutException(JoyentMetadataFetchException):
4600+ pass
4601+
4602+
4603 class JoyentMetadataClient(object):
4604 """
4605 A client implementing v2 of the Joyent Metadata Protocol Specification.
4606@@ -360,6 +379,47 @@ class JoyentMetadataClient(object):
4607 LOG.debug('Value "%s" found.', value)
4608 return value
4609
4610+ def _readline(self):
4611+ """
4612+ Reads a line a byte at a time until \n is encountered. Returns an
4613+ ascii string with the trailing newline removed.
4614+
4615+ If a timeout (per-byte) is set and it expires, a
4616+ JoyentMetadataFetchException will be thrown.
4617+ """
4618+ response = []
4619+
4620+ def as_ascii():
4621+ return b''.join(response).decode('ascii')
4622+
4623+ msg = "Partial response: '%s'"
4624+ while True:
4625+ try:
4626+ byte = self.fp.read(1)
4627+ if len(byte) == 0:
4628+ raise JoyentMetadataTimeoutException(msg % as_ascii())
4629+ if byte == b'\n':
4630+ return as_ascii()
4631+ response.append(byte)
4632+ except OSError as exc:
4633+ if exc.errno == errno.EAGAIN:
4634+ raise JoyentMetadataTimeoutException(msg % as_ascii())
4635+ raise
4636+
4637+ def _write(self, msg):
4638+ self.fp.write(msg.encode('ascii'))
4639+ self.fp.flush()
4640+
4641+ def _negotiate(self):
4642+ LOG.debug('Negotiating protocol V2')
4643+ self._write('NEGOTIATE V2\n')
4644+ response = self._readline()
4645+ LOG.debug('read "%s"', response)
4646+ if response != 'V2_OK':
4647+ raise JoyentMetadataFetchException(
4648+ 'Invalid response "%s" to "NEGOTIATE V2"' % response)
4649+ LOG.debug('Negotiation complete')
4650+
4651 def request(self, rtype, param=None):
4652 request_id = '{0:08x}'.format(random.randint(0, 0xffffffff))
4653 message_body = ' '.join((request_id, rtype,))
4654@@ -374,18 +434,11 @@ class JoyentMetadataClient(object):
4655 self.open_transport()
4656 need_close = True
4657
4658- self.fp.write(msg.encode('ascii'))
4659- self.fp.flush()
4660-
4661- response = bytearray()
4662- response.extend(self.fp.read(1))
4663- while response[-1:] != b'\n':
4664- response.extend(self.fp.read(1))
4665-
4666+ self._write(msg)
4667+ response = self._readline()
4668 if need_close:
4669 self.close_transport()
4670
4671- response = response.rstrip().decode('ascii')
4672 LOG.debug('Read "%s" from metadata transport.', response)
4673
4674 if 'SUCCESS' not in response:
4675@@ -410,9 +463,9 @@ class JoyentMetadataClient(object):
4676
4677 def list(self):
4678 result = self.request(rtype='KEYS')
4679- if result:
4680- result = result.split('\n')
4681- return result
4682+ if not result:
4683+ return []
4684+ return result.split('\n')
4685
4686 def put(self, key, val):
4687 param = b' '.join([base64.b64encode(i.encode())
4688@@ -450,6 +503,7 @@ class JoyentMetadataSocketClient(JoyentMetadataClient):
4689 sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
4690 sock.connect(self.socketpath)
4691 self.fp = sock.makefile('rwb')
4692+ self._negotiate()
4693
4694 def exists(self):
4695 return os.path.exists(self.socketpath)
4696@@ -459,8 +513,9 @@ class JoyentMetadataSocketClient(JoyentMetadataClient):
4697
4698
4699 class JoyentMetadataSerialClient(JoyentMetadataClient):
4700- def __init__(self, device, timeout=10, smartos_type=SMARTOS_ENV_KVM):
4701- super(JoyentMetadataSerialClient, self).__init__(smartos_type)
4702+ def __init__(self, device, timeout=10, smartos_type=SMARTOS_ENV_KVM,
4703+ fp=None):
4704+ super(JoyentMetadataSerialClient, self).__init__(smartos_type, fp)
4705 self.device = device
4706 self.timeout = timeout
4707
4708@@ -468,10 +523,51 @@ class JoyentMetadataSerialClient(JoyentMetadataClient):
4709 return os.path.exists(self.device)
4710
4711 def open_transport(self):
4712- ser = serial.Serial(self.device, timeout=self.timeout)
4713- if not ser.isOpen():
4714- raise SystemError("Unable to open %s" % self.device)
4715- self.fp = ser
4716+ if self.fp is None:
4717+ ser = serial.Serial(self.device, timeout=self.timeout)
4718+ if not ser.isOpen():
4719+ raise SystemError("Unable to open %s" % self.device)
4720+ self.fp = ser
4721+ fcntl.lockf(ser, fcntl.LOCK_EX)
4722+ self._flush()
4723+ self._negotiate()
4724+
4725+ def _flush(self):
4726+ LOG.debug('Flushing input')
4727+ # Read any pending data
4728+ timeout = self.fp.timeout
4729+ self.fp.timeout = 0.1
4730+ while True:
4731+ try:
4732+ self._readline()
4733+ except JoyentMetadataTimeoutException:
4734+ break
4735+ LOG.debug('Input empty')
4736+
4737+ # Send a newline and expect "invalid command". Keep trying until
4738+ # successful. Retry rather frequently so that the "Is the host
4739+ # metadata service running" appears on the console soon after someone
4740+ # attaches in an effort to debug.
4741+ if timeout > 5:
4742+ self.fp.timeout = 5
4743+ else:
4744+ self.fp.timeout = timeout
4745+ while True:
4746+ LOG.debug('Writing newline, expecting "invalid command"')
4747+ self._write('\n')
4748+ try:
4749+ response = self._readline()
4750+ if response == 'invalid command':
4751+ break
4752+ if response == 'FAILURE':
4753+ LOG.debug('Got "FAILURE". Retrying.')
4754+ continue
4755+ LOG.warning('Unexpected response "%s" during flush', response)
4756+ except JoyentMetadataTimeoutException:
4757+ LOG.warning('Timeout while initializing metadata client. ' +
4758+ 'Is the host metadata service running?')
4759+ LOG.debug('Got "invalid command". Flush complete.')
4760+ self.fp.timeout = timeout
4761
4762 def __repr__(self):
4763 return "%s(device=%s, timeout=%s)" % (
4764@@ -650,7 +746,7 @@ def get_smartos_environ(uname_version=None, product_name=None):
4765 # report 'BrandZ virtual linux' as the kernel version
4766 if uname_version is None:
4767 uname_version = uname[3]
4768- if uname_version.lower() == 'brandz virtual linux':
4769+ if uname_version == 'BrandZ virtual linux':
4770 return SMARTOS_ENV_LX_BRAND
4771
4772 if product_name is None:
4773@@ -658,7 +754,7 @@ def get_smartos_environ(uname_version=None, product_name=None):
4774 else:
4775 system_type = product_name
4776
4777- if system_type and 'smartdc' in system_type.lower():
4778+ if system_type and system_type.startswith('SmartDC'):
4779 return SMARTOS_ENV_KVM
4780
4781 return None
4782@@ -666,7 +762,8 @@ def get_smartos_environ(uname_version=None, product_name=None):
4783
4784 # Convert SMARTOS 'sdc:nics' data to network_config yaml
4785 def convert_smartos_network_data(network_data=None,
4786- dns_servers=None, dns_domain=None):
4787+ dns_servers=None, dns_domain=None,
4788+ routes=None):
4789 """Return a dictionary of network_config by parsing provided
4790 SMARTOS sdc:nics configuration data
4791
4792@@ -684,6 +781,10 @@ def convert_smartos_network_data(network_data=None,
4793 keys are related to ip configuration. For each ip in the 'ips' list
4794 we create a subnet entry under 'subnets' pairing the ip to a one in
4795 the 'gateways' list.
4796+
4797+ Each route in sdc:routes is mapped to a route on each interface.
4798+ The sdc:routes properties 'dst' and 'gateway' map to 'network' and
4799+ 'gateway'. The 'linklocal' sdc:routes property is ignored.
4800 """
4801
4802 valid_keys = {
4803@@ -706,6 +807,10 @@ def convert_smartos_network_data(network_data=None,
4804 'scope',
4805 'type',
4806 ],
4807+ 'route': [
4808+ 'network',
4809+ 'gateway',
4810+ ],
4811 }
4812
4813 if dns_servers:
4814@@ -720,6 +825,9 @@ def convert_smartos_network_data(network_data=None,
4815 else:
4816 dns_domain = []
4817
4818+ if not routes:
4819+ routes = []
4820+
4821 def is_valid_ipv4(addr):
4822 return '.' in addr
4823
4824@@ -746,6 +854,7 @@ def convert_smartos_network_data(network_data=None,
4825 if ip == "dhcp":
4826 subnet = {'type': 'dhcp4'}
4827 else:
4828+ routeents = []
4829 subnet = dict((k, v) for k, v in nic.items()
4830 if k in valid_keys['subnet'])
4831 subnet.update({
4832@@ -767,6 +876,25 @@ def convert_smartos_network_data(network_data=None,
4833 pgws[proto]['gw'] = gateways[0]
4834 subnet.update({'gateway': pgws[proto]['gw']})
4835
4836+ for route in routes:
4837+ rcfg = dict((k, v) for k, v in route.items()
4838+ if k in valid_keys['route'])
4839+ # Linux uses the value of 'gateway' to determine
4840+ # automatically if the route is a forward/next-hop
4841+ # (non-local IP for gateway) or an interface/resolver
4842+ # (local IP for gateway). So we can ignore the
4843+ # 'interface' attribute of sdc:routes, because SDC
4844+ # guarantees that the gateway is a local IP for
4845+ # "interface=true".
4846+ #
4847+ # Eventually we should be smart and compare "gateway"
4848+ # to see if it's in the prefix. We can then smartly
4849+ # add or not-add this route. But for now,
4850+ # when in doubt, use brute force! Routes for everyone!
4851+ rcfg.update({'network': route['dst']})
4852+ routeents.append(rcfg)
4853+ subnet.update({'routes': routeents})
4854+
4855 subnets.append(subnet)
4856 cfg.update({'subnets': subnets})
4857 config.append(cfg)
4858@@ -810,12 +938,14 @@ if __name__ == "__main__":
4859 keyname = SMARTOS_ATTRIB_JSON[key]
4860 data[key] = client.get_json(keyname)
4861 elif key == "network_config":
4862- for depkey in ('network-data', 'dns_servers', 'dns_domain'):
4863+ for depkey in ('network-data', 'dns_servers', 'dns_domain',
4864+ 'routes'):
4865 load_key(client, depkey, data)
4866 data[key] = convert_smartos_network_data(
4867 network_data=data['network-data'],
4868 dns_servers=data['dns_servers'],
4869- dns_domain=data['dns_domain'])
4870+ dns_domain=data['dns_domain'],
4871+ routes=data['routes'])
4872 else:
4873 if key in SMARTOS_ATTRIB_MAP:
4874 keyname, strip = SMARTOS_ATTRIB_MAP[key]
4875diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
4876index df0b374..90d7457 100644
4877--- a/cloudinit/sources/__init__.py
4878+++ b/cloudinit/sources/__init__.py
4879@@ -9,6 +9,7 @@
4880 # This file is part of cloud-init. See LICENSE file for license information.
4881
4882 import abc
4883+from collections import namedtuple
4884 import copy
4885 import json
4886 import os
4887@@ -17,6 +18,7 @@ import six
4888 from cloudinit.atomic_helper import write_json
4889 from cloudinit import importer
4890 from cloudinit import log as logging
4891+from cloudinit import net
4892 from cloudinit import type_utils
4893 from cloudinit import user_data as ud
4894 from cloudinit import util
4895@@ -41,6 +43,8 @@ INSTANCE_JSON_FILE = 'instance-data.json'
4896 # Key which can be provide a cloud's official product name to cloud-init
4897 METADATA_CLOUD_NAME_KEY = 'cloud-name'
4898
4899+UNSET = "_unset"
4900+
4901 LOG = logging.getLogger(__name__)
4902
4903
4904@@ -48,6 +52,11 @@ class DataSourceNotFoundException(Exception):
4905 pass
4906
4907
4908+class InvalidMetaDataException(Exception):
4909+ """Raised when metadata is broken, unavailable or disabled."""
4910+ pass
4911+
4912+
4913 def process_base64_metadata(metadata, key_path=''):
4914 """Strip ci-b64 prefix and return metadata with base64-encoded-keys set."""
4915 md_copy = copy.deepcopy(metadata)
4916@@ -68,6 +77,10 @@ def process_base64_metadata(metadata, key_path=''):
4917 return md_copy
4918
4919
4920+URLParams = namedtuple(
4921+ 'URLParms', ['max_wait_seconds', 'timeout_seconds', 'num_retries'])
4922+
4923+
4924 @six.add_metaclass(abc.ABCMeta)
4925 class DataSource(object):
4926
4927@@ -81,6 +94,14 @@ class DataSource(object):
4928 # Cached cloud_name as determined by _get_cloud_name
4929 _cloud_name = None
4930
4931+ # Track the discovered fallback nic for use in configuration generation.
4932+ _fallback_interface = None
4933+
4934+ # read_url_params
4935+ url_max_wait = -1 # max_wait < 0 means do not wait
4936+ url_timeout = 10 # timeout for each metadata url read attempt
4937+ url_retries = 5 # number of times to retry url upon 404
4938+
4939 def __init__(self, sys_cfg, distro, paths, ud_proc=None):
4940 self.sys_cfg = sys_cfg
4941 self.distro = distro
4942@@ -128,6 +149,14 @@ class DataSource(object):
4943 'meta-data': self.metadata,
4944 'user-data': self.get_userdata_raw(),
4945 'vendor-data': self.get_vendordata_raw()}}
4946+ if hasattr(self, 'network_json'):
4947+ network_json = getattr(self, 'network_json')
4948+ if network_json != UNSET:
4949+ instance_data['ds']['network_json'] = network_json
4950+ if hasattr(self, 'ec2_metadata'):
4951+ ec2_metadata = getattr(self, 'ec2_metadata')
4952+ if ec2_metadata != UNSET:
4953+ instance_data['ds']['ec2_metadata'] = ec2_metadata
4954 instance_data.update(
4955 self._get_standardized_metadata())
4956 try:
4957@@ -149,6 +178,42 @@ class DataSource(object):
4958 'Subclasses of DataSource must implement _get_data which'
4959 ' sets self.metadata, vendordata_raw and userdata_raw.')
4960
4961+ def get_url_params(self):
4962+ """Return the Datasource's preferred url_read parameters.
4963+
4964+ Subclasses may override url_max_wait, url_timeout, url_retries.
4965+
4966+ @return: A URLParams object with max_wait_seconds, timeout_seconds,
4967+ num_retries.
4968+ """
4969+ max_wait = self.url_max_wait
4970+ try:
4971+ max_wait = int(self.ds_cfg.get("max_wait", self.url_max_wait))
4972+ except ValueError:
4973+ util.logexc(
4974+ LOG, "Config max_wait '%s' is not an int, using default '%s'",
4975+ self.ds_cfg.get("max_wait"), max_wait)
4976+
4977+ timeout = self.url_timeout
4978+ try:
4979+ timeout = max(
4980+ 0, int(self.ds_cfg.get("timeout", self.url_timeout)))
4981+ except ValueError:
4982+ timeout = self.url_timeout
4983+ util.logexc(
4984+ LOG, "Config timeout '%s' is not an int, using default '%s'",
4985+ self.ds_cfg.get('timeout'), timeout)
4986+
4987+ retries = self.url_retries
4988+ try:
4989+ retries = int(self.ds_cfg.get("retries", self.url_retries))
4990+ except Exception:
4991+ util.logexc(
4992+ LOG, "Config retries '%s' is not an int, using default '%s'",
4993+ self.ds_cfg.get('retries'), retries)
4994+
4995+ return URLParams(max_wait, timeout, retries)
4996+
4997 def get_userdata(self, apply_filter=False):
4998 if self.userdata is None:
4999 self.userdata = self.ud_proc.process(self.get_userdata_raw())
5000@@ -162,6 +227,17 @@ class DataSource(object):
The diff has been truncated for viewing.

Subscribers

People subscribed via source and target branches