Merge ~chad.smith/cloud-init:ubuntu/artful into cloud-init:ubuntu/artful
- Git
- lp:~chad.smith/cloud-init
- ubuntu/artful
- Merge into ubuntu/artful
Proposed by
Chad Smith
Status: Merged
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Merged at revision: | 2022cd6ee06582153a55e51db5e5ae0b5398ba2e | ||||||||||||||||||||
Proposed branch: | ~chad.smith/cloud-init:ubuntu/artful | ||||||||||||||||||||
Merge into: | cloud-init:ubuntu/artful | ||||||||||||||||||||
Diff against target: |
15370 lines (+7268/-1952) 200 files modified
.pylintrc (+1/-1) ChangeLog (+226/-0) MANIFEST.in (+1/-0) bash_completion/cloud-init (+77/-0) cloudinit/analyze/__main__.py (+1/-1) cloudinit/analyze/dump.py (+1/-1) cloudinit/apport.py (+23/-4) cloudinit/cmd/devel/logs.py (+48/-11) cloudinit/cmd/devel/tests/test_logs.py (+18/-3) cloudinit/cmd/main.py (+1/-1) cloudinit/cmd/tests/test_main.py (+3/-3) cloudinit/config/cc_apt_configure.py (+2/-2) cloudinit/config/cc_bootcmd.py (+0/-1) cloudinit/config/cc_disable_ec2_metadata.py (+12/-2) cloudinit/config/cc_disk_setup.py (+4/-8) cloudinit/config/cc_emit_upstart.py (+1/-1) cloudinit/config/cc_lxd.py (+56/-8) cloudinit/config/cc_mounts.py (+45/-30) cloudinit/config/cc_ntp.py (+407/-78) cloudinit/config/cc_phone_home.py (+4/-3) cloudinit/config/cc_power_state_change.py (+1/-1) cloudinit/config/cc_resizefs.py (+4/-6) cloudinit/config/cc_rh_subscription.py (+8/-10) cloudinit/config/cc_rsyslog.py (+2/-2) cloudinit/config/cc_runcmd.py (+0/-1) cloudinit/config/cc_set_passwords.py (+45/-60) cloudinit/config/cc_snap.py (+2/-3) cloudinit/config/cc_snappy.py (+2/-2) cloudinit/config/cc_ubuntu_advantage.py (+2/-3) cloudinit/config/cc_users_groups.py (+6/-2) cloudinit/config/schema.py (+48/-20) cloudinit/config/tests/test_disable_ec2_metadata.py (+50/-0) cloudinit/config/tests/test_set_passwords.py (+71/-0) cloudinit/config/tests/test_snap.py (+27/-2) cloudinit/config/tests/test_ubuntu_advantage.py (+28/-2) cloudinit/distros/__init__.py (+13/-1) cloudinit/distros/freebsd.py (+5/-5) cloudinit/distros/opensuse.py (+24/-0) cloudinit/distros/ubuntu.py (+19/-0) cloudinit/ec2_utils.py (+6/-8) cloudinit/handlers/upstart_job.py (+1/-1) cloudinit/net/__init__.py (+33/-3) cloudinit/net/cmdline.py (+1/-1) cloudinit/net/dhcp.py (+1/-1) cloudinit/net/eni.py (+17/-3) cloudinit/net/netplan.py (+14/-8) cloudinit/net/network_state.py (+5/-6) cloudinit/net/sysconfig.py (+8/-2) cloudinit/net/tests/test_init.py (+1/-0) cloudinit/netinfo.py (+300/-79) cloudinit/reporting/events.py (+1/-1) 
cloudinit/sources/DataSourceAliYun.py (+1/-1) cloudinit/sources/DataSourceAltCloud.py (+9/-12) cloudinit/sources/DataSourceAzure.py (+75/-42) cloudinit/sources/DataSourceCloudStack.py (+10/-21) cloudinit/sources/DataSourceConfigDrive.py (+10/-5) cloudinit/sources/DataSourceEc2.py (+15/-33) cloudinit/sources/DataSourceIBMCloud.py (+92/-14) cloudinit/sources/DataSourceMAAS.py (+2/-2) cloudinit/sources/DataSourceNoCloud.py (+2/-2) cloudinit/sources/DataSourceOVF.py (+1/-1) cloudinit/sources/DataSourceOpenNebula.py (+1/-1) cloudinit/sources/DataSourceOpenStack.py (+127/-55) cloudinit/sources/DataSourceSmartOS.py (+163/-33) cloudinit/sources/__init__.py (+76/-0) cloudinit/sources/helpers/azure.py (+3/-2) cloudinit/sources/helpers/digitalocean.py (+3/-4) cloudinit/sources/helpers/openstack.py (+1/-1) cloudinit/sources/helpers/vmware/imc/config_nic.py (+1/-1) cloudinit/sources/helpers/vmware/imc/config_passwd.py (+2/-2) cloudinit/sources/helpers/vmware/imc/guestcust_util.py (+2/-2) cloudinit/sources/tests/test_init.py (+88/-3) cloudinit/ssh_util.py (+63/-7) cloudinit/stages.py (+17/-9) cloudinit/templater.py (+10/-2) cloudinit/tests/helpers.py (+56/-30) cloudinit/tests/test_netinfo.py (+147/-86) cloudinit/tests/test_url_helper.py (+27/-1) cloudinit/tests/test_util.py (+127/-2) cloudinit/tests/test_version.py (+17/-0) cloudinit/url_helper.py (+29/-2) cloudinit/user_data.py (+16/-12) cloudinit/util.py (+171/-68) cloudinit/version.py (+5/-1) config/cloud.cfg.tmpl (+2/-0) debian/changelog (+92/-3) debian/patches/openstack-no-network-config.patch (+2/-4) doc/examples/cloud-config-disk-setup.txt (+2/-2) doc/examples/cloud-config-user-groups.txt (+20/-7) doc/rtd/topics/datasources.rst (+98/-0) doc/rtd/topics/datasources/aliyun.rst (+74/-0) doc/rtd/topics/datasources/cloudstack.rst (+20/-6) doc/rtd/topics/datasources/ec2.rst (+30/-0) doc/rtd/topics/datasources/openstack.rst (+21/-2) doc/rtd/topics/network-config-format-v1.rst (+27/-0) doc/rtd/topics/network-config-format-v2.rst 
(+6/-0) doc/rtd/topics/tests.rst (+6/-1) integration-requirements.txt (+1/-1) packages/bddeb (+36/-4) packages/brpm (+3/-3) packages/debian/changelog.in (+1/-1) packages/debian/control.in (+1/-0) packages/debian/rules.in (+2/-0) packages/redhat/cloud-init.spec.in (+8/-0) packages/suse/cloud-init.spec.in (+29/-42) setup.py (+15/-3) systemd/cloud-config.service.tmpl (+1/-0) templates/chrony.conf.debian.tmpl (+39/-0) templates/chrony.conf.fedora.tmpl (+48/-0) templates/chrony.conf.opensuse.tmpl (+38/-0) templates/chrony.conf.rhel.tmpl (+45/-0) templates/chrony.conf.sles.tmpl (+38/-0) templates/chrony.conf.ubuntu.tmpl (+42/-0) tests/cloud_tests/args.py (+3/-0) tests/cloud_tests/bddeb.py (+1/-1) tests/cloud_tests/collect.py (+5/-3) tests/cloud_tests/platforms/instances.py (+30/-11) tests/cloud_tests/platforms/lxd/instance.py (+5/-7) tests/cloud_tests/releases.yaml (+16/-0) tests/cloud_tests/setup_image.py (+5/-6) tests/cloud_tests/stage.py (+12/-3) tests/cloud_tests/testcases.yaml (+2/-2) tests/cloud_tests/testcases/base.py (+28/-6) tests/cloud_tests/testcases/examples/including_user_groups.py (+1/-1) tests/cloud_tests/testcases/modules/byobu.py (+1/-2) tests/cloud_tests/testcases/modules/byobu.yaml (+0/-3) tests/cloud_tests/testcases/modules/ca_certs.py (+17/-4) tests/cloud_tests/testcases/modules/ca_certs.yaml (+6/-2) tests/cloud_tests/testcases/modules/ntp.py (+2/-3) tests/cloud_tests/testcases/modules/ntp.yaml (+1/-0) tests/cloud_tests/testcases/modules/ntp_chrony.py (+26/-0) tests/cloud_tests/testcases/modules/ntp_chrony.yaml (+17/-0) tests/cloud_tests/testcases/modules/ntp_pools.yaml (+1/-0) tests/cloud_tests/testcases/modules/ntp_servers.yaml (+1/-0) tests/cloud_tests/testcases/modules/ntp_timesyncd.py (+15/-0) tests/cloud_tests/testcases/modules/ntp_timesyncd.yaml (+15/-0) tests/cloud_tests/testcases/modules/package_update_upgrade_install.py (+6/-8) tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml (+3/-6) 
tests/cloud_tests/testcases/modules/salt_minion.py (+1/-2) tests/cloud_tests/testcases/modules/salt_minion.yaml (+12/-5) tests/cloud_tests/testcases/modules/user_groups.py (+1/-1) tests/cloud_tests/util.py (+1/-1) tests/cloud_tests/verify.py (+46/-1) tests/data/netinfo/netdev-formatted-output (+10/-0) tests/data/netinfo/netdev-formatted-output-down (+8/-0) tests/data/netinfo/new-ifconfig-output (+18/-0) tests/data/netinfo/new-ifconfig-output-down (+15/-0) tests/data/netinfo/old-ifconfig-output (+18/-0) tests/data/netinfo/route-formatted-output (+22/-0) tests/data/netinfo/sample-ipaddrshow-output (+13/-0) tests/data/netinfo/sample-ipaddrshow-output-down (+8/-0) tests/data/netinfo/sample-iproute-output-v4 (+3/-0) tests/data/netinfo/sample-iproute-output-v6 (+11/-0) tests/data/netinfo/sample-route-output-v4 (+5/-0) tests/data/netinfo/sample-route-output-v6 (+13/-0) tests/unittests/test__init__.py (+5/-5) tests/unittests/test_data.py (+21/-3) tests/unittests/test_datasource/test_aliyun.py (+0/-2) tests/unittests/test_datasource/test_azure.py (+209/-70) tests/unittests/test_datasource/test_azure_helper.py (+1/-1) tests/unittests/test_datasource/test_common.py (+1/-0) tests/unittests/test_datasource/test_ec2.py (+0/-12) tests/unittests/test_datasource/test_gce.py (+0/-1) tests/unittests/test_datasource/test_ibmcloud.py (+50/-0) tests/unittests/test_datasource/test_maas.py (+2/-2) tests/unittests/test_datasource/test_nocloud.py (+0/-3) tests/unittests/test_datasource/test_openstack.py (+215/-20) tests/unittests/test_datasource/test_scaleway.py (+0/-3) tests/unittests/test_datasource/test_smartos.py (+245/-5) tests/unittests/test_distros/test_create_users.py (+8/-0) tests/unittests/test_distros/test_netconfig.py (+6/-0) tests/unittests/test_distros/test_user_data_normalize.py (+6/-0) tests/unittests/test_ds_identify.py (+205/-18) tests/unittests/test_ec2_util.py (+0/-9) tests/unittests/test_filters/test_launch_index.py (+5/-5) 
tests/unittests/test_handler/test_handler_apt_conf_v1.py (+6/-10) tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py (+0/-7) tests/unittests/test_handler/test_handler_apt_source_v1.py (+10/-17) tests/unittests/test_handler/test_handler_apt_source_v3.py (+11/-18) tests/unittests/test_handler/test_handler_bootcmd.py (+26/-8) tests/unittests/test_handler/test_handler_chef.py (+12/-4) tests/unittests/test_handler/test_handler_lxd.py (+64/-16) tests/unittests/test_handler/test_handler_mounts.py (+100/-4) tests/unittests/test_handler/test_handler_ntp.py (+571/-305) tests/unittests/test_handler/test_handler_resizefs.py (+1/-1) tests/unittests/test_handler/test_handler_runcmd.py (+26/-7) tests/unittests/test_handler/test_schema.py (+33/-6) tests/unittests/test_merging.py (+1/-1) tests/unittests/test_net.py (+189/-12) tests/unittests/test_runs/test_merge_run.py (+1/-1) tests/unittests/test_runs/test_simple_run.py (+30/-2) tests/unittests/test_sshutil.py (+94/-3) tests/unittests/test_templating.py (+42/-3) tests/unittests/test_util.py (+126/-13) tools/ds-identify (+83/-28) tools/make-tarball (+12/-3) tools/read-dependencies (+6/-2) tools/run-centos (+30/-310) tools/run-container (+590/-0) tox.ini (+9/-7) |
||||||||||||||||||||
Related bugs: |
|
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Server Team CI bot | continuous-integration | Approve | |
cloud-init Committers | Pending | ||
Review via email: mp+348360@code.launchpad.net |
Commit message
Cloud-init 18.3 new-upstream-
Description of the change
To post a comment you must log in.
Revision history for this message
Server Team CI bot (server-team-bot) wrote : | # |
review:
Needs Fixing
(continuous-integration)
Revision history for this message
Server Team CI bot (server-team-bot) wrote : | # |
PASSED: Continuous integration, rev:2022cd6ee06
https:/
Executed test runs:
SUCCESS: Checkout
SUCCESS: Unit & Style Tests
SUCCESS: Ubuntu LTS: Build
SUCCESS: Ubuntu LTS: Integration
SUCCESS: MAAS Compatibility Testing
IN_PROGRESS: Declarative: Post Actions
Click here to trigger a rebuild:
https:/
review:
Approve
(continuous-integration)
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | diff --git a/.pylintrc b/.pylintrc | |||
2 | index 0bdfa59..3bfa0c8 100644 | |||
3 | --- a/.pylintrc | |||
4 | +++ b/.pylintrc | |||
5 | @@ -28,7 +28,7 @@ jobs=4 | |||
6 | 28 | # W0703(broad-except) | 28 | # W0703(broad-except) |
7 | 29 | # W1401(anomalous-backslash-in-string) | 29 | # W1401(anomalous-backslash-in-string) |
8 | 30 | 30 | ||
10 | 31 | disable=C, F, I, R, W0105, W0107, W0201, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0612, W0613, W0621, W0622, W0631, W0703, W1401 | 31 | disable=C, F, I, R, W0105, W0107, W0201, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0613, W0621, W0622, W0631, W0703, W1401 |
11 | 32 | 32 | ||
12 | 33 | 33 | ||
13 | 34 | [REPORTS] | 34 | [REPORTS] |
14 | diff --git a/ChangeLog b/ChangeLog | |||
15 | index daa7ccf..72c5287 100644 | |||
16 | --- a/ChangeLog | |||
17 | +++ b/ChangeLog | |||
18 | @@ -1,3 +1,229 @@ | |||
19 | 1 | 18.3: | ||
20 | 2 | - docs: represent sudo:false in docs for user_groups config module | ||
21 | 3 | - Explicitly prevent `sudo` access for user module | ||
22 | 4 | [Jacob Bednarz] (LP: #1771468) | ||
23 | 5 | - lxd: Delete default network and detach device if lxd-init created them. | ||
24 | 6 | (LP: #1776958) | ||
25 | 7 | - openstack: avoid unneeded metadata probe on non-openstack platforms | ||
26 | 8 | (LP: #1776701) | ||
27 | 9 | - stages: fix tracebacks if a module stage is undefined or empty | ||
28 | 10 | [Robert Schweikert] (LP: #1770462) | ||
29 | 11 | - Be more safe on string/bytes when writing multipart user-data to disk. | ||
30 | 12 | (LP: #1768600) | ||
31 | 13 | - Fix get_proc_env for pids that have non-utf8 content in environment. | ||
32 | 14 | (LP: #1775371) | ||
33 | 15 | - tests: fix salt_minion integration test on bionic and later | ||
34 | 16 | - tests: provide human-readable integration test summary when --verbose | ||
35 | 17 | - tests: skip chrony integration tests on lxd running artful or older | ||
36 | 18 | - test: add optional --preserve-instance arg to integraiton tests | ||
37 | 19 | - netplan: fix mtu if provided by network config for all rendered types | ||
38 | 20 | (LP: #1774666) | ||
39 | 21 | - tests: remove pip install workarounds for pylxd, take upstream fix. | ||
40 | 22 | - subp: support combine_capture argument. | ||
41 | 23 | - tests: ordered tox dependencies for pylxd install | ||
42 | 24 | - util: add get_linux_distro function to replace platform.dist | ||
43 | 25 | [Robert Schweikert] (LP: #1745235) | ||
44 | 26 | - pyflakes: fix unused variable references identified by pyflakes 2.0.0. | ||
45 | 27 | - - Do not use the systemd_prefix macro, not available in this environment | ||
46 | 28 | [Robert Schweikert] | ||
47 | 29 | - doc: Add config info to ec2, openstack and cloudstack datasource docs | ||
48 | 30 | - Enable SmartOS network metadata to work with netplan via per-subnet | ||
49 | 31 | routes [Dan McDonald] (LP: #1763512) | ||
50 | 32 | - openstack: Allow discovery in init-local using dhclient in a sandbox. | ||
51 | 33 | (LP: #1749717) | ||
52 | 34 | - tests: Avoid using https in httpretty, improve HttPretty test case. | ||
53 | 35 | (LP: #1771659) | ||
54 | 36 | - yaml_load/schema: Add invalid line and column nums to error message | ||
55 | 37 | - Azure: Ignore NTFS mount errors when checking ephemeral drive | ||
56 | 38 | [Paul Meyer] | ||
57 | 39 | - packages/brpm: Get proper dependencies for cmdline distro. | ||
58 | 40 | - packages: Make rpm spec files patch in package version like in debs. | ||
59 | 41 | - tools/run-container: replace tools/run-centos with more generic. | ||
60 | 42 | - Update version.version_string to contain packaged version. (LP: #1770712) | ||
61 | 43 | - cc_mounts: Do not add devices to fstab that are already present. | ||
62 | 44 | [Lars Kellogg-Stedman] | ||
63 | 45 | - ds-identify: ensure that we have certain tokens in PATH. (LP: #1771382) | ||
64 | 46 | - tests: enable Ubuntu Cosmic in integration tests [Joshua Powers] | ||
65 | 47 | - read_file_or_url: move to url_helper, fix bug in its FileResponse. | ||
66 | 48 | - cloud_tests: help pylint [Ryan Harper] | ||
67 | 49 | - flake8: fix flake8 errors in previous commit. | ||
68 | 50 | - typos: Fix spelling mistakes in cc_mounts.py log messages [Stephen Ford] | ||
69 | 51 | - tests: restructure SSH and initial connections [Joshua Powers] | ||
70 | 52 | - ds-identify: recognize container-other as a container, test SmartOS. | ||
71 | 53 | - cloud-config.service: run After snap.seeded.service. (LP: #1767131) | ||
72 | 54 | - tests: do not rely on host /proc/cmdline in test_net.py | ||
73 | 55 | [Lars Kellogg-Stedman] (LP: #1769952) | ||
74 | 56 | - ds-identify: Remove dupe call to is_ds_enabled, improve debug message. | ||
75 | 57 | - SmartOS: fix get_interfaces for nics that do not have addr_assign_type. | ||
76 | 58 | - tests: fix package and ca_cert cloud_tests on bionic | ||
77 | 59 | (LP: #1769985) | ||
78 | 60 | - ds-identify: make shellcheck 0.4.6 happy with ds-identify. | ||
79 | 61 | - pycodestyle: Fix deprecated string literals, move away from flake8. | ||
80 | 62 | - azure: Add reported ready marker file. [Joshua Chan] (LP: #1765214) | ||
81 | 63 | - tools: Support adding a release suffix through packages/bddeb. | ||
82 | 64 | - FreeBSD: Invoke growfs on ufs filesystems such that it does not prompt. | ||
83 | 65 | [Harm Weites] (LP: #1404745) | ||
84 | 66 | - tools: Re-use the orig tarball in packages/bddeb if it is around. | ||
85 | 67 | - netinfo: fix netdev_pformat when a nic does not have an address | ||
86 | 68 | assigned. (LP: #1766302) | ||
87 | 69 | - collect-logs: add -v flag, write to stderr, limit journal to single | ||
88 | 70 | boot. (LP: #1766335) | ||
89 | 71 | - IBMCloud: Disable config-drive and nocloud only if IBMCloud is enabled. | ||
90 | 72 | (LP: #1766401) | ||
91 | 73 | - Add reporting events and log_time around early source of blocking time | ||
92 | 74 | [Ryan Harper] | ||
93 | 75 | - IBMCloud: recognize provisioning environment during debug boots. | ||
94 | 76 | (LP: #1767166) | ||
95 | 77 | - net: detect unstable network names and trigger a settle if needed | ||
96 | 78 | [Ryan Harper] (LP: #1766287) | ||
97 | 79 | - IBMCloud: improve documentation in datasource. | ||
98 | 80 | - sysconfig: dhcp6 subnet type should not imply dhcpv4 [Vitaly Kuznetsov] | ||
99 | 81 | - packages/debian/control.in: add missing dependency on iproute2. | ||
100 | 82 | (LP: #1766711) | ||
101 | 83 | - DataSourceSmartOS: add locking of serial device. | ||
102 | 84 | [Mike Gerdts] (LP: #1746605) | ||
103 | 85 | - DataSourceSmartOS: sdc:hostname is ignored [Mike Gerdts] (LP: #1765085) | ||
104 | 86 | - DataSourceSmartOS: list() should always return a list | ||
105 | 87 | [Mike Gerdts] (LP: #1763480) | ||
106 | 88 | - schema: in validation, raise ImportError if strict but no jsonschema. | ||
107 | 89 | - set_passwords: Add newline to end of sshd config, only restart if | ||
108 | 90 | updated. (LP: #1677205) | ||
109 | 91 | - pylint: pay attention to unused variable warnings. | ||
110 | 92 | - doc: Add documentation for AliYun datasource. [Junjie Wang] | ||
111 | 93 | - Schema: do not warn on duplicate items in commands. (LP: #1764264) | ||
112 | 94 | - net: Depend on iproute2's ip instead of net-tools ifconfig or route | ||
113 | 95 | - DataSourceSmartOS: fix hang when metadata service is down | ||
114 | 96 | [Mike Gerdts] (LP: #1667735) | ||
115 | 97 | - DataSourceSmartOS: change default fs on ephemeral disk from ext3 to | ||
116 | 98 | ext4. [Mike Gerdts] (LP: #1763511) | ||
117 | 99 | - pycodestyle: Fix invalid escape sequences in string literals. | ||
118 | 100 | - Implement bash completion script for cloud-init command line | ||
119 | 101 | [Ryan Harper] | ||
120 | 102 | - tools: Fix make-tarball cli tool usage for development | ||
121 | 103 | - renderer: support unicode in render_from_file. | ||
122 | 104 | - Implement ntp client spec with auto support for distro selection | ||
123 | 105 | [Ryan Harper] (LP: #1749722) | ||
124 | 106 | - Apport: add Brightbox, IBM, LXD, and OpenTelekomCloud to list of clouds. | ||
125 | 107 | - tests: fix ec2 integration network metadata validation | ||
126 | 108 | - tests: fix integration tests to support lxd 3.0 release | ||
127 | 109 | - correct documentation to match correct attribute name usage. | ||
128 | 110 | [Dominic Schlegel] (LP: #1420018) | ||
129 | 111 | - cc_resizefs, util: handle no /dev/zfs [Ryan Harper] | ||
130 | 112 | - doc: Fix links in OpenStack datasource documentation. | ||
131 | 113 | [Dominic Schlegel] (LP: #1721660) | ||
132 | 114 | - docs: represent sudo:false in docs for user_groups config module | ||
133 | 115 | - Explicitly prevent `sudo` access for user module | ||
134 | 116 | [Jacob Bednarz] (LP: #1771468) | ||
135 | 117 | - lxd: Delete default network and detach device if lxd-init created them. | ||
136 | 118 | (LP: #1776958) | ||
137 | 119 | - openstack: avoid unneeded metadata probe on non-openstack platforms | ||
138 | 120 | (LP: #1776701) | ||
139 | 121 | - stages: fix tracebacks if a module stage is undefined or empty | ||
140 | 122 | [Robert Schweikert] (LP: #1770462) | ||
141 | 123 | - Be more safe on string/bytes when writing multipart user-data to disk. | ||
142 | 124 | (LP: #1768600) | ||
143 | 125 | - Fix get_proc_env for pids that have non-utf8 content in environment. | ||
144 | 126 | (LP: #1775371) | ||
145 | 127 | - tests: fix salt_minion integration test on bionic and later | ||
146 | 128 | - tests: provide human-readable integration test summary when --verbose | ||
147 | 129 | - tests: skip chrony integration tests on lxd running artful or older | ||
148 | 130 | - test: add optional --preserve-instance arg to integraiton tests | ||
149 | 131 | - netplan: fix mtu if provided by network config for all rendered types | ||
150 | 132 | (LP: #1774666) | ||
151 | 133 | - tests: remove pip install workarounds for pylxd, take upstream fix. | ||
152 | 134 | - subp: support combine_capture argument. | ||
153 | 135 | - tests: ordered tox dependencies for pylxd install | ||
154 | 136 | - util: add get_linux_distro function to replace platform.dist | ||
155 | 137 | [Robert Schweikert] (LP: #1745235) | ||
156 | 138 | - pyflakes: fix unused variable references identified by pyflakes 2.0.0. | ||
157 | 139 | - - Do not use the systemd_prefix macro, not available in this environment | ||
158 | 140 | [Robert Schweikert] | ||
159 | 141 | - doc: Add config info to ec2, openstack and cloudstack datasource docs | ||
160 | 142 | - Enable SmartOS network metadata to work with netplan via per-subnet | ||
161 | 143 | routes [Dan McDonald] (LP: #1763512) | ||
162 | 144 | - openstack: Allow discovery in init-local using dhclient in a sandbox. | ||
163 | 145 | (LP: #1749717) | ||
164 | 146 | - tests: Avoid using https in httpretty, improve HttPretty test case. | ||
165 | 147 | (LP: #1771659) | ||
166 | 148 | - yaml_load/schema: Add invalid line and column nums to error message | ||
167 | 149 | - Azure: Ignore NTFS mount errors when checking ephemeral drive | ||
168 | 150 | [Paul Meyer] | ||
169 | 151 | - packages/brpm: Get proper dependencies for cmdline distro. | ||
170 | 152 | - packages: Make rpm spec files patch in package version like in debs. | ||
171 | 153 | - tools/run-container: replace tools/run-centos with more generic. | ||
172 | 154 | - Update version.version_string to contain packaged version. (LP: #1770712) | ||
173 | 155 | - cc_mounts: Do not add devices to fstab that are already present. | ||
174 | 156 | [Lars Kellogg-Stedman] | ||
175 | 157 | - ds-identify: ensure that we have certain tokens in PATH. (LP: #1771382) | ||
176 | 158 | - tests: enable Ubuntu Cosmic in integration tests [Joshua Powers] | ||
177 | 159 | - read_file_or_url: move to url_helper, fix bug in its FileResponse. | ||
178 | 160 | - cloud_tests: help pylint [Ryan Harper] | ||
179 | 161 | - flake8: fix flake8 errors in previous commit. | ||
180 | 162 | - typos: Fix spelling mistakes in cc_mounts.py log messages [Stephen Ford] | ||
181 | 163 | - tests: restructure SSH and initial connections [Joshua Powers] | ||
182 | 164 | - ds-identify: recognize container-other as a container, test SmartOS. | ||
183 | 165 | - cloud-config.service: run After snap.seeded.service. (LP: #1767131) | ||
184 | 166 | - tests: do not rely on host /proc/cmdline in test_net.py | ||
185 | 167 | [Lars Kellogg-Stedman] (LP: #1769952) | ||
186 | 168 | - ds-identify: Remove dupe call to is_ds_enabled, improve debug message. | ||
187 | 169 | - SmartOS: fix get_interfaces for nics that do not have addr_assign_type. | ||
188 | 170 | - tests: fix package and ca_cert cloud_tests on bionic | ||
189 | 171 | (LP: #1769985) | ||
190 | 172 | - ds-identify: make shellcheck 0.4.6 happy with ds-identify. | ||
191 | 173 | - pycodestyle: Fix deprecated string literals, move away from flake8. | ||
192 | 174 | - azure: Add reported ready marker file. [Joshua Chan] (LP: #1765214) | ||
193 | 175 | - tools: Support adding a release suffix through packages/bddeb. | ||
194 | 176 | - FreeBSD: Invoke growfs on ufs filesystems such that it does not prompt. | ||
195 | 177 | [Harm Weites] (LP: #1404745) | ||
196 | 178 | - tools: Re-use the orig tarball in packages/bddeb if it is around. | ||
197 | 179 | - netinfo: fix netdev_pformat when a nic does not have an address | ||
198 | 180 | assigned. (LP: #1766302) | ||
199 | 181 | - collect-logs: add -v flag, write to stderr, limit journal to single | ||
200 | 182 | boot. (LP: #1766335) | ||
201 | 183 | - IBMCloud: Disable config-drive and nocloud only if IBMCloud is enabled. | ||
202 | 184 | (LP: #1766401) | ||
203 | 185 | - Add reporting events and log_time around early source of blocking time | ||
204 | 186 | [Ryan Harper] | ||
205 | 187 | - IBMCloud: recognize provisioning environment during debug boots. | ||
206 | 188 | (LP: #1767166) | ||
207 | 189 | - net: detect unstable network names and trigger a settle if needed | ||
208 | 190 | [Ryan Harper] (LP: #1766287) | ||
209 | 191 | - IBMCloud: improve documentation in datasource. | ||
210 | 192 | - sysconfig: dhcp6 subnet type should not imply dhcpv4 [Vitaly Kuznetsov] | ||
211 | 193 | - packages/debian/control.in: add missing dependency on iproute2. | ||
212 | 194 | (LP: #1766711) | ||
213 | 195 | - DataSourceSmartOS: add locking of serial device. | ||
214 | 196 | [Mike Gerdts] (LP: #1746605) | ||
215 | 197 | - DataSourceSmartOS: sdc:hostname is ignored [Mike Gerdts] (LP: #1765085) | ||
216 | 198 | - DataSourceSmartOS: list() should always return a list | ||
217 | 199 | [Mike Gerdts] (LP: #1763480) | ||
218 | 200 | - schema: in validation, raise ImportError if strict but no jsonschema. | ||
219 | 201 | - set_passwords: Add newline to end of sshd config, only restart if | ||
220 | 202 | updated. (LP: #1677205) | ||
221 | 203 | - pylint: pay attention to unused variable warnings. | ||
222 | 204 | - doc: Add documentation for AliYun datasource. [Junjie Wang] | ||
223 | 205 | - Schema: do not warn on duplicate items in commands. (LP: #1764264) | ||
224 | 206 | - net: Depend on iproute2's ip instead of net-tools ifconfig or route | ||
225 | 207 | - DataSourceSmartOS: fix hang when metadata service is down | ||
226 | 208 | [Mike Gerdts] (LP: #1667735) | ||
227 | 209 | - DataSourceSmartOS: change default fs on ephemeral disk from ext3 to | ||
228 | 210 | ext4. [Mike Gerdts] (LP: #1763511) | ||
229 | 211 | - pycodestyle: Fix invalid escape sequences in string literals. | ||
230 | 212 | - Implement bash completion script for cloud-init command line | ||
231 | 213 | [Ryan Harper] | ||
232 | 214 | - tools: Fix make-tarball cli tool usage for development | ||
233 | 215 | - renderer: support unicode in render_from_file. | ||
234 | 216 | - Implement ntp client spec with auto support for distro selection | ||
235 | 217 | [Ryan Harper] (LP: #1749722) | ||
236 | 218 | - Apport: add Brightbox, IBM, LXD, and OpenTelekomCloud to list of clouds. | ||
237 | 219 | - tests: fix ec2 integration network metadata validation | ||
238 | 220 | - tests: fix integration tests to support lxd 3.0 release | ||
239 | 221 | - correct documentation to match correct attribute name usage. | ||
240 | 222 | [Dominic Schlegel] (LP: #1420018) | ||
241 | 223 | - cc_resizefs, util: handle no /dev/zfs [Ryan Harper] | ||
242 | 224 | - doc: Fix links in OpenStack datasource documentation. | ||
243 | 225 | [Dominic Schlegel] (LP: #1721660) | ||
244 | 226 | |||
245 | 1 | 18.2: | 227 | 18.2: |
246 | 2 | - Hetzner: Exit early if dmi system-manufacturer is not Hetzner. | 228 | - Hetzner: Exit early if dmi system-manufacturer is not Hetzner. |
247 | 3 | - Add missing dependency on isc-dhcp-client to trunk ubuntu packaging. | 229 | - Add missing dependency on isc-dhcp-client to trunk ubuntu packaging. |
248 | diff --git a/MANIFEST.in b/MANIFEST.in | |||
249 | index 1a4d771..57a85ea 100644 | |||
250 | --- a/MANIFEST.in | |||
251 | +++ b/MANIFEST.in | |||
252 | @@ -1,5 +1,6 @@ | |||
253 | 1 | include *.py MANIFEST.in LICENSE* ChangeLog | 1 | include *.py MANIFEST.in LICENSE* ChangeLog |
254 | 2 | global-include *.txt *.rst *.ini *.in *.conf *.cfg *.sh | 2 | global-include *.txt *.rst *.ini *.in *.conf *.cfg *.sh |
255 | 3 | graft bash_completion | ||
256 | 3 | graft config | 4 | graft config |
257 | 4 | graft doc | 5 | graft doc |
258 | 5 | graft packages | 6 | graft packages |
259 | diff --git a/bash_completion/cloud-init b/bash_completion/cloud-init | |||
260 | 6 | new file mode 100644 | 7 | new file mode 100644 |
261 | index 0000000..581432c | |||
262 | --- /dev/null | |||
263 | +++ b/bash_completion/cloud-init | |||
264 | @@ -0,0 +1,77 @@ | |||
265 | 1 | # Copyright (C) 2018 Canonical Ltd. | ||
266 | 2 | # | ||
267 | 3 | # This file is part of cloud-init. See LICENSE file for license information. | ||
268 | 4 | |||
269 | 5 | # bash completion for cloud-init cli | ||
270 | 6 | _cloudinit_complete() | ||
271 | 7 | { | ||
272 | 8 | |||
273 | 9 | local cur_word prev_word | ||
274 | 10 | cur_word="${COMP_WORDS[COMP_CWORD]}" | ||
275 | 11 | prev_word="${COMP_WORDS[COMP_CWORD-1]}" | ||
276 | 12 | |||
277 | 13 | subcmds="analyze clean collect-logs devel dhclient-hook features init modules single status" | ||
278 | 14 | base_params="--help --file --version --debug --force" | ||
279 | 15 | case ${COMP_CWORD} in | ||
280 | 16 | 1) | ||
281 | 17 | COMPREPLY=($(compgen -W "$base_params $subcmds" -- $cur_word)) | ||
282 | 18 | ;; | ||
283 | 19 | 2) | ||
284 | 20 | case ${prev_word} in | ||
285 | 21 | analyze) | ||
286 | 22 | COMPREPLY=($(compgen -W "--help blame dump show" -- $cur_word)) | ||
287 | 23 | ;; | ||
288 | 24 | clean) | ||
289 | 25 | COMPREPLY=($(compgen -W "--help --logs --reboot --seed" -- $cur_word)) | ||
290 | 26 | ;; | ||
291 | 27 | collect-logs) | ||
292 | 28 | COMPREPLY=($(compgen -W "--help --tarfile --include-userdata" -- $cur_word)) | ||
293 | 29 | ;; | ||
294 | 30 | devel) | ||
295 | 31 | COMPREPLY=($(compgen -W "--help schema" -- $cur_word)) | ||
296 | 32 | ;; | ||
297 | 33 | dhclient-hook|features) | ||
298 | 34 | COMPREPLY=($(compgen -W "--help" -- $cur_word)) | ||
299 | 35 | ;; | ||
300 | 36 | init) | ||
301 | 37 | COMPREPLY=($(compgen -W "--help --local" -- $cur_word)) | ||
302 | 38 | ;; | ||
303 | 39 | modules) | ||
304 | 40 | COMPREPLY=($(compgen -W "--help --mode" -- $cur_word)) | ||
305 | 41 | ;; | ||
306 | 42 | |||
307 | 43 | single) | ||
308 | 44 | COMPREPLY=($(compgen -W "--help --name --frequency --report" -- $cur_word)) | ||
309 | 45 | ;; | ||
310 | 46 | status) | ||
311 | 47 | COMPREPLY=($(compgen -W "--help --long --wait" -- $cur_word)) | ||
312 | 48 | ;; | ||
313 | 49 | esac | ||
314 | 50 | ;; | ||
315 | 51 | 3) | ||
316 | 52 | case ${prev_word} in | ||
317 | 53 | blame|dump) | ||
318 | 54 | COMPREPLY=($(compgen -W "--help --infile --outfile" -- $cur_word)) | ||
319 | 55 | ;; | ||
320 | 56 | --mode) | ||
321 | 57 | COMPREPLY=($(compgen -W "--help init config final" -- $cur_word)) | ||
322 | 58 | ;; | ||
323 | 59 | --frequency) | ||
324 | 60 | COMPREPLY=($(compgen -W "--help instance always once" -- $cur_word)) | ||
325 | 61 | ;; | ||
326 | 62 | schema) | ||
327 | 63 | COMPREPLY=($(compgen -W "--help --config-file --doc --annotate" -- $cur_word)) | ||
328 | 64 | ;; | ||
329 | 65 | show) | ||
330 | 66 | COMPREPLY=($(compgen -W "--help --format --infile --outfile" -- $cur_word)) | ||
331 | 67 | ;; | ||
332 | 68 | esac | ||
333 | 69 | ;; | ||
334 | 70 | *) | ||
335 | 71 | COMPREPLY=() | ||
336 | 72 | ;; | ||
337 | 73 | esac | ||
338 | 74 | } | ||
339 | 75 | complete -F _cloudinit_complete cloud-init | ||
340 | 76 | |||
341 | 77 | # vi: syntax=bash expandtab | ||
342 | diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py | |||
343 | index 3ba5903..f861365 100644 | |||
344 | --- a/cloudinit/analyze/__main__.py | |||
345 | +++ b/cloudinit/analyze/__main__.py | |||
346 | @@ -69,7 +69,7 @@ def analyze_blame(name, args): | |||
347 | 69 | """ | 69 | """ |
348 | 70 | (infh, outfh) = configure_io(args) | 70 | (infh, outfh) = configure_io(args) |
349 | 71 | blame_format = ' %ds (%n)' | 71 | blame_format = ' %ds (%n)' |
351 | 72 | r = re.compile('(^\s+\d+\.\d+)', re.MULTILINE) | 72 | r = re.compile(r'(^\s+\d+\.\d+)', re.MULTILINE) |
352 | 73 | for idx, record in enumerate(show.show_events(_get_events(infh), | 73 | for idx, record in enumerate(show.show_events(_get_events(infh), |
353 | 74 | blame_format)): | 74 | blame_format)): |
354 | 75 | srecs = sorted(filter(r.match, record), reverse=True) | 75 | srecs = sorted(filter(r.match, record), reverse=True) |
355 | diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py | |||
356 | index b071aa1..1f3060d 100644 | |||
357 | --- a/cloudinit/analyze/dump.py | |||
358 | +++ b/cloudinit/analyze/dump.py | |||
359 | @@ -112,7 +112,7 @@ def parse_ci_logline(line): | |||
360 | 112 | return None | 112 | return None |
361 | 113 | event_description = stage_to_description[event_name] | 113 | event_description = stage_to_description[event_name] |
362 | 114 | else: | 114 | else: |
364 | 115 | (pymodloglvl, event_type, event_name) = eventstr.split()[0:3] | 115 | (_pymodloglvl, event_type, event_name) = eventstr.split()[0:3] |
365 | 116 | event_description = eventstr.split(event_name)[1].strip() | 116 | event_description = eventstr.split(event_name)[1].strip() |
366 | 117 | 117 | ||
367 | 118 | event = { | 118 | event = { |
368 | diff --git a/cloudinit/apport.py b/cloudinit/apport.py | |||
369 | index 618b016..130ff26 100644 | |||
370 | --- a/cloudinit/apport.py | |||
371 | +++ b/cloudinit/apport.py | |||
372 | @@ -13,10 +13,29 @@ except ImportError: | |||
373 | 13 | 13 | ||
374 | 14 | 14 | ||
375 | 15 | KNOWN_CLOUD_NAMES = [ | 15 | KNOWN_CLOUD_NAMES = [ |
380 | 16 | 'Amazon - Ec2', 'AliYun', 'AltCloud', 'Azure', 'Bigstep', 'CloudSigma', | 16 | 'AliYun', |
381 | 17 | 'CloudStack', 'DigitalOcean', 'GCE - Google Compute Engine', | 17 | 'AltCloud', |
382 | 18 | 'Hetzner Cloud', 'MAAS', 'NoCloud', 'OpenNebula', 'OpenStack', 'OVF', | 18 | 'Amazon - Ec2', |
383 | 19 | 'Scaleway', 'SmartOS', 'VMware', 'Other'] | 19 | 'Azure', |
384 | 20 | 'Bigstep', | ||
385 | 21 | 'Brightbox', | ||
386 | 22 | 'CloudSigma', | ||
387 | 23 | 'CloudStack', | ||
388 | 24 | 'DigitalOcean', | ||
389 | 25 | 'GCE - Google Compute Engine', | ||
390 | 26 | 'Hetzner Cloud', | ||
391 | 27 | 'IBM - (aka SoftLayer or BlueMix)', | ||
392 | 28 | 'LXD', | ||
393 | 29 | 'MAAS', | ||
394 | 30 | 'NoCloud', | ||
395 | 31 | 'OpenNebula', | ||
396 | 32 | 'OpenStack', | ||
397 | 33 | 'OVF', | ||
398 | 34 | 'OpenTelekomCloud', | ||
399 | 35 | 'Scaleway', | ||
400 | 36 | 'SmartOS', | ||
401 | 37 | 'VMware', | ||
402 | 38 | 'Other'] | ||
403 | 20 | 39 | ||
404 | 21 | # Potentially clear text collected logs | 40 | # Potentially clear text collected logs |
405 | 22 | CLOUDINIT_LOG = '/var/log/cloud-init.log' | 41 | CLOUDINIT_LOG = '/var/log/cloud-init.log' |
406 | diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py | |||
407 | index 35ca478..df72520 100644 | |||
408 | --- a/cloudinit/cmd/devel/logs.py | |||
409 | +++ b/cloudinit/cmd/devel/logs.py | |||
410 | @@ -11,6 +11,7 @@ from cloudinit.temp_utils import tempdir | |||
411 | 11 | from datetime import datetime | 11 | from datetime import datetime |
412 | 12 | import os | 12 | import os |
413 | 13 | import shutil | 13 | import shutil |
414 | 14 | import sys | ||
415 | 14 | 15 | ||
416 | 15 | 16 | ||
417 | 16 | CLOUDINIT_LOGS = ['/var/log/cloud-init.log', '/var/log/cloud-init-output.log'] | 17 | CLOUDINIT_LOGS = ['/var/log/cloud-init.log', '/var/log/cloud-init-output.log'] |
418 | @@ -31,6 +32,8 @@ def get_parser(parser=None): | |||
419 | 31 | parser = argparse.ArgumentParser( | 32 | parser = argparse.ArgumentParser( |
420 | 32 | prog='collect-logs', | 33 | prog='collect-logs', |
421 | 33 | description='Collect and tar all cloud-init debug info') | 34 | description='Collect and tar all cloud-init debug info') |
422 | 35 | parser.add_argument('--verbose', '-v', action='count', default=0, | ||
423 | 36 | dest='verbosity', help="Be more verbose.") | ||
424 | 34 | parser.add_argument( | 37 | parser.add_argument( |
425 | 35 | "--tarfile", '-t', default='cloud-init.tar.gz', | 38 | "--tarfile", '-t', default='cloud-init.tar.gz', |
426 | 36 | help=('The tarfile to create containing all collected logs.' | 39 | help=('The tarfile to create containing all collected logs.' |
427 | @@ -43,17 +46,33 @@ def get_parser(parser=None): | |||
428 | 43 | return parser | 46 | return parser |
429 | 44 | 47 | ||
430 | 45 | 48 | ||
432 | 46 | def _write_command_output_to_file(cmd, filename): | 49 | def _write_command_output_to_file(cmd, filename, msg, verbosity): |
433 | 47 | """Helper which runs a command and writes output or error to filename.""" | 50 | """Helper which runs a command and writes output or error to filename.""" |
434 | 48 | try: | 51 | try: |
435 | 49 | out, _ = subp(cmd) | 52 | out, _ = subp(cmd) |
436 | 50 | except ProcessExecutionError as e: | 53 | except ProcessExecutionError as e: |
437 | 51 | write_file(filename, str(e)) | 54 | write_file(filename, str(e)) |
438 | 55 | _debug("collecting %s failed.\n" % msg, 1, verbosity) | ||
439 | 52 | else: | 56 | else: |
440 | 53 | write_file(filename, out) | 57 | write_file(filename, out) |
441 | 58 | _debug("collected %s\n" % msg, 1, verbosity) | ||
442 | 59 | return out | ||
443 | 54 | 60 | ||
444 | 55 | 61 | ||
446 | 56 | def collect_logs(tarfile, include_userdata): | 62 | def _debug(msg, level, verbosity): |
447 | 63 | if level <= verbosity: | ||
448 | 64 | sys.stderr.write(msg) | ||
449 | 65 | |||
450 | 66 | |||
451 | 67 | def _collect_file(path, out_dir, verbosity): | ||
452 | 68 | if os.path.isfile(path): | ||
453 | 69 | copy(path, out_dir) | ||
454 | 70 | _debug("collected file: %s\n" % path, 1, verbosity) | ||
455 | 71 | else: | ||
456 | 72 | _debug("file %s did not exist\n" % path, 2, verbosity) | ||
457 | 73 | |||
458 | 74 | |||
459 | 75 | def collect_logs(tarfile, include_userdata, verbosity=0): | ||
460 | 57 | """Collect all cloud-init logs and tar them up into the provided tarfile. | 76 | """Collect all cloud-init logs and tar them up into the provided tarfile. |
461 | 58 | 77 | ||
462 | 59 | @param tarfile: The path of the tar-gzipped file to create. | 78 | @param tarfile: The path of the tar-gzipped file to create. |
463 | @@ -64,28 +83,46 @@ def collect_logs(tarfile, include_userdata): | |||
464 | 64 | log_dir = 'cloud-init-logs-{0}'.format(date) | 83 | log_dir = 'cloud-init-logs-{0}'.format(date) |
465 | 65 | with tempdir(dir='/tmp') as tmp_dir: | 84 | with tempdir(dir='/tmp') as tmp_dir: |
466 | 66 | log_dir = os.path.join(tmp_dir, log_dir) | 85 | log_dir = os.path.join(tmp_dir, log_dir) |
468 | 67 | _write_command_output_to_file( | 86 | version = _write_command_output_to_file( |
469 | 87 | ['cloud-init', '--version'], | ||
470 | 88 | os.path.join(log_dir, 'version'), | ||
471 | 89 | "cloud-init --version", verbosity) | ||
472 | 90 | dpkg_ver = _write_command_output_to_file( | ||
473 | 68 | ['dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'], | 91 | ['dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'], |
475 | 69 | os.path.join(log_dir, 'version')) | 92 | os.path.join(log_dir, 'dpkg-version'), |
476 | 93 | "dpkg version", verbosity) | ||
477 | 94 | if not version: | ||
478 | 95 | version = dpkg_ver if dpkg_ver else "not-available" | ||
479 | 96 | _debug("collected cloud-init version: %s\n" % version, 1, verbosity) | ||
480 | 70 | _write_command_output_to_file( | 97 | _write_command_output_to_file( |
482 | 71 | ['dmesg'], os.path.join(log_dir, 'dmesg.txt')) | 98 | ['dmesg'], os.path.join(log_dir, 'dmesg.txt'), |
483 | 99 | "dmesg output", verbosity) | ||
484 | 72 | _write_command_output_to_file( | 100 | _write_command_output_to_file( |
487 | 73 | ['journalctl', '-o', 'short-precise'], | 101 | ['journalctl', '--boot=0', '-o', 'short-precise'], |
488 | 74 | os.path.join(log_dir, 'journal.txt')) | 102 | os.path.join(log_dir, 'journal.txt'), |
489 | 103 | "systemd journal of current boot", verbosity) | ||
490 | 104 | |||
491 | 75 | for log in CLOUDINIT_LOGS: | 105 | for log in CLOUDINIT_LOGS: |
493 | 76 | copy(log, log_dir) | 106 | _collect_file(log, log_dir, verbosity) |
494 | 77 | if include_userdata: | 107 | if include_userdata: |
496 | 78 | copy(USER_DATA_FILE, log_dir) | 108 | _collect_file(USER_DATA_FILE, log_dir, verbosity) |
497 | 79 | run_dir = os.path.join(log_dir, 'run') | 109 | run_dir = os.path.join(log_dir, 'run') |
498 | 80 | ensure_dir(run_dir) | 110 | ensure_dir(run_dir) |
500 | 81 | shutil.copytree(CLOUDINIT_RUN_DIR, os.path.join(run_dir, 'cloud-init')) | 111 | if os.path.exists(CLOUDINIT_RUN_DIR): |
501 | 112 | shutil.copytree(CLOUDINIT_RUN_DIR, | ||
502 | 113 | os.path.join(run_dir, 'cloud-init')) | ||
503 | 114 | _debug("collected dir %s\n" % CLOUDINIT_RUN_DIR, 1, verbosity) | ||
504 | 115 | else: | ||
505 | 116 | _debug("directory '%s' did not exist\n" % CLOUDINIT_RUN_DIR, 1, | ||
506 | 117 | verbosity) | ||
507 | 82 | with chdir(tmp_dir): | 118 | with chdir(tmp_dir): |
508 | 83 | subp(['tar', 'czvf', tarfile, log_dir.replace(tmp_dir + '/', '')]) | 119 | subp(['tar', 'czvf', tarfile, log_dir.replace(tmp_dir + '/', '')]) |
509 | 120 | sys.stderr.write("Wrote %s\n" % tarfile) | ||
510 | 84 | 121 | ||
511 | 85 | 122 | ||
512 | 86 | def handle_collect_logs_args(name, args): | 123 | def handle_collect_logs_args(name, args): |
513 | 87 | """Handle calls to 'cloud-init collect-logs' as a subcommand.""" | 124 | """Handle calls to 'cloud-init collect-logs' as a subcommand.""" |
515 | 88 | collect_logs(args.tarfile, args.userdata) | 125 | collect_logs(args.tarfile, args.userdata, args.verbosity) |
516 | 89 | 126 | ||
517 | 90 | 127 | ||
518 | 91 | def main(): | 128 | def main(): |
519 | diff --git a/cloudinit/cmd/devel/tests/test_logs.py b/cloudinit/cmd/devel/tests/test_logs.py | |||
520 | index dc4947c..98b4756 100644 | |||
521 | --- a/cloudinit/cmd/devel/tests/test_logs.py | |||
522 | +++ b/cloudinit/cmd/devel/tests/test_logs.py | |||
523 | @@ -4,6 +4,7 @@ from cloudinit.cmd.devel import logs | |||
524 | 4 | from cloudinit.util import ensure_dir, load_file, subp, write_file | 4 | from cloudinit.util import ensure_dir, load_file, subp, write_file |
525 | 5 | from cloudinit.tests.helpers import FilesystemMockingTestCase, wrap_and_call | 5 | from cloudinit.tests.helpers import FilesystemMockingTestCase, wrap_and_call |
526 | 6 | from datetime import datetime | 6 | from datetime import datetime |
527 | 7 | import mock | ||
528 | 7 | import os | 8 | import os |
529 | 8 | 9 | ||
530 | 9 | 10 | ||
531 | @@ -27,11 +28,13 @@ class TestCollectLogs(FilesystemMockingTestCase): | |||
532 | 27 | date = datetime.utcnow().date().strftime('%Y-%m-%d') | 28 | date = datetime.utcnow().date().strftime('%Y-%m-%d') |
533 | 28 | date_logdir = 'cloud-init-logs-{0}'.format(date) | 29 | date_logdir = 'cloud-init-logs-{0}'.format(date) |
534 | 29 | 30 | ||
535 | 31 | version_out = '/usr/bin/cloud-init 18.2fake\n' | ||
536 | 30 | expected_subp = { | 32 | expected_subp = { |
537 | 31 | ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'): | 33 | ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'): |
538 | 32 | '0.7fake\n', | 34 | '0.7fake\n', |
539 | 35 | ('cloud-init', '--version'): version_out, | ||
540 | 33 | ('dmesg',): 'dmesg-out\n', | 36 | ('dmesg',): 'dmesg-out\n', |
542 | 34 | ('journalctl', '-o', 'short-precise'): 'journal-out\n', | 37 | ('journalctl', '--boot=0', '-o', 'short-precise'): 'journal-out\n', |
543 | 35 | ('tar', 'czvf', output_tarfile, date_logdir): '' | 38 | ('tar', 'czvf', output_tarfile, date_logdir): '' |
544 | 36 | } | 39 | } |
545 | 37 | 40 | ||
546 | @@ -44,9 +47,12 @@ class TestCollectLogs(FilesystemMockingTestCase): | |||
547 | 44 | subp(cmd) # Pass through tar cmd so we can check output | 47 | subp(cmd) # Pass through tar cmd so we can check output |
548 | 45 | return expected_subp[cmd_tuple], '' | 48 | return expected_subp[cmd_tuple], '' |
549 | 46 | 49 | ||
550 | 50 | fake_stderr = mock.MagicMock() | ||
551 | 51 | |||
552 | 47 | wrap_and_call( | 52 | wrap_and_call( |
553 | 48 | 'cloudinit.cmd.devel.logs', | 53 | 'cloudinit.cmd.devel.logs', |
554 | 49 | {'subp': {'side_effect': fake_subp}, | 54 | {'subp': {'side_effect': fake_subp}, |
555 | 55 | 'sys.stderr': {'new': fake_stderr}, | ||
556 | 50 | 'CLOUDINIT_LOGS': {'new': [log1, log2]}, | 56 | 'CLOUDINIT_LOGS': {'new': [log1, log2]}, |
557 | 51 | 'CLOUDINIT_RUN_DIR': {'new': self.run_dir}}, | 57 | 'CLOUDINIT_RUN_DIR': {'new': self.run_dir}}, |
558 | 52 | logs.collect_logs, output_tarfile, include_userdata=False) | 58 | logs.collect_logs, output_tarfile, include_userdata=False) |
559 | @@ -55,7 +61,9 @@ class TestCollectLogs(FilesystemMockingTestCase): | |||
560 | 55 | out_logdir = self.tmp_path(date_logdir, self.new_root) | 61 | out_logdir = self.tmp_path(date_logdir, self.new_root) |
561 | 56 | self.assertEqual( | 62 | self.assertEqual( |
562 | 57 | '0.7fake\n', | 63 | '0.7fake\n', |
564 | 58 | load_file(os.path.join(out_logdir, 'version'))) | 64 | load_file(os.path.join(out_logdir, 'dpkg-version'))) |
565 | 65 | self.assertEqual(version_out, | ||
566 | 66 | load_file(os.path.join(out_logdir, 'version'))) | ||
567 | 59 | self.assertEqual( | 67 | self.assertEqual( |
568 | 60 | 'cloud-init-log', | 68 | 'cloud-init-log', |
569 | 61 | load_file(os.path.join(out_logdir, 'cloud-init.log'))) | 69 | load_file(os.path.join(out_logdir, 'cloud-init.log'))) |
570 | @@ -72,6 +80,7 @@ class TestCollectLogs(FilesystemMockingTestCase): | |||
571 | 72 | 'results', | 80 | 'results', |
572 | 73 | load_file( | 81 | load_file( |
573 | 74 | os.path.join(out_logdir, 'run', 'cloud-init', 'results.json'))) | 82 | os.path.join(out_logdir, 'run', 'cloud-init', 'results.json'))) |
574 | 83 | fake_stderr.write.assert_any_call('Wrote %s\n' % output_tarfile) | ||
575 | 75 | 84 | ||
576 | 76 | def test_collect_logs_includes_optional_userdata(self): | 85 | def test_collect_logs_includes_optional_userdata(self): |
577 | 77 | """collect-logs include userdata when --include-userdata is set.""" | 86 | """collect-logs include userdata when --include-userdata is set.""" |
578 | @@ -88,11 +97,13 @@ class TestCollectLogs(FilesystemMockingTestCase): | |||
579 | 88 | date = datetime.utcnow().date().strftime('%Y-%m-%d') | 97 | date = datetime.utcnow().date().strftime('%Y-%m-%d') |
580 | 89 | date_logdir = 'cloud-init-logs-{0}'.format(date) | 98 | date_logdir = 'cloud-init-logs-{0}'.format(date) |
581 | 90 | 99 | ||
582 | 100 | version_out = '/usr/bin/cloud-init 18.2fake\n' | ||
583 | 91 | expected_subp = { | 101 | expected_subp = { |
584 | 92 | ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'): | 102 | ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'): |
585 | 93 | '0.7fake', | 103 | '0.7fake', |
586 | 104 | ('cloud-init', '--version'): version_out, | ||
587 | 94 | ('dmesg',): 'dmesg-out\n', | 105 | ('dmesg',): 'dmesg-out\n', |
589 | 95 | ('journalctl', '-o', 'short-precise'): 'journal-out\n', | 106 | ('journalctl', '--boot=0', '-o', 'short-precise'): 'journal-out\n', |
590 | 96 | ('tar', 'czvf', output_tarfile, date_logdir): '' | 107 | ('tar', 'czvf', output_tarfile, date_logdir): '' |
591 | 97 | } | 108 | } |
592 | 98 | 109 | ||
593 | @@ -105,9 +116,12 @@ class TestCollectLogs(FilesystemMockingTestCase): | |||
594 | 105 | subp(cmd) # Pass through tar cmd so we can check output | 116 | subp(cmd) # Pass through tar cmd so we can check output |
595 | 106 | return expected_subp[cmd_tuple], '' | 117 | return expected_subp[cmd_tuple], '' |
596 | 107 | 118 | ||
597 | 119 | fake_stderr = mock.MagicMock() | ||
598 | 120 | |||
599 | 108 | wrap_and_call( | 121 | wrap_and_call( |
600 | 109 | 'cloudinit.cmd.devel.logs', | 122 | 'cloudinit.cmd.devel.logs', |
601 | 110 | {'subp': {'side_effect': fake_subp}, | 123 | {'subp': {'side_effect': fake_subp}, |
602 | 124 | 'sys.stderr': {'new': fake_stderr}, | ||
603 | 111 | 'CLOUDINIT_LOGS': {'new': [log1, log2]}, | 125 | 'CLOUDINIT_LOGS': {'new': [log1, log2]}, |
604 | 112 | 'CLOUDINIT_RUN_DIR': {'new': self.run_dir}, | 126 | 'CLOUDINIT_RUN_DIR': {'new': self.run_dir}, |
605 | 113 | 'USER_DATA_FILE': {'new': userdata}}, | 127 | 'USER_DATA_FILE': {'new': userdata}}, |
606 | @@ -118,3 +132,4 @@ class TestCollectLogs(FilesystemMockingTestCase): | |||
607 | 118 | self.assertEqual( | 132 | self.assertEqual( |
608 | 119 | 'user-data', | 133 | 'user-data', |
609 | 120 | load_file(os.path.join(out_logdir, 'user-data.txt'))) | 134 | load_file(os.path.join(out_logdir, 'user-data.txt'))) |
610 | 135 | fake_stderr.write.assert_any_call('Wrote %s\n' % output_tarfile) | ||
611 | diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py | |||
612 | index 3f2dbb9..d6ba90f 100644 | |||
613 | --- a/cloudinit/cmd/main.py | |||
614 | +++ b/cloudinit/cmd/main.py | |||
615 | @@ -187,7 +187,7 @@ def attempt_cmdline_url(path, network=True, cmdline=None): | |||
616 | 187 | data = None | 187 | data = None |
617 | 188 | header = b'#cloud-config' | 188 | header = b'#cloud-config' |
618 | 189 | try: | 189 | try: |
620 | 190 | resp = util.read_file_or_url(**kwargs) | 190 | resp = url_helper.read_file_or_url(**kwargs) |
621 | 191 | if resp.ok(): | 191 | if resp.ok(): |
622 | 192 | data = resp.contents | 192 | data = resp.contents |
623 | 193 | if not resp.contents.startswith(header): | 193 | if not resp.contents.startswith(header): |
624 | diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py | |||
625 | index dbe421c..e2c54ae 100644 | |||
626 | --- a/cloudinit/cmd/tests/test_main.py | |||
627 | +++ b/cloudinit/cmd/tests/test_main.py | |||
628 | @@ -56,7 +56,7 @@ class TestMain(FilesystemMockingTestCase): | |||
629 | 56 | cmdargs = myargs( | 56 | cmdargs = myargs( |
630 | 57 | debug=False, files=None, force=False, local=False, reporter=None, | 57 | debug=False, files=None, force=False, local=False, reporter=None, |
631 | 58 | subcommand='init') | 58 | subcommand='init') |
633 | 59 | (item1, item2) = wrap_and_call( | 59 | (_item1, item2) = wrap_and_call( |
634 | 60 | 'cloudinit.cmd.main', | 60 | 'cloudinit.cmd.main', |
635 | 61 | {'util.close_stdin': True, | 61 | {'util.close_stdin': True, |
636 | 62 | 'netinfo.debug_info': 'my net debug info', | 62 | 'netinfo.debug_info': 'my net debug info', |
637 | @@ -85,7 +85,7 @@ class TestMain(FilesystemMockingTestCase): | |||
638 | 85 | cmdargs = myargs( | 85 | cmdargs = myargs( |
639 | 86 | debug=False, files=None, force=False, local=False, reporter=None, | 86 | debug=False, files=None, force=False, local=False, reporter=None, |
640 | 87 | subcommand='init') | 87 | subcommand='init') |
642 | 88 | (item1, item2) = wrap_and_call( | 88 | (_item1, item2) = wrap_and_call( |
643 | 89 | 'cloudinit.cmd.main', | 89 | 'cloudinit.cmd.main', |
644 | 90 | {'util.close_stdin': True, | 90 | {'util.close_stdin': True, |
645 | 91 | 'netinfo.debug_info': 'my net debug info', | 91 | 'netinfo.debug_info': 'my net debug info', |
646 | @@ -133,7 +133,7 @@ class TestMain(FilesystemMockingTestCase): | |||
647 | 133 | self.assertEqual(main.LOG, log) | 133 | self.assertEqual(main.LOG, log) |
648 | 134 | self.assertIsNone(args) | 134 | self.assertIsNone(args) |
649 | 135 | 135 | ||
651 | 136 | (item1, item2) = wrap_and_call( | 136 | (_item1, item2) = wrap_and_call( |
652 | 137 | 'cloudinit.cmd.main', | 137 | 'cloudinit.cmd.main', |
653 | 138 | {'util.close_stdin': True, | 138 | {'util.close_stdin': True, |
654 | 139 | 'netinfo.debug_info': 'my net debug info', | 139 | 'netinfo.debug_info': 'my net debug info', |
655 | diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py | |||
656 | index 5b9cbca..e18944e 100644 | |||
657 | --- a/cloudinit/config/cc_apt_configure.py | |||
658 | +++ b/cloudinit/config/cc_apt_configure.py | |||
659 | @@ -121,7 +121,7 @@ and https protocols respectively. The ``proxy`` key also exists as an alias for | |||
660 | 121 | All source entries in ``apt-sources`` that match regex in | 121 | All source entries in ``apt-sources`` that match regex in |
661 | 122 | ``add_apt_repo_match`` will be added to the system using | 122 | ``add_apt_repo_match`` will be added to the system using |
662 | 123 | ``add-apt-repository``. If ``add_apt_repo_match`` is not specified, it defaults | 123 | ``add-apt-repository``. If ``add_apt_repo_match`` is not specified, it defaults |
664 | 124 | to ``^[\w-]+:\w`` | 124 | to ``^[\\w-]+:\\w`` |
665 | 125 | 125 | ||
666 | 126 | **Add source list entries:** | 126 | **Add source list entries:** |
667 | 127 | 127 | ||
668 | @@ -378,7 +378,7 @@ def apply_debconf_selections(cfg, target=None): | |||
669 | 378 | 378 | ||
670 | 379 | # get a complete list of packages listed in input | 379 | # get a complete list of packages listed in input |
671 | 380 | pkgs_cfgd = set() | 380 | pkgs_cfgd = set() |
673 | 381 | for key, content in selsets.items(): | 381 | for _key, content in selsets.items(): |
674 | 382 | for line in content.splitlines(): | 382 | for line in content.splitlines(): |
675 | 383 | if line.startswith("#"): | 383 | if line.startswith("#"): |
676 | 384 | continue | 384 | continue |
677 | diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py | |||
678 | index 233da1e..db64f0a 100644 | |||
679 | --- a/cloudinit/config/cc_bootcmd.py | |||
680 | +++ b/cloudinit/config/cc_bootcmd.py | |||
681 | @@ -63,7 +63,6 @@ schema = { | |||
682 | 63 | 'additionalProperties': False, | 63 | 'additionalProperties': False, |
683 | 64 | 'minItems': 1, | 64 | 'minItems': 1, |
684 | 65 | 'required': [], | 65 | 'required': [], |
685 | 66 | 'uniqueItems': True | ||
686 | 67 | } | 66 | } |
687 | 68 | } | 67 | } |
688 | 69 | } | 68 | } |
689 | diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py | |||
690 | index c56319b..885b313 100644 | |||
691 | --- a/cloudinit/config/cc_disable_ec2_metadata.py | |||
692 | +++ b/cloudinit/config/cc_disable_ec2_metadata.py | |||
693 | @@ -32,13 +32,23 @@ from cloudinit.settings import PER_ALWAYS | |||
694 | 32 | 32 | ||
695 | 33 | frequency = PER_ALWAYS | 33 | frequency = PER_ALWAYS |
696 | 34 | 34 | ||
698 | 35 | REJECT_CMD = ['route', 'add', '-host', '169.254.169.254', 'reject'] | 35 | REJECT_CMD_IF = ['route', 'add', '-host', '169.254.169.254', 'reject'] |
699 | 36 | REJECT_CMD_IP = ['ip', 'route', 'add', 'prohibit', '169.254.169.254'] | ||
700 | 36 | 37 | ||
701 | 37 | 38 | ||
702 | 38 | def handle(name, cfg, _cloud, log, _args): | 39 | def handle(name, cfg, _cloud, log, _args): |
703 | 39 | disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False) | 40 | disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False) |
704 | 40 | if disabled: | 41 | if disabled: |
706 | 41 | util.subp(REJECT_CMD, capture=False) | 42 | reject_cmd = None |
707 | 43 | if util.which('ip'): | ||
708 | 44 | reject_cmd = REJECT_CMD_IP | ||
709 | 45 | elif util.which('ifconfig'): | ||
710 | 46 | reject_cmd = REJECT_CMD_IF | ||
711 | 47 | else: | ||
712 | 48 | log.error(('Neither "route" nor "ip" command found, unable to ' | ||
713 | 49 | 'manipulate routing table')) | ||
714 | 50 | return | ||
715 | 51 | util.subp(reject_cmd, capture=False) | ||
716 | 42 | else: | 52 | else: |
717 | 43 | log.debug(("Skipping module named %s," | 53 | log.debug(("Skipping module named %s," |
718 | 44 | " disabling the ec2 route not enabled"), name) | 54 | " disabling the ec2 route not enabled"), name) |
719 | diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py | |||
720 | index c3e8c48..943089e 100644 | |||
721 | --- a/cloudinit/config/cc_disk_setup.py | |||
722 | +++ b/cloudinit/config/cc_disk_setup.py | |||
723 | @@ -680,13 +680,13 @@ def read_parttbl(device): | |||
724 | 680 | reliable way to probe the partition table. | 680 | reliable way to probe the partition table. |
725 | 681 | """ | 681 | """ |
726 | 682 | blkdev_cmd = [BLKDEV_CMD, '--rereadpt', device] | 682 | blkdev_cmd = [BLKDEV_CMD, '--rereadpt', device] |
728 | 683 | udevadm_settle() | 683 | util.udevadm_settle() |
729 | 684 | try: | 684 | try: |
730 | 685 | util.subp(blkdev_cmd) | 685 | util.subp(blkdev_cmd) |
731 | 686 | except Exception as e: | 686 | except Exception as e: |
732 | 687 | util.logexc(LOG, "Failed reading the partition table %s" % e) | 687 | util.logexc(LOG, "Failed reading the partition table %s" % e) |
733 | 688 | 688 | ||
735 | 689 | udevadm_settle() | 689 | util.udevadm_settle() |
736 | 690 | 690 | ||
737 | 691 | 691 | ||
738 | 692 | def exec_mkpart_mbr(device, layout): | 692 | def exec_mkpart_mbr(device, layout): |
739 | @@ -737,14 +737,10 @@ def exec_mkpart(table_type, device, layout): | |||
740 | 737 | return get_dyn_func("exec_mkpart_%s", table_type, device, layout) | 737 | return get_dyn_func("exec_mkpart_%s", table_type, device, layout) |
741 | 738 | 738 | ||
742 | 739 | 739 | ||
743 | 740 | def udevadm_settle(): | ||
744 | 741 | util.subp(['udevadm', 'settle']) | ||
745 | 742 | |||
746 | 743 | |||
747 | 744 | def assert_and_settle_device(device): | 740 | def assert_and_settle_device(device): |
748 | 745 | """Assert that device exists and settle so it is fully recognized.""" | 741 | """Assert that device exists and settle so it is fully recognized.""" |
749 | 746 | if not os.path.exists(device): | 742 | if not os.path.exists(device): |
751 | 747 | udevadm_settle() | 743 | util.udevadm_settle() |
752 | 748 | if not os.path.exists(device): | 744 | if not os.path.exists(device): |
753 | 749 | raise RuntimeError("Device %s did not exist and was not created " | 745 | raise RuntimeError("Device %s did not exist and was not created " |
754 | 750 | "with a udevamd settle." % device) | 746 | "with a udevamd settle." % device) |
755 | @@ -752,7 +748,7 @@ def assert_and_settle_device(device): | |||
756 | 752 | # Whether or not the device existed above, it is possible that udev | 748 | # Whether or not the device existed above, it is possible that udev |
757 | 753 | # events that would populate udev database (for reading by lsdname) have | 749 | # events that would populate udev database (for reading by lsdname) have |
758 | 754 | # not yet finished. So settle again. | 750 | # not yet finished. So settle again. |
760 | 755 | udevadm_settle() | 751 | util.udevadm_settle() |
761 | 756 | 752 | ||
762 | 757 | 753 | ||
763 | 758 | def mkpart(device, definition): | 754 | def mkpart(device, definition): |
764 | diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py | |||
765 | index 69dc2d5..eb9fbe6 100644 | |||
766 | --- a/cloudinit/config/cc_emit_upstart.py | |||
767 | +++ b/cloudinit/config/cc_emit_upstart.py | |||
768 | @@ -43,7 +43,7 @@ def is_upstart_system(): | |||
769 | 43 | del myenv['UPSTART_SESSION'] | 43 | del myenv['UPSTART_SESSION'] |
770 | 44 | check_cmd = ['initctl', 'version'] | 44 | check_cmd = ['initctl', 'version'] |
771 | 45 | try: | 45 | try: |
773 | 46 | (out, err) = util.subp(check_cmd, env=myenv) | 46 | (out, _err) = util.subp(check_cmd, env=myenv) |
774 | 47 | return 'upstart' in out | 47 | return 'upstart' in out |
775 | 48 | except util.ProcessExecutionError as e: | 48 | except util.ProcessExecutionError as e: |
776 | 49 | LOG.debug("'%s' returned '%s', not using upstart", | 49 | LOG.debug("'%s' returned '%s', not using upstart", |
777 | diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py | |||
778 | index 09374d2..ac72ac4 100644 | |||
779 | --- a/cloudinit/config/cc_lxd.py | |||
780 | +++ b/cloudinit/config/cc_lxd.py | |||
781 | @@ -47,11 +47,16 @@ lxd-bridge will be configured accordingly. | |||
782 | 47 | domain: <domain> | 47 | domain: <domain> |
783 | 48 | """ | 48 | """ |
784 | 49 | 49 | ||
785 | 50 | from cloudinit import log as logging | ||
786 | 50 | from cloudinit import util | 51 | from cloudinit import util |
787 | 51 | import os | 52 | import os |
788 | 52 | 53 | ||
789 | 53 | distros = ['ubuntu'] | 54 | distros = ['ubuntu'] |
790 | 54 | 55 | ||
791 | 56 | LOG = logging.getLogger(__name__) | ||
792 | 57 | |||
793 | 58 | _DEFAULT_NETWORK_NAME = "lxdbr0" | ||
794 | 59 | |||
795 | 55 | 60 | ||
796 | 56 | def handle(name, cfg, cloud, log, args): | 61 | def handle(name, cfg, cloud, log, args): |
797 | 57 | # Get config | 62 | # Get config |
798 | @@ -109,6 +114,7 @@ def handle(name, cfg, cloud, log, args): | |||
799 | 109 | # Set up lxd-bridge if bridge config is given | 114 | # Set up lxd-bridge if bridge config is given |
800 | 110 | dconf_comm = "debconf-communicate" | 115 | dconf_comm = "debconf-communicate" |
801 | 111 | if bridge_cfg: | 116 | if bridge_cfg: |
802 | 117 | net_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME) | ||
803 | 112 | if os.path.exists("/etc/default/lxd-bridge") \ | 118 | if os.path.exists("/etc/default/lxd-bridge") \ |
804 | 113 | and util.which(dconf_comm): | 119 | and util.which(dconf_comm): |
805 | 114 | # Bridge configured through packaging | 120 | # Bridge configured through packaging |
806 | @@ -135,15 +141,18 @@ def handle(name, cfg, cloud, log, args): | |||
807 | 135 | else: | 141 | else: |
808 | 136 | # Built-in LXD bridge support | 142 | # Built-in LXD bridge support |
809 | 137 | cmd_create, cmd_attach = bridge_to_cmd(bridge_cfg) | 143 | cmd_create, cmd_attach = bridge_to_cmd(bridge_cfg) |
810 | 144 | maybe_cleanup_default( | ||
811 | 145 | net_name=net_name, did_init=bool(init_cfg), | ||
812 | 146 | create=bool(cmd_create), attach=bool(cmd_attach)) | ||
813 | 138 | if cmd_create: | 147 | if cmd_create: |
814 | 139 | log.debug("Creating lxd bridge: %s" % | 148 | log.debug("Creating lxd bridge: %s" % |
815 | 140 | " ".join(cmd_create)) | 149 | " ".join(cmd_create)) |
817 | 141 | util.subp(cmd_create) | 150 | _lxc(cmd_create) |
818 | 142 | 151 | ||
819 | 143 | if cmd_attach: | 152 | if cmd_attach: |
820 | 144 | log.debug("Setting up default lxd bridge: %s" % | 153 | log.debug("Setting up default lxd bridge: %s" % |
821 | 145 | " ".join(cmd_create)) | 154 | " ".join(cmd_create)) |
823 | 146 | util.subp(cmd_attach) | 155 | _lxc(cmd_attach) |
824 | 147 | 156 | ||
825 | 148 | elif bridge_cfg: | 157 | elif bridge_cfg: |
826 | 149 | raise RuntimeError( | 158 | raise RuntimeError( |
827 | @@ -204,10 +213,10 @@ def bridge_to_cmd(bridge_cfg): | |||
828 | 204 | if bridge_cfg.get("mode") == "none": | 213 | if bridge_cfg.get("mode") == "none": |
829 | 205 | return None, None | 214 | return None, None |
830 | 206 | 215 | ||
832 | 207 | bridge_name = bridge_cfg.get("name", "lxdbr0") | 216 | bridge_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME) |
833 | 208 | cmd_create = [] | 217 | cmd_create = [] |
836 | 209 | cmd_attach = ["lxc", "network", "attach-profile", bridge_name, | 218 | cmd_attach = ["network", "attach-profile", bridge_name, |
837 | 210 | "default", "eth0", "--force-local"] | 219 | "default", "eth0"] |
838 | 211 | 220 | ||
839 | 212 | if bridge_cfg.get("mode") == "existing": | 221 | if bridge_cfg.get("mode") == "existing": |
840 | 213 | return None, cmd_attach | 222 | return None, cmd_attach |
841 | @@ -215,7 +224,7 @@ def bridge_to_cmd(bridge_cfg): | |||
842 | 215 | if bridge_cfg.get("mode") != "new": | 224 | if bridge_cfg.get("mode") != "new": |
843 | 216 | raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode")) | 225 | raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode")) |
844 | 217 | 226 | ||
846 | 218 | cmd_create = ["lxc", "network", "create", bridge_name] | 227 | cmd_create = ["network", "create", bridge_name] |
847 | 219 | 228 | ||
848 | 220 | if bridge_cfg.get("ipv4_address") and bridge_cfg.get("ipv4_netmask"): | 229 | if bridge_cfg.get("ipv4_address") and bridge_cfg.get("ipv4_netmask"): |
849 | 221 | cmd_create.append("ipv4.address=%s/%s" % | 230 | cmd_create.append("ipv4.address=%s/%s" % |
850 | @@ -247,8 +256,47 @@ def bridge_to_cmd(bridge_cfg): | |||
851 | 247 | if bridge_cfg.get("domain"): | 256 | if bridge_cfg.get("domain"): |
852 | 248 | cmd_create.append("dns.domain=%s" % bridge_cfg.get("domain")) | 257 | cmd_create.append("dns.domain=%s" % bridge_cfg.get("domain")) |
853 | 249 | 258 | ||
854 | 250 | cmd_create.append("--force-local") | ||
855 | 251 | |||
856 | 252 | return cmd_create, cmd_attach | 259 | return cmd_create, cmd_attach |
857 | 253 | 260 | ||
858 | 261 | |||
859 | 262 | def _lxc(cmd): | ||
860 | 263 | env = {'LC_ALL': 'C'} | ||
861 | 264 | util.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env) | ||
862 | 265 | |||
863 | 266 | |||
864 | 267 | def maybe_cleanup_default(net_name, did_init, create, attach, | ||
865 | 268 | profile="default", nic_name="eth0"): | ||
866 | 269 | """Newer versions of lxc (3.0.1+) create a lxdbr0 network when | ||
867 | 270 | 'lxd init --auto' is run. Older versions did not. | ||
868 | 271 | |||
869 | 272 | By removing ay that lxd-init created, we simply leave the add/attach | ||
870 | 273 | code in-tact. | ||
871 | 274 | |||
872 | 275 | https://github.com/lxc/lxd/issues/4649""" | ||
873 | 276 | if net_name != _DEFAULT_NETWORK_NAME or not did_init: | ||
874 | 277 | return | ||
875 | 278 | |||
876 | 279 | fail_assume_enoent = " failed. Assuming it did not exist." | ||
877 | 280 | succeeded = " succeeded." | ||
878 | 281 | if create: | ||
879 | 282 | msg = "Deletion of lxd network '%s'" % net_name | ||
880 | 283 | try: | ||
881 | 284 | _lxc(["network", "delete", net_name]) | ||
882 | 285 | LOG.debug(msg + succeeded) | ||
883 | 286 | except util.ProcessExecutionError as e: | ||
884 | 287 | if e.exit_code != 1: | ||
885 | 288 | raise e | ||
886 | 289 | LOG.debug(msg + fail_assume_enoent) | ||
887 | 290 | |||
888 | 291 | if attach: | ||
889 | 292 | msg = "Removal of device '%s' from profile '%s'" % (nic_name, profile) | ||
890 | 293 | try: | ||
891 | 294 | _lxc(["profile", "device", "remove", profile, nic_name]) | ||
892 | 295 | LOG.debug(msg + succeeded) | ||
893 | 296 | except util.ProcessExecutionError as e: | ||
894 | 297 | if e.exit_code != 1: | ||
895 | 298 | raise e | ||
896 | 299 | LOG.debug(msg + fail_assume_enoent) | ||
897 | 300 | |||
898 | 301 | |||
899 | 254 | # vi: ts=4 expandtab | 302 | # vi: ts=4 expandtab |
900 | diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py | |||
901 | index f14a4fc..339baba 100644 | |||
902 | --- a/cloudinit/config/cc_mounts.py | |||
903 | +++ b/cloudinit/config/cc_mounts.py | |||
904 | @@ -76,6 +76,7 @@ DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$" | |||
905 | 76 | DEVICE_NAME_RE = re.compile(DEVICE_NAME_FILTER) | 76 | DEVICE_NAME_RE = re.compile(DEVICE_NAME_FILTER) |
906 | 77 | WS = re.compile("[%s]+" % (whitespace)) | 77 | WS = re.compile("[%s]+" % (whitespace)) |
907 | 78 | FSTAB_PATH = "/etc/fstab" | 78 | FSTAB_PATH = "/etc/fstab" |
908 | 79 | MNT_COMMENT = "comment=cloudconfig" | ||
909 | 79 | 80 | ||
910 | 80 | LOG = logging.getLogger(__name__) | 81 | LOG = logging.getLogger(__name__) |
911 | 81 | 82 | ||
912 | @@ -232,8 +233,8 @@ def setup_swapfile(fname, size=None, maxsize=None): | |||
913 | 232 | if str(size).lower() == "auto": | 233 | if str(size).lower() == "auto": |
914 | 233 | try: | 234 | try: |
915 | 234 | memsize = util.read_meminfo()['total'] | 235 | memsize = util.read_meminfo()['total'] |
918 | 235 | except IOError as e: | 236 | except IOError: |
919 | 236 | LOG.debug("Not creating swap. failed to read meminfo") | 237 | LOG.debug("Not creating swap: failed to read meminfo") |
920 | 237 | return | 238 | return |
921 | 238 | 239 | ||
922 | 239 | util.ensure_dir(tdir) | 240 | util.ensure_dir(tdir) |
923 | @@ -280,17 +281,17 @@ def handle_swapcfg(swapcfg): | |||
924 | 280 | 281 | ||
925 | 281 | if os.path.exists(fname): | 282 | if os.path.exists(fname): |
926 | 282 | if not os.path.exists("/proc/swaps"): | 283 | if not os.path.exists("/proc/swaps"): |
929 | 283 | LOG.debug("swap file %s existed. no /proc/swaps. Being safe.", | 284 | LOG.debug("swap file %s exists, but no /proc/swaps exists, " |
930 | 284 | fname) | 285 | "being safe", fname) |
931 | 285 | return fname | 286 | return fname |
932 | 286 | try: | 287 | try: |
933 | 287 | for line in util.load_file("/proc/swaps").splitlines(): | 288 | for line in util.load_file("/proc/swaps").splitlines(): |
934 | 288 | if line.startswith(fname + " "): | 289 | if line.startswith(fname + " "): |
936 | 289 | LOG.debug("swap file %s already in use.", fname) | 290 | LOG.debug("swap file %s already in use", fname) |
937 | 290 | return fname | 291 | return fname |
939 | 291 | LOG.debug("swap file %s existed, but not in /proc/swaps", fname) | 292 | LOG.debug("swap file %s exists, but not in /proc/swaps", fname) |
940 | 292 | except Exception: | 293 | except Exception: |
942 | 293 | LOG.warning("swap file %s existed. Error reading /proc/swaps", | 294 | LOG.warning("swap file %s exists. Error reading /proc/swaps", |
943 | 294 | fname) | 295 | fname) |
944 | 295 | return fname | 296 | return fname |
945 | 296 | 297 | ||
946 | @@ -327,6 +328,22 @@ def handle(_name, cfg, cloud, log, _args): | |||
947 | 327 | 328 | ||
948 | 328 | LOG.debug("mounts configuration is %s", cfgmnt) | 329 | LOG.debug("mounts configuration is %s", cfgmnt) |
949 | 329 | 330 | ||
950 | 331 | fstab_lines = [] | ||
951 | 332 | fstab_devs = {} | ||
952 | 333 | fstab_removed = [] | ||
953 | 334 | |||
954 | 335 | for line in util.load_file(FSTAB_PATH).splitlines(): | ||
955 | 336 | if MNT_COMMENT in line: | ||
956 | 337 | fstab_removed.append(line) | ||
957 | 338 | continue | ||
958 | 339 | |||
959 | 340 | try: | ||
960 | 341 | toks = WS.split(line) | ||
961 | 342 | except Exception: | ||
962 | 343 | pass | ||
963 | 344 | fstab_devs[toks[0]] = line | ||
964 | 345 | fstab_lines.append(line) | ||
965 | 346 | |||
966 | 330 | for i in range(len(cfgmnt)): | 347 | for i in range(len(cfgmnt)): |
967 | 331 | # skip something that wasn't a list | 348 | # skip something that wasn't a list |
968 | 332 | if not isinstance(cfgmnt[i], list): | 349 | if not isinstance(cfgmnt[i], list): |
969 | @@ -336,12 +353,17 @@ def handle(_name, cfg, cloud, log, _args): | |||
970 | 336 | 353 | ||
971 | 337 | start = str(cfgmnt[i][0]) | 354 | start = str(cfgmnt[i][0]) |
972 | 338 | sanitized = sanitize_devname(start, cloud.device_name_to_device, log) | 355 | sanitized = sanitize_devname(start, cloud.device_name_to_device, log) |
973 | 356 | if sanitized != start: | ||
974 | 357 | log.debug("changed %s => %s" % (start, sanitized)) | ||
975 | 358 | |||
976 | 339 | if sanitized is None: | 359 | if sanitized is None: |
978 | 340 | log.debug("Ignorming nonexistant named mount %s", start) | 360 | log.debug("Ignoring nonexistent named mount %s", start) |
979 | 361 | continue | ||
980 | 362 | elif sanitized in fstab_devs: | ||
981 | 363 | log.info("Device %s already defined in fstab: %s", | ||
982 | 364 | sanitized, fstab_devs[sanitized]) | ||
983 | 341 | continue | 365 | continue |
984 | 342 | 366 | ||
985 | 343 | if sanitized != start: | ||
986 | 344 | log.debug("changed %s => %s" % (start, sanitized)) | ||
987 | 345 | cfgmnt[i][0] = sanitized | 367 | cfgmnt[i][0] = sanitized |
988 | 346 | 368 | ||
989 | 347 | # in case the user did not quote a field (likely fs-freq, fs_passno) | 369 | # in case the user did not quote a field (likely fs-freq, fs_passno) |
990 | @@ -373,11 +395,17 @@ def handle(_name, cfg, cloud, log, _args): | |||
991 | 373 | for defmnt in defmnts: | 395 | for defmnt in defmnts: |
992 | 374 | start = defmnt[0] | 396 | start = defmnt[0] |
993 | 375 | sanitized = sanitize_devname(start, cloud.device_name_to_device, log) | 397 | sanitized = sanitize_devname(start, cloud.device_name_to_device, log) |
994 | 376 | if sanitized is None: | ||
995 | 377 | log.debug("Ignoring nonexistant default named mount %s", start) | ||
996 | 378 | continue | ||
997 | 379 | if sanitized != start: | 398 | if sanitized != start: |
998 | 380 | log.debug("changed default device %s => %s" % (start, sanitized)) | 399 | log.debug("changed default device %s => %s" % (start, sanitized)) |
999 | 400 | |||
1000 | 401 | if sanitized is None: | ||
1001 | 402 | log.debug("Ignoring nonexistent default named mount %s", start) | ||
1002 | 403 | continue | ||
1003 | 404 | elif sanitized in fstab_devs: | ||
1004 | 405 | log.debug("Device %s already defined in fstab: %s", | ||
1005 | 406 | sanitized, fstab_devs[sanitized]) | ||
1006 | 407 | continue | ||
1007 | 408 | |||
1008 | 381 | defmnt[0] = sanitized | 409 | defmnt[0] = sanitized |
1009 | 382 | 410 | ||
1010 | 383 | cfgmnt_has = False | 411 | cfgmnt_has = False |
1011 | @@ -397,7 +425,7 @@ def handle(_name, cfg, cloud, log, _args): | |||
1012 | 397 | actlist = [] | 425 | actlist = [] |
1013 | 398 | for x in cfgmnt: | 426 | for x in cfgmnt: |
1014 | 399 | if x[1] is None: | 427 | if x[1] is None: |
1016 | 400 | log.debug("Skipping non-existent device named %s", x[0]) | 428 | log.debug("Skipping nonexistent device named %s", x[0]) |
1017 | 401 | else: | 429 | else: |
1018 | 402 | actlist.append(x) | 430 | actlist.append(x) |
1019 | 403 | 431 | ||
1020 | @@ -406,34 +434,21 @@ def handle(_name, cfg, cloud, log, _args): | |||
1021 | 406 | actlist.append([swapret, "none", "swap", "sw", "0", "0"]) | 434 | actlist.append([swapret, "none", "swap", "sw", "0", "0"]) |
1022 | 407 | 435 | ||
1023 | 408 | if len(actlist) == 0: | 436 | if len(actlist) == 0: |
1025 | 409 | log.debug("No modifications to fstab needed.") | 437 | log.debug("No modifications to fstab needed") |
1026 | 410 | return | 438 | return |
1027 | 411 | 439 | ||
1028 | 412 | comment = "comment=cloudconfig" | ||
1029 | 413 | cc_lines = [] | 440 | cc_lines = [] |
1030 | 414 | needswap = False | 441 | needswap = False |
1031 | 415 | dirs = [] | 442 | dirs = [] |
1032 | 416 | for line in actlist: | 443 | for line in actlist: |
1033 | 417 | # write 'comment' in the fs_mntops, entry, claiming this | 444 | # write 'comment' in the fs_mntops, entry, claiming this |
1035 | 418 | line[3] = "%s,%s" % (line[3], comment) | 445 | line[3] = "%s,%s" % (line[3], MNT_COMMENT) |
1036 | 419 | if line[2] == "swap": | 446 | if line[2] == "swap": |
1037 | 420 | needswap = True | 447 | needswap = True |
1038 | 421 | if line[1].startswith("/"): | 448 | if line[1].startswith("/"): |
1039 | 422 | dirs.append(line[1]) | 449 | dirs.append(line[1]) |
1040 | 423 | cc_lines.append('\t'.join(line)) | 450 | cc_lines.append('\t'.join(line)) |
1041 | 424 | 451 | ||
1042 | 425 | fstab_lines = [] | ||
1043 | 426 | removed = [] | ||
1044 | 427 | for line in util.load_file(FSTAB_PATH).splitlines(): | ||
1045 | 428 | try: | ||
1046 | 429 | toks = WS.split(line) | ||
1047 | 430 | if toks[3].find(comment) != -1: | ||
1048 | 431 | removed.append(line) | ||
1049 | 432 | continue | ||
1050 | 433 | except Exception: | ||
1051 | 434 | pass | ||
1052 | 435 | fstab_lines.append(line) | ||
1053 | 436 | |||
1054 | 437 | for d in dirs: | 452 | for d in dirs: |
1055 | 438 | try: | 453 | try: |
1056 | 439 | util.ensure_dir(d) | 454 | util.ensure_dir(d) |
1057 | @@ -441,7 +456,7 @@ def handle(_name, cfg, cloud, log, _args): | |||
1058 | 441 | util.logexc(log, "Failed to make '%s' config-mount", d) | 456 | util.logexc(log, "Failed to make '%s' config-mount", d) |
1059 | 442 | 457 | ||
1060 | 443 | sadds = [WS.sub(" ", n) for n in cc_lines] | 458 | sadds = [WS.sub(" ", n) for n in cc_lines] |
1062 | 444 | sdrops = [WS.sub(" ", n) for n in removed] | 459 | sdrops = [WS.sub(" ", n) for n in fstab_removed] |
1063 | 445 | 460 | ||
1064 | 446 | sops = (["- " + drop for drop in sdrops if drop not in sadds] + | 461 | sops = (["- " + drop for drop in sdrops if drop not in sadds] + |
1065 | 447 | ["+ " + add for add in sadds if add not in sdrops]) | 462 | ["+ " + add for add in sadds if add not in sdrops]) |
1066 | diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py | |||
1067 | index cbd0237..9e074bd 100644 | |||
1068 | --- a/cloudinit/config/cc_ntp.py | |||
1069 | +++ b/cloudinit/config/cc_ntp.py | |||
1070 | @@ -10,20 +10,95 @@ from cloudinit.config.schema import ( | |||
1071 | 10 | get_schema_doc, validate_cloudconfig_schema) | 10 | get_schema_doc, validate_cloudconfig_schema) |
1072 | 11 | from cloudinit import log as logging | 11 | from cloudinit import log as logging |
1073 | 12 | from cloudinit.settings import PER_INSTANCE | 12 | from cloudinit.settings import PER_INSTANCE |
1074 | 13 | from cloudinit import temp_utils | ||
1075 | 13 | from cloudinit import templater | 14 | from cloudinit import templater |
1076 | 14 | from cloudinit import type_utils | 15 | from cloudinit import type_utils |
1077 | 15 | from cloudinit import util | 16 | from cloudinit import util |
1078 | 16 | 17 | ||
1079 | 18 | import copy | ||
1080 | 17 | import os | 19 | import os |
1081 | 20 | import six | ||
1082 | 18 | from textwrap import dedent | 21 | from textwrap import dedent |
1083 | 19 | 22 | ||
1084 | 20 | LOG = logging.getLogger(__name__) | 23 | LOG = logging.getLogger(__name__) |
1085 | 21 | 24 | ||
1086 | 22 | frequency = PER_INSTANCE | 25 | frequency = PER_INSTANCE |
1087 | 23 | NTP_CONF = '/etc/ntp.conf' | 26 | NTP_CONF = '/etc/ntp.conf' |
1088 | 24 | TIMESYNCD_CONF = '/etc/systemd/timesyncd.conf.d/cloud-init.conf' | ||
1089 | 25 | NR_POOL_SERVERS = 4 | 27 | NR_POOL_SERVERS = 4 |
1091 | 26 | distros = ['centos', 'debian', 'fedora', 'opensuse', 'sles', 'ubuntu'] | 28 | distros = ['centos', 'debian', 'fedora', 'opensuse', 'rhel', 'sles', 'ubuntu'] |
1092 | 29 | |||
1093 | 30 | NTP_CLIENT_CONFIG = { | ||
1094 | 31 | 'chrony': { | ||
1095 | 32 | 'check_exe': 'chronyd', | ||
1096 | 33 | 'confpath': '/etc/chrony.conf', | ||
1097 | 34 | 'packages': ['chrony'], | ||
1098 | 35 | 'service_name': 'chrony', | ||
1099 | 36 | 'template_name': 'chrony.conf.{distro}', | ||
1100 | 37 | 'template': None, | ||
1101 | 38 | }, | ||
1102 | 39 | 'ntp': { | ||
1103 | 40 | 'check_exe': 'ntpd', | ||
1104 | 41 | 'confpath': NTP_CONF, | ||
1105 | 42 | 'packages': ['ntp'], | ||
1106 | 43 | 'service_name': 'ntp', | ||
1107 | 44 | 'template_name': 'ntp.conf.{distro}', | ||
1108 | 45 | 'template': None, | ||
1109 | 46 | }, | ||
1110 | 47 | 'ntpdate': { | ||
1111 | 48 | 'check_exe': 'ntpdate', | ||
1112 | 49 | 'confpath': NTP_CONF, | ||
1113 | 50 | 'packages': ['ntpdate'], | ||
1114 | 51 | 'service_name': 'ntpdate', | ||
1115 | 52 | 'template_name': 'ntp.conf.{distro}', | ||
1116 | 53 | 'template': None, | ||
1117 | 54 | }, | ||
1118 | 55 | 'systemd-timesyncd': { | ||
1119 | 56 | 'check_exe': '/lib/systemd/systemd-timesyncd', | ||
1120 | 57 | 'confpath': '/etc/systemd/timesyncd.conf.d/cloud-init.conf', | ||
1121 | 58 | 'packages': [], | ||
1122 | 59 | 'service_name': 'systemd-timesyncd', | ||
1123 | 60 | 'template_name': 'timesyncd.conf', | ||
1124 | 61 | 'template': None, | ||
1125 | 62 | }, | ||
1126 | 63 | } | ||
1127 | 64 | |||
1128 | 65 | # This is Distro-specific configuration overrides of the base config | ||
1129 | 66 | DISTRO_CLIENT_CONFIG = { | ||
1130 | 67 | 'debian': { | ||
1131 | 68 | 'chrony': { | ||
1132 | 69 | 'confpath': '/etc/chrony/chrony.conf', | ||
1133 | 70 | }, | ||
1134 | 71 | }, | ||
1135 | 72 | 'opensuse': { | ||
1136 | 73 | 'chrony': { | ||
1137 | 74 | 'service_name': 'chronyd', | ||
1138 | 75 | }, | ||
1139 | 76 | 'ntp': { | ||
1140 | 77 | 'confpath': '/etc/ntp.conf', | ||
1141 | 78 | 'service_name': 'ntpd', | ||
1142 | 79 | }, | ||
1143 | 80 | 'systemd-timesyncd': { | ||
1144 | 81 | 'check_exe': '/usr/lib/systemd/systemd-timesyncd', | ||
1145 | 82 | }, | ||
1146 | 83 | }, | ||
1147 | 84 | 'sles': { | ||
1148 | 85 | 'chrony': { | ||
1149 | 86 | 'service_name': 'chronyd', | ||
1150 | 87 | }, | ||
1151 | 88 | 'ntp': { | ||
1152 | 89 | 'confpath': '/etc/ntp.conf', | ||
1153 | 90 | 'service_name': 'ntpd', | ||
1154 | 91 | }, | ||
1155 | 92 | 'systemd-timesyncd': { | ||
1156 | 93 | 'check_exe': '/usr/lib/systemd/systemd-timesyncd', | ||
1157 | 94 | }, | ||
1158 | 95 | }, | ||
1159 | 96 | 'ubuntu': { | ||
1160 | 97 | 'chrony': { | ||
1161 | 98 | 'confpath': '/etc/chrony/chrony.conf', | ||
1162 | 99 | }, | ||
1163 | 100 | }, | ||
1164 | 101 | } | ||
1165 | 27 | 102 | ||
1166 | 28 | 103 | ||
1167 | 29 | # The schema definition for each cloud-config module is a strict contract for | 104 | # The schema definition for each cloud-config module is a strict contract for |
1168 | @@ -48,7 +123,34 @@ schema = { | |||
1169 | 48 | 'distros': distros, | 123 | 'distros': distros, |
1170 | 49 | 'examples': [ | 124 | 'examples': [ |
1171 | 50 | dedent("""\ | 125 | dedent("""\ |
1172 | 126 | # Override ntp with chrony configuration on Ubuntu | ||
1173 | 127 | ntp: | ||
1174 | 128 | enabled: true | ||
1175 | 129 | ntp_client: chrony # Uses cloud-init default chrony configuration | ||
1176 | 130 | """), | ||
1177 | 131 | dedent("""\ | ||
1178 | 132 | # Provide a custom ntp client configuration | ||
1179 | 51 | ntp: | 133 | ntp: |
1180 | 134 | enabled: true | ||
1181 | 135 | ntp_client: myntpclient | ||
1182 | 136 | config: | ||
1183 | 137 | confpath: /etc/myntpclient/myntpclient.conf | ||
1184 | 138 | check_exe: myntpclientd | ||
1185 | 139 | packages: | ||
1186 | 140 | - myntpclient | ||
1187 | 141 | service_name: myntpclient | ||
1188 | 142 | template: | | ||
1189 | 143 | ## template:jinja | ||
1190 | 144 | # My NTP Client config | ||
1191 | 145 | {% if pools -%}# pools{% endif %} | ||
1192 | 146 | {% for pool in pools -%} | ||
1193 | 147 | pool {{pool}} iburst | ||
1194 | 148 | {% endfor %} | ||
1195 | 149 | {%- if servers %}# servers | ||
1196 | 150 | {% endif %} | ||
1197 | 151 | {% for server in servers -%} | ||
1198 | 152 | server {{server}} iburst | ||
1199 | 153 | {% endfor %} | ||
1200 | 52 | pools: [0.int.pool.ntp.org, 1.int.pool.ntp.org, ntp.myorg.org] | 154 | pools: [0.int.pool.ntp.org, 1.int.pool.ntp.org, ntp.myorg.org] |
1201 | 53 | servers: | 155 | servers: |
1202 | 54 | - ntp.server.local | 156 | - ntp.server.local |
1203 | @@ -83,79 +185,159 @@ schema = { | |||
1204 | 83 | List of ntp servers. If both pools and servers are | 185 | List of ntp servers. If both pools and servers are |
1205 | 84 | empty, 4 default pool servers will be provided with | 186 | empty, 4 default pool servers will be provided with |
1206 | 85 | the format ``{0-3}.{distro}.pool.ntp.org``.""") | 187 | the format ``{0-3}.{distro}.pool.ntp.org``.""") |
1208 | 86 | } | 188 | }, |
1209 | 189 | 'ntp_client': { | ||
1210 | 190 | 'type': 'string', | ||
1211 | 191 | 'default': 'auto', | ||
1212 | 192 | 'description': dedent("""\ | ||
1213 | 193 | Name of an NTP client to use to configure system NTP. | ||
1214 | 194 | When unprovided or 'auto' the default client preferred | ||
1215 | 195 | by the distribution will be used. The following | ||
1216 | 196 | built-in client names can be used to override existing | ||
1217 | 197 | configuration defaults: chrony, ntp, ntpdate, | ||
1218 | 198 | systemd-timesyncd."""), | ||
1219 | 199 | }, | ||
1220 | 200 | 'enabled': { | ||
1221 | 201 | 'type': 'boolean', | ||
1222 | 202 | 'default': True, | ||
1223 | 203 | 'description': dedent("""\ | ||
1224 | 204 | Attempt to enable ntp clients if set to True. If set | ||
1225 | 205 | to False, ntp client will not be configured or | ||
1226 | 206 | installed"""), | ||
1227 | 207 | }, | ||
1228 | 208 | 'config': { | ||
1229 | 209 | 'description': dedent("""\ | ||
1230 | 210 | Configuration settings or overrides for the | ||
1231 | 211 | ``ntp_client`` specified."""), | ||
1232 | 212 | 'type': ['object'], | ||
1233 | 213 | 'properties': { | ||
1234 | 214 | 'confpath': { | ||
1235 | 215 | 'type': 'string', | ||
1236 | 216 | 'description': dedent("""\ | ||
1237 | 217 | The path to where the ``ntp_client`` | ||
1238 | 218 | configuration is written."""), | ||
1239 | 219 | }, | ||
1240 | 220 | 'check_exe': { | ||
1241 | 221 | 'type': 'string', | ||
1242 | 222 | 'description': dedent("""\ | ||
1243 | 223 | The executable name for the ``ntp_client``. | ||
1244 | 224 | For example, ntp service ``check_exe`` is | ||
1245 | 225 | 'ntpd' because it runs the ntpd binary."""), | ||
1246 | 226 | }, | ||
1247 | 227 | 'packages': { | ||
1248 | 228 | 'type': 'array', | ||
1249 | 229 | 'items': { | ||
1250 | 230 | 'type': 'string', | ||
1251 | 231 | }, | ||
1252 | 232 | 'uniqueItems': True, | ||
1253 | 233 | 'description': dedent("""\ | ||
1254 | 234 | List of packages needed to be installed for the | ||
1255 | 235 | selected ``ntp_client``."""), | ||
1256 | 236 | }, | ||
1257 | 237 | 'service_name': { | ||
1258 | 238 | 'type': 'string', | ||
1259 | 239 | 'description': dedent("""\ | ||
1260 | 240 | The systemd or sysvinit service name used to | ||
1261 | 241 | start and stop the ``ntp_client`` | ||
1262 | 242 | service."""), | ||
1263 | 243 | }, | ||
1264 | 244 | 'template': { | ||
1265 | 245 | 'type': 'string', | ||
1266 | 246 | 'description': dedent("""\ | ||
1267 | 247 | Inline template allowing users to define their | ||
1268 | 248 | own ``ntp_client`` configuration template. | ||
1269 | 249 | The value must start with '## template:jinja' | ||
1270 | 250 | to enable use of templating support. | ||
1271 | 251 | """), | ||
1272 | 252 | }, | ||
1273 | 253 | }, | ||
1274 | 254 | # Don't use REQUIRED_NTP_CONFIG_KEYS to allow for override | ||
1275 | 255 | # of builtin client values. | ||
1276 | 256 | 'required': [], | ||
1277 | 257 | 'minProperties': 1, # If we have config, define something | ||
1278 | 258 | 'additionalProperties': False | ||
1279 | 259 | }, | ||
1280 | 87 | }, | 260 | }, |
1281 | 88 | 'required': [], | 261 | 'required': [], |
1282 | 89 | 'additionalProperties': False | 262 | 'additionalProperties': False |
1283 | 90 | } | 263 | } |
1284 | 91 | } | 264 | } |
1285 | 92 | } | 265 | } |
1288 | 93 | 266 | REQUIRED_NTP_CONFIG_KEYS = frozenset([ | |
1289 | 94 | __doc__ = get_schema_doc(schema) # Supplement python help() | 267 | 'check_exe', 'confpath', 'packages', 'service_name']) |
1290 | 95 | 268 | ||
1291 | 96 | 269 | ||
1301 | 97 | def handle(name, cfg, cloud, log, _args): | 270 | __doc__ = get_schema_doc(schema) # Supplement python help() |
1293 | 98 | """Enable and configure ntp.""" | ||
1294 | 99 | if 'ntp' not in cfg: | ||
1295 | 100 | LOG.debug( | ||
1296 | 101 | "Skipping module named %s, not present or disabled by cfg", name) | ||
1297 | 102 | return | ||
1298 | 103 | ntp_cfg = cfg['ntp'] | ||
1299 | 104 | if ntp_cfg is None: | ||
1300 | 105 | ntp_cfg = {} # Allow empty config which will install the package | ||
1302 | 106 | 271 | ||
1303 | 107 | # TODO drop this when validate_cloudconfig_schema is strict=True | ||
1304 | 108 | if not isinstance(ntp_cfg, (dict)): | ||
1305 | 109 | raise RuntimeError( | ||
1306 | 110 | "'ntp' key existed in config, but not a dictionary type," | ||
1307 | 111 | " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg))) | ||
1308 | 112 | 272 | ||
1329 | 113 | validate_cloudconfig_schema(cfg, schema) | 273 | def distro_ntp_client_configs(distro): |
1330 | 114 | if ntp_installable(): | 274 | """Construct a distro-specific ntp client config dictionary by merging |
1331 | 115 | service_name = 'ntp' | 275 | distro specific changes into base config. |
1312 | 116 | confpath = NTP_CONF | ||
1313 | 117 | template_name = None | ||
1314 | 118 | packages = ['ntp'] | ||
1315 | 119 | check_exe = 'ntpd' | ||
1316 | 120 | else: | ||
1317 | 121 | service_name = 'systemd-timesyncd' | ||
1318 | 122 | confpath = TIMESYNCD_CONF | ||
1319 | 123 | template_name = 'timesyncd.conf' | ||
1320 | 124 | packages = [] | ||
1321 | 125 | check_exe = '/lib/systemd/systemd-timesyncd' | ||
1322 | 126 | |||
1323 | 127 | rename_ntp_conf() | ||
1324 | 128 | # ensure when ntp is installed it has a configuration file | ||
1325 | 129 | # to use instead of starting up with packaged defaults | ||
1326 | 130 | write_ntp_config_template(ntp_cfg, cloud, confpath, template=template_name) | ||
1327 | 131 | install_ntp(cloud.distro.install_packages, packages=packages, | ||
1328 | 132 | check_exe=check_exe) | ||
1332 | 133 | 276 | ||
1338 | 134 | try: | 277 | @param distro: String providing the distro class name. |
1339 | 135 | reload_ntp(service_name, systemd=cloud.distro.uses_systemd()) | 278 | @returns: Dict of distro configurations for ntp clients. |
1340 | 136 | except util.ProcessExecutionError as e: | 279 | """ |
1341 | 137 | LOG.exception("Failed to reload/start ntp service: %s", e) | 280 | dcfg = DISTRO_CLIENT_CONFIG |
1342 | 138 | raise | 281 | cfg = copy.copy(NTP_CLIENT_CONFIG) |
1343 | 282 | if distro in dcfg: | ||
1344 | 283 | cfg = util.mergemanydict([cfg, dcfg[distro]], reverse=True) | ||
1345 | 284 | return cfg | ||
1346 | 139 | 285 | ||
1347 | 140 | 286 | ||
1350 | 141 | def ntp_installable(): | 287 | def select_ntp_client(ntp_client, distro): |
1351 | 142 | """Check if we can install ntp package | 288 | """Determine which ntp client is to be used, consulting the distro |
1352 | 289 | for its preference. | ||
1353 | 143 | 290 | ||
1358 | 144 | Ubuntu-Core systems do not have an ntp package available, so | 291 | @param ntp_client: String name of the ntp client to use. |
1359 | 145 | we always return False. Other systems require package managers to install | 292 | @param distro: Distro class instance. |
1360 | 146 | the ntp package If we fail to find one of the package managers, then we | 293 | @returns: Dict of the selected ntp client or {} if none selected. |
1357 | 147 | cannot install ntp. | ||
1361 | 148 | """ | 294 | """ |
1362 | 149 | if util.system_is_snappy(): | ||
1363 | 150 | return False | ||
1364 | 151 | 295 | ||
1367 | 152 | if any(map(util.which, ['apt-get', 'dnf', 'yum', 'zypper'])): | 296 | # construct distro-specific ntp_client_config dict |
1368 | 153 | return True | 297 | distro_cfg = distro_ntp_client_configs(distro.name) |
1369 | 298 | |||
1370 | 299 | # user specified client, return its config | ||
1371 | 300 | if ntp_client and ntp_client != 'auto': | ||
1372 | 301 | LOG.debug('Selected NTP client "%s" via user-data configuration', | ||
1373 | 302 | ntp_client) | ||
1374 | 303 | return distro_cfg.get(ntp_client, {}) | ||
1375 | 304 | |||
1376 | 305 | # default to auto if unset in distro | ||
1377 | 306 | distro_ntp_client = distro.get_option('ntp_client', 'auto') | ||
1378 | 307 | |||
1379 | 308 | clientcfg = {} | ||
1380 | 309 | if distro_ntp_client == "auto": | ||
1381 | 310 | for client in distro.preferred_ntp_clients: | ||
1382 | 311 | cfg = distro_cfg.get(client) | ||
1383 | 312 | if util.which(cfg.get('check_exe')): | ||
1384 | 313 | LOG.debug('Selected NTP client "%s", already installed', | ||
1385 | 314 | client) | ||
1386 | 315 | clientcfg = cfg | ||
1387 | 316 | break | ||
1388 | 317 | |||
1389 | 318 | if not clientcfg: | ||
1390 | 319 | client = distro.preferred_ntp_clients[0] | ||
1391 | 320 | LOG.debug( | ||
1392 | 321 | 'Selected distro preferred NTP client "%s", not yet installed', | ||
1393 | 322 | client) | ||
1394 | 323 | clientcfg = distro_cfg.get(client) | ||
1395 | 324 | else: | ||
1396 | 325 | LOG.debug('Selected NTP client "%s" via distro system config', | ||
1397 | 326 | distro_ntp_client) | ||
1398 | 327 | clientcfg = distro_cfg.get(distro_ntp_client, {}) | ||
1399 | 328 | |||
1400 | 329 | return clientcfg | ||
1401 | 154 | 330 | ||
1402 | 155 | return False | ||
1403 | 156 | 331 | ||
1404 | 332 | def install_ntp_client(install_func, packages=None, check_exe="ntpd"): | ||
1405 | 333 | """Install ntp client package if not already installed. | ||
1406 | 157 | 334 | ||
1408 | 158 | def install_ntp(install_func, packages=None, check_exe="ntpd"): | 335 | @param install_func: function. This parameter is invoked with the contents |
1409 | 336 | of the packages parameter. | ||
1410 | 337 | @param packages: list. This parameter defaults to ['ntp']. | ||
1411 | 338 | @param check_exe: string. The name of a binary that indicates the package | ||
1412 | 339 | the specified package is already installed. | ||
1413 | 340 | """ | ||
1414 | 159 | if util.which(check_exe): | 341 | if util.which(check_exe): |
1415 | 160 | return | 342 | return |
1416 | 161 | if packages is None: | 343 | if packages is None: |
1417 | @@ -164,15 +346,23 @@ def install_ntp(install_func, packages=None, check_exe="ntpd"): | |||
1418 | 164 | install_func(packages) | 346 | install_func(packages) |
1419 | 165 | 347 | ||
1420 | 166 | 348 | ||
1427 | 167 | def rename_ntp_conf(config=None): | 349 | def rename_ntp_conf(confpath=None): |
1428 | 168 | """Rename any existing ntp.conf file""" | 350 | """Rename any existing ntp client config file |
1429 | 169 | if config is None: # For testing | 351 | |
1430 | 170 | config = NTP_CONF | 352 | @param confpath: string. Specify a path to an existing ntp client |
1431 | 171 | if os.path.exists(config): | 353 | configuration file. |
1432 | 172 | util.rename(config, config + ".dist") | 354 | """ |
1433 | 355 | if os.path.exists(confpath): | ||
1434 | 356 | util.rename(confpath, confpath + ".dist") | ||
1435 | 173 | 357 | ||
1436 | 174 | 358 | ||
1437 | 175 | def generate_server_names(distro): | 359 | def generate_server_names(distro): |
1438 | 360 | """Generate a list of server names to populate an ntp client configuration | ||
1439 | 361 | file. | ||
1440 | 362 | |||
1441 | 363 | @param distro: string. Specify the distro name | ||
1442 | 364 | @returns: list: A list of strings representing ntp servers for this distro. | ||
1443 | 365 | """ | ||
1444 | 176 | names = [] | 366 | names = [] |
1445 | 177 | pool_distro = distro | 367 | pool_distro = distro |
1446 | 178 | # For legal reasons x.pool.sles.ntp.org does not exist, | 368 | # For legal reasons x.pool.sles.ntp.org does not exist, |
1447 | @@ -185,34 +375,60 @@ def generate_server_names(distro): | |||
1448 | 185 | return names | 375 | return names |
1449 | 186 | 376 | ||
1450 | 187 | 377 | ||
1454 | 188 | def write_ntp_config_template(cfg, cloud, path, template=None): | 378 | def write_ntp_config_template(distro_name, servers=None, pools=None, |
1455 | 189 | servers = cfg.get('servers', []) | 379 | path=None, template_fn=None, template=None): |
1456 | 190 | pools = cfg.get('pools', []) | 380 | """Render a ntp client configuration for the specified client. |
1457 | 381 | |||
1458 | 382 | @param distro_name: string. The distro class name. | ||
1459 | 383 | @param servers: A list of strings specifying ntp servers. Defaults to empty | ||
1460 | 384 | list. | ||
1461 | 385 | @param pools: A list of strings specifying ntp pools. Defaults to empty | ||
1462 | 386 | list. | ||
1463 | 387 | @param path: A string to specify where to write the rendered template. | ||
1464 | 388 | @param template_fn: A string to specify the template source file. | ||
1465 | 389 | @param template: A string specifying the contents of the template. This | ||
1466 | 390 | content will be written to a temporary file before being used to render | ||
1467 | 391 | the configuration file. | ||
1468 | 392 | |||
1469 | 393 | @raises: ValueError when path is None. | ||
1470 | 394 | @raises: ValueError when template_fn is None and template is None. | ||
1471 | 395 | """ | ||
1472 | 396 | if not servers: | ||
1473 | 397 | servers = [] | ||
1474 | 398 | if not pools: | ||
1475 | 399 | pools = [] | ||
1476 | 191 | 400 | ||
1477 | 192 | if len(servers) == 0 and len(pools) == 0: | 401 | if len(servers) == 0 and len(pools) == 0: |
1479 | 193 | pools = generate_server_names(cloud.distro.name) | 402 | pools = generate_server_names(distro_name) |
1480 | 194 | LOG.debug( | 403 | LOG.debug( |
1481 | 195 | 'Adding distro default ntp pool servers: %s', ','.join(pools)) | 404 | 'Adding distro default ntp pool servers: %s', ','.join(pools)) |
1482 | 196 | 405 | ||
1487 | 197 | params = { | 406 | if not path: |
1488 | 198 | 'servers': servers, | 407 | raise ValueError('Invalid value for path parameter') |
1485 | 199 | 'pools': pools, | ||
1486 | 200 | } | ||
1489 | 201 | 408 | ||
1492 | 202 | if template is None: | 409 | if not template_fn and not template: |
1493 | 203 | template = 'ntp.conf.%s' % cloud.distro.name | 410 | raise ValueError('Not template_fn or template provided') |
1494 | 204 | 411 | ||
1501 | 205 | template_fn = cloud.get_template_filename(template) | 412 | params = {'servers': servers, 'pools': pools} |
1502 | 206 | if not template_fn: | 413 | if template: |
1503 | 207 | template_fn = cloud.get_template_filename('ntp.conf') | 414 | tfile = temp_utils.mkstemp(prefix='template_name-', suffix=".tmpl") |
1504 | 208 | if not template_fn: | 415 | template_fn = tfile[1] # filepath is second item in tuple |
1505 | 209 | raise RuntimeError( | 416 | util.write_file(template_fn, content=template) |
1500 | 210 | 'No template found, not rendering {path}'.format(path=path)) | ||
1506 | 211 | 417 | ||
1507 | 212 | templater.render_to_file(template_fn, path, params) | 418 | templater.render_to_file(template_fn, path, params) |
1508 | 419 | # clean up temporary template | ||
1509 | 420 | if template: | ||
1510 | 421 | util.del_file(template_fn) | ||
1511 | 213 | 422 | ||
1512 | 214 | 423 | ||
1513 | 215 | def reload_ntp(service, systemd=False): | 424 | def reload_ntp(service, systemd=False): |
1514 | 425 | """Restart or reload an ntp system service. | ||
1515 | 426 | |||
1516 | 427 | @param service: A string specifying the name of the service to be affected. | ||
1517 | 428 | @param systemd: A boolean indicating if the distro uses systemd, defaults | ||
1518 | 429 | to False. | ||
1519 | 430 | @returns: A tuple of stdout, stderr results from executing the action. | ||
1520 | 431 | """ | ||
1521 | 216 | if systemd: | 432 | if systemd: |
1522 | 217 | cmd = ['systemctl', 'reload-or-restart', service] | 433 | cmd = ['systemctl', 'reload-or-restart', service] |
1523 | 218 | else: | 434 | else: |
1524 | @@ -220,4 +436,117 @@ def reload_ntp(service, systemd=False): | |||
1525 | 220 | util.subp(cmd, capture=True) | 436 | util.subp(cmd, capture=True) |
1526 | 221 | 437 | ||
1527 | 222 | 438 | ||
1528 | 439 | def supplemental_schema_validation(ntp_config): | ||
1529 | 440 | """Validate user-provided ntp:config option values. | ||
1530 | 441 | |||
1531 | 442 | This function supplements flexible jsonschema validation with specific | ||
1532 | 443 | value checks to aid in triage of invalid user-provided configuration. | ||
1533 | 444 | |||
1534 | 445 | @param ntp_config: Dictionary of configuration value under 'ntp'. | ||
1535 | 446 | |||
1536 | 447 | @raises: ValueError describing invalid values provided. | ||
1537 | 448 | """ | ||
1538 | 449 | errors = [] | ||
1539 | 450 | missing = REQUIRED_NTP_CONFIG_KEYS.difference(set(ntp_config.keys())) | ||
1540 | 451 | if missing: | ||
1541 | 452 | keys = ', '.join(sorted(missing)) | ||
1542 | 453 | errors.append( | ||
1543 | 454 | 'Missing required ntp:config keys: {keys}'.format(keys=keys)) | ||
1544 | 455 | elif not any([ntp_config.get('template'), | ||
1545 | 456 | ntp_config.get('template_name')]): | ||
1546 | 457 | errors.append( | ||
1547 | 458 | 'Either ntp:config:template or ntp:config:template_name values' | ||
1548 | 459 | ' are required') | ||
1549 | 460 | for key, value in sorted(ntp_config.items()): | ||
1550 | 461 | keypath = 'ntp:config:' + key | ||
1551 | 462 | if key == 'confpath': | ||
1552 | 463 | if not all([value, isinstance(value, six.string_types)]): | ||
1553 | 464 | errors.append( | ||
1554 | 465 | 'Expected a config file path {keypath}.' | ||
1555 | 466 | ' Found ({value})'.format(keypath=keypath, value=value)) | ||
1556 | 467 | elif key == 'packages': | ||
1557 | 468 | if not isinstance(value, list): | ||
1558 | 469 | errors.append( | ||
1559 | 470 | 'Expected a list of required package names for {keypath}.' | ||
1560 | 471 | ' Found ({value})'.format(keypath=keypath, value=value)) | ||
1561 | 472 | elif key in ('template', 'template_name'): | ||
1562 | 473 | if value is None: # Either template or template_name can be none | ||
1563 | 474 | continue | ||
1564 | 475 | if not isinstance(value, six.string_types): | ||
1565 | 476 | errors.append( | ||
1566 | 477 | 'Expected a string type for {keypath}.' | ||
1567 | 478 | ' Found ({value})'.format(keypath=keypath, value=value)) | ||
1568 | 479 | elif not isinstance(value, six.string_types): | ||
1569 | 480 | errors.append( | ||
1570 | 481 | 'Expected a string type for {keypath}.' | ||
1571 | 482 | ' Found ({value})'.format(keypath=keypath, value=value)) | ||
1572 | 483 | |||
1573 | 484 | if errors: | ||
1574 | 485 | raise ValueError(r'Invalid ntp configuration:\n{errors}'.format( | ||
1575 | 486 | errors='\n'.join(errors))) | ||
1576 | 487 | |||
1577 | 488 | |||
1578 | 489 | def handle(name, cfg, cloud, log, _args): | ||
1579 | 490 | """Enable and configure ntp.""" | ||
1580 | 491 | if 'ntp' not in cfg: | ||
1581 | 492 | LOG.debug( | ||
1582 | 493 | "Skipping module named %s, not present or disabled by cfg", name) | ||
1583 | 494 | return | ||
1584 | 495 | ntp_cfg = cfg['ntp'] | ||
1585 | 496 | if ntp_cfg is None: | ||
1586 | 497 | ntp_cfg = {} # Allow empty config which will install the package | ||
1587 | 498 | |||
1588 | 499 | # TODO drop this when validate_cloudconfig_schema is strict=True | ||
1589 | 500 | if not isinstance(ntp_cfg, (dict)): | ||
1590 | 501 | raise RuntimeError( | ||
1591 | 502 | "'ntp' key existed in config, but not a dictionary type," | ||
1592 | 503 | " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg))) | ||
1593 | 504 | |||
1594 | 505 | validate_cloudconfig_schema(cfg, schema) | ||
1595 | 506 | |||
1596 | 507 | # Allow users to explicitly enable/disable | ||
1597 | 508 | enabled = ntp_cfg.get('enabled', True) | ||
1598 | 509 | if util.is_false(enabled): | ||
1599 | 510 | LOG.debug("Skipping module named %s, disabled by cfg", name) | ||
1600 | 511 | return | ||
1601 | 512 | |||
1602 | 513 | # Select which client is going to be used and get the configuration | ||
1603 | 514 | ntp_client_config = select_ntp_client(ntp_cfg.get('ntp_client'), | ||
1604 | 515 | cloud.distro) | ||
1605 | 516 | |||
1606 | 517 | # Allow user ntp config to override distro configurations | ||
1607 | 518 | ntp_client_config = util.mergemanydict( | ||
1608 | 519 | [ntp_client_config, ntp_cfg.get('config', {})], reverse=True) | ||
1609 | 520 | |||
1610 | 521 | supplemental_schema_validation(ntp_client_config) | ||
1611 | 522 | rename_ntp_conf(confpath=ntp_client_config.get('confpath')) | ||
1612 | 523 | |||
1613 | 524 | template_fn = None | ||
1614 | 525 | if not ntp_client_config.get('template'): | ||
1615 | 526 | template_name = ( | ||
1616 | 527 | ntp_client_config.get('template_name').replace('{distro}', | ||
1617 | 528 | cloud.distro.name)) | ||
1618 | 529 | template_fn = cloud.get_template_filename(template_name) | ||
1619 | 530 | if not template_fn: | ||
1620 | 531 | msg = ('No template found, not rendering %s' % | ||
1621 | 532 | ntp_client_config.get('template_name')) | ||
1622 | 533 | raise RuntimeError(msg) | ||
1623 | 534 | |||
1624 | 535 | write_ntp_config_template(cloud.distro.name, | ||
1625 | 536 | servers=ntp_cfg.get('servers', []), | ||
1626 | 537 | pools=ntp_cfg.get('pools', []), | ||
1627 | 538 | path=ntp_client_config.get('confpath'), | ||
1628 | 539 | template_fn=template_fn, | ||
1629 | 540 | template=ntp_client_config.get('template')) | ||
1630 | 541 | |||
1631 | 542 | install_ntp_client(cloud.distro.install_packages, | ||
1632 | 543 | packages=ntp_client_config['packages'], | ||
1633 | 544 | check_exe=ntp_client_config['check_exe']) | ||
1634 | 545 | try: | ||
1635 | 546 | reload_ntp(ntp_client_config['service_name'], | ||
1636 | 547 | systemd=cloud.distro.uses_systemd()) | ||
1637 | 548 | except util.ProcessExecutionError as e: | ||
1638 | 549 | LOG.exception("Failed to reload/start ntp service: %s", e) | ||
1639 | 550 | raise | ||
1640 | 551 | |||
1641 | 223 | # vi: ts=4 expandtab | 552 | # vi: ts=4 expandtab |
1642 | diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py | |||
1643 | index 878069b..3be0d1c 100644 | |||
1644 | --- a/cloudinit/config/cc_phone_home.py | |||
1645 | +++ b/cloudinit/config/cc_phone_home.py | |||
1646 | @@ -41,6 +41,7 @@ keys to post. Available keys are: | |||
1647 | 41 | """ | 41 | """ |
1648 | 42 | 42 | ||
1649 | 43 | from cloudinit import templater | 43 | from cloudinit import templater |
1650 | 44 | from cloudinit import url_helper | ||
1651 | 44 | from cloudinit import util | 45 | from cloudinit import util |
1652 | 45 | 46 | ||
1653 | 46 | from cloudinit.settings import PER_INSTANCE | 47 | from cloudinit.settings import PER_INSTANCE |
1654 | @@ -136,9 +137,9 @@ def handle(name, cfg, cloud, log, args): | |||
1655 | 136 | } | 137 | } |
1656 | 137 | url = templater.render_string(url, url_params) | 138 | url = templater.render_string(url, url_params) |
1657 | 138 | try: | 139 | try: |
1661 | 139 | util.read_file_or_url(url, data=real_submit_keys, | 140 | url_helper.read_file_or_url( |
1662 | 140 | retries=tries, sec_between=3, | 141 | url, data=real_submit_keys, retries=tries, sec_between=3, |
1663 | 141 | ssl_details=util.fetch_ssl_details(cloud.paths)) | 142 | ssl_details=util.fetch_ssl_details(cloud.paths)) |
1664 | 142 | except Exception: | 143 | except Exception: |
1665 | 143 | util.logexc(log, "Failed to post phone home data to %s in %s tries", | 144 | util.logexc(log, "Failed to post phone home data to %s in %s tries", |
1666 | 144 | url, tries) | 145 | url, tries) |
1667 | diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py | |||
1668 | index 4da3a58..50b3747 100644 | |||
1669 | --- a/cloudinit/config/cc_power_state_change.py | |||
1670 | +++ b/cloudinit/config/cc_power_state_change.py | |||
1671 | @@ -74,7 +74,7 @@ def givecmdline(pid): | |||
1672 | 74 | if util.is_FreeBSD(): | 74 | if util.is_FreeBSD(): |
1673 | 75 | (output, _err) = util.subp(['procstat', '-c', str(pid)]) | 75 | (output, _err) = util.subp(['procstat', '-c', str(pid)]) |
1674 | 76 | line = output.splitlines()[1] | 76 | line = output.splitlines()[1] |
1676 | 77 | m = re.search('\d+ (\w|\.|-)+\s+(/\w.+)', line) | 77 | m = re.search(r'\d+ (\w|\.|-)+\s+(/\w.+)', line) |
1677 | 78 | return m.group(2) | 78 | return m.group(2) |
1678 | 79 | else: | 79 | else: |
1679 | 80 | return util.load_file("/proc/%s/cmdline" % pid) | 80 | return util.load_file("/proc/%s/cmdline" % pid) |
1680 | diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py | |||
1681 | index 013e69b..2edddd0 100644 | |||
1682 | --- a/cloudinit/config/cc_resizefs.py | |||
1683 | +++ b/cloudinit/config/cc_resizefs.py | |||
1684 | @@ -81,7 +81,7 @@ def _resize_xfs(mount_point, devpth): | |||
1685 | 81 | 81 | ||
1686 | 82 | 82 | ||
1687 | 83 | def _resize_ufs(mount_point, devpth): | 83 | def _resize_ufs(mount_point, devpth): |
1689 | 84 | return ('growfs', devpth) | 84 | return ('growfs', '-y', devpth) |
1690 | 85 | 85 | ||
1691 | 86 | 86 | ||
1692 | 87 | def _resize_zfs(mount_point, devpth): | 87 | def _resize_zfs(mount_point, devpth): |
1693 | @@ -89,13 +89,11 @@ def _resize_zfs(mount_point, devpth): | |||
1694 | 89 | 89 | ||
1695 | 90 | 90 | ||
1696 | 91 | def _get_dumpfs_output(mount_point): | 91 | def _get_dumpfs_output(mount_point): |
1699 | 92 | dumpfs_res, err = util.subp(['dumpfs', '-m', mount_point]) | 92 | return util.subp(['dumpfs', '-m', mount_point])[0] |
1698 | 93 | return dumpfs_res | ||
1700 | 94 | 93 | ||
1701 | 95 | 94 | ||
1702 | 96 | def _get_gpart_output(part): | 95 | def _get_gpart_output(part): |
1705 | 97 | gpart_res, err = util.subp(['gpart', 'show', part]) | 96 | return util.subp(['gpart', 'show', part])[0] |
1704 | 98 | return gpart_res | ||
1706 | 99 | 97 | ||
1707 | 100 | 98 | ||
1708 | 101 | def _can_skip_resize_ufs(mount_point, devpth): | 99 | def _can_skip_resize_ufs(mount_point, devpth): |
1709 | @@ -113,7 +111,7 @@ def _can_skip_resize_ufs(mount_point, devpth): | |||
1710 | 113 | if not line.startswith('#'): | 111 | if not line.startswith('#'): |
1711 | 114 | newfs_cmd = shlex.split(line) | 112 | newfs_cmd = shlex.split(line) |
1712 | 115 | opt_value = 'O:Ua:s:b:d:e:f:g:h:i:jk:m:o:' | 113 | opt_value = 'O:Ua:s:b:d:e:f:g:h:i:jk:m:o:' |
1714 | 116 | optlist, args = getopt.getopt(newfs_cmd[1:], opt_value) | 114 | optlist, _args = getopt.getopt(newfs_cmd[1:], opt_value) |
1715 | 117 | for o, a in optlist: | 115 | for o, a in optlist: |
1716 | 118 | if o == "-s": | 116 | if o == "-s": |
1717 | 119 | cur_fs_sz = int(a) | 117 | cur_fs_sz = int(a) |
1718 | diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py | |||
1719 | index 530808c..1c67943 100644 | |||
1720 | --- a/cloudinit/config/cc_rh_subscription.py | |||
1721 | +++ b/cloudinit/config/cc_rh_subscription.py | |||
1722 | @@ -209,8 +209,7 @@ class SubscriptionManager(object): | |||
1723 | 209 | cmd.append("--serverurl={0}".format(self.server_hostname)) | 209 | cmd.append("--serverurl={0}".format(self.server_hostname)) |
1724 | 210 | 210 | ||
1725 | 211 | try: | 211 | try: |
1728 | 212 | return_out, return_err = self._sub_man_cli(cmd, | 212 | return_out = self._sub_man_cli(cmd, logstring_val=True)[0] |
1727 | 213 | logstring_val=True) | ||
1729 | 214 | except util.ProcessExecutionError as e: | 213 | except util.ProcessExecutionError as e: |
1730 | 215 | if e.stdout == "": | 214 | if e.stdout == "": |
1731 | 216 | self.log_warn("Registration failed due " | 215 | self.log_warn("Registration failed due " |
1732 | @@ -233,8 +232,7 @@ class SubscriptionManager(object): | |||
1733 | 233 | 232 | ||
1734 | 234 | # Attempting to register the system only | 233 | # Attempting to register the system only |
1735 | 235 | try: | 234 | try: |
1738 | 236 | return_out, return_err = self._sub_man_cli(cmd, | 235 | return_out = self._sub_man_cli(cmd, logstring_val=True)[0] |
1737 | 237 | logstring_val=True) | ||
1739 | 238 | except util.ProcessExecutionError as e: | 236 | except util.ProcessExecutionError as e: |
1740 | 239 | if e.stdout == "": | 237 | if e.stdout == "": |
1741 | 240 | self.log_warn("Registration failed due " | 238 | self.log_warn("Registration failed due " |
1742 | @@ -257,7 +255,7 @@ class SubscriptionManager(object): | |||
1743 | 257 | .format(self.servicelevel)] | 255 | .format(self.servicelevel)] |
1744 | 258 | 256 | ||
1745 | 259 | try: | 257 | try: |
1747 | 260 | return_out, return_err = self._sub_man_cli(cmd) | 258 | return_out = self._sub_man_cli(cmd)[0] |
1748 | 261 | except util.ProcessExecutionError as e: | 259 | except util.ProcessExecutionError as e: |
1749 | 262 | if e.stdout.rstrip() != '': | 260 | if e.stdout.rstrip() != '': |
1750 | 263 | for line in e.stdout.split("\n"): | 261 | for line in e.stdout.split("\n"): |
1751 | @@ -275,7 +273,7 @@ class SubscriptionManager(object): | |||
1752 | 275 | def _set_auto_attach(self): | 273 | def _set_auto_attach(self): |
1753 | 276 | cmd = ['attach', '--auto'] | 274 | cmd = ['attach', '--auto'] |
1754 | 277 | try: | 275 | try: |
1756 | 278 | return_out, return_err = self._sub_man_cli(cmd) | 276 | return_out = self._sub_man_cli(cmd)[0] |
1757 | 279 | except util.ProcessExecutionError as e: | 277 | except util.ProcessExecutionError as e: |
1758 | 280 | self.log_warn("Auto-attach failed with: {0}".format(e)) | 278 | self.log_warn("Auto-attach failed with: {0}".format(e)) |
1759 | 281 | return False | 279 | return False |
1760 | @@ -294,12 +292,12 @@ class SubscriptionManager(object): | |||
1761 | 294 | 292 | ||
1762 | 295 | # Get all available pools | 293 | # Get all available pools |
1763 | 296 | cmd = ['list', '--available', '--pool-only'] | 294 | cmd = ['list', '--available', '--pool-only'] |
1765 | 297 | results, errors = self._sub_man_cli(cmd) | 295 | results = self._sub_man_cli(cmd)[0] |
1766 | 298 | available = (results.rstrip()).split("\n") | 296 | available = (results.rstrip()).split("\n") |
1767 | 299 | 297 | ||
1768 | 300 | # Get all consumed pools | 298 | # Get all consumed pools |
1769 | 301 | cmd = ['list', '--consumed', '--pool-only'] | 299 | cmd = ['list', '--consumed', '--pool-only'] |
1771 | 302 | results, errors = self._sub_man_cli(cmd) | 300 | results = self._sub_man_cli(cmd)[0] |
1772 | 303 | consumed = (results.rstrip()).split("\n") | 301 | consumed = (results.rstrip()).split("\n") |
1773 | 304 | 302 | ||
1774 | 305 | return available, consumed | 303 | return available, consumed |
1775 | @@ -311,14 +309,14 @@ class SubscriptionManager(object): | |||
1776 | 311 | ''' | 309 | ''' |
1777 | 312 | 310 | ||
1778 | 313 | cmd = ['repos', '--list-enabled'] | 311 | cmd = ['repos', '--list-enabled'] |
1780 | 314 | return_out, return_err = self._sub_man_cli(cmd) | 312 | return_out = self._sub_man_cli(cmd)[0] |
1781 | 315 | active_repos = [] | 313 | active_repos = [] |
1782 | 316 | for repo in return_out.split("\n"): | 314 | for repo in return_out.split("\n"): |
1783 | 317 | if "Repo ID:" in repo: | 315 | if "Repo ID:" in repo: |
1784 | 318 | active_repos.append((repo.split(':')[1]).strip()) | 316 | active_repos.append((repo.split(':')[1]).strip()) |
1785 | 319 | 317 | ||
1786 | 320 | cmd = ['repos', '--list-disabled'] | 318 | cmd = ['repos', '--list-disabled'] |
1788 | 321 | return_out, return_err = self._sub_man_cli(cmd) | 319 | return_out = self._sub_man_cli(cmd)[0] |
1789 | 322 | 320 | ||
1790 | 323 | inactive_repos = [] | 321 | inactive_repos = [] |
1791 | 324 | for repo in return_out.split("\n"): | 322 | for repo in return_out.split("\n"): |
1792 | diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py | |||
1793 | index af08788..27d2366 100644 | |||
1794 | --- a/cloudinit/config/cc_rsyslog.py | |||
1795 | +++ b/cloudinit/config/cc_rsyslog.py | |||
1796 | @@ -203,8 +203,8 @@ LOG = logging.getLogger(__name__) | |||
1797 | 203 | COMMENT_RE = re.compile(r'[ ]*[#]+[ ]*') | 203 | COMMENT_RE = re.compile(r'[ ]*[#]+[ ]*') |
1798 | 204 | HOST_PORT_RE = re.compile( | 204 | HOST_PORT_RE = re.compile( |
1799 | 205 | r'^(?P<proto>[@]{0,2})' | 205 | r'^(?P<proto>[@]{0,2})' |
1802 | 206 | '(([[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))' | 206 | r'(([[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))' |
1803 | 207 | '([:](?P<port>[0-9]+))?$') | 207 | r'([:](?P<port>[0-9]+))?$') |
1804 | 208 | 208 | ||
1805 | 209 | 209 | ||
1806 | 210 | def reload_syslog(command=DEF_RELOAD, systemd=False): | 210 | def reload_syslog(command=DEF_RELOAD, systemd=False): |
1807 | diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py | |||
1808 | index 539cbd5..b6f6c80 100644 | |||
1809 | --- a/cloudinit/config/cc_runcmd.py | |||
1810 | +++ b/cloudinit/config/cc_runcmd.py | |||
1811 | @@ -66,7 +66,6 @@ schema = { | |||
1812 | 66 | 'additionalProperties': False, | 66 | 'additionalProperties': False, |
1813 | 67 | 'minItems': 1, | 67 | 'minItems': 1, |
1814 | 68 | 'required': [], | 68 | 'required': [], |
1815 | 69 | 'uniqueItems': True | ||
1816 | 70 | } | 69 | } |
1817 | 71 | } | 70 | } |
1818 | 72 | } | 71 | } |
1819 | diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py | |||
1820 | index bb24d57..5ef9737 100755 | |||
1821 | --- a/cloudinit/config/cc_set_passwords.py | |||
1822 | +++ b/cloudinit/config/cc_set_passwords.py | |||
1823 | @@ -68,16 +68,57 @@ import re | |||
1824 | 68 | import sys | 68 | import sys |
1825 | 69 | 69 | ||
1826 | 70 | from cloudinit.distros import ug_util | 70 | from cloudinit.distros import ug_util |
1828 | 71 | from cloudinit import ssh_util | 71 | from cloudinit import log as logging |
1829 | 72 | from cloudinit.ssh_util import update_ssh_config | ||
1830 | 72 | from cloudinit import util | 73 | from cloudinit import util |
1831 | 73 | 74 | ||
1832 | 74 | from string import ascii_letters, digits | 75 | from string import ascii_letters, digits |
1833 | 75 | 76 | ||
1834 | 77 | LOG = logging.getLogger(__name__) | ||
1835 | 78 | |||
1836 | 76 | # We are removing certain 'painful' letters/numbers | 79 | # We are removing certain 'painful' letters/numbers |
1837 | 77 | PW_SET = (''.join([x for x in ascii_letters + digits | 80 | PW_SET = (''.join([x for x in ascii_letters + digits |
1838 | 78 | if x not in 'loLOI01'])) | 81 | if x not in 'loLOI01'])) |
1839 | 79 | 82 | ||
1840 | 80 | 83 | ||
1841 | 84 | def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"): | ||
1842 | 85 | """Apply sshd PasswordAuthentication changes. | ||
1843 | 86 | |||
1844 | 87 | @param pw_auth: config setting from 'pw_auth'. | ||
1845 | 88 | Best given as True, False, or "unchanged". | ||
1846 | 89 | @param service_cmd: The service command list (['service']) | ||
1847 | 90 | @param service_name: The name of the sshd service for the system. | ||
1848 | 91 | |||
1849 | 92 | @return: None""" | ||
1850 | 93 | cfg_name = "PasswordAuthentication" | ||
1851 | 94 | if service_cmd is None: | ||
1852 | 95 | service_cmd = ["service"] | ||
1853 | 96 | |||
1854 | 97 | if util.is_true(pw_auth): | ||
1855 | 98 | cfg_val = 'yes' | ||
1856 | 99 | elif util.is_false(pw_auth): | ||
1857 | 100 | cfg_val = 'no' | ||
1858 | 101 | else: | ||
1859 | 102 | bmsg = "Leaving ssh config '%s' unchanged." % cfg_name | ||
1860 | 103 | if pw_auth is None or pw_auth.lower() == 'unchanged': | ||
1861 | 104 | LOG.debug("%s ssh_pwauth=%s", bmsg, pw_auth) | ||
1862 | 105 | else: | ||
1863 | 106 | LOG.warning("%s Unrecognized value: ssh_pwauth=%s", bmsg, pw_auth) | ||
1864 | 107 | return | ||
1865 | 108 | |||
1866 | 109 | updated = update_ssh_config({cfg_name: cfg_val}) | ||
1867 | 110 | if not updated: | ||
1868 | 111 | LOG.debug("No need to restart ssh service, %s not updated.", cfg_name) | ||
1869 | 112 | return | ||
1870 | 113 | |||
1871 | 114 | if 'systemctl' in service_cmd: | ||
1872 | 115 | cmd = list(service_cmd) + ["restart", service_name] | ||
1873 | 116 | else: | ||
1874 | 117 | cmd = list(service_cmd) + [service_name, "restart"] | ||
1875 | 118 | util.subp(cmd) | ||
1876 | 119 | LOG.debug("Restarted the ssh daemon.") | ||
1877 | 120 | |||
1878 | 121 | |||
1879 | 81 | def handle(_name, cfg, cloud, log, args): | 122 | def handle(_name, cfg, cloud, log, args): |
1880 | 82 | if len(args) != 0: | 123 | if len(args) != 0: |
1881 | 83 | # if run from command line, and give args, wipe the chpasswd['list'] | 124 | # if run from command line, and give args, wipe the chpasswd['list'] |
1882 | @@ -170,65 +211,9 @@ def handle(_name, cfg, cloud, log, args): | |||
1883 | 170 | if expired_users: | 211 | if expired_users: |
1884 | 171 | log.debug("Expired passwords for: %s users", expired_users) | 212 | log.debug("Expired passwords for: %s users", expired_users) |
1885 | 172 | 213 | ||
1945 | 173 | change_pwauth = False | 214 | handle_ssh_pwauth( |
1946 | 174 | pw_auth = None | 215 | cfg.get('ssh_pwauth'), service_cmd=cloud.distro.init_cmd, |
1947 | 175 | if 'ssh_pwauth' in cfg: | 216 | service_name=cloud.distro.get_option('ssh_svcname', 'ssh')) |
1889 | 176 | if util.is_true(cfg['ssh_pwauth']): | ||
1890 | 177 | change_pwauth = True | ||
1891 | 178 | pw_auth = 'yes' | ||
1892 | 179 | elif util.is_false(cfg['ssh_pwauth']): | ||
1893 | 180 | change_pwauth = True | ||
1894 | 181 | pw_auth = 'no' | ||
1895 | 182 | elif str(cfg['ssh_pwauth']).lower() == 'unchanged': | ||
1896 | 183 | log.debug('Leaving auth line unchanged') | ||
1897 | 184 | change_pwauth = False | ||
1898 | 185 | elif not str(cfg['ssh_pwauth']).strip(): | ||
1899 | 186 | log.debug('Leaving auth line unchanged') | ||
1900 | 187 | change_pwauth = False | ||
1901 | 188 | elif not cfg['ssh_pwauth']: | ||
1902 | 189 | log.debug('Leaving auth line unchanged') | ||
1903 | 190 | change_pwauth = False | ||
1904 | 191 | else: | ||
1905 | 192 | msg = 'Unrecognized value %s for ssh_pwauth' % cfg['ssh_pwauth'] | ||
1906 | 193 | util.logexc(log, msg) | ||
1907 | 194 | |||
1908 | 195 | if change_pwauth: | ||
1909 | 196 | replaced_auth = False | ||
1910 | 197 | |||
1911 | 198 | # See: man sshd_config | ||
1912 | 199 | old_lines = ssh_util.parse_ssh_config(ssh_util.DEF_SSHD_CFG) | ||
1913 | 200 | new_lines = [] | ||
1914 | 201 | i = 0 | ||
1915 | 202 | for (i, line) in enumerate(old_lines): | ||
1916 | 203 | # Keywords are case-insensitive and arguments are case-sensitive | ||
1917 | 204 | if line.key == 'passwordauthentication': | ||
1918 | 205 | log.debug("Replacing auth line %s with %s", i + 1, pw_auth) | ||
1919 | 206 | replaced_auth = True | ||
1920 | 207 | line.value = pw_auth | ||
1921 | 208 | new_lines.append(line) | ||
1922 | 209 | |||
1923 | 210 | if not replaced_auth: | ||
1924 | 211 | log.debug("Adding new auth line %s", i + 1) | ||
1925 | 212 | replaced_auth = True | ||
1926 | 213 | new_lines.append(ssh_util.SshdConfigLine('', | ||
1927 | 214 | 'PasswordAuthentication', | ||
1928 | 215 | pw_auth)) | ||
1929 | 216 | |||
1930 | 217 | lines = [str(l) for l in new_lines] | ||
1931 | 218 | util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines), | ||
1932 | 219 | copy_mode=True) | ||
1933 | 220 | |||
1934 | 221 | try: | ||
1935 | 222 | cmd = cloud.distro.init_cmd # Default service | ||
1936 | 223 | cmd.append(cloud.distro.get_option('ssh_svcname', 'ssh')) | ||
1937 | 224 | cmd.append('restart') | ||
1938 | 225 | if 'systemctl' in cmd: # Switch action ordering | ||
1939 | 226 | cmd[1], cmd[2] = cmd[2], cmd[1] | ||
1940 | 227 | cmd = filter(None, cmd) # Remove empty arguments | ||
1941 | 228 | util.subp(cmd) | ||
1942 | 229 | log.debug("Restarted the ssh daemon") | ||
1943 | 230 | except Exception: | ||
1944 | 231 | util.logexc(log, "Restarting of the ssh daemon failed") | ||
1948 | 232 | 217 | ||
1949 | 233 | if len(errors): | 218 | if len(errors): |
1950 | 234 | log.debug("%s errors occured, re-raising the last one", len(errors)) | 219 | log.debug("%s errors occured, re-raising the last one", len(errors)) |
1951 | diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py | |||
1952 | index 34a53fd..90724b8 100644 | |||
1953 | --- a/cloudinit/config/cc_snap.py | |||
1954 | +++ b/cloudinit/config/cc_snap.py | |||
1955 | @@ -110,7 +110,6 @@ schema = { | |||
1956 | 110 | 'additionalItems': False, # Reject non-string & non-list | 110 | 'additionalItems': False, # Reject non-string & non-list |
1957 | 111 | 'minItems': 1, | 111 | 'minItems': 1, |
1958 | 112 | 'minProperties': 1, | 112 | 'minProperties': 1, |
1959 | 113 | 'uniqueItems': True | ||
1960 | 114 | }, | 113 | }, |
1961 | 115 | 'squashfuse_in_container': { | 114 | 'squashfuse_in_container': { |
1962 | 116 | 'type': 'boolean' | 115 | 'type': 'boolean' |
1963 | @@ -204,12 +203,12 @@ def maybe_install_squashfuse(cloud): | |||
1964 | 204 | return | 203 | return |
1965 | 205 | try: | 204 | try: |
1966 | 206 | cloud.distro.update_package_sources() | 205 | cloud.distro.update_package_sources() |
1968 | 207 | except Exception as e: | 206 | except Exception: |
1969 | 208 | util.logexc(LOG, "Package update failed") | 207 | util.logexc(LOG, "Package update failed") |
1970 | 209 | raise | 208 | raise |
1971 | 210 | try: | 209 | try: |
1972 | 211 | cloud.distro.install_packages(['squashfuse']) | 210 | cloud.distro.install_packages(['squashfuse']) |
1974 | 212 | except Exception as e: | 211 | except Exception: |
1975 | 213 | util.logexc(LOG, "Failed to install squashfuse") | 212 | util.logexc(LOG, "Failed to install squashfuse") |
1976 | 214 | raise | 213 | raise |
1977 | 215 | 214 | ||
1978 | diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py | |||
1979 | index bab80bb..15bee2d 100644 | |||
1980 | --- a/cloudinit/config/cc_snappy.py | |||
1981 | +++ b/cloudinit/config/cc_snappy.py | |||
1982 | @@ -213,7 +213,7 @@ def render_snap_op(op, name, path=None, cfgfile=None, config=None): | |||
1983 | 213 | 213 | ||
1984 | 214 | def read_installed_packages(): | 214 | def read_installed_packages(): |
1985 | 215 | ret = [] | 215 | ret = [] |
1987 | 216 | for (name, date, version, dev) in read_pkg_data(): | 216 | for (name, _date, _version, dev) in read_pkg_data(): |
1988 | 217 | if dev: | 217 | if dev: |
1989 | 218 | ret.append(NAMESPACE_DELIM.join([name, dev])) | 218 | ret.append(NAMESPACE_DELIM.join([name, dev])) |
1990 | 219 | else: | 219 | else: |
1991 | @@ -222,7 +222,7 @@ def read_installed_packages(): | |||
1992 | 222 | 222 | ||
1993 | 223 | 223 | ||
1994 | 224 | def read_pkg_data(): | 224 | def read_pkg_data(): |
1996 | 225 | out, err = util.subp([SNAPPY_CMD, "list"]) | 225 | out, _err = util.subp([SNAPPY_CMD, "list"]) |
1997 | 226 | pkg_data = [] | 226 | pkg_data = [] |
1998 | 227 | for line in out.splitlines()[1:]: | 227 | for line in out.splitlines()[1:]: |
1999 | 228 | toks = line.split(sep=None, maxsplit=3) | 228 | toks = line.split(sep=None, maxsplit=3) |
2000 | diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py | |||
2001 | index 16b1868..5e082bd 100644 | |||
2002 | --- a/cloudinit/config/cc_ubuntu_advantage.py | |||
2003 | +++ b/cloudinit/config/cc_ubuntu_advantage.py | |||
2004 | @@ -87,7 +87,6 @@ schema = { | |||
2005 | 87 | 'additionalItems': False, # Reject non-string & non-list | 87 | 'additionalItems': False, # Reject non-string & non-list |
2006 | 88 | 'minItems': 1, | 88 | 'minItems': 1, |
2007 | 89 | 'minProperties': 1, | 89 | 'minProperties': 1, |
2008 | 90 | 'uniqueItems': True | ||
2009 | 91 | } | 90 | } |
2010 | 92 | }, | 91 | }, |
2011 | 93 | 'additionalProperties': False, # Reject keys not in schema | 92 | 'additionalProperties': False, # Reject keys not in schema |
2012 | @@ -149,12 +148,12 @@ def maybe_install_ua_tools(cloud): | |||
2013 | 149 | return | 148 | return |
2014 | 150 | try: | 149 | try: |
2015 | 151 | cloud.distro.update_package_sources() | 150 | cloud.distro.update_package_sources() |
2017 | 152 | except Exception as e: | 151 | except Exception: |
2018 | 153 | util.logexc(LOG, "Package update failed") | 152 | util.logexc(LOG, "Package update failed") |
2019 | 154 | raise | 153 | raise |
2020 | 155 | try: | 154 | try: |
2021 | 156 | cloud.distro.install_packages(['ubuntu-advantage-tools']) | 155 | cloud.distro.install_packages(['ubuntu-advantage-tools']) |
2023 | 157 | except Exception as e: | 156 | except Exception: |
2024 | 158 | util.logexc(LOG, "Failed to install ubuntu-advantage-tools") | 157 | util.logexc(LOG, "Failed to install ubuntu-advantage-tools") |
2025 | 159 | raise | 158 | raise |
2026 | 160 | 159 | ||
2027 | diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py | |||
2028 | index b215e95..c95bdaa 100644 | |||
2029 | --- a/cloudinit/config/cc_users_groups.py | |||
2030 | +++ b/cloudinit/config/cc_users_groups.py | |||
2031 | @@ -54,8 +54,9 @@ config keys for an entry in ``users`` are as follows: | |||
2032 | 54 | - ``ssh_authorized_keys``: Optional. List of ssh keys to add to user's | 54 | - ``ssh_authorized_keys``: Optional. List of ssh keys to add to user's |
2033 | 55 | authkeys file. Default: none | 55 | authkeys file. Default: none |
2034 | 56 | - ``ssh_import_id``: Optional. SSH id to import for user. Default: none | 56 | - ``ssh_import_id``: Optional. SSH id to import for user. Default: none |
2037 | 57 | - ``sudo``: Optional. Sudo rule to use, or list of sudo rules to use. | 57 | - ``sudo``: Optional. Sudo rule to use, list of sudo rules to use or False. |
2038 | 58 | Default: none. | 58 | Default: none. An absence of sudo key, or a value of none or false |
2039 | 59 | will result in no sudo rules being written for the user. | ||
2040 | 59 | - ``system``: Optional. Create user as system user with no home directory. | 60 | - ``system``: Optional. Create user as system user with no home directory. |
2041 | 60 | Default: false | 61 | Default: false |
2042 | 61 | - ``uid``: Optional. The user's ID. Default: The next available value. | 62 | - ``uid``: Optional. The user's ID. Default: The next available value. |
2043 | @@ -82,6 +83,9 @@ config keys for an entry in ``users`` are as follows: | |||
2044 | 82 | 83 | ||
2045 | 83 | users: | 84 | users: |
2046 | 84 | - default | 85 | - default |
2047 | 86 | # User explicitly omitted from sudo permission; also default behavior. | ||
2048 | 87 | - name: <some_restricted_user> | ||
2049 | 88 | sudo: false | ||
2050 | 85 | - name: <username> | 89 | - name: <username> |
2051 | 86 | expiredate: <date> | 90 | expiredate: <date> |
2052 | 87 | gecos: <comment> | 91 | gecos: <comment> |
2053 | diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py | |||
2054 | index ca7d0d5..080a6d0 100644 | |||
2055 | --- a/cloudinit/config/schema.py | |||
2056 | +++ b/cloudinit/config/schema.py | |||
2057 | @@ -4,7 +4,7 @@ | |||
2058 | 4 | from __future__ import print_function | 4 | from __future__ import print_function |
2059 | 5 | 5 | ||
2060 | 6 | from cloudinit import importer | 6 | from cloudinit import importer |
2062 | 7 | from cloudinit.util import find_modules, read_file_or_url | 7 | from cloudinit.util import find_modules, load_file |
2063 | 8 | 8 | ||
2064 | 9 | import argparse | 9 | import argparse |
2065 | 10 | from collections import defaultdict | 10 | from collections import defaultdict |
2066 | @@ -93,20 +93,33 @@ def validate_cloudconfig_schema(config, schema, strict=False): | |||
2067 | 93 | def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors): | 93 | def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors): |
2068 | 94 | """Return contents of the cloud-config file annotated with schema errors. | 94 | """Return contents of the cloud-config file annotated with schema errors. |
2069 | 95 | 95 | ||
2071 | 96 | @param cloudconfig: YAML-loaded object from the original_content. | 96 | @param cloudconfig: YAML-loaded dict from the original_content or empty |
2072 | 97 | dict if unparseable. | ||
2073 | 97 | @param original_content: The contents of a cloud-config file | 98 | @param original_content: The contents of a cloud-config file |
2074 | 98 | @param schema_errors: List of tuples from a JSONSchemaValidationError. The | 99 | @param schema_errors: List of tuples from a JSONSchemaValidationError. The |
2075 | 99 | tuples consist of (schemapath, error_message). | 100 | tuples consist of (schemapath, error_message). |
2076 | 100 | """ | 101 | """ |
2077 | 101 | if not schema_errors: | 102 | if not schema_errors: |
2078 | 102 | return original_content | 103 | return original_content |
2080 | 103 | schemapaths = _schemapath_for_cloudconfig(cloudconfig, original_content) | 104 | schemapaths = {} |
2081 | 105 | if cloudconfig: | ||
2082 | 106 | schemapaths = _schemapath_for_cloudconfig( | ||
2083 | 107 | cloudconfig, original_content) | ||
2084 | 104 | errors_by_line = defaultdict(list) | 108 | errors_by_line = defaultdict(list) |
2085 | 105 | error_count = 1 | 109 | error_count = 1 |
2086 | 106 | error_footer = [] | 110 | error_footer = [] |
2087 | 107 | annotated_content = [] | 111 | annotated_content = [] |
2088 | 108 | for path, msg in schema_errors: | 112 | for path, msg in schema_errors: |
2090 | 109 | errors_by_line[schemapaths[path]].append(msg) | 113 | match = re.match(r'format-l(?P<line>\d+)\.c(?P<col>\d+).*', path) |
2091 | 114 | if match: | ||
2092 | 115 | line, col = match.groups() | ||
2093 | 116 | errors_by_line[int(line)].append(msg) | ||
2094 | 117 | else: | ||
2095 | 118 | col = None | ||
2096 | 119 | errors_by_line[schemapaths[path]].append(msg) | ||
2097 | 120 | if col is not None: | ||
2098 | 121 | msg = 'Line {line} column {col}: {msg}'.format( | ||
2099 | 122 | line=line, col=col, msg=msg) | ||
2100 | 110 | error_footer.append('# E{0}: {1}'.format(error_count, msg)) | 123 | error_footer.append('# E{0}: {1}'.format(error_count, msg)) |
2101 | 111 | error_count += 1 | 124 | error_count += 1 |
2102 | 112 | lines = original_content.decode().split('\n') | 125 | lines = original_content.decode().split('\n') |
2103 | @@ -139,21 +152,34 @@ def validate_cloudconfig_file(config_path, schema, annotate=False): | |||
2104 | 139 | """ | 152 | """ |
2105 | 140 | if not os.path.exists(config_path): | 153 | if not os.path.exists(config_path): |
2106 | 141 | raise RuntimeError('Configfile {0} does not exist'.format(config_path)) | 154 | raise RuntimeError('Configfile {0} does not exist'.format(config_path)) |
2108 | 142 | content = read_file_or_url('file://{0}'.format(config_path)).contents | 155 | content = load_file(config_path, decode=False) |
2109 | 143 | if not content.startswith(CLOUD_CONFIG_HEADER): | 156 | if not content.startswith(CLOUD_CONFIG_HEADER): |
2110 | 144 | errors = ( | 157 | errors = ( |
2112 | 145 | ('header', 'File {0} needs to begin with "{1}"'.format( | 158 | ('format-l1.c1', 'File {0} needs to begin with "{1}"'.format( |
2113 | 146 | config_path, CLOUD_CONFIG_HEADER.decode())),) | 159 | config_path, CLOUD_CONFIG_HEADER.decode())),) |
2116 | 147 | raise SchemaValidationError(errors) | 160 | error = SchemaValidationError(errors) |
2117 | 148 | 161 | if annotate: | |
2118 | 162 | print(annotated_cloudconfig_file({}, content, error.schema_errors)) | ||
2119 | 163 | raise error | ||
2120 | 149 | try: | 164 | try: |
2121 | 150 | cloudconfig = yaml.safe_load(content) | 165 | cloudconfig = yaml.safe_load(content) |
2128 | 151 | except yaml.parser.ParserError as e: | 166 | except (yaml.YAMLError) as e: |
2129 | 152 | errors = ( | 167 | line = column = 1 |
2130 | 153 | ('format', 'File {0} is not valid yaml. {1}'.format( | 168 | mark = None |
2131 | 154 | config_path, str(e))),) | 169 | if hasattr(e, 'context_mark') and getattr(e, 'context_mark'): |
2132 | 155 | raise SchemaValidationError(errors) | 170 | mark = getattr(e, 'context_mark') |
2133 | 156 | 171 | elif hasattr(e, 'problem_mark') and getattr(e, 'problem_mark'): | |
2134 | 172 | mark = getattr(e, 'problem_mark') | ||
2135 | 173 | if mark: | ||
2136 | 174 | line = mark.line + 1 | ||
2137 | 175 | column = mark.column + 1 | ||
2138 | 176 | errors = (('format-l{line}.c{col}'.format(line=line, col=column), | ||
2139 | 177 | 'File {0} is not valid yaml. {1}'.format( | ||
2140 | 178 | config_path, str(e))),) | ||
2141 | 179 | error = SchemaValidationError(errors) | ||
2142 | 180 | if annotate: | ||
2143 | 181 | print(annotated_cloudconfig_file({}, content, error.schema_errors)) | ||
2144 | 182 | raise error | ||
2145 | 157 | try: | 183 | try: |
2146 | 158 | validate_cloudconfig_schema( | 184 | validate_cloudconfig_schema( |
2147 | 159 | cloudconfig, schema, strict=True) | 185 | cloudconfig, schema, strict=True) |
2148 | @@ -176,7 +202,7 @@ def _schemapath_for_cloudconfig(config, original_content): | |||
2149 | 176 | list_index = 0 | 202 | list_index = 0 |
2150 | 177 | RE_YAML_INDENT = r'^(\s*)' | 203 | RE_YAML_INDENT = r'^(\s*)' |
2151 | 178 | scopes = [] | 204 | scopes = [] |
2153 | 179 | for line_number, line in enumerate(content_lines): | 205 | for line_number, line in enumerate(content_lines, 1): |
2154 | 180 | indent_depth = len(re.match(RE_YAML_INDENT, line).groups()[0]) | 206 | indent_depth = len(re.match(RE_YAML_INDENT, line).groups()[0]) |
2155 | 181 | line = line.strip() | 207 | line = line.strip() |
2156 | 182 | if not line or line.startswith('#'): | 208 | if not line or line.startswith('#'): |
2157 | @@ -208,8 +234,8 @@ def _schemapath_for_cloudconfig(config, original_content): | |||
2158 | 208 | scopes.append((indent_depth + 2, key + '.0')) | 234 | scopes.append((indent_depth + 2, key + '.0')) |
2159 | 209 | for inner_list_index in range(0, len(yaml.safe_load(value))): | 235 | for inner_list_index in range(0, len(yaml.safe_load(value))): |
2160 | 210 | list_key = key + '.' + str(inner_list_index) | 236 | list_key = key + '.' + str(inner_list_index) |
2163 | 211 | schema_line_numbers[list_key] = line_number + 1 | 237 | schema_line_numbers[list_key] = line_number |
2164 | 212 | schema_line_numbers[key] = line_number + 1 | 238 | schema_line_numbers[key] = line_number |
2165 | 213 | return schema_line_numbers | 239 | return schema_line_numbers |
2166 | 214 | 240 | ||
2167 | 215 | 241 | ||
2168 | @@ -297,8 +323,8 @@ def get_schema(): | |||
2169 | 297 | 323 | ||
2170 | 298 | configs_dir = os.path.dirname(os.path.abspath(__file__)) | 324 | configs_dir = os.path.dirname(os.path.abspath(__file__)) |
2171 | 299 | potential_handlers = find_modules(configs_dir) | 325 | potential_handlers = find_modules(configs_dir) |
2174 | 300 | for (fname, mod_name) in potential_handlers.items(): | 326 | for (_fname, mod_name) in potential_handlers.items(): |
2175 | 301 | mod_locs, looked_locs = importer.find_module( | 327 | mod_locs, _looked_locs = importer.find_module( |
2176 | 302 | mod_name, ['cloudinit.config'], ['schema']) | 328 | mod_name, ['cloudinit.config'], ['schema']) |
2177 | 303 | if mod_locs: | 329 | if mod_locs: |
2178 | 304 | mod = importer.import_module(mod_locs[0]) | 330 | mod = importer.import_module(mod_locs[0]) |
2179 | @@ -337,9 +363,11 @@ def handle_schema_args(name, args): | |||
2180 | 337 | try: | 363 | try: |
2181 | 338 | validate_cloudconfig_file( | 364 | validate_cloudconfig_file( |
2182 | 339 | args.config_file, full_schema, args.annotate) | 365 | args.config_file, full_schema, args.annotate) |
2184 | 340 | except (SchemaValidationError, RuntimeError) as e: | 366 | except SchemaValidationError as e: |
2185 | 341 | if not args.annotate: | 367 | if not args.annotate: |
2186 | 342 | error(str(e)) | 368 | error(str(e)) |
2187 | 369 | except RuntimeError as e: | ||
2188 | 370 | error(str(e)) | ||
2189 | 343 | else: | 371 | else: |
2190 | 344 | print("Valid cloud-config file {0}".format(args.config_file)) | 372 | print("Valid cloud-config file {0}".format(args.config_file)) |
2191 | 345 | if args.doc: | 373 | if args.doc: |
2192 | diff --git a/cloudinit/config/tests/test_disable_ec2_metadata.py b/cloudinit/config/tests/test_disable_ec2_metadata.py | |||
2193 | 346 | new file mode 100644 | 374 | new file mode 100644 |
2194 | index 0000000..67646b0 | |||
2195 | --- /dev/null | |||
2196 | +++ b/cloudinit/config/tests/test_disable_ec2_metadata.py | |||
2197 | @@ -0,0 +1,50 @@ | |||
2198 | 1 | # This file is part of cloud-init. See LICENSE file for license information. | ||
2199 | 2 | |||
2200 | 3 | """Tests cc_disable_ec2_metadata handler""" | ||
2201 | 4 | |||
2202 | 5 | import cloudinit.config.cc_disable_ec2_metadata as ec2_meta | ||
2203 | 6 | |||
2204 | 7 | from cloudinit.tests.helpers import CiTestCase, mock | ||
2205 | 8 | |||
2206 | 9 | import logging | ||
2207 | 10 | |||
2208 | 11 | LOG = logging.getLogger(__name__) | ||
2209 | 12 | |||
2210 | 13 | DISABLE_CFG = {'disable_ec2_metadata': 'true'} | ||
2211 | 14 | |||
2212 | 15 | |||
2213 | 16 | class TestEC2MetadataRoute(CiTestCase): | ||
2214 | 17 | |||
2215 | 18 | with_logs = True | ||
2216 | 19 | |||
2217 | 20 | @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which') | ||
2218 | 21 | @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp') | ||
2219 | 22 | def test_disable_ifconfig(self, m_subp, m_which): | ||
2220 | 23 | """Set the route if ifconfig command is available""" | ||
2221 | 24 | m_which.side_effect = lambda x: x if x == 'ifconfig' else None | ||
2222 | 25 | ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None) | ||
2223 | 26 | m_subp.assert_called_with( | ||
2224 | 27 | ['route', 'add', '-host', '169.254.169.254', 'reject'], | ||
2225 | 28 | capture=False) | ||
2226 | 29 | |||
2227 | 30 | @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which') | ||
2228 | 31 | @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp') | ||
2229 | 32 | def test_disable_ip(self, m_subp, m_which): | ||
2230 | 33 | """Set the route if ip command is available""" | ||
2231 | 34 | m_which.side_effect = lambda x: x if x == 'ip' else None | ||
2232 | 35 | ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None) | ||
2233 | 36 | m_subp.assert_called_with( | ||
2234 | 37 | ['ip', 'route', 'add', 'prohibit', '169.254.169.254'], | ||
2235 | 38 | capture=False) | ||
2236 | 39 | |||
2237 | 40 | @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which') | ||
2238 | 41 | @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp') | ||
2239 | 42 | def test_disable_no_tool(self, m_subp, m_which): | ||
2240 | 43 | """Log error when neither route nor ip commands are available""" | ||
2241 | 44 | m_which.return_value = None # Find neither ifconfig nor ip | ||
2242 | 45 | ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None) | ||
2243 | 46 | self.assertEqual( | ||
2244 | 47 | [mock.call('ip'), mock.call('ifconfig')], m_which.call_args_list) | ||
2245 | 48 | m_subp.assert_not_called() | ||
2246 | 49 | |||
2247 | 50 | # vi: ts=4 expandtab | ||
2248 | diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py | |||
2249 | 0 | new file mode 100644 | 51 | new file mode 100644 |
2250 | index 0000000..b051ec8 | |||
2251 | --- /dev/null | |||
2252 | +++ b/cloudinit/config/tests/test_set_passwords.py | |||
2253 | @@ -0,0 +1,71 @@ | |||
2254 | 1 | # This file is part of cloud-init. See LICENSE file for license information. | ||
2255 | 2 | |||
2256 | 3 | import mock | ||
2257 | 4 | |||
2258 | 5 | from cloudinit.config import cc_set_passwords as setpass | ||
2259 | 6 | from cloudinit.tests.helpers import CiTestCase | ||
2260 | 7 | from cloudinit import util | ||
2261 | 8 | |||
2262 | 9 | MODPATH = "cloudinit.config.cc_set_passwords." | ||
2263 | 10 | |||
2264 | 11 | |||
2265 | 12 | class TestHandleSshPwauth(CiTestCase): | ||
2266 | 13 | """Test cc_set_passwords handling of ssh_pwauth in handle_ssh_pwauth.""" | ||
2267 | 14 | |||
2268 | 15 | with_logs = True | ||
2269 | 16 | |||
2270 | 17 | @mock.patch(MODPATH + "util.subp") | ||
2271 | 18 | def test_unknown_value_logs_warning(self, m_subp): | ||
2272 | 19 | setpass.handle_ssh_pwauth("floo") | ||
2273 | 20 | self.assertIn("Unrecognized value: ssh_pwauth=floo", | ||
2274 | 21 | self.logs.getvalue()) | ||
2275 | 22 | m_subp.assert_not_called() | ||
2276 | 23 | |||
2277 | 24 | @mock.patch(MODPATH + "update_ssh_config", return_value=True) | ||
2278 | 25 | @mock.patch(MODPATH + "util.subp") | ||
2279 | 26 | def test_systemctl_as_service_cmd(self, m_subp, m_update_ssh_config): | ||
2280 | 27 | """If systemctl in service cmd: systemctl restart name.""" | ||
2281 | 28 | setpass.handle_ssh_pwauth( | ||
2282 | 29 | True, service_cmd=["systemctl"], service_name="myssh") | ||
2283 | 30 | self.assertEqual(mock.call(["systemctl", "restart", "myssh"]), | ||
2284 | 31 | m_subp.call_args) | ||
2285 | 32 | |||
2286 | 33 | @mock.patch(MODPATH + "update_ssh_config", return_value=True) | ||
2287 | 34 | @mock.patch(MODPATH + "util.subp") | ||
2288 | 35 | def test_service_as_service_cmd(self, m_subp, m_update_ssh_config): | ||
2289 | 36 | """If systemctl in service cmd: systemctl restart name.""" | ||
2290 | 37 | setpass.handle_ssh_pwauth( | ||
2291 | 38 | True, service_cmd=["service"], service_name="myssh") | ||
2292 | 39 | self.assertEqual(mock.call(["service", "myssh", "restart"]), | ||
2293 | 40 | m_subp.call_args) | ||
2294 | 41 | |||
2295 | 42 | @mock.patch(MODPATH + "update_ssh_config", return_value=False) | ||
2296 | 43 | @mock.patch(MODPATH + "util.subp") | ||
2297 | 44 | def test_not_restarted_if_not_updated(self, m_subp, m_update_ssh_config): | ||
2298 | 45 | """If config is not updated, then no system restart should be done.""" | ||
2299 | 46 | setpass.handle_ssh_pwauth(True) | ||
2300 | 47 | m_subp.assert_not_called() | ||
2301 | 48 | self.assertIn("No need to restart ssh", self.logs.getvalue()) | ||
2302 | 49 | |||
2303 | 50 | @mock.patch(MODPATH + "update_ssh_config", return_value=True) | ||
2304 | 51 | @mock.patch(MODPATH + "util.subp") | ||
2305 | 52 | def test_unchanged_does_nothing(self, m_subp, m_update_ssh_config): | ||
2306 | 53 | """If 'unchanged', then no updates to config and no restart.""" | ||
2307 | 54 | setpass.handle_ssh_pwauth( | ||
2308 | 55 | "unchanged", service_cmd=["systemctl"], service_name="myssh") | ||
2309 | 56 | m_update_ssh_config.assert_not_called() | ||
2310 | 57 | m_subp.assert_not_called() | ||
2311 | 58 | |||
2312 | 59 | @mock.patch(MODPATH + "util.subp") | ||
2313 | 60 | def test_valid_change_values(self, m_subp): | ||
2314 | 61 | """If value is a valid changen value, then update should be called.""" | ||
2315 | 62 | upname = MODPATH + "update_ssh_config" | ||
2316 | 63 | optname = "PasswordAuthentication" | ||
2317 | 64 | for value in util.FALSE_STRINGS + util.TRUE_STRINGS: | ||
2318 | 65 | optval = "yes" if value in util.TRUE_STRINGS else "no" | ||
2319 | 66 | with mock.patch(upname, return_value=False) as m_update: | ||
2320 | 67 | setpass.handle_ssh_pwauth(value) | ||
2321 | 68 | m_update.assert_called_with({optname: optval}) | ||
2322 | 69 | m_subp.assert_not_called() | ||
2323 | 70 | |||
2324 | 71 | # vi: ts=4 expandtab | ||
2325 | diff --git a/cloudinit/config/tests/test_snap.py b/cloudinit/config/tests/test_snap.py | |||
2326 | index c5b4a9d..34c80f1 100644 | |||
2327 | --- a/cloudinit/config/tests/test_snap.py | |||
2328 | +++ b/cloudinit/config/tests/test_snap.py | |||
2329 | @@ -9,7 +9,7 @@ from cloudinit.config.cc_snap import ( | |||
2330 | 9 | from cloudinit.config.schema import validate_cloudconfig_schema | 9 | from cloudinit.config.schema import validate_cloudconfig_schema |
2331 | 10 | from cloudinit import util | 10 | from cloudinit import util |
2332 | 11 | from cloudinit.tests.helpers import ( | 11 | from cloudinit.tests.helpers import ( |
2334 | 12 | CiTestCase, mock, wrap_and_call, skipUnlessJsonSchema) | 12 | CiTestCase, SchemaTestCaseMixin, mock, wrap_and_call, skipUnlessJsonSchema) |
2335 | 13 | 13 | ||
2336 | 14 | 14 | ||
2337 | 15 | SYSTEM_USER_ASSERTION = """\ | 15 | SYSTEM_USER_ASSERTION = """\ |
2338 | @@ -245,9 +245,10 @@ class TestRunCommands(CiTestCase): | |||
2339 | 245 | 245 | ||
2340 | 246 | 246 | ||
2341 | 247 | @skipUnlessJsonSchema() | 247 | @skipUnlessJsonSchema() |
2343 | 248 | class TestSchema(CiTestCase): | 248 | class TestSchema(CiTestCase, SchemaTestCaseMixin): |
2344 | 249 | 249 | ||
2345 | 250 | with_logs = True | 250 | with_logs = True |
2346 | 251 | schema = schema | ||
2347 | 251 | 252 | ||
2348 | 252 | def test_schema_warns_on_snap_not_as_dict(self): | 253 | def test_schema_warns_on_snap_not_as_dict(self): |
2349 | 253 | """If the snap configuration is not a dict, emit a warning.""" | 254 | """If the snap configuration is not a dict, emit a warning.""" |
2350 | @@ -340,6 +341,30 @@ class TestSchema(CiTestCase): | |||
2351 | 340 | {'snap': {'assertions': {'01': 'also valid'}}}, schema) | 341 | {'snap': {'assertions': {'01': 'also valid'}}}, schema) |
2352 | 341 | self.assertEqual('', self.logs.getvalue()) | 342 | self.assertEqual('', self.logs.getvalue()) |
2353 | 342 | 343 | ||
2354 | 344 | def test_duplicates_are_fine_array_array(self): | ||
2355 | 345 | """Duplicated commands array/array entries are allowed.""" | ||
2356 | 346 | self.assertSchemaValid( | ||
2357 | 347 | {'commands': [["echo", "bye"], ["echo" "bye"]]}, | ||
2358 | 348 | "command entries can be duplicate.") | ||
2359 | 349 | |||
2360 | 350 | def test_duplicates_are_fine_array_string(self): | ||
2361 | 351 | """Duplicated commands array/string entries are allowed.""" | ||
2362 | 352 | self.assertSchemaValid( | ||
2363 | 353 | {'commands': ["echo bye", "echo bye"]}, | ||
2364 | 354 | "command entries can be duplicate.") | ||
2365 | 355 | |||
2366 | 356 | def test_duplicates_are_fine_dict_array(self): | ||
2367 | 357 | """Duplicated commands dict/array entries are allowed.""" | ||
2368 | 358 | self.assertSchemaValid( | ||
2369 | 359 | {'commands': {'00': ["echo", "bye"], '01': ["echo", "bye"]}}, | ||
2370 | 360 | "command entries can be duplicate.") | ||
2371 | 361 | |||
2372 | 362 | def test_duplicates_are_fine_dict_string(self): | ||
2373 | 363 | """Duplicated commands dict/string entries are allowed.""" | ||
2374 | 364 | self.assertSchemaValid( | ||
2375 | 365 | {'commands': {'00': "echo bye", '01': "echo bye"}}, | ||
2376 | 366 | "command entries can be duplicate.") | ||
2377 | 367 | |||
2378 | 343 | 368 | ||
2379 | 344 | class TestHandle(CiTestCase): | 369 | class TestHandle(CiTestCase): |
2380 | 345 | 370 | ||
2381 | diff --git a/cloudinit/config/tests/test_ubuntu_advantage.py b/cloudinit/config/tests/test_ubuntu_advantage.py | |||
2382 | index f2a59fa..f1beeff 100644 | |||
2383 | --- a/cloudinit/config/tests/test_ubuntu_advantage.py | |||
2384 | +++ b/cloudinit/config/tests/test_ubuntu_advantage.py | |||
2385 | @@ -7,7 +7,8 @@ from cloudinit.config.cc_ubuntu_advantage import ( | |||
2386 | 7 | handle, maybe_install_ua_tools, run_commands, schema) | 7 | handle, maybe_install_ua_tools, run_commands, schema) |
2387 | 8 | from cloudinit.config.schema import validate_cloudconfig_schema | 8 | from cloudinit.config.schema import validate_cloudconfig_schema |
2388 | 9 | from cloudinit import util | 9 | from cloudinit import util |
2390 | 10 | from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJsonSchema | 10 | from cloudinit.tests.helpers import ( |
2391 | 11 | CiTestCase, mock, SchemaTestCaseMixin, skipUnlessJsonSchema) | ||
2392 | 11 | 12 | ||
2393 | 12 | 13 | ||
2394 | 13 | # Module path used in mocks | 14 | # Module path used in mocks |
2395 | @@ -105,9 +106,10 @@ class TestRunCommands(CiTestCase): | |||
2396 | 105 | 106 | ||
2397 | 106 | 107 | ||
2398 | 107 | @skipUnlessJsonSchema() | 108 | @skipUnlessJsonSchema() |
2400 | 108 | class TestSchema(CiTestCase): | 109 | class TestSchema(CiTestCase, SchemaTestCaseMixin): |
2401 | 109 | 110 | ||
2402 | 110 | with_logs = True | 111 | with_logs = True |
2403 | 112 | schema = schema | ||
2404 | 111 | 113 | ||
2405 | 112 | def test_schema_warns_on_ubuntu_advantage_not_as_dict(self): | 114 | def test_schema_warns_on_ubuntu_advantage_not_as_dict(self): |
2406 | 113 | """If ubuntu-advantage configuration is not a dict, emit a warning.""" | 115 | """If ubuntu-advantage configuration is not a dict, emit a warning.""" |
2407 | @@ -169,6 +171,30 @@ class TestSchema(CiTestCase): | |||
2408 | 169 | {'ubuntu-advantage': {'commands': {'01': 'also valid'}}}, schema) | 171 | {'ubuntu-advantage': {'commands': {'01': 'also valid'}}}, schema) |
2409 | 170 | self.assertEqual('', self.logs.getvalue()) | 172 | self.assertEqual('', self.logs.getvalue()) |
2410 | 171 | 173 | ||
2411 | 174 | def test_duplicates_are_fine_array_array(self): | ||
2412 | 175 | """Duplicated commands array/array entries are allowed.""" | ||
2413 | 176 | self.assertSchemaValid( | ||
2414 | 177 | {'commands': [["echo", "bye"], ["echo" "bye"]]}, | ||
2415 | 178 | "command entries can be duplicate.") | ||
2416 | 179 | |||
2417 | 180 | def test_duplicates_are_fine_array_string(self): | ||
2418 | 181 | """Duplicated commands array/string entries are allowed.""" | ||
2419 | 182 | self.assertSchemaValid( | ||
2420 | 183 | {'commands': ["echo bye", "echo bye"]}, | ||
2421 | 184 | "command entries can be duplicate.") | ||
2422 | 185 | |||
2423 | 186 | def test_duplicates_are_fine_dict_array(self): | ||
2424 | 187 | """Duplicated commands dict/array entries are allowed.""" | ||
2425 | 188 | self.assertSchemaValid( | ||
2426 | 189 | {'commands': {'00': ["echo", "bye"], '01': ["echo", "bye"]}}, | ||
2427 | 190 | "command entries can be duplicate.") | ||
2428 | 191 | |||
2429 | 192 | def test_duplicates_are_fine_dict_string(self): | ||
2430 | 193 | """Duplicated commands dict/string entries are allowed.""" | ||
2431 | 194 | self.assertSchemaValid( | ||
2432 | 195 | {'commands': {'00': "echo bye", '01': "echo bye"}}, | ||
2433 | 196 | "command entries can be duplicate.") | ||
2434 | 197 | |||
2435 | 172 | 198 | ||
2436 | 173 | class TestHandle(CiTestCase): | 199 | class TestHandle(CiTestCase): |
2437 | 174 | 200 | ||
2438 | diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py | |||
2439 | index 55260ea..ab0b077 100755 | |||
2440 | --- a/cloudinit/distros/__init__.py | |||
2441 | +++ b/cloudinit/distros/__init__.py | |||
2442 | @@ -49,6 +49,9 @@ LOG = logging.getLogger(__name__) | |||
2443 | 49 | # It could break when Amazon adds new regions and new AZs. | 49 | # It could break when Amazon adds new regions and new AZs. |
2444 | 50 | _EC2_AZ_RE = re.compile('^[a-z][a-z]-(?:[a-z]+-)+[0-9][a-z]$') | 50 | _EC2_AZ_RE = re.compile('^[a-z][a-z]-(?:[a-z]+-)+[0-9][a-z]$') |
2445 | 51 | 51 | ||
2446 | 52 | # Default NTP Client Configurations | ||
2447 | 53 | PREFERRED_NTP_CLIENTS = ['chrony', 'systemd-timesyncd', 'ntp', 'ntpdate'] | ||
2448 | 54 | |||
2449 | 52 | 55 | ||
2450 | 53 | @six.add_metaclass(abc.ABCMeta) | 56 | @six.add_metaclass(abc.ABCMeta) |
2451 | 54 | class Distro(object): | 57 | class Distro(object): |
2452 | @@ -60,6 +63,7 @@ class Distro(object): | |||
2453 | 60 | tz_zone_dir = "/usr/share/zoneinfo" | 63 | tz_zone_dir = "/usr/share/zoneinfo" |
2454 | 61 | init_cmd = ['service'] # systemctl, service etc | 64 | init_cmd = ['service'] # systemctl, service etc |
2455 | 62 | renderer_configs = {} | 65 | renderer_configs = {} |
2456 | 66 | _preferred_ntp_clients = None | ||
2457 | 63 | 67 | ||
2458 | 64 | def __init__(self, name, cfg, paths): | 68 | def __init__(self, name, cfg, paths): |
2459 | 65 | self._paths = paths | 69 | self._paths = paths |
2460 | @@ -339,6 +343,14 @@ class Distro(object): | |||
2461 | 339 | contents.write("%s\n" % (eh)) | 343 | contents.write("%s\n" % (eh)) |
2462 | 340 | util.write_file(self.hosts_fn, contents.getvalue(), mode=0o644) | 344 | util.write_file(self.hosts_fn, contents.getvalue(), mode=0o644) |
2463 | 341 | 345 | ||
2464 | 346 | @property | ||
2465 | 347 | def preferred_ntp_clients(self): | ||
2466 | 348 | """Allow distro to determine the preferred ntp client list""" | ||
2467 | 349 | if not self._preferred_ntp_clients: | ||
2468 | 350 | self._preferred_ntp_clients = list(PREFERRED_NTP_CLIENTS) | ||
2469 | 351 | |||
2470 | 352 | return self._preferred_ntp_clients | ||
2471 | 353 | |||
2472 | 342 | def _bring_up_interface(self, device_name): | 354 | def _bring_up_interface(self, device_name): |
2473 | 343 | cmd = ['ifup', device_name] | 355 | cmd = ['ifup', device_name] |
2474 | 344 | LOG.debug("Attempting to run bring up interface %s using command %s", | 356 | LOG.debug("Attempting to run bring up interface %s using command %s", |
2475 | @@ -519,7 +531,7 @@ class Distro(object): | |||
2476 | 519 | self.lock_passwd(name) | 531 | self.lock_passwd(name) |
2477 | 520 | 532 | ||
2478 | 521 | # Configure sudo access | 533 | # Configure sudo access |
2480 | 522 | if 'sudo' in kwargs: | 534 | if 'sudo' in kwargs and kwargs['sudo'] is not False: |
2481 | 523 | self.write_sudo_rules(name, kwargs['sudo']) | 535 | self.write_sudo_rules(name, kwargs['sudo']) |
2482 | 524 | 536 | ||
2483 | 525 | # Import SSH keys | 537 | # Import SSH keys |
2484 | diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py | |||
2485 | index 754d3df..ff22d56 100644 | |||
2486 | --- a/cloudinit/distros/freebsd.py | |||
2487 | +++ b/cloudinit/distros/freebsd.py | |||
2488 | @@ -110,15 +110,15 @@ class Distro(distros.Distro): | |||
2489 | 110 | if dev.startswith('lo'): | 110 | if dev.startswith('lo'): |
2490 | 111 | return dev | 111 | return dev |
2491 | 112 | 112 | ||
2493 | 113 | n = re.search('\d+$', dev) | 113 | n = re.search(r'\d+$', dev) |
2494 | 114 | index = n.group(0) | 114 | index = n.group(0) |
2495 | 115 | 115 | ||
2497 | 116 | (out, err) = util.subp(['ifconfig', '-a']) | 116 | (out, _err) = util.subp(['ifconfig', '-a']) |
2498 | 117 | ifconfigoutput = [x for x in (out.strip()).splitlines() | 117 | ifconfigoutput = [x for x in (out.strip()).splitlines() |
2499 | 118 | if len(x.split()) > 0] | 118 | if len(x.split()) > 0] |
2500 | 119 | bsddev = 'NOT_FOUND' | 119 | bsddev = 'NOT_FOUND' |
2501 | 120 | for line in ifconfigoutput: | 120 | for line in ifconfigoutput: |
2503 | 121 | m = re.match('^\w+', line) | 121 | m = re.match(r'^\w+', line) |
2504 | 122 | if m: | 122 | if m: |
2505 | 123 | if m.group(0).startswith('lo'): | 123 | if m.group(0).startswith('lo'): |
2506 | 124 | continue | 124 | continue |
2507 | @@ -128,7 +128,7 @@ class Distro(distros.Distro): | |||
2508 | 128 | break | 128 | break |
2509 | 129 | 129 | ||
2510 | 130 | # Replace the index with the one we're after. | 130 | # Replace the index with the one we're after. |
2512 | 131 | bsddev = re.sub('\d+$', index, bsddev) | 131 | bsddev = re.sub(r'\d+$', index, bsddev) |
2513 | 132 | LOG.debug("Using network interface %s", bsddev) | 132 | LOG.debug("Using network interface %s", bsddev) |
2514 | 133 | return bsddev | 133 | return bsddev |
2515 | 134 | 134 | ||
2516 | @@ -266,7 +266,7 @@ class Distro(distros.Distro): | |||
2517 | 266 | self.lock_passwd(name) | 266 | self.lock_passwd(name) |
2518 | 267 | 267 | ||
2519 | 268 | # Configure sudo access | 268 | # Configure sudo access |
2521 | 269 | if 'sudo' in kwargs: | 269 | if 'sudo' in kwargs and kwargs['sudo'] is not False: |
2522 | 270 | self.write_sudo_rules(name, kwargs['sudo']) | 270 | self.write_sudo_rules(name, kwargs['sudo']) |
2523 | 271 | 271 | ||
2524 | 272 | # Import SSH keys | 272 | # Import SSH keys |
2525 | diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py | |||
2526 | index 162dfa0..9f90e95 100644 | |||
2527 | --- a/cloudinit/distros/opensuse.py | |||
2528 | +++ b/cloudinit/distros/opensuse.py | |||
2529 | @@ -208,4 +208,28 @@ class Distro(distros.Distro): | |||
2530 | 208 | nameservers, searchservers) | 208 | nameservers, searchservers) |
2531 | 209 | return dev_names | 209 | return dev_names |
2532 | 210 | 210 | ||
2533 | 211 | @property | ||
2534 | 212 | def preferred_ntp_clients(self): | ||
2535 | 213 | """The preferred ntp client is dependent on the version.""" | ||
2536 | 214 | |||
2537 | 215 | """Allow distro to determine the preferred ntp client list""" | ||
2538 | 216 | if not self._preferred_ntp_clients: | ||
2539 | 217 | distro_info = util.system_info()['dist'] | ||
2540 | 218 | name = distro_info[0] | ||
2541 | 219 | major_ver = int(distro_info[1].split('.')[0]) | ||
2542 | 220 | |||
2543 | 221 | # This is horribly complicated because of a case of | ||
2544 | 222 | # "we do not care if versions should be increasing syndrome" | ||
2545 | 223 | if ( | ||
2546 | 224 | (major_ver >= 15 and 'openSUSE' not in name) or | ||
2547 | 225 | (major_ver >= 15 and 'openSUSE' in name and major_ver != 42) | ||
2548 | 226 | ): | ||
2549 | 227 | self._preferred_ntp_clients = ['chrony', | ||
2550 | 228 | 'systemd-timesyncd', 'ntp'] | ||
2551 | 229 | else: | ||
2552 | 230 | self._preferred_ntp_clients = ['ntp', | ||
2553 | 231 | 'systemd-timesyncd', 'chrony'] | ||
2554 | 232 | |||
2555 | 233 | return self._preferred_ntp_clients | ||
2556 | 234 | |||
2557 | 211 | # vi: ts=4 expandtab | 235 | # vi: ts=4 expandtab |
2558 | diff --git a/cloudinit/distros/ubuntu.py b/cloudinit/distros/ubuntu.py | |||
2559 | index 82ca34f..6815410 100644 | |||
2560 | --- a/cloudinit/distros/ubuntu.py | |||
2561 | +++ b/cloudinit/distros/ubuntu.py | |||
2562 | @@ -10,12 +10,31 @@ | |||
2563 | 10 | # This file is part of cloud-init. See LICENSE file for license information. | 10 | # This file is part of cloud-init. See LICENSE file for license information. |
2564 | 11 | 11 | ||
2565 | 12 | from cloudinit.distros import debian | 12 | from cloudinit.distros import debian |
2566 | 13 | from cloudinit.distros import PREFERRED_NTP_CLIENTS | ||
2567 | 13 | from cloudinit import log as logging | 14 | from cloudinit import log as logging |
2568 | 15 | from cloudinit import util | ||
2569 | 16 | |||
2570 | 17 | import copy | ||
2571 | 14 | 18 | ||
2572 | 15 | LOG = logging.getLogger(__name__) | 19 | LOG = logging.getLogger(__name__) |
2573 | 16 | 20 | ||
2574 | 17 | 21 | ||
2575 | 18 | class Distro(debian.Distro): | 22 | class Distro(debian.Distro): |
2576 | 23 | |||
2577 | 24 | @property | ||
2578 | 25 | def preferred_ntp_clients(self): | ||
2579 | 26 | """The preferred ntp client is dependent on the version.""" | ||
2580 | 27 | if not self._preferred_ntp_clients: | ||
2581 | 28 | (_name, _version, codename) = util.system_info()['dist'] | ||
2582 | 29 | # Xenial cloud-init only installed ntp, UbuntuCore has timesyncd. | ||
2583 | 30 | if codename == "xenial" and not util.system_is_snappy(): | ||
2584 | 31 | self._preferred_ntp_clients = ['ntp'] | ||
2585 | 32 | else: | ||
2586 | 33 | self._preferred_ntp_clients = ( | ||
2587 | 34 | copy.deepcopy(PREFERRED_NTP_CLIENTS)) | ||
2588 | 35 | return self._preferred_ntp_clients | ||
2589 | 36 | |||
2590 | 19 | pass | 37 | pass |
2591 | 20 | 38 | ||
2592 | 39 | |||
2593 | 21 | # vi: ts=4 expandtab | 40 | # vi: ts=4 expandtab |
2594 | diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py | |||
2595 | index dc3f0fc..3b7b17f 100644 | |||
2596 | --- a/cloudinit/ec2_utils.py | |||
2597 | +++ b/cloudinit/ec2_utils.py | |||
2598 | @@ -150,11 +150,9 @@ def get_instance_userdata(api_version='latest', | |||
2599 | 150 | # NOT_FOUND occurs) and just in that case returning an empty string. | 150 | # NOT_FOUND occurs) and just in that case returning an empty string. |
2600 | 151 | exception_cb = functools.partial(_skip_retry_on_codes, | 151 | exception_cb = functools.partial(_skip_retry_on_codes, |
2601 | 152 | SKIP_USERDATA_CODES) | 152 | SKIP_USERDATA_CODES) |
2607 | 153 | response = util.read_file_or_url(ud_url, | 153 | response = url_helper.read_file_or_url( |
2608 | 154 | ssl_details=ssl_details, | 154 | ud_url, ssl_details=ssl_details, timeout=timeout, |
2609 | 155 | timeout=timeout, | 155 | retries=retries, exception_cb=exception_cb) |
2605 | 156 | retries=retries, | ||
2606 | 157 | exception_cb=exception_cb) | ||
2610 | 158 | user_data = response.contents | 156 | user_data = response.contents |
2611 | 159 | except url_helper.UrlError as e: | 157 | except url_helper.UrlError as e: |
2612 | 160 | if e.code not in SKIP_USERDATA_CODES: | 158 | if e.code not in SKIP_USERDATA_CODES: |
2613 | @@ -169,9 +167,9 @@ def _get_instance_metadata(tree, api_version='latest', | |||
2614 | 169 | ssl_details=None, timeout=5, retries=5, | 167 | ssl_details=None, timeout=5, retries=5, |
2615 | 170 | leaf_decoder=None): | 168 | leaf_decoder=None): |
2616 | 171 | md_url = url_helper.combine_url(metadata_address, api_version, tree) | 169 | md_url = url_helper.combine_url(metadata_address, api_version, tree) |
2620 | 172 | caller = functools.partial(util.read_file_or_url, | 170 | caller = functools.partial( |
2621 | 173 | ssl_details=ssl_details, timeout=timeout, | 171 | url_helper.read_file_or_url, ssl_details=ssl_details, |
2622 | 174 | retries=retries) | 172 | timeout=timeout, retries=retries) |
2623 | 175 | 173 | ||
2624 | 176 | def mcaller(url): | 174 | def mcaller(url): |
2625 | 177 | return caller(url).contents | 175 | return caller(url).contents |
2626 | diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py | |||
2627 | index 1ca92d4..dc33876 100644 | |||
2628 | --- a/cloudinit/handlers/upstart_job.py | |||
2629 | +++ b/cloudinit/handlers/upstart_job.py | |||
2630 | @@ -97,7 +97,7 @@ def _has_suitable_upstart(): | |||
2631 | 97 | else: | 97 | else: |
2632 | 98 | util.logexc(LOG, "dpkg --compare-versions failed [%s]", | 98 | util.logexc(LOG, "dpkg --compare-versions failed [%s]", |
2633 | 99 | e.exit_code) | 99 | e.exit_code) |
2635 | 100 | except Exception as e: | 100 | except Exception: |
2636 | 101 | util.logexc(LOG, "dpkg --compare-versions failed") | 101 | util.logexc(LOG, "dpkg --compare-versions failed") |
2637 | 102 | return False | 102 | return False |
2638 | 103 | else: | 103 | else: |
2639 | diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py | |||
2640 | index f69c0ef..3ffde52 100644 | |||
2641 | --- a/cloudinit/net/__init__.py | |||
2642 | +++ b/cloudinit/net/__init__.py | |||
2643 | @@ -107,6 +107,21 @@ def is_bond(devname): | |||
2644 | 107 | return os.path.exists(sys_dev_path(devname, "bonding")) | 107 | return os.path.exists(sys_dev_path(devname, "bonding")) |
2645 | 108 | 108 | ||
2646 | 109 | 109 | ||
2647 | 110 | def is_renamed(devname): | ||
2648 | 111 | """ | ||
2649 | 112 | /* interface name assignment types (sysfs name_assign_type attribute) */ | ||
2650 | 113 | #define NET_NAME_UNKNOWN 0 /* unknown origin (not exposed to user) */ | ||
2651 | 114 | #define NET_NAME_ENUM 1 /* enumerated by kernel */ | ||
2652 | 115 | #define NET_NAME_PREDICTABLE 2 /* predictably named by the kernel */ | ||
2653 | 116 | #define NET_NAME_USER 3 /* provided by user-space */ | ||
2654 | 117 | #define NET_NAME_RENAMED 4 /* renamed by user-space */ | ||
2655 | 118 | """ | ||
2656 | 119 | name_assign_type = read_sys_net_safe(devname, 'name_assign_type') | ||
2657 | 120 | if name_assign_type and name_assign_type in ['3', '4']: | ||
2658 | 121 | return True | ||
2659 | 122 | return False | ||
2660 | 123 | |||
2661 | 124 | |||
2662 | 110 | def is_vlan(devname): | 125 | def is_vlan(devname): |
2663 | 111 | uevent = str(read_sys_net_safe(devname, "uevent")) | 126 | uevent = str(read_sys_net_safe(devname, "uevent")) |
2664 | 112 | return 'DEVTYPE=vlan' in uevent.splitlines() | 127 | return 'DEVTYPE=vlan' in uevent.splitlines() |
2665 | @@ -180,6 +195,17 @@ def find_fallback_nic(blacklist_drivers=None): | |||
2666 | 180 | if not blacklist_drivers: | 195 | if not blacklist_drivers: |
2667 | 181 | blacklist_drivers = [] | 196 | blacklist_drivers = [] |
2668 | 182 | 197 | ||
2669 | 198 | if 'net.ifnames=0' in util.get_cmdline(): | ||
2670 | 199 | LOG.debug('Stable ifnames disabled by net.ifnames=0 in /proc/cmdline') | ||
2671 | 200 | else: | ||
2672 | 201 | unstable = [device for device in get_devicelist() | ||
2673 | 202 | if device != 'lo' and not is_renamed(device)] | ||
2674 | 203 | if len(unstable): | ||
2675 | 204 | LOG.debug('Found unstable nic names: %s; calling udevadm settle', | ||
2676 | 205 | unstable) | ||
2677 | 206 | msg = 'Waiting for udev events to settle' | ||
2678 | 207 | util.log_time(LOG.debug, msg, func=util.udevadm_settle) | ||
2679 | 208 | |||
2680 | 183 | # get list of interfaces that could have connections | 209 | # get list of interfaces that could have connections |
2681 | 184 | invalid_interfaces = set(['lo']) | 210 | invalid_interfaces = set(['lo']) |
2682 | 185 | potential_interfaces = set([device for device in get_devicelist() | 211 | potential_interfaces = set([device for device in get_devicelist() |
2683 | @@ -295,7 +321,7 @@ def apply_network_config_names(netcfg, strict_present=True, strict_busy=True): | |||
2684 | 295 | 321 | ||
2685 | 296 | def _version_2(netcfg): | 322 | def _version_2(netcfg): |
2686 | 297 | renames = [] | 323 | renames = [] |
2688 | 298 | for key, ent in netcfg.get('ethernets', {}).items(): | 324 | for ent in netcfg.get('ethernets', {}).values(): |
2689 | 299 | # only rename if configured to do so | 325 | # only rename if configured to do so |
2690 | 300 | name = ent.get('set-name') | 326 | name = ent.get('set-name') |
2691 | 301 | if not name: | 327 | if not name: |
2692 | @@ -333,8 +359,12 @@ def interface_has_own_mac(ifname, strict=False): | |||
2693 | 333 | 1: randomly generated 3: set using dev_set_mac_address""" | 359 | 1: randomly generated 3: set using dev_set_mac_address""" |
2694 | 334 | 360 | ||
2695 | 335 | assign_type = read_sys_net_int(ifname, "addr_assign_type") | 361 | assign_type = read_sys_net_int(ifname, "addr_assign_type") |
2698 | 336 | if strict and assign_type is None: | 362 | if assign_type is None: |
2699 | 337 | raise ValueError("%s had no addr_assign_type.") | 363 | # None is returned if this nic had no 'addr_assign_type' entry. |
2700 | 364 | # if strict, raise an error, if not return True. | ||
2701 | 365 | if strict: | ||
2702 | 366 | raise ValueError("%s had no addr_assign_type.") | ||
2703 | 367 | return True | ||
2704 | 338 | return assign_type in (0, 1, 3) | 368 | return assign_type in (0, 1, 3) |
2705 | 339 | 369 | ||
2706 | 340 | 370 | ||
2707 | diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py | |||
2708 | index 9e9fe0f..f89a0f7 100755 | |||
2709 | --- a/cloudinit/net/cmdline.py | |||
2710 | +++ b/cloudinit/net/cmdline.py | |||
2711 | @@ -65,7 +65,7 @@ def _klibc_to_config_entry(content, mac_addrs=None): | |||
2712 | 65 | iface['mac_address'] = mac_addrs[name] | 65 | iface['mac_address'] = mac_addrs[name] |
2713 | 66 | 66 | ||
2714 | 67 | # Handle both IPv4 and IPv6 values | 67 | # Handle both IPv4 and IPv6 values |
2716 | 68 | for v, pre in (('ipv4', 'IPV4'), ('ipv6', 'IPV6')): | 68 | for pre in ('IPV4', 'IPV6'): |
2717 | 69 | # if no IPV4ADDR or IPV6ADDR, then go on. | 69 | # if no IPV4ADDR or IPV6ADDR, then go on. |
2718 | 70 | if pre + "ADDR" not in data: | 70 | if pre + "ADDR" not in data: |
2719 | 71 | continue | 71 | continue |
2720 | diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py | |||
2721 | index 087c0c0..12cf509 100644 | |||
2722 | --- a/cloudinit/net/dhcp.py | |||
2723 | +++ b/cloudinit/net/dhcp.py | |||
2724 | @@ -216,7 +216,7 @@ def networkd_get_option_from_leases(keyname, leases_d=None): | |||
2725 | 216 | if leases_d is None: | 216 | if leases_d is None: |
2726 | 217 | leases_d = NETWORKD_LEASES_DIR | 217 | leases_d = NETWORKD_LEASES_DIR |
2727 | 218 | leases = networkd_load_leases(leases_d=leases_d) | 218 | leases = networkd_load_leases(leases_d=leases_d) |
2729 | 219 | for ifindex, data in sorted(leases.items()): | 219 | for _ifindex, data in sorted(leases.items()): |
2730 | 220 | if data.get(keyname): | 220 | if data.get(keyname): |
2731 | 221 | return data[keyname] | 221 | return data[keyname] |
2732 | 222 | return None | 222 | return None |
2733 | diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py | |||
2734 | index c6a71d1..bd20a36 100644 | |||
2735 | --- a/cloudinit/net/eni.py | |||
2736 | +++ b/cloudinit/net/eni.py | |||
2737 | @@ -10,9 +10,12 @@ from . import ParserError | |||
2738 | 10 | from . import renderer | 10 | from . import renderer |
2739 | 11 | from .network_state import subnet_is_ipv6 | 11 | from .network_state import subnet_is_ipv6 |
2740 | 12 | 12 | ||
2741 | 13 | from cloudinit import log as logging | ||
2742 | 13 | from cloudinit import util | 14 | from cloudinit import util |
2743 | 14 | 15 | ||
2744 | 15 | 16 | ||
2745 | 17 | LOG = logging.getLogger(__name__) | ||
2746 | 18 | |||
2747 | 16 | NET_CONFIG_COMMANDS = [ | 19 | NET_CONFIG_COMMANDS = [ |
2748 | 17 | "pre-up", "up", "post-up", "down", "pre-down", "post-down", | 20 | "pre-up", "up", "post-up", "down", "pre-down", "post-down", |
2749 | 18 | ] | 21 | ] |
2750 | @@ -61,7 +64,7 @@ def _iface_add_subnet(iface, subnet): | |||
2751 | 61 | 64 | ||
2752 | 62 | 65 | ||
2753 | 63 | # TODO: switch to valid_map for attrs | 66 | # TODO: switch to valid_map for attrs |
2755 | 64 | def _iface_add_attrs(iface, index): | 67 | def _iface_add_attrs(iface, index, ipv4_subnet_mtu): |
2756 | 65 | # If the index is non-zero, this is an alias interface. Alias interfaces | 68 | # If the index is non-zero, this is an alias interface. Alias interfaces |
2757 | 66 | # represent additional interface addresses, and should not have additional | 69 | # represent additional interface addresses, and should not have additional |
2758 | 67 | # attributes. (extra attributes here are almost always either incorrect, | 70 | # attributes. (extra attributes here are almost always either incorrect, |
2759 | @@ -100,6 +103,13 @@ def _iface_add_attrs(iface, index): | |||
2760 | 100 | value = 'on' if iface[key] else 'off' | 103 | value = 'on' if iface[key] else 'off' |
2761 | 101 | if not value or key in ignore_map: | 104 | if not value or key in ignore_map: |
2762 | 102 | continue | 105 | continue |
2763 | 106 | if key == 'mtu' and ipv4_subnet_mtu: | ||
2764 | 107 | if value != ipv4_subnet_mtu: | ||
2765 | 108 | LOG.warning( | ||
2766 | 109 | "Network config: ignoring %s device-level mtu:%s because" | ||
2767 | 110 | " ipv4 subnet-level mtu:%s provided.", | ||
2768 | 111 | iface['name'], value, ipv4_subnet_mtu) | ||
2769 | 112 | continue | ||
2770 | 103 | if key in multiline_keys: | 113 | if key in multiline_keys: |
2771 | 104 | for v in value: | 114 | for v in value: |
2772 | 105 | content.append(" {0} {1}".format(renames.get(key, key), v)) | 115 | content.append(" {0} {1}".format(renames.get(key, key), v)) |
2773 | @@ -377,12 +387,15 @@ class Renderer(renderer.Renderer): | |||
2774 | 377 | subnets = iface.get('subnets', {}) | 387 | subnets = iface.get('subnets', {}) |
2775 | 378 | if subnets: | 388 | if subnets: |
2776 | 379 | for index, subnet in enumerate(subnets): | 389 | for index, subnet in enumerate(subnets): |
2777 | 390 | ipv4_subnet_mtu = None | ||
2778 | 380 | iface['index'] = index | 391 | iface['index'] = index |
2779 | 381 | iface['mode'] = subnet['type'] | 392 | iface['mode'] = subnet['type'] |
2780 | 382 | iface['control'] = subnet.get('control', 'auto') | 393 | iface['control'] = subnet.get('control', 'auto') |
2781 | 383 | subnet_inet = 'inet' | 394 | subnet_inet = 'inet' |
2782 | 384 | if subnet_is_ipv6(subnet): | 395 | if subnet_is_ipv6(subnet): |
2783 | 385 | subnet_inet += '6' | 396 | subnet_inet += '6' |
2784 | 397 | else: | ||
2785 | 398 | ipv4_subnet_mtu = subnet.get('mtu') | ||
2786 | 386 | iface['inet'] = subnet_inet | 399 | iface['inet'] = subnet_inet |
2787 | 387 | if subnet['type'].startswith('dhcp'): | 400 | if subnet['type'].startswith('dhcp'): |
2788 | 388 | iface['mode'] = 'dhcp' | 401 | iface['mode'] = 'dhcp' |
2789 | @@ -397,7 +410,7 @@ class Renderer(renderer.Renderer): | |||
2790 | 397 | _iface_start_entry( | 410 | _iface_start_entry( |
2791 | 398 | iface, index, render_hwaddress=render_hwaddress) + | 411 | iface, index, render_hwaddress=render_hwaddress) + |
2792 | 399 | _iface_add_subnet(iface, subnet) + | 412 | _iface_add_subnet(iface, subnet) + |
2794 | 400 | _iface_add_attrs(iface, index) | 413 | _iface_add_attrs(iface, index, ipv4_subnet_mtu) |
2795 | 401 | ) | 414 | ) |
2796 | 402 | for route in subnet.get('routes', []): | 415 | for route in subnet.get('routes', []): |
2797 | 403 | lines.extend(self._render_route(route, indent=" ")) | 416 | lines.extend(self._render_route(route, indent=" ")) |
2798 | @@ -409,7 +422,8 @@ class Renderer(renderer.Renderer): | |||
2799 | 409 | if 'bond-master' in iface or 'bond-slaves' in iface: | 422 | if 'bond-master' in iface or 'bond-slaves' in iface: |
2800 | 410 | lines.append("auto {name}".format(**iface)) | 423 | lines.append("auto {name}".format(**iface)) |
2801 | 411 | lines.append("iface {name} {inet} {mode}".format(**iface)) | 424 | lines.append("iface {name} {inet} {mode}".format(**iface)) |
2803 | 412 | lines.extend(_iface_add_attrs(iface, index=0)) | 425 | lines.extend( |
2804 | 426 | _iface_add_attrs(iface, index=0, ipv4_subnet_mtu=None)) | ||
2805 | 413 | sections.append(lines) | 427 | sections.append(lines) |
2806 | 414 | return sections | 428 | return sections |
2807 | 415 | 429 | ||
2808 | diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py | |||
2809 | index 6344348..4014363 100644 | |||
2810 | --- a/cloudinit/net/netplan.py | |||
2811 | +++ b/cloudinit/net/netplan.py | |||
2812 | @@ -34,7 +34,7 @@ def _get_params_dict_by_match(config, match): | |||
2813 | 34 | if key.startswith(match)) | 34 | if key.startswith(match)) |
2814 | 35 | 35 | ||
2815 | 36 | 36 | ||
2817 | 37 | def _extract_addresses(config, entry): | 37 | def _extract_addresses(config, entry, ifname): |
2818 | 38 | """This method parse a cloudinit.net.network_state dictionary (config) and | 38 | """This method parse a cloudinit.net.network_state dictionary (config) and |
2819 | 39 | maps netstate keys/values into a dictionary (entry) to represent | 39 | maps netstate keys/values into a dictionary (entry) to represent |
2820 | 40 | netplan yaml. | 40 | netplan yaml. |
2821 | @@ -124,6 +124,15 @@ def _extract_addresses(config, entry): | |||
2822 | 124 | 124 | ||
2823 | 125 | addresses.append(addr) | 125 | addresses.append(addr) |
2824 | 126 | 126 | ||
2825 | 127 | if 'mtu' in config: | ||
2826 | 128 | entry_mtu = entry.get('mtu') | ||
2827 | 129 | if entry_mtu and config['mtu'] != entry_mtu: | ||
2828 | 130 | LOG.warning( | ||
2829 | 131 | "Network config: ignoring %s device-level mtu:%s because" | ||
2830 | 132 | " ipv4 subnet-level mtu:%s provided.", | ||
2831 | 133 | ifname, config['mtu'], entry_mtu) | ||
2832 | 134 | else: | ||
2833 | 135 | entry['mtu'] = config['mtu'] | ||
2834 | 127 | if len(addresses) > 0: | 136 | if len(addresses) > 0: |
2835 | 128 | entry.update({'addresses': addresses}) | 137 | entry.update({'addresses': addresses}) |
2836 | 129 | if len(routes) > 0: | 138 | if len(routes) > 0: |
2837 | @@ -262,10 +271,7 @@ class Renderer(renderer.Renderer): | |||
2838 | 262 | else: | 271 | else: |
2839 | 263 | del eth['match'] | 272 | del eth['match'] |
2840 | 264 | del eth['set-name'] | 273 | del eth['set-name'] |
2845 | 265 | if 'mtu' in ifcfg: | 274 | _extract_addresses(ifcfg, eth, ifname) |
2842 | 266 | eth['mtu'] = ifcfg.get('mtu') | ||
2843 | 267 | |||
2844 | 268 | _extract_addresses(ifcfg, eth) | ||
2846 | 269 | ethernets.update({ifname: eth}) | 275 | ethernets.update({ifname: eth}) |
2847 | 270 | 276 | ||
2848 | 271 | elif if_type == 'bond': | 277 | elif if_type == 'bond': |
2849 | @@ -288,7 +294,7 @@ class Renderer(renderer.Renderer): | |||
2850 | 288 | slave_interfaces = ifcfg.get('bond-slaves') | 294 | slave_interfaces = ifcfg.get('bond-slaves') |
2851 | 289 | if slave_interfaces == 'none': | 295 | if slave_interfaces == 'none': |
2852 | 290 | _extract_bond_slaves_by_name(interfaces, bond, ifname) | 296 | _extract_bond_slaves_by_name(interfaces, bond, ifname) |
2854 | 291 | _extract_addresses(ifcfg, bond) | 297 | _extract_addresses(ifcfg, bond, ifname) |
2855 | 292 | bonds.update({ifname: bond}) | 298 | bonds.update({ifname: bond}) |
2856 | 293 | 299 | ||
2857 | 294 | elif if_type == 'bridge': | 300 | elif if_type == 'bridge': |
2858 | @@ -321,7 +327,7 @@ class Renderer(renderer.Renderer): | |||
2859 | 321 | 327 | ||
2860 | 322 | if len(br_config) > 0: | 328 | if len(br_config) > 0: |
2861 | 323 | bridge.update({'parameters': br_config}) | 329 | bridge.update({'parameters': br_config}) |
2863 | 324 | _extract_addresses(ifcfg, bridge) | 330 | _extract_addresses(ifcfg, bridge, ifname) |
2864 | 325 | bridges.update({ifname: bridge}) | 331 | bridges.update({ifname: bridge}) |
2865 | 326 | 332 | ||
2866 | 327 | elif if_type == 'vlan': | 333 | elif if_type == 'vlan': |
2867 | @@ -333,7 +339,7 @@ class Renderer(renderer.Renderer): | |||
2868 | 333 | macaddr = ifcfg.get('mac_address', None) | 339 | macaddr = ifcfg.get('mac_address', None) |
2869 | 334 | if macaddr is not None: | 340 | if macaddr is not None: |
2870 | 335 | vlan['macaddress'] = macaddr.lower() | 341 | vlan['macaddress'] = macaddr.lower() |
2872 | 336 | _extract_addresses(ifcfg, vlan) | 342 | _extract_addresses(ifcfg, vlan, ifname) |
2873 | 337 | vlans.update({ifname: vlan}) | 343 | vlans.update({ifname: vlan}) |
2874 | 338 | 344 | ||
2875 | 339 | # inject global nameserver values under each all interface which | 345 | # inject global nameserver values under each all interface which |
2876 | diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py | |||
2877 | index 6d63e5c..72c803e 100644 | |||
2878 | --- a/cloudinit/net/network_state.py | |||
2879 | +++ b/cloudinit/net/network_state.py | |||
2880 | @@ -7,6 +7,8 @@ | |||
2881 | 7 | import copy | 7 | import copy |
2882 | 8 | import functools | 8 | import functools |
2883 | 9 | import logging | 9 | import logging |
2884 | 10 | import socket | ||
2885 | 11 | import struct | ||
2886 | 10 | 12 | ||
2887 | 11 | import six | 13 | import six |
2888 | 12 | 14 | ||
2889 | @@ -886,12 +888,9 @@ def net_prefix_to_ipv4_mask(prefix): | |||
2890 | 886 | This is the inverse of ipv4_mask_to_net_prefix. | 888 | This is the inverse of ipv4_mask_to_net_prefix. |
2891 | 887 | 24 -> "255.255.255.0" | 889 | 24 -> "255.255.255.0" |
2892 | 888 | Also supports input as a string.""" | 890 | Also supports input as a string.""" |
2899 | 889 | 891 | mask = socket.inet_ntoa( | |
2900 | 890 | mask = [0, 0, 0, 0] | 892 | struct.pack(">I", (0xffffffff << (32 - int(prefix)) & 0xffffffff))) |
2901 | 891 | for i in list(range(0, int(prefix))): | 893 | return mask |
2896 | 892 | idx = int(i / 8) | ||
2897 | 893 | mask[idx] = mask[idx] + (1 << (7 - i % 8)) | ||
2898 | 894 | return ".".join([str(x) for x in mask]) | ||
2902 | 895 | 894 | ||
2903 | 896 | 895 | ||
2904 | 897 | def ipv4_mask_to_net_prefix(mask): | 896 | def ipv4_mask_to_net_prefix(mask): |
2905 | diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py | |||
2906 | index 39d89c4..3d71923 100644 | |||
2907 | --- a/cloudinit/net/sysconfig.py | |||
2908 | +++ b/cloudinit/net/sysconfig.py | |||
2909 | @@ -287,7 +287,6 @@ class Renderer(renderer.Renderer): | |||
2910 | 287 | if subnet_type == 'dhcp6': | 287 | if subnet_type == 'dhcp6': |
2911 | 288 | iface_cfg['IPV6INIT'] = True | 288 | iface_cfg['IPV6INIT'] = True |
2912 | 289 | iface_cfg['DHCPV6C'] = True | 289 | iface_cfg['DHCPV6C'] = True |
2913 | 290 | iface_cfg['BOOTPROTO'] = 'dhcp' | ||
2914 | 291 | elif subnet_type in ['dhcp4', 'dhcp']: | 290 | elif subnet_type in ['dhcp4', 'dhcp']: |
2915 | 292 | iface_cfg['BOOTPROTO'] = 'dhcp' | 291 | iface_cfg['BOOTPROTO'] = 'dhcp' |
2916 | 293 | elif subnet_type == 'static': | 292 | elif subnet_type == 'static': |
2917 | @@ -305,6 +304,13 @@ class Renderer(renderer.Renderer): | |||
2918 | 305 | mtu_key = 'IPV6_MTU' | 304 | mtu_key = 'IPV6_MTU' |
2919 | 306 | iface_cfg['IPV6INIT'] = True | 305 | iface_cfg['IPV6INIT'] = True |
2920 | 307 | if 'mtu' in subnet: | 306 | if 'mtu' in subnet: |
2921 | 307 | mtu_mismatch = bool(mtu_key in iface_cfg and | ||
2922 | 308 | subnet['mtu'] != iface_cfg[mtu_key]) | ||
2923 | 309 | if mtu_mismatch: | ||
2924 | 310 | LOG.warning( | ||
2925 | 311 | 'Network config: ignoring %s device-level mtu:%s' | ||
2926 | 312 | ' because ipv4 subnet-level mtu:%s provided.', | ||
2927 | 313 | iface_cfg.name, iface_cfg[mtu_key], subnet['mtu']) | ||
2928 | 308 | iface_cfg[mtu_key] = subnet['mtu'] | 314 | iface_cfg[mtu_key] = subnet['mtu'] |
2929 | 309 | elif subnet_type == 'manual': | 315 | elif subnet_type == 'manual': |
2930 | 310 | # If the subnet has an MTU setting, then ONBOOT=True | 316 | # If the subnet has an MTU setting, then ONBOOT=True |
2931 | @@ -364,7 +370,7 @@ class Renderer(renderer.Renderer): | |||
2932 | 364 | 370 | ||
2933 | 365 | @classmethod | 371 | @classmethod |
2934 | 366 | def _render_subnet_routes(cls, iface_cfg, route_cfg, subnets): | 372 | def _render_subnet_routes(cls, iface_cfg, route_cfg, subnets): |
2936 | 367 | for i, subnet in enumerate(subnets, start=len(iface_cfg.children)): | 373 | for _, subnet in enumerate(subnets, start=len(iface_cfg.children)): |
2937 | 368 | for route in subnet.get('routes', []): | 374 | for route in subnet.get('routes', []): |
2938 | 369 | is_ipv6 = subnet.get('ipv6') or is_ipv6_addr(route['gateway']) | 375 | is_ipv6 = subnet.get('ipv6') or is_ipv6_addr(route['gateway']) |
2939 | 370 | 376 | ||
2940 | diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py | |||
2941 | index 276556e..5c017d1 100644 | |||
2942 | --- a/cloudinit/net/tests/test_init.py | |||
2943 | +++ b/cloudinit/net/tests/test_init.py | |||
2944 | @@ -199,6 +199,7 @@ class TestGenerateFallbackConfig(CiTestCase): | |||
2945 | 199 | self.sysdir = self.tmp_dir() + '/' | 199 | self.sysdir = self.tmp_dir() + '/' |
2946 | 200 | self.m_sys_path.return_value = self.sysdir | 200 | self.m_sys_path.return_value = self.sysdir |
2947 | 201 | self.addCleanup(sys_mock.stop) | 201 | self.addCleanup(sys_mock.stop) |
2948 | 202 | self.add_patch('cloudinit.net.util.udevadm_settle', 'm_settle') | ||
2949 | 202 | 203 | ||
2950 | 203 | def test_generate_fallback_finds_connected_eth_with_mac(self): | 204 | def test_generate_fallback_finds_connected_eth_with_mac(self): |
2951 | 204 | """generate_fallback_config finds any connected device with a mac.""" | 205 | """generate_fallback_config finds any connected device with a mac.""" |
2952 | diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py | |||
2953 | index 993b26c..9ff929c 100644 | |||
2954 | --- a/cloudinit/netinfo.py | |||
2955 | +++ b/cloudinit/netinfo.py | |||
2956 | @@ -8,9 +8,11 @@ | |||
2957 | 8 | # | 8 | # |
2958 | 9 | # This file is part of cloud-init. See LICENSE file for license information. | 9 | # This file is part of cloud-init. See LICENSE file for license information. |
2959 | 10 | 10 | ||
2960 | 11 | from copy import copy, deepcopy | ||
2961 | 11 | import re | 12 | import re |
2962 | 12 | 13 | ||
2963 | 13 | from cloudinit import log as logging | 14 | from cloudinit import log as logging |
2964 | 15 | from cloudinit.net.network_state import net_prefix_to_ipv4_mask | ||
2965 | 14 | from cloudinit import util | 16 | from cloudinit import util |
2966 | 15 | 17 | ||
2967 | 16 | from cloudinit.simpletable import SimpleTable | 18 | from cloudinit.simpletable import SimpleTable |
2968 | @@ -18,18 +20,90 @@ from cloudinit.simpletable import SimpleTable | |||
2969 | 18 | LOG = logging.getLogger() | 20 | LOG = logging.getLogger() |
2970 | 19 | 21 | ||
2971 | 20 | 22 | ||
2975 | 21 | def netdev_info(empty=""): | 23 | DEFAULT_NETDEV_INFO = { |
2976 | 22 | fields = ("hwaddr", "addr", "bcast", "mask") | 24 | "ipv4": [], |
2977 | 23 | (ifcfg_out, _err) = util.subp(["ifconfig", "-a"], rcs=[0, 1]) | 25 | "ipv6": [], |
2978 | 26 | "hwaddr": "", | ||
2979 | 27 | "up": False | ||
2980 | 28 | } | ||
2981 | 29 | |||
2982 | 30 | |||
2983 | 31 | def _netdev_info_iproute(ipaddr_out): | ||
2984 | 32 | """ | ||
2985 | 33 | Get network device dicts from ip route and ip link info. | ||
2986 | 34 | |||
2987 | 35 | @param ipaddr_out: Output string from 'ip addr show' command. | ||
2988 | 36 | |||
2989 | 37 | @returns: A dict of device info keyed by network device name containing | ||
2990 | 38 | device configuration values. | ||
2991 | 39 | @raise: TypeError if ipaddr_out isn't a string. | ||
2992 | 40 | """ | ||
2993 | 41 | devs = {} | ||
2994 | 42 | dev_name = None | ||
2995 | 43 | for num, line in enumerate(ipaddr_out.splitlines()): | ||
2996 | 44 | m = re.match(r'^\d+:\s(?P<dev>[^:]+):\s+<(?P<flags>\S+)>\s+.*', line) | ||
2997 | 45 | if m: | ||
2998 | 46 | dev_name = m.group('dev').lower().split('@')[0] | ||
2999 | 47 | flags = m.group('flags').split(',') | ||
3000 | 48 | devs[dev_name] = { | ||
3001 | 49 | 'ipv4': [], 'ipv6': [], 'hwaddr': '', | ||
3002 | 50 | 'up': bool('UP' in flags and 'LOWER_UP' in flags), | ||
3003 | 51 | } | ||
3004 | 52 | elif 'inet6' in line: | ||
3005 | 53 | m = re.match( | ||
3006 | 54 | r'\s+inet6\s(?P<ip>\S+)\sscope\s(?P<scope6>\S+).*', line) | ||
3007 | 55 | if not m: | ||
3008 | 56 | LOG.warning( | ||
3009 | 57 | 'Could not parse ip addr show: (line:%d) %s', num, line) | ||
3010 | 58 | continue | ||
3011 | 59 | devs[dev_name]['ipv6'].append(m.groupdict()) | ||
3012 | 60 | elif 'inet' in line: | ||
3013 | 61 | m = re.match( | ||
3014 | 62 | r'\s+inet\s(?P<cidr4>\S+)(\sbrd\s(?P<bcast>\S+))?\sscope\s' | ||
3015 | 63 | r'(?P<scope>\S+).*', line) | ||
3016 | 64 | if not m: | ||
3017 | 65 | LOG.warning( | ||
3018 | 66 | 'Could not parse ip addr show: (line:%d) %s', num, line) | ||
3019 | 67 | continue | ||
3020 | 68 | match = m.groupdict() | ||
3021 | 69 | cidr4 = match.pop('cidr4') | ||
3022 | 70 | addr, _, prefix = cidr4.partition('/') | ||
3023 | 71 | if not prefix: | ||
3024 | 72 | prefix = '32' | ||
3025 | 73 | devs[dev_name]['ipv4'].append({ | ||
3026 | 74 | 'ip': addr, | ||
3027 | 75 | 'bcast': match['bcast'] if match['bcast'] else '', | ||
3028 | 76 | 'mask': net_prefix_to_ipv4_mask(prefix), | ||
3029 | 77 | 'scope': match['scope']}) | ||
3030 | 78 | elif 'link' in line: | ||
3031 | 79 | m = re.match( | ||
3032 | 80 | r'\s+link/(?P<link_type>\S+)\s(?P<hwaddr>\S+).*', line) | ||
3033 | 81 | if not m: | ||
3034 | 82 | LOG.warning( | ||
3035 | 83 | 'Could not parse ip addr show: (line:%d) %s', num, line) | ||
3036 | 84 | continue | ||
3037 | 85 | if m.group('link_type') == 'ether': | ||
3038 | 86 | devs[dev_name]['hwaddr'] = m.group('hwaddr') | ||
3039 | 87 | else: | ||
3040 | 88 | devs[dev_name]['hwaddr'] = '' | ||
3041 | 89 | else: | ||
3042 | 90 | continue | ||
3043 | 91 | return devs | ||
3044 | 92 | |||
3045 | 93 | |||
3046 | 94 | def _netdev_info_ifconfig(ifconfig_data): | ||
3047 | 95 | # fields that need to be returned in devs for each dev | ||
3048 | 24 | devs = {} | 96 | devs = {} |
3050 | 25 | for line in str(ifcfg_out).splitlines(): | 97 | for line in ifconfig_data.splitlines(): |
3051 | 26 | if len(line) == 0: | 98 | if len(line) == 0: |
3052 | 27 | continue | 99 | continue |
3053 | 28 | if line[0] not in ("\t", " "): | 100 | if line[0] not in ("\t", " "): |
3054 | 29 | curdev = line.split()[0] | 101 | curdev = line.split()[0] |
3058 | 30 | devs[curdev] = {"up": False} | 102 | # current ifconfig pops a ':' on the end of the device |
3059 | 31 | for field in fields: | 103 | if curdev.endswith(':'): |
3060 | 32 | devs[curdev][field] = "" | 104 | curdev = curdev[:-1] |
3061 | 105 | if curdev not in devs: | ||
3062 | 106 | devs[curdev] = deepcopy(DEFAULT_NETDEV_INFO) | ||
3063 | 33 | toks = line.lower().strip().split() | 107 | toks = line.lower().strip().split() |
3064 | 34 | if toks[0] == "up": | 108 | if toks[0] == "up": |
3065 | 35 | devs[curdev]['up'] = True | 109 | devs[curdev]['up'] = True |
3066 | @@ -39,59 +113,164 @@ def netdev_info(empty=""): | |||
3067 | 39 | if re.search(r"flags=\d+<up,", toks[1]): | 113 | if re.search(r"flags=\d+<up,", toks[1]): |
3068 | 40 | devs[curdev]['up'] = True | 114 | devs[curdev]['up'] = True |
3069 | 41 | 115 | ||
3070 | 42 | fieldpost = "" | ||
3071 | 43 | if toks[0] == "inet6": | ||
3072 | 44 | fieldpost = "6" | ||
3073 | 45 | |||
3074 | 46 | for i in range(len(toks)): | 116 | for i in range(len(toks)): |
3111 | 47 | # older net-tools (ubuntu) show 'inet addr:xx.yy', | 117 | if toks[i] == "inet": # Create new ipv4 addr entry |
3112 | 48 | # newer (freebsd and fedora) show 'inet xx.yy' | 118 | devs[curdev]['ipv4'].append( |
3113 | 49 | # just skip this 'inet' entry. (LP: #1285185) | 119 | {'ip': toks[i + 1].lstrip("addr:")}) |
3114 | 50 | try: | 120 | elif toks[i].startswith("bcast:"): |
3115 | 51 | if ((toks[i] in ("inet", "inet6") and | 121 | devs[curdev]['ipv4'][-1]['bcast'] = toks[i].lstrip("bcast:") |
3116 | 52 | toks[i + 1].startswith("addr:"))): | 122 | elif toks[i] == "broadcast": |
3117 | 53 | continue | 123 | devs[curdev]['ipv4'][-1]['bcast'] = toks[i + 1] |
3118 | 54 | except IndexError: | 124 | elif toks[i].startswith("mask:"): |
3119 | 55 | pass | 125 | devs[curdev]['ipv4'][-1]['mask'] = toks[i].lstrip("mask:") |
3120 | 56 | 126 | elif toks[i] == "netmask": | |
3121 | 57 | # Couple the different items we're interested in with the correct | 127 | devs[curdev]['ipv4'][-1]['mask'] = toks[i + 1] |
3122 | 58 | # field since FreeBSD/CentOS/Fedora differ in the output. | 128 | elif toks[i] == "hwaddr" or toks[i] == "ether": |
3123 | 59 | ifconfigfields = { | 129 | devs[curdev]['hwaddr'] = toks[i + 1] |
3124 | 60 | "addr:": "addr", "inet": "addr", | 130 | elif toks[i] == "inet6": |
3125 | 61 | "bcast:": "bcast", "broadcast": "bcast", | 131 | if toks[i + 1] == "addr:": |
3126 | 62 | "mask:": "mask", "netmask": "mask", | 132 | devs[curdev]['ipv6'].append({'ip': toks[i + 2]}) |
3127 | 63 | "hwaddr": "hwaddr", "ether": "hwaddr", | 133 | else: |
3128 | 64 | "scope": "scope", | 134 | devs[curdev]['ipv6'].append({'ip': toks[i + 1]}) |
3129 | 65 | } | 135 | elif toks[i] == "prefixlen": # Add prefix to current ipv6 value |
3130 | 66 | for origfield, field in ifconfigfields.items(): | 136 | addr6 = devs[curdev]['ipv6'][-1]['ip'] + "/" + toks[i + 1] |
3131 | 67 | target = "%s%s" % (field, fieldpost) | 137 | devs[curdev]['ipv6'][-1]['ip'] = addr6 |
3132 | 68 | if devs[curdev].get(target, ""): | 138 | elif toks[i].startswith("scope:"): |
3133 | 69 | continue | 139 | devs[curdev]['ipv6'][-1]['scope6'] = toks[i].lstrip("scope:") |
3134 | 70 | if toks[i] == "%s" % origfield: | 140 | elif toks[i] == "scopeid": |
3135 | 71 | try: | 141 | res = re.match(r'.*<(\S+)>', toks[i + 1]) |
3136 | 72 | devs[curdev][target] = toks[i + 1] | 142 | if res: |
3137 | 73 | except IndexError: | 143 | devs[curdev]['ipv6'][-1]['scope6'] = res.group(1) |
3138 | 74 | pass | 144 | return devs |
3139 | 75 | elif toks[i].startswith("%s" % origfield): | 145 | |
3140 | 76 | devs[curdev][target] = toks[i][len(field) + 1:] | 146 | |
3141 | 77 | 147 | def netdev_info(empty=""): | |
3142 | 78 | if empty != "": | 148 | devs = {} |
3143 | 79 | for (_devname, dev) in devs.items(): | 149 | if util.which('ip'): |
3144 | 80 | for field in dev: | 150 | # Try iproute first of all |
3145 | 81 | if dev[field] == "": | 151 | (ipaddr_out, _err) = util.subp(["ip", "addr", "show"]) |
3146 | 82 | dev[field] = empty | 152 | devs = _netdev_info_iproute(ipaddr_out) |
3147 | 153 | elif util.which('ifconfig'): | ||
3148 | 154 | # Fall back to net-tools if iproute2 is not present | ||
3149 | 155 | (ifcfg_out, _err) = util.subp(["ifconfig", "-a"], rcs=[0, 1]) | ||
3150 | 156 | devs = _netdev_info_ifconfig(ifcfg_out) | ||
3151 | 157 | else: | ||
3152 | 158 | LOG.warning( | ||
3153 | 159 | "Could not print networks: missing 'ip' and 'ifconfig' commands") | ||
3154 | 83 | 160 | ||
3155 | 161 | if empty == "": | ||
3156 | 162 | return devs | ||
3157 | 163 | |||
3158 | 164 | recurse_types = (dict, tuple, list) | ||
3159 | 165 | |||
3160 | 166 | def fill(data, new_val="", empty_vals=("", b"")): | ||
3161 | 167 | """Recursively replace 'empty_vals' in data (dict, tuple, list) | ||
3162 | 168 | with new_val""" | ||
3163 | 169 | if isinstance(data, dict): | ||
3164 | 170 | myiter = data.items() | ||
3165 | 171 | elif isinstance(data, (tuple, list)): | ||
3166 | 172 | myiter = enumerate(data) | ||
3167 | 173 | else: | ||
3168 | 174 | raise TypeError("Unexpected input to fill") | ||
3169 | 175 | |||
3170 | 176 | for key, val in myiter: | ||
3171 | 177 | if val in empty_vals: | ||
3172 | 178 | data[key] = new_val | ||
3173 | 179 | elif isinstance(val, recurse_types): | ||
3174 | 180 | fill(val, new_val) | ||
3175 | 181 | |||
3176 | 182 | fill(devs, new_val=empty) | ||
3177 | 84 | return devs | 183 | return devs |
3178 | 85 | 184 | ||
3179 | 86 | 185 | ||
3182 | 87 | def route_info(): | 186 | def _netdev_route_info_iproute(iproute_data): |
3183 | 88 | (route_out, _err) = util.subp(["netstat", "-rn"], rcs=[0, 1]) | 187 | """ |
3184 | 188 | Get network route dicts from ip route info. | ||
3185 | 189 | |||
3186 | 190 | @param iproute_data: Output string from ip route command. | ||
3187 | 191 | |||
3188 | 192 | @returns: A dict containing ipv4 and ipv6 route entries as lists. Each | ||
3189 | 193 | item in the list is a route dictionary representing destination, | ||
3190 | 194 | gateway, flags, genmask and interface information. | ||
3191 | 195 | """ | ||
3192 | 196 | |||
3193 | 197 | routes = {} | ||
3194 | 198 | routes['ipv4'] = [] | ||
3195 | 199 | routes['ipv6'] = [] | ||
3196 | 200 | entries = iproute_data.splitlines() | ||
3197 | 201 | default_route_entry = { | ||
3198 | 202 | 'destination': '', 'flags': '', 'gateway': '', 'genmask': '', | ||
3199 | 203 | 'iface': '', 'metric': ''} | ||
3200 | 204 | for line in entries: | ||
3201 | 205 | entry = copy(default_route_entry) | ||
3202 | 206 | if not line: | ||
3203 | 207 | continue | ||
3204 | 208 | toks = line.split() | ||
3205 | 209 | flags = ['U'] | ||
3206 | 210 | if toks[0] == "default": | ||
3207 | 211 | entry['destination'] = "0.0.0.0" | ||
3208 | 212 | entry['genmask'] = "0.0.0.0" | ||
3209 | 213 | else: | ||
3210 | 214 | if '/' in toks[0]: | ||
3211 | 215 | (addr, cidr) = toks[0].split("/") | ||
3212 | 216 | else: | ||
3213 | 217 | addr = toks[0] | ||
3214 | 218 | cidr = '32' | ||
3215 | 219 | flags.append("H") | ||
3216 | 220 | entry['genmask'] = net_prefix_to_ipv4_mask(cidr) | ||
3217 | 221 | entry['destination'] = addr | ||
3218 | 222 | entry['genmask'] = net_prefix_to_ipv4_mask(cidr) | ||
3219 | 223 | entry['gateway'] = "0.0.0.0" | ||
3220 | 224 | for i in range(len(toks)): | ||
3221 | 225 | if toks[i] == "via": | ||
3222 | 226 | entry['gateway'] = toks[i + 1] | ||
3223 | 227 | flags.insert(1, "G") | ||
3224 | 228 | if toks[i] == "dev": | ||
3225 | 229 | entry["iface"] = toks[i + 1] | ||
3226 | 230 | if toks[i] == "metric": | ||
3227 | 231 | entry['metric'] = toks[i + 1] | ||
3228 | 232 | entry['flags'] = ''.join(flags) | ||
3229 | 233 | routes['ipv4'].append(entry) | ||
3230 | 234 | try: | ||
3231 | 235 | (iproute_data6, _err6) = util.subp( | ||
3232 | 236 | ["ip", "--oneline", "-6", "route", "list", "table", "all"], | ||
3233 | 237 | rcs=[0, 1]) | ||
3234 | 238 | except util.ProcessExecutionError: | ||
3235 | 239 | pass | ||
3236 | 240 | else: | ||
3237 | 241 | entries6 = iproute_data6.splitlines() | ||
3238 | 242 | for line in entries6: | ||
3239 | 243 | entry = {} | ||
3240 | 244 | if not line: | ||
3241 | 245 | continue | ||
3242 | 246 | toks = line.split() | ||
3243 | 247 | if toks[0] == "default": | ||
3244 | 248 | entry['destination'] = "::/0" | ||
3245 | 249 | entry['flags'] = "UG" | ||
3246 | 250 | else: | ||
3247 | 251 | entry['destination'] = toks[0] | ||
3248 | 252 | entry['gateway'] = "::" | ||
3249 | 253 | entry['flags'] = "U" | ||
3250 | 254 | for i in range(len(toks)): | ||
3251 | 255 | if toks[i] == "via": | ||
3252 | 256 | entry['gateway'] = toks[i + 1] | ||
3253 | 257 | entry['flags'] = "UG" | ||
3254 | 258 | if toks[i] == "dev": | ||
3255 | 259 | entry["iface"] = toks[i + 1] | ||
3256 | 260 | if toks[i] == "metric": | ||
3257 | 261 | entry['metric'] = toks[i + 1] | ||
3258 | 262 | if toks[i] == "expires": | ||
3259 | 263 | entry['flags'] = entry['flags'] + 'e' | ||
3260 | 264 | routes['ipv6'].append(entry) | ||
3261 | 265 | return routes | ||
3262 | 266 | |||
3263 | 89 | 267 | ||
3264 | 268 | def _netdev_route_info_netstat(route_data): | ||
3265 | 90 | routes = {} | 269 | routes = {} |
3266 | 91 | routes['ipv4'] = [] | 270 | routes['ipv4'] = [] |
3267 | 92 | routes['ipv6'] = [] | 271 | routes['ipv6'] = [] |
3268 | 93 | 272 | ||
3270 | 94 | entries = route_out.splitlines()[1:] | 273 | entries = route_data.splitlines() |
3271 | 95 | for line in entries: | 274 | for line in entries: |
3272 | 96 | if not line: | 275 | if not line: |
3273 | 97 | continue | 276 | continue |
3274 | @@ -101,8 +280,8 @@ def route_info(): | |||
3275 | 101 | # default 10.65.0.1 UGS 0 34920 vtnet0 | 280 | # default 10.65.0.1 UGS 0 34920 vtnet0 |
3276 | 102 | # | 281 | # |
3277 | 103 | # Linux netstat shows 2 more: | 282 | # Linux netstat shows 2 more: |
3280 | 104 | # Destination Gateway Genmask Flags MSS Window irtt Iface | 283 | # Destination Gateway Genmask Flags Metric Ref Use Iface |
3281 | 105 | # 0.0.0.0 10.65.0.1 0.0.0.0 UG 0 0 0 eth0 | 284 | # 0.0.0.0 10.65.0.1 0.0.0.0 UG 0 0 0 eth0 |
3282 | 106 | if (len(toks) < 6 or toks[0] == "Kernel" or | 285 | if (len(toks) < 6 or toks[0] == "Kernel" or |
3283 | 107 | toks[0] == "Destination" or toks[0] == "Internet" or | 286 | toks[0] == "Destination" or toks[0] == "Internet" or |
3284 | 108 | toks[0] == "Internet6" or toks[0] == "Routing"): | 287 | toks[0] == "Internet6" or toks[0] == "Routing"): |
3285 | @@ -125,31 +304,57 @@ def route_info(): | |||
3286 | 125 | routes['ipv4'].append(entry) | 304 | routes['ipv4'].append(entry) |
3287 | 126 | 305 | ||
3288 | 127 | try: | 306 | try: |
3291 | 128 | (route_out6, _err6) = util.subp(["netstat", "-A", "inet6", "-n"], | 307 | (route_data6, _err6) = util.subp( |
3292 | 129 | rcs=[0, 1]) | 308 | ["netstat", "-A", "inet6", "--route", "--numeric"], rcs=[0, 1]) |
3293 | 130 | except util.ProcessExecutionError: | 309 | except util.ProcessExecutionError: |
3294 | 131 | pass | 310 | pass |
3295 | 132 | else: | 311 | else: |
3297 | 133 | entries6 = route_out6.splitlines()[1:] | 312 | entries6 = route_data6.splitlines() |
3298 | 134 | for line in entries6: | 313 | for line in entries6: |
3299 | 135 | if not line: | 314 | if not line: |
3300 | 136 | continue | 315 | continue |
3301 | 137 | toks = line.split() | 316 | toks = line.split() |
3303 | 138 | if (len(toks) < 6 or toks[0] == "Kernel" or | 317 | if (len(toks) < 7 or toks[0] == "Kernel" or |
3304 | 318 | toks[0] == "Destination" or toks[0] == "Internet" or | ||
3305 | 139 | toks[0] == "Proto" or toks[0] == "Active"): | 319 | toks[0] == "Proto" or toks[0] == "Active"): |
3306 | 140 | continue | 320 | continue |
3307 | 141 | entry = { | 321 | entry = { |
3314 | 142 | 'proto': toks[0], | 322 | 'destination': toks[0], |
3315 | 143 | 'recv-q': toks[1], | 323 | 'gateway': toks[1], |
3316 | 144 | 'send-q': toks[2], | 324 | 'flags': toks[2], |
3317 | 145 | 'local address': toks[3], | 325 | 'metric': toks[3], |
3318 | 146 | 'foreign address': toks[4], | 326 | 'ref': toks[4], |
3319 | 147 | 'state': toks[5], | 327 | 'use': toks[5], |
3320 | 328 | 'iface': toks[6], | ||
3321 | 148 | } | 329 | } |
3322 | 330 | # skip lo interface on ipv6 | ||
3323 | 331 | if entry['iface'] == "lo": | ||
3324 | 332 | continue | ||
3325 | 333 | # strip /128 from address if it's included | ||
3326 | 334 | if entry['destination'].endswith('/128'): | ||
3327 | 335 | entry['destination'] = re.sub( | ||
3328 | 336 | r'\/128$', '', entry['destination']) | ||
3329 | 149 | routes['ipv6'].append(entry) | 337 | routes['ipv6'].append(entry) |
3330 | 150 | return routes | 338 | return routes |
3331 | 151 | 339 | ||
3332 | 152 | 340 | ||
3333 | 341 | def route_info(): | ||
3334 | 342 | routes = {} | ||
3335 | 343 | if util.which('ip'): | ||
3336 | 344 | # Try iproute first of all | ||
3337 | 345 | (iproute_out, _err) = util.subp(["ip", "-o", "route", "list"]) | ||
3338 | 346 | routes = _netdev_route_info_iproute(iproute_out) | ||
3339 | 347 | elif util.which('netstat'): | ||
3340 | 348 | # Fall back to net-tools if iproute2 is not present | ||
3341 | 349 | (route_out, _err) = util.subp( | ||
3342 | 350 | ["netstat", "--route", "--numeric", "--extend"], rcs=[0, 1]) | ||
3343 | 351 | routes = _netdev_route_info_netstat(route_out) | ||
3344 | 352 | else: | ||
3345 | 353 | LOG.warning( | ||
3346 | 354 | "Could not print routes: missing 'ip' and 'netstat' commands") | ||
3347 | 355 | return routes | ||
3348 | 356 | |||
3349 | 357 | |||
3350 | 153 | def getgateway(): | 358 | def getgateway(): |
3351 | 154 | try: | 359 | try: |
3352 | 155 | routes = route_info() | 360 | routes = route_info() |
3353 | @@ -164,23 +369,36 @@ def getgateway(): | |||
3354 | 164 | 369 | ||
3355 | 165 | def netdev_pformat(): | 370 | def netdev_pformat(): |
3356 | 166 | lines = [] | 371 | lines = [] |
3357 | 372 | empty = "." | ||
3358 | 167 | try: | 373 | try: |
3362 | 168 | netdev = netdev_info(empty=".") | 374 | netdev = netdev_info(empty=empty) |
3363 | 169 | except Exception: | 375 | except Exception as e: |
3364 | 170 | lines.append(util.center("Net device info failed", '!', 80)) | 376 | lines.append( |
3365 | 377 | util.center( | ||
3366 | 378 | "Net device info failed ({error})".format(error=str(e)), | ||
3367 | 379 | '!', 80)) | ||
3368 | 171 | else: | 380 | else: |
3369 | 381 | if not netdev: | ||
3370 | 382 | return '\n' | ||
3371 | 172 | fields = ['Device', 'Up', 'Address', 'Mask', 'Scope', 'Hw-Address'] | 383 | fields = ['Device', 'Up', 'Address', 'Mask', 'Scope', 'Hw-Address'] |
3372 | 173 | tbl = SimpleTable(fields) | 384 | tbl = SimpleTable(fields) |
3378 | 174 | for (dev, d) in sorted(netdev.items()): | 385 | for (dev, data) in sorted(netdev.items()): |
3379 | 175 | tbl.add_row([dev, d["up"], d["addr"], d["mask"], ".", d["hwaddr"]]) | 386 | for addr in data.get('ipv4'): |
3380 | 176 | if d.get('addr6'): | 387 | tbl.add_row( |
3381 | 177 | tbl.add_row([dev, d["up"], | 388 | (dev, data["up"], addr["ip"], addr["mask"], |
3382 | 178 | d["addr6"], ".", d.get("scope6"), d["hwaddr"]]) | 389 | addr.get('scope', empty), data["hwaddr"])) |
3383 | 390 | for addr in data.get('ipv6'): | ||
3384 | 391 | tbl.add_row( | ||
3385 | 392 | (dev, data["up"], addr["ip"], empty, addr["scope6"], | ||
3386 | 393 | data["hwaddr"])) | ||
3387 | 394 | if len(data.get('ipv6')) + len(data.get('ipv4')) == 0: | ||
3388 | 395 | tbl.add_row((dev, data["up"], empty, empty, empty, | ||
3389 | 396 | data["hwaddr"])) | ||
3390 | 179 | netdev_s = tbl.get_string() | 397 | netdev_s = tbl.get_string() |
3391 | 180 | max_len = len(max(netdev_s.splitlines(), key=len)) | 398 | max_len = len(max(netdev_s.splitlines(), key=len)) |
3392 | 181 | header = util.center("Net device info", "+", max_len) | 399 | header = util.center("Net device info", "+", max_len) |
3393 | 182 | lines.extend([header, netdev_s]) | 400 | lines.extend([header, netdev_s]) |
3395 | 183 | return "\n".join(lines) | 401 | return "\n".join(lines) + "\n" |
3396 | 184 | 402 | ||
3397 | 185 | 403 | ||
3398 | 186 | def route_pformat(): | 404 | def route_pformat(): |
3399 | @@ -188,7 +406,10 @@ def route_pformat(): | |||
3400 | 188 | try: | 406 | try: |
3401 | 189 | routes = route_info() | 407 | routes = route_info() |
3402 | 190 | except Exception as e: | 408 | except Exception as e: |
3404 | 191 | lines.append(util.center('Route info failed', '!', 80)) | 409 | lines.append( |
3405 | 410 | util.center( | ||
3406 | 411 | 'Route info failed ({error})'.format(error=str(e)), | ||
3407 | 412 | '!', 80)) | ||
3408 | 192 | util.logexc(LOG, "Route info failed: %s" % e) | 413 | util.logexc(LOG, "Route info failed: %s" % e) |
3409 | 193 | else: | 414 | else: |
3410 | 194 | if routes.get('ipv4'): | 415 | if routes.get('ipv4'): |
3411 | @@ -205,20 +426,20 @@ def route_pformat(): | |||
3412 | 205 | header = util.center("Route IPv4 info", "+", max_len) | 426 | header = util.center("Route IPv4 info", "+", max_len) |
3413 | 206 | lines.extend([header, route_s]) | 427 | lines.extend([header, route_s]) |
3414 | 207 | if routes.get('ipv6'): | 428 | if routes.get('ipv6'): |
3417 | 208 | fields_v6 = ['Route', 'Proto', 'Recv-Q', 'Send-Q', | 429 | fields_v6 = ['Route', 'Destination', 'Gateway', 'Interface', |
3418 | 209 | 'Local Address', 'Foreign Address', 'State'] | 430 | 'Flags'] |
3419 | 210 | tbl_v6 = SimpleTable(fields_v6) | 431 | tbl_v6 = SimpleTable(fields_v6) |
3420 | 211 | for (n, r) in enumerate(routes.get('ipv6')): | 432 | for (n, r) in enumerate(routes.get('ipv6')): |
3421 | 212 | route_id = str(n) | 433 | route_id = str(n) |
3426 | 213 | tbl_v6.add_row([route_id, r['proto'], | 434 | if r['iface'] == 'lo': |
3427 | 214 | r['recv-q'], r['send-q'], | 435 | continue |
3428 | 215 | r['local address'], r['foreign address'], | 436 | tbl_v6.add_row([route_id, r['destination'], |
3429 | 216 | r['state']]) | 437 | r['gateway'], r['iface'], r['flags']]) |
3430 | 217 | route_s = tbl_v6.get_string() | 438 | route_s = tbl_v6.get_string() |
3431 | 218 | max_len = len(max(route_s.splitlines(), key=len)) | 439 | max_len = len(max(route_s.splitlines(), key=len)) |
3432 | 219 | header = util.center("Route IPv6 info", "+", max_len) | 440 | header = util.center("Route IPv6 info", "+", max_len) |
3433 | 220 | lines.extend([header, route_s]) | 441 | lines.extend([header, route_s]) |
3435 | 221 | return "\n".join(lines) | 442 | return "\n".join(lines) + "\n" |
3436 | 222 | 443 | ||
3437 | 223 | 444 | ||
3438 | 224 | def debug_info(prefix='ci-info: '): | 445 | def debug_info(prefix='ci-info: '): |
3439 | diff --git a/cloudinit/reporting/events.py b/cloudinit/reporting/events.py | |||
3440 | index 4f62d2f..e5dfab3 100644 | |||
3441 | --- a/cloudinit/reporting/events.py | |||
3442 | +++ b/cloudinit/reporting/events.py | |||
3443 | @@ -192,7 +192,7 @@ class ReportEventStack(object): | |||
3444 | 192 | 192 | ||
3445 | 193 | def _childrens_finish_info(self): | 193 | def _childrens_finish_info(self): |
3446 | 194 | for cand_result in (status.FAIL, status.WARN): | 194 | for cand_result in (status.FAIL, status.WARN): |
3448 | 195 | for name, (value, msg) in self.children.items(): | 195 | for _name, (value, _msg) in self.children.items(): |
3449 | 196 | if value == cand_result: | 196 | if value == cand_result: |
3450 | 197 | return (value, self.message) | 197 | return (value, self.message) |
3451 | 198 | return (self.result, self.message) | 198 | return (self.result, self.message) |
3452 | diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py | |||
3453 | index 22279d0..858e082 100644 | |||
3454 | --- a/cloudinit/sources/DataSourceAliYun.py | |||
3455 | +++ b/cloudinit/sources/DataSourceAliYun.py | |||
3456 | @@ -45,7 +45,7 @@ def _is_aliyun(): | |||
3457 | 45 | 45 | ||
3458 | 46 | def parse_public_keys(public_keys): | 46 | def parse_public_keys(public_keys): |
3459 | 47 | keys = [] | 47 | keys = [] |
3461 | 48 | for key_id, key_body in public_keys.items(): | 48 | for _key_id, key_body in public_keys.items(): |
3462 | 49 | if isinstance(key_body, str): | 49 | if isinstance(key_body, str): |
3463 | 50 | keys.append(key_body.strip()) | 50 | keys.append(key_body.strip()) |
3464 | 51 | elif isinstance(key_body, list): | 51 | elif isinstance(key_body, list): |
3465 | diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py | |||
3466 | index e1d0055..24fd65f 100644 | |||
3467 | --- a/cloudinit/sources/DataSourceAltCloud.py | |||
3468 | +++ b/cloudinit/sources/DataSourceAltCloud.py | |||
3469 | @@ -29,7 +29,6 @@ CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info' | |||
3470 | 29 | 29 | ||
3471 | 30 | # Shell command lists | 30 | # Shell command lists |
3472 | 31 | CMD_PROBE_FLOPPY = ['modprobe', 'floppy'] | 31 | CMD_PROBE_FLOPPY = ['modprobe', 'floppy'] |
3473 | 32 | CMD_UDEVADM_SETTLE = ['udevadm', 'settle', '--timeout=5'] | ||
3474 | 33 | 32 | ||
3475 | 34 | META_DATA_NOT_SUPPORTED = { | 33 | META_DATA_NOT_SUPPORTED = { |
3476 | 35 | 'block-device-mapping': {}, | 34 | 'block-device-mapping': {}, |
3477 | @@ -185,26 +184,24 @@ class DataSourceAltCloud(sources.DataSource): | |||
3478 | 185 | cmd = CMD_PROBE_FLOPPY | 184 | cmd = CMD_PROBE_FLOPPY |
3479 | 186 | (cmd_out, _err) = util.subp(cmd) | 185 | (cmd_out, _err) = util.subp(cmd) |
3480 | 187 | LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out) | 186 | LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out) |
3483 | 188 | except ProcessExecutionError as _err: | 187 | except ProcessExecutionError as e: |
3484 | 189 | util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err) | 188 | util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e) |
3485 | 190 | return False | 189 | return False |
3488 | 191 | except OSError as _err: | 190 | except OSError as e: |
3489 | 192 | util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err) | 191 | util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e) |
3490 | 193 | return False | 192 | return False |
3491 | 194 | 193 | ||
3492 | 195 | floppy_dev = '/dev/fd0' | 194 | floppy_dev = '/dev/fd0' |
3493 | 196 | 195 | ||
3494 | 197 | # udevadm settle for floppy device | 196 | # udevadm settle for floppy device |
3495 | 198 | try: | 197 | try: |
3499 | 199 | cmd = CMD_UDEVADM_SETTLE | 198 | (cmd_out, _err) = util.udevadm_settle(exists=floppy_dev, timeout=5) |
3497 | 200 | cmd.append('--exit-if-exists=' + floppy_dev) | ||
3498 | 201 | (cmd_out, _err) = util.subp(cmd) | ||
3500 | 202 | LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out) | 199 | LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out) |
3503 | 203 | except ProcessExecutionError as _err: | 200 | except ProcessExecutionError as e: |
3504 | 204 | util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err) | 201 | util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e) |
3505 | 205 | return False | 202 | return False |
3508 | 206 | except OSError as _err: | 203 | except OSError as e: |
3509 | 207 | util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err) | 204 | util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e) |
3510 | 208 | return False | 205 | return False |
3511 | 209 | 206 | ||
3512 | 210 | try: | 207 | try: |
3513 | diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py | |||
3514 | index 0ee622e..7007d9e 100644 | |||
3515 | --- a/cloudinit/sources/DataSourceAzure.py | |||
3516 | +++ b/cloudinit/sources/DataSourceAzure.py | |||
3517 | @@ -48,6 +48,7 @@ DEFAULT_FS = 'ext4' | |||
3518 | 48 | # DMI chassis-asset-tag is set static for all azure instances | 48 | # DMI chassis-asset-tag is set static for all azure instances |
3519 | 49 | AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77' | 49 | AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77' |
3520 | 50 | REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds" | 50 | REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds" |
3521 | 51 | REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready" | ||
3522 | 51 | IMDS_URL = "http://169.254.169.254/metadata/reprovisiondata" | 52 | IMDS_URL = "http://169.254.169.254/metadata/reprovisiondata" |
3523 | 52 | 53 | ||
3524 | 53 | 54 | ||
3525 | @@ -107,31 +108,24 @@ def find_dev_from_busdev(camcontrol_out, busdev): | |||
3526 | 107 | return None | 108 | return None |
3527 | 108 | 109 | ||
3528 | 109 | 110 | ||
3530 | 110 | def get_dev_storvsc_sysctl(): | 111 | def execute_or_debug(cmd, fail_ret=None): |
3531 | 111 | try: | 112 | try: |
3533 | 112 | sysctl_out, err = util.subp(['sysctl', 'dev.storvsc']) | 113 | return util.subp(cmd)[0] |
3534 | 113 | except util.ProcessExecutionError: | 114 | except util.ProcessExecutionError: |
3538 | 114 | LOG.debug("Fail to execute sysctl dev.storvsc") | 115 | LOG.debug("Failed to execute: %s", ' '.join(cmd)) |
3539 | 115 | sysctl_out = "" | 116 | return fail_ret |
3540 | 116 | return sysctl_out | 117 | |
3541 | 118 | |||
3542 | 119 | def get_dev_storvsc_sysctl(): | ||
3543 | 120 | return execute_or_debug(["sysctl", "dev.storvsc"], fail_ret="") | ||
3544 | 117 | 121 | ||
3545 | 118 | 122 | ||
3546 | 119 | def get_camcontrol_dev_bus(): | 123 | def get_camcontrol_dev_bus(): |
3553 | 120 | try: | 124 | return execute_or_debug(['camcontrol', 'devlist', '-b']) |
3548 | 121 | camcontrol_b_out, err = util.subp(['camcontrol', 'devlist', '-b']) | ||
3549 | 122 | except util.ProcessExecutionError: | ||
3550 | 123 | LOG.debug("Fail to execute camcontrol devlist -b") | ||
3551 | 124 | return None | ||
3552 | 125 | return camcontrol_b_out | ||
3554 | 126 | 125 | ||
3555 | 127 | 126 | ||
3556 | 128 | def get_camcontrol_dev(): | 127 | def get_camcontrol_dev(): |
3563 | 129 | try: | 128 | return execute_or_debug(['camcontrol', 'devlist']) |
3558 | 130 | camcontrol_out, err = util.subp(['camcontrol', 'devlist']) | ||
3559 | 131 | except util.ProcessExecutionError: | ||
3560 | 132 | LOG.debug("Fail to execute camcontrol devlist") | ||
3561 | 133 | return None | ||
3562 | 134 | return camcontrol_out | ||
3564 | 135 | 129 | ||
3565 | 136 | 130 | ||
3566 | 137 | def get_resource_disk_on_freebsd(port_id): | 131 | def get_resource_disk_on_freebsd(port_id): |
3567 | @@ -214,6 +208,7 @@ BUILTIN_CLOUD_CONFIG = { | |||
3568 | 214 | } | 208 | } |
3569 | 215 | 209 | ||
3570 | 216 | DS_CFG_PATH = ['datasource', DS_NAME] | 210 | DS_CFG_PATH = ['datasource', DS_NAME] |
3571 | 211 | DS_CFG_KEY_PRESERVE_NTFS = 'never_destroy_ntfs' | ||
3572 | 217 | DEF_EPHEMERAL_LABEL = 'Temporary Storage' | 212 | DEF_EPHEMERAL_LABEL = 'Temporary Storage' |
3573 | 218 | 213 | ||
3574 | 219 | # The redacted password fails to meet password complexity requirements | 214 | # The redacted password fails to meet password complexity requirements |
3575 | @@ -400,14 +395,9 @@ class DataSourceAzure(sources.DataSource): | |||
3576 | 400 | if found == ddir: | 395 | if found == ddir: |
3577 | 401 | LOG.debug("using files cached in %s", ddir) | 396 | LOG.debug("using files cached in %s", ddir) |
3578 | 402 | 397 | ||
3587 | 403 | # azure / hyper-v provides random data here | 398 | seed = _get_random_seed() |
3588 | 404 | # TODO. find the seed on FreeBSD platform | 399 | if seed: |
3589 | 405 | # now update ds_cfg to reflect contents pass in config | 400 | self.metadata['random_seed'] = seed |
3582 | 406 | if not util.is_FreeBSD(): | ||
3583 | 407 | seed = util.load_file("/sys/firmware/acpi/tables/OEM0", | ||
3584 | 408 | quiet=True, decode=False) | ||
3585 | 409 | if seed: | ||
3586 | 410 | self.metadata['random_seed'] = seed | ||
3590 | 411 | 401 | ||
3591 | 412 | user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {}) | 402 | user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {}) |
3592 | 413 | self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg]) | 403 | self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg]) |
3593 | @@ -443,11 +433,12 @@ class DataSourceAzure(sources.DataSource): | |||
3594 | 443 | LOG.debug("negotiating already done for %s", | 433 | LOG.debug("negotiating already done for %s", |
3595 | 444 | self.get_instance_id()) | 434 | self.get_instance_id()) |
3596 | 445 | 435 | ||
3598 | 446 | def _poll_imds(self, report_ready=True): | 436 | def _poll_imds(self): |
3599 | 447 | """Poll IMDS for the new provisioning data until we get a valid | 437 | """Poll IMDS for the new provisioning data until we get a valid |
3600 | 448 | response. Then return the returned JSON object.""" | 438 | response. Then return the returned JSON object.""" |
3601 | 449 | url = IMDS_URL + "?api-version=2017-04-02" | 439 | url = IMDS_URL + "?api-version=2017-04-02" |
3602 | 450 | headers = {"Metadata": "true"} | 440 | headers = {"Metadata": "true"} |
3603 | 441 | report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE)) | ||
3604 | 451 | LOG.debug("Start polling IMDS") | 442 | LOG.debug("Start polling IMDS") |
3605 | 452 | 443 | ||
3606 | 453 | def exc_cb(msg, exception): | 444 | def exc_cb(msg, exception): |
3607 | @@ -457,13 +448,17 @@ class DataSourceAzure(sources.DataSource): | |||
3608 | 457 | # call DHCP and setup the ephemeral network to acquire the new IP. | 448 | # call DHCP and setup the ephemeral network to acquire the new IP. |
3609 | 458 | return False | 449 | return False |
3610 | 459 | 450 | ||
3611 | 460 | need_report = report_ready | ||
3612 | 461 | while True: | 451 | while True: |
3613 | 462 | try: | 452 | try: |
3614 | 463 | with EphemeralDHCPv4() as lease: | 453 | with EphemeralDHCPv4() as lease: |
3616 | 464 | if need_report: | 454 | if report_ready: |
3617 | 455 | path = REPORTED_READY_MARKER_FILE | ||
3618 | 456 | LOG.info( | ||
3619 | 457 | "Creating a marker file to report ready: %s", path) | ||
3620 | 458 | util.write_file(path, "{pid}: {time}\n".format( | ||
3621 | 459 | pid=os.getpid(), time=time())) | ||
3622 | 465 | self._report_ready(lease=lease) | 460 | self._report_ready(lease=lease) |
3624 | 466 | need_report = False | 461 | report_ready = False |
3625 | 467 | return readurl(url, timeout=1, headers=headers, | 462 | return readurl(url, timeout=1, headers=headers, |
3626 | 468 | exception_cb=exc_cb, infinite=True).contents | 463 | exception_cb=exc_cb, infinite=True).contents |
3627 | 469 | except UrlError: | 464 | except UrlError: |
3628 | @@ -474,7 +469,7 @@ class DataSourceAzure(sources.DataSource): | |||
3629 | 474 | before we go into our polling loop.""" | 469 | before we go into our polling loop.""" |
3630 | 475 | try: | 470 | try: |
3631 | 476 | get_metadata_from_fabric(None, lease['unknown-245']) | 471 | get_metadata_from_fabric(None, lease['unknown-245']) |
3633 | 477 | except Exception as exc: | 472 | except Exception: |
3634 | 478 | LOG.warning( | 473 | LOG.warning( |
3635 | 479 | "Error communicating with Azure fabric; You may experience." | 474 | "Error communicating with Azure fabric; You may experience." |
3636 | 480 | "connectivity issues.", exc_info=True) | 475 | "connectivity issues.", exc_info=True) |
3637 | @@ -492,13 +487,15 @@ class DataSourceAzure(sources.DataSource): | |||
3638 | 492 | jump back into the polling loop in order to retrieve the ovf_env.""" | 487 | jump back into the polling loop in order to retrieve the ovf_env.""" |
3639 | 493 | if not ret: | 488 | if not ret: |
3640 | 494 | return False | 489 | return False |
3642 | 495 | (md, self.userdata_raw, cfg, files) = ret | 490 | (_md, self.userdata_raw, cfg, _files) = ret |
3643 | 496 | path = REPROVISION_MARKER_FILE | 491 | path = REPROVISION_MARKER_FILE |
3644 | 497 | if (cfg.get('PreprovisionedVm') is True or | 492 | if (cfg.get('PreprovisionedVm') is True or |
3645 | 498 | os.path.isfile(path)): | 493 | os.path.isfile(path)): |
3646 | 499 | if not os.path.isfile(path): | 494 | if not os.path.isfile(path): |
3649 | 500 | LOG.info("Creating a marker file to poll imds") | 495 | LOG.info("Creating a marker file to poll imds: %s", |
3650 | 501 | util.write_file(path, "%s: %s\n" % (os.getpid(), time())) | 496 | path) |
3651 | 497 | util.write_file(path, "{pid}: {time}\n".format( | ||
3652 | 498 | pid=os.getpid(), time=time())) | ||
3653 | 502 | return True | 499 | return True |
3654 | 503 | return False | 500 | return False |
3655 | 504 | 501 | ||
3656 | @@ -528,16 +525,19 @@ class DataSourceAzure(sources.DataSource): | |||
3657 | 528 | self.ds_cfg['agent_command']) | 525 | self.ds_cfg['agent_command']) |
3658 | 529 | try: | 526 | try: |
3659 | 530 | fabric_data = metadata_func() | 527 | fabric_data = metadata_func() |
3661 | 531 | except Exception as exc: | 528 | except Exception: |
3662 | 532 | LOG.warning( | 529 | LOG.warning( |
3663 | 533 | "Error communicating with Azure fabric; You may experience." | 530 | "Error communicating with Azure fabric; You may experience." |
3664 | 534 | "connectivity issues.", exc_info=True) | 531 | "connectivity issues.", exc_info=True) |
3665 | 535 | return False | 532 | return False |
3666 | 533 | util.del_file(REPORTED_READY_MARKER_FILE) | ||
3667 | 536 | util.del_file(REPROVISION_MARKER_FILE) | 534 | util.del_file(REPROVISION_MARKER_FILE) |
3668 | 537 | return fabric_data | 535 | return fabric_data |
3669 | 538 | 536 | ||
3670 | 539 | def activate(self, cfg, is_new_instance): | 537 | def activate(self, cfg, is_new_instance): |
3672 | 540 | address_ephemeral_resize(is_new_instance=is_new_instance) | 538 | address_ephemeral_resize(is_new_instance=is_new_instance, |
3673 | 539 | preserve_ntfs=self.ds_cfg.get( | ||
3674 | 540 | DS_CFG_KEY_PRESERVE_NTFS, False)) | ||
3675 | 541 | return | 541 | return |
3676 | 542 | 542 | ||
3677 | 543 | @property | 543 | @property |
3678 | @@ -581,17 +581,29 @@ def _has_ntfs_filesystem(devpath): | |||
3679 | 581 | return os.path.realpath(devpath) in ntfs_devices | 581 | return os.path.realpath(devpath) in ntfs_devices |
3680 | 582 | 582 | ||
3681 | 583 | 583 | ||
3684 | 584 | def can_dev_be_reformatted(devpath): | 584 | def can_dev_be_reformatted(devpath, preserve_ntfs): |
3685 | 585 | """Determine if block device devpath is newly formatted ephemeral. | 585 | """Determine if the ephemeral drive at devpath should be reformatted. |
3686 | 586 | 586 | ||
3688 | 587 | A newly formatted disk will: | 587 | A fresh ephemeral disk is formatted by Azure and will: |
3689 | 588 | a.) have a partition table (dos or gpt) | 588 | a.) have a partition table (dos or gpt) |
3690 | 589 | b.) have 1 partition that is ntfs formatted, or | 589 | b.) have 1 partition that is ntfs formatted, or |
3691 | 590 | have 2 partitions with the second partition ntfs formatted. | 590 | have 2 partitions with the second partition ntfs formatted. |
3692 | 591 | (larger instances with >2TB ephemeral disk have gpt, and will | 591 | (larger instances with >2TB ephemeral disk have gpt, and will |
3693 | 592 | have a microsoft reserved partition as part 1. LP: #1686514) | 592 | have a microsoft reserved partition as part 1. LP: #1686514) |
3694 | 593 | c.) the ntfs partition will have no files other than possibly | 593 | c.) the ntfs partition will have no files other than possibly |
3696 | 594 | 'dataloss_warning_readme.txt'""" | 594 | 'dataloss_warning_readme.txt' |
3697 | 595 | |||
3698 | 596 | User can indicate that NTFS should never be destroyed by setting | ||
3699 | 597 | DS_CFG_KEY_PRESERVE_NTFS in dscfg. | ||
3700 | 598 | If data is found on NTFS, user is warned to set DS_CFG_KEY_PRESERVE_NTFS | ||
3701 | 599 | to make sure cloud-init does not accidentally wipe their data. | ||
3702 | 600 | If cloud-init cannot mount the disk to check for data, destruction | ||
3703 | 601 | will be allowed, unless the dscfg key is set.""" | ||
3704 | 602 | if preserve_ntfs: | ||
3705 | 603 | msg = ('config says to never destroy NTFS (%s.%s), skipping checks' % | ||
3706 | 604 | (".".join(DS_CFG_PATH), DS_CFG_KEY_PRESERVE_NTFS)) | ||
3707 | 605 | return False, msg | ||
3708 | 606 | |||
3709 | 595 | if not os.path.exists(devpath): | 607 | if not os.path.exists(devpath): |
3710 | 596 | return False, 'device %s does not exist' % devpath | 608 | return False, 'device %s does not exist' % devpath |
3711 | 597 | 609 | ||
3712 | @@ -624,18 +636,27 @@ def can_dev_be_reformatted(devpath): | |||
3713 | 624 | bmsg = ('partition %s (%s) on device %s was ntfs formatted' % | 636 | bmsg = ('partition %s (%s) on device %s was ntfs formatted' % |
3714 | 625 | (cand_part, cand_path, devpath)) | 637 | (cand_part, cand_path, devpath)) |
3715 | 626 | try: | 638 | try: |
3717 | 627 | file_count = util.mount_cb(cand_path, count_files) | 639 | file_count = util.mount_cb(cand_path, count_files, mtype="ntfs", |
3718 | 640 | update_env_for_mount={'LANG': 'C'}) | ||
3719 | 628 | except util.MountFailedError as e: | 641 | except util.MountFailedError as e: |
3720 | 642 | if "mount: unknown filesystem type 'ntfs'" in str(e): | ||
3721 | 643 | return True, (bmsg + ' but this system cannot mount NTFS,' | ||
3722 | 644 | ' assuming there are no important files.' | ||
3723 | 645 | ' Formatting allowed.') | ||
3724 | 629 | return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e) | 646 | return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e) |
3725 | 630 | 647 | ||
3726 | 631 | if file_count != 0: | 648 | if file_count != 0: |
3727 | 649 | LOG.warning("it looks like you're using NTFS on the ephemeral disk, " | ||
3728 | 650 | 'to ensure that filesystem does not get wiped, set ' | ||
3729 | 651 | '%s.%s in config', '.'.join(DS_CFG_PATH), | ||
3730 | 652 | DS_CFG_KEY_PRESERVE_NTFS) | ||
3731 | 632 | return False, bmsg + ' but had %d files on it.' % file_count | 653 | return False, bmsg + ' but had %d files on it.' % file_count |
3732 | 633 | 654 | ||
3733 | 634 | return True, bmsg + ' and had no important files. Safe for reformatting.' | 655 | return True, bmsg + ' and had no important files. Safe for reformatting.' |
3734 | 635 | 656 | ||
3735 | 636 | 657 | ||
3736 | 637 | def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, | 658 | def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, |
3738 | 638 | is_new_instance=False): | 659 | is_new_instance=False, preserve_ntfs=False): |
3739 | 639 | # wait for ephemeral disk to come up | 660 | # wait for ephemeral disk to come up |
3740 | 640 | naplen = .2 | 661 | naplen = .2 |
3741 | 641 | missing = util.wait_for_files([devpath], maxwait=maxwait, naplen=naplen, | 662 | missing = util.wait_for_files([devpath], maxwait=maxwait, naplen=naplen, |
3742 | @@ -651,7 +672,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, | |||
3743 | 651 | if is_new_instance: | 672 | if is_new_instance: |
3744 | 652 | result, msg = (True, "First instance boot.") | 673 | result, msg = (True, "First instance boot.") |
3745 | 653 | else: | 674 | else: |
3747 | 654 | result, msg = can_dev_be_reformatted(devpath) | 675 | result, msg = can_dev_be_reformatted(devpath, preserve_ntfs) |
3748 | 655 | 676 | ||
3749 | 656 | LOG.debug("reformattable=%s: %s", result, msg) | 677 | LOG.debug("reformattable=%s: %s", result, msg) |
3750 | 657 | if not result: | 678 | if not result: |
3751 | @@ -965,6 +986,18 @@ def _check_freebsd_cdrom(cdrom_dev): | |||
3752 | 965 | return False | 986 | return False |
3753 | 966 | 987 | ||
3754 | 967 | 988 | ||
3755 | 989 | def _get_random_seed(): | ||
3756 | 990 | """Return content random seed file if available, otherwise, | ||
3757 | 991 | return None.""" | ||
3758 | 992 | # azure / hyper-v provides random data here | ||
3759 | 993 | # TODO. find the seed on FreeBSD platform | ||
3760 | 994 | # now update ds_cfg to reflect contents pass in config | ||
3761 | 995 | if util.is_FreeBSD(): | ||
3762 | 996 | return None | ||
3763 | 997 | return util.load_file("/sys/firmware/acpi/tables/OEM0", | ||
3764 | 998 | quiet=True, decode=False) | ||
3765 | 999 | |||
3766 | 1000 | |||
3767 | 968 | def list_possible_azure_ds_devs(): | 1001 | def list_possible_azure_ds_devs(): |
3768 | 969 | devlist = [] | 1002 | devlist = [] |
3769 | 970 | if util.is_FreeBSD(): | 1003 | if util.is_FreeBSD(): |
3770 | diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py | |||
3771 | index 0df545f..d4b758f 100644 | |||
3772 | --- a/cloudinit/sources/DataSourceCloudStack.py | |||
3773 | +++ b/cloudinit/sources/DataSourceCloudStack.py | |||
3774 | @@ -68,6 +68,10 @@ class DataSourceCloudStack(sources.DataSource): | |||
3775 | 68 | 68 | ||
3776 | 69 | dsname = 'CloudStack' | 69 | dsname = 'CloudStack' |
3777 | 70 | 70 | ||
3778 | 71 | # Setup read_url parameters per get_url_params. | ||
3779 | 72 | url_max_wait = 120 | ||
3780 | 73 | url_timeout = 50 | ||
3781 | 74 | |||
3782 | 71 | def __init__(self, sys_cfg, distro, paths): | 75 | def __init__(self, sys_cfg, distro, paths): |
3783 | 72 | sources.DataSource.__init__(self, sys_cfg, distro, paths) | 76 | sources.DataSource.__init__(self, sys_cfg, distro, paths) |
3784 | 73 | self.seed_dir = os.path.join(paths.seed_dir, 'cs') | 77 | self.seed_dir = os.path.join(paths.seed_dir, 'cs') |
3785 | @@ -80,33 +84,18 @@ class DataSourceCloudStack(sources.DataSource): | |||
3786 | 80 | self.metadata_address = "http://%s/" % (self.vr_addr,) | 84 | self.metadata_address = "http://%s/" % (self.vr_addr,) |
3787 | 81 | self.cfg = {} | 85 | self.cfg = {} |
3788 | 82 | 86 | ||
3796 | 83 | def _get_url_settings(self): | 87 | def wait_for_metadata_service(self): |
3797 | 84 | mcfg = self.ds_cfg | 88 | url_params = self.get_url_params() |
3791 | 85 | max_wait = 120 | ||
3792 | 86 | try: | ||
3793 | 87 | max_wait = int(mcfg.get("max_wait", max_wait)) | ||
3794 | 88 | except Exception: | ||
3795 | 89 | util.logexc(LOG, "Failed to get max wait. using %s", max_wait) | ||
3798 | 90 | 89 | ||
3800 | 91 | if max_wait == 0: | 90 | if url_params.max_wait_seconds <= 0: |
3801 | 92 | return False | 91 | return False |
3802 | 93 | 92 | ||
3803 | 94 | timeout = 50 | ||
3804 | 95 | try: | ||
3805 | 96 | timeout = int(mcfg.get("timeout", timeout)) | ||
3806 | 97 | except Exception: | ||
3807 | 98 | util.logexc(LOG, "Failed to get timeout, using %s", timeout) | ||
3808 | 99 | |||
3809 | 100 | return (max_wait, timeout) | ||
3810 | 101 | |||
3811 | 102 | def wait_for_metadata_service(self): | ||
3812 | 103 | (max_wait, timeout) = self._get_url_settings() | ||
3813 | 104 | |||
3814 | 105 | urls = [uhelp.combine_url(self.metadata_address, | 93 | urls = [uhelp.combine_url(self.metadata_address, |
3815 | 106 | 'latest/meta-data/instance-id')] | 94 | 'latest/meta-data/instance-id')] |
3816 | 107 | start_time = time.time() | 95 | start_time = time.time() |
3819 | 108 | url = uhelp.wait_for_url(urls=urls, max_wait=max_wait, | 96 | url = uhelp.wait_for_url( |
3820 | 109 | timeout=timeout, status_cb=LOG.warn) | 97 | urls=urls, max_wait=url_params.max_wait_seconds, |
3821 | 98 | timeout=url_params.timeout_seconds, status_cb=LOG.warn) | ||
3822 | 110 | 99 | ||
3823 | 111 | if url: | 100 | if url: |
3824 | 112 | LOG.debug("Using metadata source: '%s'", url) | 101 | LOG.debug("Using metadata source: '%s'", url) |
3825 | diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py | |||
3826 | index c7b5fe5..4cb2897 100644 | |||
3827 | --- a/cloudinit/sources/DataSourceConfigDrive.py | |||
3828 | +++ b/cloudinit/sources/DataSourceConfigDrive.py | |||
3829 | @@ -43,7 +43,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): | |||
3830 | 43 | self.version = None | 43 | self.version = None |
3831 | 44 | self.ec2_metadata = None | 44 | self.ec2_metadata = None |
3832 | 45 | self._network_config = None | 45 | self._network_config = None |
3834 | 46 | self.network_json = None | 46 | self.network_json = sources.UNSET |
3835 | 47 | self.network_eni = None | 47 | self.network_eni = None |
3836 | 48 | self.known_macs = None | 48 | self.known_macs = None |
3837 | 49 | self.files = {} | 49 | self.files = {} |
3838 | @@ -69,7 +69,8 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): | |||
3839 | 69 | util.logexc(LOG, "Failed reading config drive from %s", sdir) | 69 | util.logexc(LOG, "Failed reading config drive from %s", sdir) |
3840 | 70 | 70 | ||
3841 | 71 | if not found: | 71 | if not found: |
3843 | 72 | for dev in find_candidate_devs(): | 72 | dslist = self.sys_cfg.get('datasource_list') |
3844 | 73 | for dev in find_candidate_devs(dslist=dslist): | ||
3845 | 73 | try: | 74 | try: |
3846 | 74 | # Set mtype if freebsd and turn off sync | 75 | # Set mtype if freebsd and turn off sync |
3847 | 75 | if dev.startswith("/dev/cd"): | 76 | if dev.startswith("/dev/cd"): |
3848 | @@ -148,7 +149,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): | |||
3849 | 148 | @property | 149 | @property |
3850 | 149 | def network_config(self): | 150 | def network_config(self): |
3851 | 150 | if self._network_config is None: | 151 | if self._network_config is None: |
3853 | 151 | if self.network_json is not None: | 152 | if self.network_json not in (None, sources.UNSET): |
3854 | 152 | LOG.debug("network config provided via network_json") | 153 | LOG.debug("network config provided via network_json") |
3855 | 153 | self._network_config = openstack.convert_net_json( | 154 | self._network_config = openstack.convert_net_json( |
3856 | 154 | self.network_json, known_macs=self.known_macs) | 155 | self.network_json, known_macs=self.known_macs) |
3857 | @@ -211,7 +212,7 @@ def write_injected_files(files): | |||
3858 | 211 | util.logexc(LOG, "Failed writing file: %s", filename) | 212 | util.logexc(LOG, "Failed writing file: %s", filename) |
3859 | 212 | 213 | ||
3860 | 213 | 214 | ||
3862 | 214 | def find_candidate_devs(probe_optical=True): | 215 | def find_candidate_devs(probe_optical=True, dslist=None): |
3863 | 215 | """Return a list of devices that may contain the config drive. | 216 | """Return a list of devices that may contain the config drive. |
3864 | 216 | 217 | ||
3865 | 217 | The returned list is sorted by search order where the first item has | 218 | The returned list is sorted by search order where the first item has |
3866 | @@ -227,6 +228,9 @@ def find_candidate_devs(probe_optical=True): | |||
3867 | 227 | * either vfat or iso9660 formated | 228 | * either vfat or iso9660 formated |
3868 | 228 | * labeled with 'config-2' or 'CONFIG-2' | 229 | * labeled with 'config-2' or 'CONFIG-2' |
3869 | 229 | """ | 230 | """ |
3870 | 231 | if dslist is None: | ||
3871 | 232 | dslist = [] | ||
3872 | 233 | |||
3873 | 230 | # query optical drive to get it in blkid cache for 2.6 kernels | 234 | # query optical drive to get it in blkid cache for 2.6 kernels |
3874 | 231 | if probe_optical: | 235 | if probe_optical: |
3875 | 232 | for device in OPTICAL_DEVICES: | 236 | for device in OPTICAL_DEVICES: |
3876 | @@ -257,7 +261,8 @@ def find_candidate_devs(probe_optical=True): | |||
3877 | 257 | devices = [d for d in candidates | 261 | devices = [d for d in candidates |
3878 | 258 | if d in by_label or not util.is_partition(d)] | 262 | if d in by_label or not util.is_partition(d)] |
3879 | 259 | 263 | ||
3881 | 260 | if devices: | 264 | LOG.debug("devices=%s dslist=%s", devices, dslist) |
3882 | 265 | if devices and "IBMCloud" in dslist: | ||
3883 | 261 | # IBMCloud uses config-2 label, but limited to a single UUID. | 266 | # IBMCloud uses config-2 label, but limited to a single UUID. |
3884 | 262 | ibm_platform, ibm_path = get_ibm_platform() | 267 | ibm_platform, ibm_path = get_ibm_platform() |
3885 | 263 | if ibm_path in devices: | 268 | if ibm_path in devices: |
3886 | diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py | |||
3887 | index 21e9ef8..968ab3f 100644 | |||
3888 | --- a/cloudinit/sources/DataSourceEc2.py | |||
3889 | +++ b/cloudinit/sources/DataSourceEc2.py | |||
3890 | @@ -27,8 +27,6 @@ SKIP_METADATA_URL_CODES = frozenset([uhelp.NOT_FOUND]) | |||
3891 | 27 | STRICT_ID_PATH = ("datasource", "Ec2", "strict_id") | 27 | STRICT_ID_PATH = ("datasource", "Ec2", "strict_id") |
3892 | 28 | STRICT_ID_DEFAULT = "warn" | 28 | STRICT_ID_DEFAULT = "warn" |
3893 | 29 | 29 | ||
3894 | 30 | _unset = "_unset" | ||
3895 | 31 | |||
3896 | 32 | 30 | ||
3897 | 33 | class Platforms(object): | 31 | class Platforms(object): |
3898 | 34 | # TODO Rename and move to cloudinit.cloud.CloudNames | 32 | # TODO Rename and move to cloudinit.cloud.CloudNames |
3899 | @@ -59,15 +57,16 @@ class DataSourceEc2(sources.DataSource): | |||
3900 | 59 | # for extended metadata content. IPv6 support comes in 2016-09-02 | 57 | # for extended metadata content. IPv6 support comes in 2016-09-02 |
3901 | 60 | extended_metadata_versions = ['2016-09-02'] | 58 | extended_metadata_versions = ['2016-09-02'] |
3902 | 61 | 59 | ||
3903 | 60 | # Setup read_url parameters per get_url_params. | ||
3904 | 61 | url_max_wait = 120 | ||
3905 | 62 | url_timeout = 50 | ||
3906 | 63 | |||
3907 | 62 | _cloud_platform = None | 64 | _cloud_platform = None |
3908 | 63 | 65 | ||
3910 | 64 | _network_config = _unset # Used for caching calculated network config v1 | 66 | _network_config = sources.UNSET # Used to cache calculated network cfg v1 |
3911 | 65 | 67 | ||
3912 | 66 | # Whether we want to get network configuration from the metadata service. | 68 | # Whether we want to get network configuration from the metadata service. |
3917 | 67 | get_network_metadata = False | 69 | perform_dhcp_setup = False |
3914 | 68 | |||
3915 | 69 | # Track the discovered fallback nic for use in configuration generation. | ||
3916 | 70 | _fallback_interface = None | ||
3918 | 71 | 70 | ||
3919 | 72 | def __init__(self, sys_cfg, distro, paths): | 71 | def __init__(self, sys_cfg, distro, paths): |
3920 | 73 | super(DataSourceEc2, self).__init__(sys_cfg, distro, paths) | 72 | super(DataSourceEc2, self).__init__(sys_cfg, distro, paths) |
3921 | @@ -98,7 +97,7 @@ class DataSourceEc2(sources.DataSource): | |||
3922 | 98 | elif self.cloud_platform == Platforms.NO_EC2_METADATA: | 97 | elif self.cloud_platform == Platforms.NO_EC2_METADATA: |
3923 | 99 | return False | 98 | return False |
3924 | 100 | 99 | ||
3926 | 101 | if self.get_network_metadata: # Setup networking in init-local stage. | 100 | if self.perform_dhcp_setup: # Setup networking in init-local stage. |
3927 | 102 | if util.is_FreeBSD(): | 101 | if util.is_FreeBSD(): |
3928 | 103 | LOG.debug("FreeBSD doesn't support running dhclient with -sf") | 102 | LOG.debug("FreeBSD doesn't support running dhclient with -sf") |
3929 | 104 | return False | 103 | return False |
3930 | @@ -158,27 +157,11 @@ class DataSourceEc2(sources.DataSource): | |||
3931 | 158 | else: | 157 | else: |
3932 | 159 | return self.metadata['instance-id'] | 158 | return self.metadata['instance-id'] |
3933 | 160 | 159 | ||
3934 | 161 | def _get_url_settings(self): | ||
3935 | 162 | mcfg = self.ds_cfg | ||
3936 | 163 | max_wait = 120 | ||
3937 | 164 | try: | ||
3938 | 165 | max_wait = int(mcfg.get("max_wait", max_wait)) | ||
3939 | 166 | except Exception: | ||
3940 | 167 | util.logexc(LOG, "Failed to get max wait. using %s", max_wait) | ||
3941 | 168 | |||
3942 | 169 | timeout = 50 | ||
3943 | 170 | try: | ||
3944 | 171 | timeout = max(0, int(mcfg.get("timeout", timeout))) | ||
3945 | 172 | except Exception: | ||
3946 | 173 | util.logexc(LOG, "Failed to get timeout, using %s", timeout) | ||
3947 | 174 | |||
3948 | 175 | return (max_wait, timeout) | ||
3949 | 176 | |||
3950 | 177 | def wait_for_metadata_service(self): | 160 | def wait_for_metadata_service(self): |
3951 | 178 | mcfg = self.ds_cfg | 161 | mcfg = self.ds_cfg |
3952 | 179 | 162 | ||
3955 | 180 | (max_wait, timeout) = self._get_url_settings() | 163 | url_params = self.get_url_params() |
3956 | 181 | if max_wait <= 0: | 164 | if url_params.max_wait_seconds <= 0: |
3957 | 182 | return False | 165 | return False |
3958 | 183 | 166 | ||
3959 | 184 | # Remove addresses from the list that wont resolve. | 167 | # Remove addresses from the list that wont resolve. |
3960 | @@ -205,7 +188,8 @@ class DataSourceEc2(sources.DataSource): | |||
3961 | 205 | 188 | ||
3962 | 206 | start_time = time.time() | 189 | start_time = time.time() |
3963 | 207 | url = uhelp.wait_for_url( | 190 | url = uhelp.wait_for_url( |
3965 | 208 | urls=urls, max_wait=max_wait, timeout=timeout, status_cb=LOG.warn) | 191 | urls=urls, max_wait=url_params.max_wait_seconds, |
3966 | 192 | timeout=url_params.timeout_seconds, status_cb=LOG.warn) | ||
3967 | 209 | 193 | ||
3968 | 210 | if url: | 194 | if url: |
3969 | 211 | self.metadata_address = url2base[url] | 195 | self.metadata_address = url2base[url] |
3970 | @@ -310,11 +294,11 @@ class DataSourceEc2(sources.DataSource): | |||
3971 | 310 | @property | 294 | @property |
3972 | 311 | def network_config(self): | 295 | def network_config(self): |
3973 | 312 | """Return a network config dict for rendering ENI or netplan files.""" | 296 | """Return a network config dict for rendering ENI or netplan files.""" |
3975 | 313 | if self._network_config != _unset: | 297 | if self._network_config != sources.UNSET: |
3976 | 314 | return self._network_config | 298 | return self._network_config |
3977 | 315 | 299 | ||
3978 | 316 | if self.metadata is None: | 300 | if self.metadata is None: |
3980 | 317 | # this would happen if get_data hadn't been called. leave as _unset | 301 | # this would happen if get_data hadn't been called. leave as UNSET |
3981 | 318 | LOG.warning( | 302 | LOG.warning( |
3982 | 319 | "Unexpected call to network_config when metadata is None.") | 303 | "Unexpected call to network_config when metadata is None.") |
3983 | 320 | return None | 304 | return None |
3984 | @@ -353,9 +337,7 @@ class DataSourceEc2(sources.DataSource): | |||
3985 | 353 | self._fallback_interface = _legacy_fbnic | 337 | self._fallback_interface = _legacy_fbnic |
3986 | 354 | self.fallback_nic = None | 338 | self.fallback_nic = None |
3987 | 355 | else: | 339 | else: |
3991 | 356 | self._fallback_interface = net.find_fallback_nic() | 340 | return super(DataSourceEc2, self).fallback_interface |
3989 | 357 | if self._fallback_interface is None: | ||
3990 | 358 | LOG.warning("Did not find a fallback interface on EC2.") | ||
3992 | 359 | return self._fallback_interface | 341 | return self._fallback_interface |
3993 | 360 | 342 | ||
3994 | 361 | def _crawl_metadata(self): | 343 | def _crawl_metadata(self): |
3995 | @@ -390,7 +372,7 @@ class DataSourceEc2Local(DataSourceEc2): | |||
3996 | 390 | metadata service. If the metadata service provides network configuration | 372 | metadata service. If the metadata service provides network configuration |
3997 | 391 | then render the network configuration for that instance based on metadata. | 373 | then render the network configuration for that instance based on metadata. |
3998 | 392 | """ | 374 | """ |
4000 | 393 | get_network_metadata = True # Get metadata network config if present | 375 | perform_dhcp_setup = True # Use dhcp before querying metadata |
4001 | 394 | 376 | ||
4002 | 395 | def get_data(self): | 377 | def get_data(self): |
4003 | 396 | supported_platforms = (Platforms.AWS,) | 378 | supported_platforms = (Platforms.AWS,) |
4004 | diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py | |||
4005 | index 02b3d56..01106ec 100644 | |||
4006 | --- a/cloudinit/sources/DataSourceIBMCloud.py | |||
4007 | +++ b/cloudinit/sources/DataSourceIBMCloud.py | |||
4008 | @@ -8,17 +8,11 @@ There are 2 different api exposed launch methods. | |||
4009 | 8 | * template: This is the legacy method of launching instances. | 8 | * template: This is the legacy method of launching instances. |
4010 | 9 | When booting from an image template, the system boots first into | 9 | When booting from an image template, the system boots first into |
4011 | 10 | a "provisioning" mode. There, host <-> guest mechanisms are utilized | 10 | a "provisioning" mode. There, host <-> guest mechanisms are utilized |
4013 | 11 | to execute code in the guest and provision it. | 11 | to execute code in the guest and configure it. The configuration |
4014 | 12 | includes configuring the system network and possibly installing | ||
4015 | 13 | packages and other software stack. | ||
4016 | 12 | 14 | ||
4026 | 13 | Cloud-init will disable itself when it detects that it is in the | 15 | After the provisioning is finished, the system reboots. |
4018 | 14 | provisioning mode. It detects this by the presence of | ||
4019 | 15 | a file '/root/provisioningConfiguration.cfg'. | ||
4020 | 16 | |||
4021 | 17 | When provided with user-data, the "first boot" will contain a | ||
4022 | 18 | ConfigDrive-like disk labeled with 'METADATA'. If there is no user-data | ||
4023 | 19 | provided, then there is no data-source. | ||
4024 | 20 | |||
4025 | 21 | Cloud-init never does any network configuration in this mode. | ||
4027 | 22 | 16 | ||
4028 | 23 | * os_code: Essentially "launch by OS Code" (Operating System Code). | 17 | * os_code: Essentially "launch by OS Code" (Operating System Code). |
4029 | 24 | This is a more modern approach. There is no specific "provisioning" boot. | 18 | This is a more modern approach. There is no specific "provisioning" boot. |
4030 | @@ -30,11 +24,73 @@ There are 2 different api exposed launch methods. | |||
4031 | 30 | mean that 1 in 8^16 (~4 billion) Xen ConfigDrive systems will be | 24 | mean that 1 in 8^16 (~4 billion) Xen ConfigDrive systems will be |
4032 | 31 | incorrectly identified as IBMCloud. | 25 | incorrectly identified as IBMCloud. |
4033 | 32 | 26 | ||
4034 | 27 | The combination of these 2 launch methods and with or without user-data | ||
4035 | 28 | creates 6 boot scenarios. | ||
4036 | 29 | A. os_code with user-data | ||
4037 | 30 | B. os_code without user-data | ||
4038 | 31 | Cloud-init is fully operational in this mode. | ||
4039 | 32 | |||
4040 | 33 | There is a block device attached with label 'config-2'. | ||
4041 | 34 | As it differs from OpenStack's config-2, we have to differentiate. | ||
4042 | 35 | We do so by requiring the UUID on the filesystem to be "9796-932E". | ||
4043 | 36 | |||
4044 | 37 | This disk will have the following files. Specifically note, there | ||
4045 | 38 | is no versioned path to the meta-data, only 'latest': | ||
4046 | 39 | openstack/latest/meta_data.json | ||
4047 | 40 | openstack/latest/network_data.json | ||
4048 | 41 | openstack/latest/user_data [optional] | ||
4049 | 42 | openstack/latest/vendor_data.json | ||
4050 | 43 | |||
4051 | 44 | vendor_data.json as of 2018-04 looks like this: | ||
4052 | 45 | {"cloud-init":"#!/bin/bash\necho 'root:$6$<snip>' | chpasswd -e"} | ||
4053 | 46 | |||
4054 | 47 | The only difference between A and B in this mode is the presence | ||
4055 | 48 | of user_data on the config disk. | ||
4056 | 49 | |||
4057 | 50 | C. template, provisioning boot with user-data | ||
4058 | 51 | D. template, provisioning boot without user-data. | ||
4059 | 52 | With ds-identify cloud-init is fully disabled in this mode. | ||
4060 | 53 | Without ds-identify, cloud-init None datasource will be used. | ||
4061 | 54 | |||
4062 | 55 | This is currently identified by the presence of | ||
4063 | 56 | /root/provisioningConfiguration.cfg . That file is placed into the | ||
4064 | 57 | system before it is booted. | ||
4065 | 58 | |||
4066 | 59 | The difference between C and D is the presence of the METADATA disk | ||
4067 | 60 | as described in E below. There is no METADATA disk attached unless | ||
4068 | 61 | user-data is provided. | ||
4069 | 62 | |||
4070 | 63 | E. template, post-provisioning boot with user-data. | ||
4071 | 64 | Cloud-init is fully operational in this mode. | ||
4072 | 65 | |||
4073 | 66 | This is identified by a block device with filesystem label "METADATA". | ||
4074 | 67 | The looks similar to a version-1 OpenStack config drive. It will | ||
4075 | 68 | have the following files: | ||
4076 | 69 | |||
4077 | 70 | openstack/latest/user_data | ||
4078 | 71 | openstack/latest/meta_data.json | ||
4079 | 72 | openstack/content/interfaces | ||
4080 | 73 | meta.js | ||
4081 | 74 | |||
4082 | 75 | meta.js contains something similar to user_data. cloud-init ignores it. | ||
4083 | 76 | cloud-init ignores the 'interfaces' style file here. | ||
4084 | 77 | In this mode, cloud-init has networking code disabled. It relies | ||
4085 | 78 | on the provisioning boot to have configured networking. | ||
4086 | 79 | |||
4087 | 80 | F. template, post-provisioning boot without user-data. | ||
4088 | 81 | With ds-identify, cloud-init will be fully disabled. | ||
4089 | 82 | Without ds-identify, cloud-init None datasource will be used. | ||
4090 | 83 | |||
4091 | 84 | There is no information available to identify this scenario. | ||
4092 | 85 | |||
4093 | 86 | The user will be able to ssh in as as root with their public keys that | ||
4094 | 87 | have been installed into /root/ssh/.authorized_keys | ||
4095 | 88 | during the provisioning stage. | ||
4096 | 89 | |||
4097 | 33 | TODO: | 90 | TODO: |
4098 | 34 | * is uuid (/sys/hypervisor/uuid) stable for life of an instance? | 91 | * is uuid (/sys/hypervisor/uuid) stable for life of an instance? |
4099 | 35 | it seems it is not the same as data's uuid in the os_code case | 92 | it seems it is not the same as data's uuid in the os_code case |
4100 | 36 | but is in the template case. | 93 | but is in the template case. |
4101 | 37 | |||
4102 | 38 | """ | 94 | """ |
4103 | 39 | import base64 | 95 | import base64 |
4104 | 40 | import json | 96 | import json |
4105 | @@ -138,8 +194,30 @@ def _is_xen(): | |||
4106 | 138 | return os.path.exists("/proc/xen") | 194 | return os.path.exists("/proc/xen") |
4107 | 139 | 195 | ||
4108 | 140 | 196 | ||
4111 | 141 | def _is_ibm_provisioning(): | 197 | def _is_ibm_provisioning( |
4112 | 142 | return os.path.exists("/root/provisioningConfiguration.cfg") | 198 | prov_cfg="/root/provisioningConfiguration.cfg", |
4113 | 199 | inst_log="/root/swinstall.log", | ||
4114 | 200 | boot_ref="/proc/1/environ"): | ||
4115 | 201 | """Return boolean indicating if this boot is ibm provisioning boot.""" | ||
4116 | 202 | if os.path.exists(prov_cfg): | ||
4117 | 203 | msg = "config '%s' exists." % prov_cfg | ||
4118 | 204 | result = True | ||
4119 | 205 | if os.path.exists(inst_log): | ||
4120 | 206 | if os.path.exists(boot_ref): | ||
4121 | 207 | result = (os.stat(inst_log).st_mtime > | ||
4122 | 208 | os.stat(boot_ref).st_mtime) | ||
4123 | 209 | msg += (" log '%s' from %s boot." % | ||
4124 | 210 | (inst_log, "current" if result else "previous")) | ||
4125 | 211 | else: | ||
4126 | 212 | msg += (" log '%s' existed, but no reference file '%s'." % | ||
4127 | 213 | (inst_log, boot_ref)) | ||
4128 | 214 | result = False | ||
4129 | 215 | else: | ||
4130 | 216 | msg += " log '%s' did not exist." % inst_log | ||
4131 | 217 | else: | ||
4132 | 218 | result, msg = (False, "config '%s' did not exist." % prov_cfg) | ||
4133 | 219 | LOG.debug("ibm_provisioning=%s: %s", result, msg) | ||
4134 | 220 | return result | ||
4135 | 143 | 221 | ||
4136 | 144 | 222 | ||
4137 | 145 | def get_ibm_platform(): | 223 | def get_ibm_platform(): |
4138 | @@ -189,7 +267,7 @@ def get_ibm_platform(): | |||
4139 | 189 | else: | 267 | else: |
4140 | 190 | return (Platforms.TEMPLATE_LIVE_METADATA, metadata_path) | 268 | return (Platforms.TEMPLATE_LIVE_METADATA, metadata_path) |
4141 | 191 | elif _is_ibm_provisioning(): | 269 | elif _is_ibm_provisioning(): |
4143 | 192 | return (Platforms.TEMPLATE_PROVISIONING_NODATA, None) | 270 | return (Platforms.TEMPLATE_PROVISIONING_NODATA, None) |
4144 | 193 | return not_found | 271 | return not_found |
4145 | 194 | 272 | ||
4146 | 195 | 273 | ||
4147 | diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py | |||
4148 | index 6ac8863..bcb3854 100644 | |||
4149 | --- a/cloudinit/sources/DataSourceMAAS.py | |||
4150 | +++ b/cloudinit/sources/DataSourceMAAS.py | |||
4151 | @@ -198,13 +198,13 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None, | |||
4152 | 198 | If version is None, then <version>/ will not be used. | 198 | If version is None, then <version>/ will not be used. |
4153 | 199 | """ | 199 | """ |
4154 | 200 | if read_file_or_url is None: | 200 | if read_file_or_url is None: |
4156 | 201 | read_file_or_url = util.read_file_or_url | 201 | read_file_or_url = url_helper.read_file_or_url |
4157 | 202 | 202 | ||
4158 | 203 | if seed_url.endswith("/"): | 203 | if seed_url.endswith("/"): |
4159 | 204 | seed_url = seed_url[:-1] | 204 | seed_url = seed_url[:-1] |
4160 | 205 | 205 | ||
4161 | 206 | md = {} | 206 | md = {} |
4163 | 207 | for path, dictname, binary, optional in DS_FIELDS: | 207 | for path, _dictname, binary, optional in DS_FIELDS: |
4164 | 208 | if version is None: | 208 | if version is None: |
4165 | 209 | url = "%s/%s" % (seed_url, path) | 209 | url = "%s/%s" % (seed_url, path) |
4166 | 210 | else: | 210 | else: |
4167 | diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py | |||
4168 | index 5d3a8dd..2daea59 100644 | |||
4169 | --- a/cloudinit/sources/DataSourceNoCloud.py | |||
4170 | +++ b/cloudinit/sources/DataSourceNoCloud.py | |||
4171 | @@ -78,7 +78,7 @@ class DataSourceNoCloud(sources.DataSource): | |||
4172 | 78 | LOG.debug("Using seeded data from %s", path) | 78 | LOG.debug("Using seeded data from %s", path) |
4173 | 79 | mydata = _merge_new_seed(mydata, seeded) | 79 | mydata = _merge_new_seed(mydata, seeded) |
4174 | 80 | break | 80 | break |
4176 | 81 | except ValueError as e: | 81 | except ValueError: |
4177 | 82 | pass | 82 | pass |
4178 | 83 | 83 | ||
4179 | 84 | # If the datasource config had a 'seedfrom' entry, then that takes | 84 | # If the datasource config had a 'seedfrom' entry, then that takes |
4180 | @@ -117,7 +117,7 @@ class DataSourceNoCloud(sources.DataSource): | |||
4181 | 117 | try: | 117 | try: |
4182 | 118 | seeded = util.mount_cb(dev, _pp2d_callback, | 118 | seeded = util.mount_cb(dev, _pp2d_callback, |
4183 | 119 | pp2d_kwargs) | 119 | pp2d_kwargs) |
4185 | 120 | except ValueError as e: | 120 | except ValueError: |
4186 | 121 | if dev in label_list: | 121 | if dev in label_list: |
4187 | 122 | LOG.warning("device %s with label=%s not a" | 122 | LOG.warning("device %s with label=%s not a" |
4188 | 123 | "valid seed.", dev, label) | 123 | "valid seed.", dev, label) |
4189 | diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py | |||
4190 | index dc914a7..178ccb0 100644 | |||
4191 | --- a/cloudinit/sources/DataSourceOVF.py | |||
4192 | +++ b/cloudinit/sources/DataSourceOVF.py | |||
4193 | @@ -556,7 +556,7 @@ def search_file(dirpath, filename): | |||
4194 | 556 | if not dirpath or not filename: | 556 | if not dirpath or not filename: |
4195 | 557 | return None | 557 | return None |
4196 | 558 | 558 | ||
4198 | 559 | for root, dirs, files in os.walk(dirpath): | 559 | for root, _dirs, files in os.walk(dirpath): |
4199 | 560 | if filename in files: | 560 | if filename in files: |
4200 | 561 | return os.path.join(root, filename) | 561 | return os.path.join(root, filename) |
4201 | 562 | 562 | ||
4202 | diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py | |||
4203 | index d4a4111..16c1078 100644 | |||
4204 | --- a/cloudinit/sources/DataSourceOpenNebula.py | |||
4205 | +++ b/cloudinit/sources/DataSourceOpenNebula.py | |||
4206 | @@ -378,7 +378,7 @@ def read_context_disk_dir(source_dir, asuser=None): | |||
4207 | 378 | if asuser is not None: | 378 | if asuser is not None: |
4208 | 379 | try: | 379 | try: |
4209 | 380 | pwd.getpwnam(asuser) | 380 | pwd.getpwnam(asuser) |
4211 | 381 | except KeyError as e: | 381 | except KeyError: |
4212 | 382 | raise BrokenContextDiskDir( | 382 | raise BrokenContextDiskDir( |
4213 | 383 | "configured user '{user}' does not exist".format( | 383 | "configured user '{user}' does not exist".format( |
4214 | 384 | user=asuser)) | 384 | user=asuser)) |
4215 | diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py | |||
4216 | index e55a763..365af96 100644 | |||
4217 | --- a/cloudinit/sources/DataSourceOpenStack.py | |||
4218 | +++ b/cloudinit/sources/DataSourceOpenStack.py | |||
4219 | @@ -7,6 +7,7 @@ | |||
4220 | 7 | import time | 7 | import time |
4221 | 8 | 8 | ||
4222 | 9 | from cloudinit import log as logging | 9 | from cloudinit import log as logging |
4223 | 10 | from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError | ||
4224 | 10 | from cloudinit import sources | 11 | from cloudinit import sources |
4225 | 11 | from cloudinit import url_helper | 12 | from cloudinit import url_helper |
4226 | 12 | from cloudinit import util | 13 | from cloudinit import util |
4227 | @@ -22,51 +23,37 @@ DEFAULT_METADATA = { | |||
4228 | 22 | "instance-id": DEFAULT_IID, | 23 | "instance-id": DEFAULT_IID, |
4229 | 23 | } | 24 | } |
4230 | 24 | 25 | ||
4231 | 26 | # OpenStack DMI constants | ||
4232 | 27 | DMI_PRODUCT_NOVA = 'OpenStack Nova' | ||
4233 | 28 | DMI_PRODUCT_COMPUTE = 'OpenStack Compute' | ||
4234 | 29 | VALID_DMI_PRODUCT_NAMES = [DMI_PRODUCT_NOVA, DMI_PRODUCT_COMPUTE] | ||
4235 | 30 | DMI_ASSET_TAG_OPENTELEKOM = 'OpenTelekomCloud' | ||
4236 | 31 | VALID_DMI_ASSET_TAGS = [DMI_ASSET_TAG_OPENTELEKOM] | ||
4237 | 32 | |||
4238 | 25 | 33 | ||
4239 | 26 | class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): | 34 | class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): |
4240 | 27 | 35 | ||
4241 | 28 | dsname = "OpenStack" | 36 | dsname = "OpenStack" |
4242 | 29 | 37 | ||
4243 | 38 | _network_config = sources.UNSET # Used to cache calculated network cfg v1 | ||
4244 | 39 | |||
4245 | 40 | # Whether we want to get network configuration from the metadata service. | ||
4246 | 41 | perform_dhcp_setup = False | ||
4247 | 42 | |||
4248 | 30 | def __init__(self, sys_cfg, distro, paths): | 43 | def __init__(self, sys_cfg, distro, paths): |
4249 | 31 | super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths) | 44 | super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths) |
4250 | 32 | self.metadata_address = None | 45 | self.metadata_address = None |
4251 | 33 | self.ssl_details = util.fetch_ssl_details(self.paths) | 46 | self.ssl_details = util.fetch_ssl_details(self.paths) |
4252 | 34 | self.version = None | 47 | self.version = None |
4253 | 35 | self.files = {} | 48 | self.files = {} |
4255 | 36 | self.ec2_metadata = None | 49 | self.ec2_metadata = sources.UNSET |
4256 | 50 | self.network_json = sources.UNSET | ||
4257 | 37 | 51 | ||
4258 | 38 | def __str__(self): | 52 | def __str__(self): |
4259 | 39 | root = sources.DataSource.__str__(self) | 53 | root = sources.DataSource.__str__(self) |
4260 | 40 | mstr = "%s [%s,ver=%s]" % (root, self.dsmode, self.version) | 54 | mstr = "%s [%s,ver=%s]" % (root, self.dsmode, self.version) |
4261 | 41 | return mstr | 55 | return mstr |
4262 | 42 | 56 | ||
4263 | 43 | def _get_url_settings(self): | ||
4264 | 44 | # TODO(harlowja): this is shared with ec2 datasource, we should just | ||
4265 | 45 | # move it to a shared location instead... | ||
4266 | 46 | # Note: the defaults here are different though. | ||
4267 | 47 | |||
4268 | 48 | # max_wait < 0 indicates do not wait | ||
4269 | 49 | max_wait = -1 | ||
4270 | 50 | timeout = 10 | ||
4271 | 51 | retries = 5 | ||
4272 | 52 | |||
4273 | 53 | try: | ||
4274 | 54 | max_wait = int(self.ds_cfg.get("max_wait", max_wait)) | ||
4275 | 55 | except Exception: | ||
4276 | 56 | util.logexc(LOG, "Failed to get max wait. using %s", max_wait) | ||
4277 | 57 | |||
4278 | 58 | try: | ||
4279 | 59 | timeout = max(0, int(self.ds_cfg.get("timeout", timeout))) | ||
4280 | 60 | except Exception: | ||
4281 | 61 | util.logexc(LOG, "Failed to get timeout, using %s", timeout) | ||
4282 | 62 | |||
4283 | 63 | try: | ||
4284 | 64 | retries = int(self.ds_cfg.get("retries", retries)) | ||
4285 | 65 | except Exception: | ||
4286 | 66 | util.logexc(LOG, "Failed to get retries. using %s", retries) | ||
4287 | 67 | |||
4288 | 68 | return (max_wait, timeout, retries) | ||
4289 | 69 | |||
4290 | 70 | def wait_for_metadata_service(self): | 57 | def wait_for_metadata_service(self): |
4291 | 71 | urls = self.ds_cfg.get("metadata_urls", [DEF_MD_URL]) | 58 | urls = self.ds_cfg.get("metadata_urls", [DEF_MD_URL]) |
4292 | 72 | filtered = [x for x in urls if util.is_resolvable_url(x)] | 59 | filtered = [x for x in urls if util.is_resolvable_url(x)] |
4293 | @@ -86,10 +73,11 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): | |||
4294 | 86 | md_urls.append(md_url) | 73 | md_urls.append(md_url) |
4295 | 87 | url2base[md_url] = url | 74 | url2base[md_url] = url |
4296 | 88 | 75 | ||
4298 | 89 | (max_wait, timeout, retries) = self._get_url_settings() | 76 | url_params = self.get_url_params() |
4299 | 90 | start_time = time.time() | 77 | start_time = time.time() |
4302 | 91 | avail_url = url_helper.wait_for_url(urls=md_urls, max_wait=max_wait, | 78 | avail_url = url_helper.wait_for_url( |
4303 | 92 | timeout=timeout) | 79 | urls=md_urls, max_wait=url_params.max_wait_seconds, |
4304 | 80 | timeout=url_params.timeout_seconds) | ||
4305 | 93 | if avail_url: | 81 | if avail_url: |
4306 | 94 | LOG.debug("Using metadata source: '%s'", url2base[avail_url]) | 82 | LOG.debug("Using metadata source: '%s'", url2base[avail_url]) |
4307 | 95 | else: | 83 | else: |
4308 | @@ -99,38 +87,66 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): | |||
4309 | 99 | self.metadata_address = url2base.get(avail_url) | 87 | self.metadata_address = url2base.get(avail_url) |
4310 | 100 | return bool(avail_url) | 88 | return bool(avail_url) |
4311 | 101 | 89 | ||
4318 | 102 | def _get_data(self): | 90 | def check_instance_id(self, sys_cfg): |
4319 | 103 | try: | 91 | # quickly (local check only) if self.instance_id is still valid |
4320 | 104 | if not self.wait_for_metadata_service(): | 92 | return sources.instance_id_matches_system_uuid(self.get_instance_id()) |
4315 | 105 | return False | ||
4316 | 106 | except IOError: | ||
4317 | 107 | return False | ||
4321 | 108 | 93 | ||
4323 | 109 | (max_wait, timeout, retries) = self._get_url_settings() | 94 | @property |
4324 | 95 | def network_config(self): | ||
4325 | 96 | """Return a network config dict for rendering ENI or netplan files.""" | ||
4326 | 97 | if self._network_config != sources.UNSET: | ||
4327 | 98 | return self._network_config | ||
4328 | 99 | |||
4329 | 100 | # RELEASE_BLOCKER: SRU to Xenial and Artful SRU should not provide | ||
4330 | 101 | # network_config by default unless configured in /etc/cloud/cloud.cfg*. | ||
4331 | 102 | # Patch Xenial and Artful before release to default to False. | ||
4332 | 103 | if util.is_false(self.ds_cfg.get('apply_network_config', True)): | ||
4333 | 104 | self._network_config = None | ||
4334 | 105 | return self._network_config | ||
4335 | 106 | if self.network_json == sources.UNSET: | ||
4336 | 107 | # this would happen if get_data hadn't been called. leave as UNSET | ||
4337 | 108 | LOG.warning( | ||
4338 | 109 | 'Unexpected call to network_config when network_json is None.') | ||
4339 | 110 | return None | ||
4340 | 111 | |||
4341 | 112 | LOG.debug('network config provided via network_json') | ||
4342 | 113 | self._network_config = openstack.convert_net_json( | ||
4343 | 114 | self.network_json, known_macs=None) | ||
4344 | 115 | return self._network_config | ||
4345 | 110 | 116 | ||
4359 | 111 | try: | 117 | def _get_data(self): |
4360 | 112 | results = util.log_time(LOG.debug, | 118 | """Crawl metadata, parse and persist that data for this instance. |
4361 | 113 | 'Crawl of openstack metadata service', | 119 | |
4362 | 114 | read_metadata_service, | 120 | @return: True when metadata discovered indicates OpenStack datasource. |
4363 | 115 | args=[self.metadata_address], | 121 | False when unable to contact metadata service or when metadata |
4364 | 116 | kwargs={'ssl_details': self.ssl_details, | 122 | format is invalid or disabled. |
4365 | 117 | 'retries': retries, | 123 | """ |
4366 | 118 | 'timeout': timeout}) | 124 | if not detect_openstack(): |
4354 | 119 | except openstack.NonReadable: | ||
4355 | 120 | return False | ||
4356 | 121 | except (openstack.BrokenMetadata, IOError): | ||
4357 | 122 | util.logexc(LOG, "Broken metadata address %s", | ||
4358 | 123 | self.metadata_address) | ||
4367 | 124 | return False | 125 | return False |
4368 | 126 | if self.perform_dhcp_setup: # Setup networking in init-local stage. | ||
4369 | 127 | try: | ||
4370 | 128 | with EphemeralDHCPv4(self.fallback_interface): | ||
4371 | 129 | results = util.log_time( | ||
4372 | 130 | logfunc=LOG.debug, msg='Crawl of metadata service', | ||
4373 | 131 | func=self._crawl_metadata) | ||
4374 | 132 | except (NoDHCPLeaseError, sources.InvalidMetaDataException) as e: | ||
4375 | 133 | util.logexc(LOG, str(e)) | ||
4376 | 134 | return False | ||
4377 | 135 | else: | ||
4378 | 136 | try: | ||
4379 | 137 | results = self._crawl_metadata() | ||
4380 | 138 | except sources.InvalidMetaDataException as e: | ||
4381 | 139 | util.logexc(LOG, str(e)) | ||
4382 | 140 | return False | ||
4383 | 125 | 141 | ||
4384 | 126 | self.dsmode = self._determine_dsmode([results.get('dsmode')]) | 142 | self.dsmode = self._determine_dsmode([results.get('dsmode')]) |
4385 | 127 | if self.dsmode == sources.DSMODE_DISABLED: | 143 | if self.dsmode == sources.DSMODE_DISABLED: |
4386 | 128 | return False | 144 | return False |
4387 | 129 | |||
4388 | 130 | md = results.get('metadata', {}) | 145 | md = results.get('metadata', {}) |
4389 | 131 | md = util.mergemanydict([md, DEFAULT_METADATA]) | 146 | md = util.mergemanydict([md, DEFAULT_METADATA]) |
4390 | 132 | self.metadata = md | 147 | self.metadata = md |
4391 | 133 | self.ec2_metadata = results.get('ec2-metadata') | 148 | self.ec2_metadata = results.get('ec2-metadata') |
4392 | 149 | self.network_json = results.get('networkdata') | ||
4393 | 134 | self.userdata_raw = results.get('userdata') | 150 | self.userdata_raw = results.get('userdata') |
4394 | 135 | self.version = results['version'] | 151 | self.version = results['version'] |
4395 | 136 | self.files.update(results.get('files', {})) | 152 | self.files.update(results.get('files', {})) |
4396 | @@ -145,9 +161,50 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): | |||
4397 | 145 | 161 | ||
4398 | 146 | return True | 162 | return True |
4399 | 147 | 163 | ||
4403 | 148 | def check_instance_id(self, sys_cfg): | 164 | def _crawl_metadata(self): |
4404 | 149 | # quickly (local check only) if self.instance_id is still valid | 165 | """Crawl metadata service when available. |
4405 | 150 | return sources.instance_id_matches_system_uuid(self.get_instance_id()) | 166 | |
4406 | 167 | @returns: Dictionary with all metadata discovered for this datasource. | ||
4407 | 168 | @raise: InvalidMetaDataException on unreadable or broken | ||
4408 | 169 | metadata. | ||
4409 | 170 | """ | ||
4410 | 171 | try: | ||
4411 | 172 | if not self.wait_for_metadata_service(): | ||
4412 | 173 | raise sources.InvalidMetaDataException( | ||
4413 | 174 | 'No active metadata service found') | ||
4414 | 175 | except IOError as e: | ||
4415 | 176 | raise sources.InvalidMetaDataException( | ||
4416 | 177 | 'IOError contacting metadata service: {error}'.format( | ||
4417 | 178 | error=str(e))) | ||
4418 | 179 | |||
4419 | 180 | url_params = self.get_url_params() | ||
4420 | 181 | |||
4421 | 182 | try: | ||
4422 | 183 | result = util.log_time( | ||
4423 | 184 | LOG.debug, 'Crawl of openstack metadata service', | ||
4424 | 185 | read_metadata_service, args=[self.metadata_address], | ||
4425 | 186 | kwargs={'ssl_details': self.ssl_details, | ||
4426 | 187 | 'retries': url_params.num_retries, | ||
4427 | 188 | 'timeout': url_params.timeout_seconds}) | ||
4428 | 189 | except openstack.NonReadable as e: | ||
4429 | 190 | raise sources.InvalidMetaDataException(str(e)) | ||
4430 | 191 | except (openstack.BrokenMetadata, IOError): | ||
4431 | 192 | msg = 'Broken metadata address {addr}'.format( | ||
4432 | 193 | addr=self.metadata_address) | ||
4433 | 194 | raise sources.InvalidMetaDataException(msg) | ||
4434 | 195 | return result | ||
4435 | 196 | |||
4436 | 197 | |||
4437 | 198 | class DataSourceOpenStackLocal(DataSourceOpenStack): | ||
4438 | 199 | """Run in init-local using a dhcp discovery prior to metadata crawl. | ||
4439 | 200 | |||
4440 | 201 | In init-local, no network is available. This subclass sets up minimal | ||
4441 | 202 | networking with dhclient on a viable nic so that it can talk to the | ||
4442 | 203 | metadata service. If the metadata service provides network configuration | ||
4443 | 204 | then render the network configuration for that instance based on metadata. | ||
4444 | 205 | """ | ||
4445 | 206 | |||
4446 | 207 | perform_dhcp_setup = True # Get metadata network config if present | ||
4447 | 151 | 208 | ||
4448 | 152 | 209 | ||
4449 | 153 | def read_metadata_service(base_url, ssl_details=None, | 210 | def read_metadata_service(base_url, ssl_details=None, |
4450 | @@ -157,8 +214,23 @@ def read_metadata_service(base_url, ssl_details=None, | |||
4451 | 157 | return reader.read_v2() | 214 | return reader.read_v2() |
4452 | 158 | 215 | ||
4453 | 159 | 216 | ||
4454 | 217 | def detect_openstack(): | ||
4455 | 218 | """Return True when a potential OpenStack platform is detected.""" | ||
4456 | 219 | if not util.is_x86(): | ||
4457 | 220 | return True # Non-Intel cpus don't properly report dmi product names | ||
4458 | 221 | product_name = util.read_dmi_data('system-product-name') | ||
4459 | 222 | if product_name in VALID_DMI_PRODUCT_NAMES: | ||
4460 | 223 | return True | ||
4461 | 224 | elif util.read_dmi_data('chassis-asset-tag') in VALID_DMI_ASSET_TAGS: | ||
4462 | 225 | return True | ||
4463 | 226 | elif util.get_proc_env(1).get('product_name') == DMI_PRODUCT_NOVA: | ||
4464 | 227 | return True | ||
4465 | 228 | return False | ||
4466 | 229 | |||
4467 | 230 | |||
4468 | 160 | # Used to match classes to dependencies | 231 | # Used to match classes to dependencies |
4469 | 161 | datasources = [ | 232 | datasources = [ |
4470 | 233 | (DataSourceOpenStackLocal, (sources.DEP_FILESYSTEM,)), | ||
4471 | 162 | (DataSourceOpenStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), | 234 | (DataSourceOpenStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), |
4472 | 163 | ] | 235 | ] |
4473 | 164 | 236 | ||
4474 | diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py | |||
4475 | index 86bfa5d..f92e8b5 100644 | |||
4476 | --- a/cloudinit/sources/DataSourceSmartOS.py | |||
4477 | +++ b/cloudinit/sources/DataSourceSmartOS.py | |||
4478 | @@ -1,4 +1,5 @@ | |||
4479 | 1 | # Copyright (C) 2013 Canonical Ltd. | 1 | # Copyright (C) 2013 Canonical Ltd. |
4480 | 2 | # Copyright (c) 2018, Joyent, Inc. | ||
4481 | 2 | # | 3 | # |
4482 | 3 | # Author: Ben Howard <ben.howard@canonical.com> | 4 | # Author: Ben Howard <ben.howard@canonical.com> |
4483 | 4 | # | 5 | # |
4484 | @@ -10,17 +11,19 @@ | |||
4485 | 10 | # SmartOS hosts use a serial console (/dev/ttyS1) on KVM Linux Guests | 11 | # SmartOS hosts use a serial console (/dev/ttyS1) on KVM Linux Guests |
4486 | 11 | # The meta-data is transmitted via key/value pairs made by | 12 | # The meta-data is transmitted via key/value pairs made by |
4487 | 12 | # requests on the console. For example, to get the hostname, you | 13 | # requests on the console. For example, to get the hostname, you |
4489 | 13 | # would send "GET hostname" on /dev/ttyS1. | 14 | # would send "GET sdc:hostname" on /dev/ttyS1. |
4490 | 14 | # For Linux Guests running in LX-Brand Zones on SmartOS hosts | 15 | # For Linux Guests running in LX-Brand Zones on SmartOS hosts |
4491 | 15 | # a socket (/native/.zonecontrol/metadata.sock) is used instead | 16 | # a socket (/native/.zonecontrol/metadata.sock) is used instead |
4492 | 16 | # of a serial console. | 17 | # of a serial console. |
4493 | 17 | # | 18 | # |
4494 | 18 | # Certain behavior is defined by the DataDictionary | 19 | # Certain behavior is defined by the DataDictionary |
4496 | 19 | # http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html | 20 | # https://eng.joyent.com/mdata/datadict.html |
4497 | 20 | # Comments with "@datadictionary" are snippets of the definition | 21 | # Comments with "@datadictionary" are snippets of the definition |
4498 | 21 | 22 | ||
4499 | 22 | import base64 | 23 | import base64 |
4500 | 23 | import binascii | 24 | import binascii |
4501 | 25 | import errno | ||
4502 | 26 | import fcntl | ||
4503 | 24 | import json | 27 | import json |
4504 | 25 | import os | 28 | import os |
4505 | 26 | import random | 29 | import random |
4506 | @@ -108,7 +111,7 @@ BUILTIN_CLOUD_CONFIG = { | |||
4507 | 108 | 'overwrite': False} | 111 | 'overwrite': False} |
4508 | 109 | }, | 112 | }, |
4509 | 110 | 'fs_setup': [{'label': 'ephemeral0', | 113 | 'fs_setup': [{'label': 'ephemeral0', |
4511 | 111 | 'filesystem': 'ext3', | 114 | 'filesystem': 'ext4', |
4512 | 112 | 'device': 'ephemeral0'}], | 115 | 'device': 'ephemeral0'}], |
4513 | 113 | } | 116 | } |
4514 | 114 | 117 | ||
4515 | @@ -162,9 +165,8 @@ class DataSourceSmartOS(sources.DataSource): | |||
4516 | 162 | 165 | ||
4517 | 163 | dsname = "Joyent" | 166 | dsname = "Joyent" |
4518 | 164 | 167 | ||
4522 | 165 | _unset = "_unset" | 168 | smartos_type = sources.UNSET |
4523 | 166 | smartos_type = _unset | 169 | md_client = sources.UNSET |
4521 | 167 | md_client = _unset | ||
4524 | 168 | 170 | ||
4525 | 169 | def __init__(self, sys_cfg, distro, paths): | 171 | def __init__(self, sys_cfg, distro, paths): |
4526 | 170 | sources.DataSource.__init__(self, sys_cfg, distro, paths) | 172 | sources.DataSource.__init__(self, sys_cfg, distro, paths) |
4527 | @@ -186,12 +188,12 @@ class DataSourceSmartOS(sources.DataSource): | |||
4528 | 186 | return "%s [client=%s]" % (root, self.md_client) | 188 | return "%s [client=%s]" % (root, self.md_client) |
4529 | 187 | 189 | ||
4530 | 188 | def _init(self): | 190 | def _init(self): |
4532 | 189 | if self.smartos_type == self._unset: | 191 | if self.smartos_type == sources.UNSET: |
4533 | 190 | self.smartos_type = get_smartos_environ() | 192 | self.smartos_type = get_smartos_environ() |
4534 | 191 | if self.smartos_type is None: | 193 | if self.smartos_type is None: |
4535 | 192 | self.md_client = None | 194 | self.md_client = None |
4536 | 193 | 195 | ||
4538 | 194 | if self.md_client == self._unset: | 196 | if self.md_client == sources.UNSET: |
4539 | 195 | self.md_client = jmc_client_factory( | 197 | self.md_client = jmc_client_factory( |
4540 | 196 | smartos_type=self.smartos_type, | 198 | smartos_type=self.smartos_type, |
4541 | 197 | metadata_sockfile=self.ds_cfg['metadata_sockfile'], | 199 | metadata_sockfile=self.ds_cfg['metadata_sockfile'], |
4542 | @@ -229,6 +231,9 @@ class DataSourceSmartOS(sources.DataSource): | |||
4543 | 229 | self.md_client) | 231 | self.md_client) |
4544 | 230 | return False | 232 | return False |
4545 | 231 | 233 | ||
4546 | 234 | # Open once for many requests, rather than once for each request | ||
4547 | 235 | self.md_client.open_transport() | ||
4548 | 236 | |||
4549 | 232 | for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items(): | 237 | for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items(): |
4550 | 233 | smartos_noun, strip = attribute | 238 | smartos_noun, strip = attribute |
4551 | 234 | md[ci_noun] = self.md_client.get(smartos_noun, strip=strip) | 239 | md[ci_noun] = self.md_client.get(smartos_noun, strip=strip) |
4552 | @@ -236,6 +241,8 @@ class DataSourceSmartOS(sources.DataSource): | |||
4553 | 236 | for ci_noun, smartos_noun in SMARTOS_ATTRIB_JSON.items(): | 241 | for ci_noun, smartos_noun in SMARTOS_ATTRIB_JSON.items(): |
4554 | 237 | md[ci_noun] = self.md_client.get_json(smartos_noun) | 242 | md[ci_noun] = self.md_client.get_json(smartos_noun) |
4555 | 238 | 243 | ||
4556 | 244 | self.md_client.close_transport() | ||
4557 | 245 | |||
4558 | 239 | # @datadictionary: This key may contain a program that is written | 246 | # @datadictionary: This key may contain a program that is written |
4559 | 240 | # to a file in the filesystem of the guest on each boot and then | 247 | # to a file in the filesystem of the guest on each boot and then |
4560 | 241 | # executed. It may be of any format that would be considered | 248 | # executed. It may be of any format that would be considered |
4561 | @@ -266,8 +273,14 @@ class DataSourceSmartOS(sources.DataSource): | |||
4562 | 266 | write_boot_content(u_data, u_data_f) | 273 | write_boot_content(u_data, u_data_f) |
4563 | 267 | 274 | ||
4564 | 268 | # Handle the cloud-init regular meta | 275 | # Handle the cloud-init regular meta |
4565 | 276 | |||
4566 | 277 | # The hostname may or may not be qualified with the local domain name. | ||
4567 | 278 | # This follows section 3.14 of RFC 2132. | ||
4568 | 269 | if not md['local-hostname']: | 279 | if not md['local-hostname']: |
4570 | 270 | md['local-hostname'] = md['instance-id'] | 280 | if md['hostname']: |
4571 | 281 | md['local-hostname'] = md['hostname'] | ||
4572 | 282 | else: | ||
4573 | 283 | md['local-hostname'] = md['instance-id'] | ||
4574 | 271 | 284 | ||
4575 | 272 | ud = None | 285 | ud = None |
4576 | 273 | if md['user-data']: | 286 | if md['user-data']: |
4577 | @@ -285,6 +298,7 @@ class DataSourceSmartOS(sources.DataSource): | |||
4578 | 285 | self.userdata_raw = ud | 298 | self.userdata_raw = ud |
4579 | 286 | self.vendordata_raw = md['vendor-data'] | 299 | self.vendordata_raw = md['vendor-data'] |
4580 | 287 | self.network_data = md['network-data'] | 300 | self.network_data = md['network-data'] |
4581 | 301 | self.routes_data = md['routes'] | ||
4582 | 288 | 302 | ||
4583 | 289 | self._set_provisioned() | 303 | self._set_provisioned() |
4584 | 290 | return True | 304 | return True |
4585 | @@ -308,7 +322,8 @@ class DataSourceSmartOS(sources.DataSource): | |||
4586 | 308 | convert_smartos_network_data( | 322 | convert_smartos_network_data( |
4587 | 309 | network_data=self.network_data, | 323 | network_data=self.network_data, |
4588 | 310 | dns_servers=self.metadata['dns_servers'], | 324 | dns_servers=self.metadata['dns_servers'], |
4590 | 311 | dns_domain=self.metadata['dns_domain'])) | 325 | dns_domain=self.metadata['dns_domain'], |
4591 | 326 | routes=self.routes_data)) | ||
4592 | 312 | return self._network_config | 327 | return self._network_config |
4593 | 313 | 328 | ||
4594 | 314 | 329 | ||
4595 | @@ -316,6 +331,10 @@ class JoyentMetadataFetchException(Exception): | |||
4596 | 316 | pass | 331 | pass |
4597 | 317 | 332 | ||
4598 | 318 | 333 | ||
4599 | 334 | class JoyentMetadataTimeoutException(JoyentMetadataFetchException): | ||
4600 | 335 | pass | ||
4601 | 336 | |||
4602 | 337 | |||
4603 | 319 | class JoyentMetadataClient(object): | 338 | class JoyentMetadataClient(object): |
4604 | 320 | """ | 339 | """ |
4605 | 321 | A client implementing v2 of the Joyent Metadata Protocol Specification. | 340 | A client implementing v2 of the Joyent Metadata Protocol Specification. |
4606 | @@ -360,6 +379,47 @@ class JoyentMetadataClient(object): | |||
4607 | 360 | LOG.debug('Value "%s" found.', value) | 379 | LOG.debug('Value "%s" found.', value) |
4608 | 361 | return value | 380 | return value |
4609 | 362 | 381 | ||
4610 | 382 | def _readline(self): | ||
4611 | 383 | """ | ||
4612 | 384 | Reads a line a byte at a time until \n is encountered. Returns an | ||
4613 | 385 | ascii string with the trailing newline removed. | ||
4614 | 386 | |||
4615 | 387 | If a timeout (per-byte) is set and it expires, a | ||
4616 | 388 | JoyentMetadataFetchException will be thrown. | ||
4617 | 389 | """ | ||
4618 | 390 | response = [] | ||
4619 | 391 | |||
4620 | 392 | def as_ascii(): | ||
4621 | 393 | return b''.join(response).decode('ascii') | ||
4622 | 394 | |||
4623 | 395 | msg = "Partial response: '%s'" | ||
4624 | 396 | while True: | ||
4625 | 397 | try: | ||
4626 | 398 | byte = self.fp.read(1) | ||
4627 | 399 | if len(byte) == 0: | ||
4628 | 400 | raise JoyentMetadataTimeoutException(msg % as_ascii()) | ||
4629 | 401 | if byte == b'\n': | ||
4630 | 402 | return as_ascii() | ||
4631 | 403 | response.append(byte) | ||
4632 | 404 | except OSError as exc: | ||
4633 | 405 | if exc.errno == errno.EAGAIN: | ||
4634 | 406 | raise JoyentMetadataTimeoutException(msg % as_ascii()) | ||
4635 | 407 | raise | ||
4636 | 408 | |||
4637 | 409 | def _write(self, msg): | ||
4638 | 410 | self.fp.write(msg.encode('ascii')) | ||
4639 | 411 | self.fp.flush() | ||
4640 | 412 | |||
4641 | 413 | def _negotiate(self): | ||
4642 | 414 | LOG.debug('Negotiating protocol V2') | ||
4643 | 415 | self._write('NEGOTIATE V2\n') | ||
4644 | 416 | response = self._readline() | ||
4645 | 417 | LOG.debug('read "%s"', response) | ||
4646 | 418 | if response != 'V2_OK': | ||
4647 | 419 | raise JoyentMetadataFetchException( | ||
4648 | 420 | 'Invalid response "%s" to "NEGOTIATE V2"' % response) | ||
4649 | 421 | LOG.debug('Negotiation complete') | ||
4650 | 422 | |||
4651 | 363 | def request(self, rtype, param=None): | 423 | def request(self, rtype, param=None): |
4652 | 364 | request_id = '{0:08x}'.format(random.randint(0, 0xffffffff)) | 424 | request_id = '{0:08x}'.format(random.randint(0, 0xffffffff)) |
4653 | 365 | message_body = ' '.join((request_id, rtype,)) | 425 | message_body = ' '.join((request_id, rtype,)) |
4654 | @@ -374,18 +434,11 @@ class JoyentMetadataClient(object): | |||
4655 | 374 | self.open_transport() | 434 | self.open_transport() |
4656 | 375 | need_close = True | 435 | need_close = True |
4657 | 376 | 436 | ||
4666 | 377 | self.fp.write(msg.encode('ascii')) | 437 | self._write(msg) |
4667 | 378 | self.fp.flush() | 438 | response = self._readline() |
4660 | 379 | |||
4661 | 380 | response = bytearray() | ||
4662 | 381 | response.extend(self.fp.read(1)) | ||
4663 | 382 | while response[-1:] != b'\n': | ||
4664 | 383 | response.extend(self.fp.read(1)) | ||
4665 | 384 | |||
4668 | 385 | if need_close: | 439 | if need_close: |
4669 | 386 | self.close_transport() | 440 | self.close_transport() |
4670 | 387 | 441 | ||
4671 | 388 | response = response.rstrip().decode('ascii') | ||
4672 | 389 | LOG.debug('Read "%s" from metadata transport.', response) | 442 | LOG.debug('Read "%s" from metadata transport.', response) |
4673 | 390 | 443 | ||
4674 | 391 | if 'SUCCESS' not in response: | 444 | if 'SUCCESS' not in response: |
4675 | @@ -410,9 +463,9 @@ class JoyentMetadataClient(object): | |||
4676 | 410 | 463 | ||
4677 | 411 | def list(self): | 464 | def list(self): |
4678 | 412 | result = self.request(rtype='KEYS') | 465 | result = self.request(rtype='KEYS') |
4682 | 413 | if result: | 466 | if not result: |
4683 | 414 | result = result.split('\n') | 467 | return [] |
4684 | 415 | return result | 468 | return result.split('\n') |
4685 | 416 | 469 | ||
4686 | 417 | def put(self, key, val): | 470 | def put(self, key, val): |
4687 | 418 | param = b' '.join([base64.b64encode(i.encode()) | 471 | param = b' '.join([base64.b64encode(i.encode()) |
4688 | @@ -450,6 +503,7 @@ class JoyentMetadataSocketClient(JoyentMetadataClient): | |||
4689 | 450 | sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) | 503 | sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) |
4690 | 451 | sock.connect(self.socketpath) | 504 | sock.connect(self.socketpath) |
4691 | 452 | self.fp = sock.makefile('rwb') | 505 | self.fp = sock.makefile('rwb') |
4692 | 506 | self._negotiate() | ||
4693 | 453 | 507 | ||
4694 | 454 | def exists(self): | 508 | def exists(self): |
4695 | 455 | return os.path.exists(self.socketpath) | 509 | return os.path.exists(self.socketpath) |
4696 | @@ -459,8 +513,9 @@ class JoyentMetadataSocketClient(JoyentMetadataClient): | |||
4697 | 459 | 513 | ||
4698 | 460 | 514 | ||
4699 | 461 | class JoyentMetadataSerialClient(JoyentMetadataClient): | 515 | class JoyentMetadataSerialClient(JoyentMetadataClient): |
4702 | 462 | def __init__(self, device, timeout=10, smartos_type=SMARTOS_ENV_KVM): | 516 | def __init__(self, device, timeout=10, smartos_type=SMARTOS_ENV_KVM, |
4703 | 463 | super(JoyentMetadataSerialClient, self).__init__(smartos_type) | 517 | fp=None): |
4704 | 518 | super(JoyentMetadataSerialClient, self).__init__(smartos_type, fp) | ||
4705 | 464 | self.device = device | 519 | self.device = device |
4706 | 465 | self.timeout = timeout | 520 | self.timeout = timeout |
4707 | 466 | 521 | ||
4708 | @@ -468,10 +523,51 @@ class JoyentMetadataSerialClient(JoyentMetadataClient): | |||
4709 | 468 | return os.path.exists(self.device) | 523 | return os.path.exists(self.device) |
4710 | 469 | 524 | ||
4711 | 470 | def open_transport(self): | 525 | def open_transport(self): |
4716 | 471 | ser = serial.Serial(self.device, timeout=self.timeout) | 526 | if self.fp is None: |
4717 | 472 | if not ser.isOpen(): | 527 | ser = serial.Serial(self.device, timeout=self.timeout) |
4718 | 473 | raise SystemError("Unable to open %s" % self.device) | 528 | if not ser.isOpen(): |
4719 | 474 | self.fp = ser | 529 | raise SystemError("Unable to open %s" % self.device) |
4720 | 530 | self.fp = ser | ||
4721 | 531 | fcntl.lockf(ser, fcntl.LOCK_EX) | ||
4722 | 532 | self._flush() | ||
4723 | 533 | self._negotiate() | ||
4724 | 534 | |||
4725 | 535 | def _flush(self): | ||
4726 | 536 | LOG.debug('Flushing input') | ||
4727 | 537 | # Read any pending data | ||
4728 | 538 | timeout = self.fp.timeout | ||
4729 | 539 | self.fp.timeout = 0.1 | ||
4730 | 540 | while True: | ||
4731 | 541 | try: | ||
4732 | 542 | self._readline() | ||
4733 | 543 | except JoyentMetadataTimeoutException: | ||
4734 | 544 | break | ||
4735 | 545 | LOG.debug('Input empty') | ||
4736 | 546 | |||
4737 | 547 | # Send a newline and expect "invalid command". Keep trying until | ||
4738 | 548 | # successful. Retry rather frequently so that the "Is the host | ||
4739 | 549 | # metadata service running" appears on the console soon after someone | ||
4740 | 550 | # attaches in an effort to debug. | ||
4741 | 551 | if timeout > 5: | ||
4742 | 552 | self.fp.timeout = 5 | ||
4743 | 553 | else: | ||
4744 | 554 | self.fp.timeout = timeout | ||
4745 | 555 | while True: | ||
4746 | 556 | LOG.debug('Writing newline, expecting "invalid command"') | ||
4747 | 557 | self._write('\n') | ||
4748 | 558 | try: | ||
4749 | 559 | response = self._readline() | ||
4750 | 560 | if response == 'invalid command': | ||
4751 | 561 | break | ||
4752 | 562 | if response == 'FAILURE': | ||
4753 | 563 | LOG.debug('Got "FAILURE". Retrying.') | ||
4754 | 564 | continue | ||
4755 | 565 | LOG.warning('Unexpected response "%s" during flush', response) | ||
4756 | 566 | except JoyentMetadataTimeoutException: | ||
4757 | 567 | LOG.warning('Timeout while initializing metadata client. ' + | ||
4758 | 568 | 'Is the host metadata service running?') | ||
4759 | 569 | LOG.debug('Got "invalid command". Flush complete.') | ||
4760 | 570 | self.fp.timeout = timeout | ||
4761 | 475 | 571 | ||
4762 | 476 | def __repr__(self): | 572 | def __repr__(self): |
4763 | 477 | return "%s(device=%s, timeout=%s)" % ( | 573 | return "%s(device=%s, timeout=%s)" % ( |
4764 | @@ -650,7 +746,7 @@ def get_smartos_environ(uname_version=None, product_name=None): | |||
4765 | 650 | # report 'BrandZ virtual linux' as the kernel version | 746 | # report 'BrandZ virtual linux' as the kernel version |
4766 | 651 | if uname_version is None: | 747 | if uname_version is None: |
4767 | 652 | uname_version = uname[3] | 748 | uname_version = uname[3] |
4769 | 653 | if uname_version.lower() == 'brandz virtual linux': | 749 | if uname_version == 'BrandZ virtual linux': |
4770 | 654 | return SMARTOS_ENV_LX_BRAND | 750 | return SMARTOS_ENV_LX_BRAND |
4771 | 655 | 751 | ||
4772 | 656 | if product_name is None: | 752 | if product_name is None: |
4773 | @@ -658,7 +754,7 @@ def get_smartos_environ(uname_version=None, product_name=None): | |||
4774 | 658 | else: | 754 | else: |
4775 | 659 | system_type = product_name | 755 | system_type = product_name |
4776 | 660 | 756 | ||
4778 | 661 | if system_type and 'smartdc' in system_type.lower(): | 757 | if system_type and system_type.startswith('SmartDC'): |
4779 | 662 | return SMARTOS_ENV_KVM | 758 | return SMARTOS_ENV_KVM |
4780 | 663 | 759 | ||
4781 | 664 | return None | 760 | return None |
4782 | @@ -666,7 +762,8 @@ def get_smartos_environ(uname_version=None, product_name=None): | |||
4783 | 666 | 762 | ||
4784 | 667 | # Convert SMARTOS 'sdc:nics' data to network_config yaml | 763 | # Convert SMARTOS 'sdc:nics' data to network_config yaml |
4785 | 668 | def convert_smartos_network_data(network_data=None, | 764 | def convert_smartos_network_data(network_data=None, |
4787 | 669 | dns_servers=None, dns_domain=None): | 765 | dns_servers=None, dns_domain=None, |
4788 | 766 | routes=None): | ||
4789 | 670 | """Return a dictionary of network_config by parsing provided | 767 | """Return a dictionary of network_config by parsing provided |
4790 | 671 | SMARTOS sdc:nics configuration data | 768 | SMARTOS sdc:nics configuration data |
4791 | 672 | 769 | ||
4792 | @@ -684,6 +781,10 @@ def convert_smartos_network_data(network_data=None, | |||
4793 | 684 | keys are related to ip configuration. For each ip in the 'ips' list | 781 | keys are related to ip configuration. For each ip in the 'ips' list |
4794 | 685 | we create a subnet entry under 'subnets' pairing the ip to a one in | 782 | we create a subnet entry under 'subnets' pairing the ip to a one in |
4795 | 686 | the 'gateways' list. | 783 | the 'gateways' list. |
4796 | 784 | |||
4797 | 785 | Each route in sdc:routes is mapped to a route on each interface. | ||
4798 | 786 | The sdc:routes properties 'dst' and 'gateway' map to 'network' and | ||
4799 | 787 | 'gateway'. The 'linklocal' sdc:routes property is ignored. | ||
4800 | 687 | """ | 788 | """ |
4801 | 688 | 789 | ||
4802 | 689 | valid_keys = { | 790 | valid_keys = { |
4803 | @@ -706,6 +807,10 @@ def convert_smartos_network_data(network_data=None, | |||
4804 | 706 | 'scope', | 807 | 'scope', |
4805 | 707 | 'type', | 808 | 'type', |
4806 | 708 | ], | 809 | ], |
4807 | 810 | 'route': [ | ||
4808 | 811 | 'network', | ||
4809 | 812 | 'gateway', | ||
4810 | 813 | ], | ||
4811 | 709 | } | 814 | } |
4812 | 710 | 815 | ||
4813 | 711 | if dns_servers: | 816 | if dns_servers: |
4814 | @@ -720,6 +825,9 @@ def convert_smartos_network_data(network_data=None, | |||
4815 | 720 | else: | 825 | else: |
4816 | 721 | dns_domain = [] | 826 | dns_domain = [] |
4817 | 722 | 827 | ||
4818 | 828 | if not routes: | ||
4819 | 829 | routes = [] | ||
4820 | 830 | |||
4821 | 723 | def is_valid_ipv4(addr): | 831 | def is_valid_ipv4(addr): |
4822 | 724 | return '.' in addr | 832 | return '.' in addr |
4823 | 725 | 833 | ||
4824 | @@ -746,6 +854,7 @@ def convert_smartos_network_data(network_data=None, | |||
4825 | 746 | if ip == "dhcp": | 854 | if ip == "dhcp": |
4826 | 747 | subnet = {'type': 'dhcp4'} | 855 | subnet = {'type': 'dhcp4'} |
4827 | 748 | else: | 856 | else: |
4828 | 857 | routeents = [] | ||
4829 | 749 | subnet = dict((k, v) for k, v in nic.items() | 858 | subnet = dict((k, v) for k, v in nic.items() |
4830 | 750 | if k in valid_keys['subnet']) | 859 | if k in valid_keys['subnet']) |
4831 | 751 | subnet.update({ | 860 | subnet.update({ |
4832 | @@ -767,6 +876,25 @@ def convert_smartos_network_data(network_data=None, | |||
4833 | 767 | pgws[proto]['gw'] = gateways[0] | 876 | pgws[proto]['gw'] = gateways[0] |
4834 | 768 | subnet.update({'gateway': pgws[proto]['gw']}) | 877 | subnet.update({'gateway': pgws[proto]['gw']}) |
4835 | 769 | 878 | ||
4836 | 879 | for route in routes: | ||
4837 | 880 | rcfg = dict((k, v) for k, v in route.items() | ||
4838 | 881 | if k in valid_keys['route']) | ||
4839 | 882 | # Linux uses the value of 'gateway' to determine | ||
4840 | 883 | # automatically if the route is a forward/next-hop | ||
4841 | 884 | # (non-local IP for gateway) or an interface/resolver | ||
4842 | 885 | # (local IP for gateway). So we can ignore the | ||
4843 | 886 | # 'interface' attribute of sdc:routes, because SDC | ||
4844 | 887 | # guarantees that the gateway is a local IP for | ||
4845 | 888 | # "interface=true". | ||
4846 | 889 | # | ||
4847 | 890 | # Eventually we should be smart and compare "gateway" | ||
4848 | 891 | # to see if it's in the prefix. We can then smartly | ||
4849 | 892 | # add or not-add this route. But for now, | ||
4850 | 893 | # when in doubt, use brute force! Routes for everyone! | ||
4851 | 894 | rcfg.update({'network': route['dst']}) | ||
4852 | 895 | routeents.append(rcfg) | ||
4853 | 896 | subnet.update({'routes': routeents}) | ||
4854 | 897 | |||
4855 | 770 | subnets.append(subnet) | 898 | subnets.append(subnet) |
4856 | 771 | cfg.update({'subnets': subnets}) | 899 | cfg.update({'subnets': subnets}) |
4857 | 772 | config.append(cfg) | 900 | config.append(cfg) |
4858 | @@ -810,12 +938,14 @@ if __name__ == "__main__": | |||
4859 | 810 | keyname = SMARTOS_ATTRIB_JSON[key] | 938 | keyname = SMARTOS_ATTRIB_JSON[key] |
4860 | 811 | data[key] = client.get_json(keyname) | 939 | data[key] = client.get_json(keyname) |
4861 | 812 | elif key == "network_config": | 940 | elif key == "network_config": |
4863 | 813 | for depkey in ('network-data', 'dns_servers', 'dns_domain'): | 941 | for depkey in ('network-data', 'dns_servers', 'dns_domain', |
4864 | 942 | 'routes'): | ||
4865 | 814 | load_key(client, depkey, data) | 943 | load_key(client, depkey, data) |
4866 | 815 | data[key] = convert_smartos_network_data( | 944 | data[key] = convert_smartos_network_data( |
4867 | 816 | network_data=data['network-data'], | 945 | network_data=data['network-data'], |
4868 | 817 | dns_servers=data['dns_servers'], | 946 | dns_servers=data['dns_servers'], |
4870 | 818 | dns_domain=data['dns_domain']) | 947 | dns_domain=data['dns_domain'], |
4871 | 948 | routes=data['routes']) | ||
4872 | 819 | else: | 949 | else: |
4873 | 820 | if key in SMARTOS_ATTRIB_MAP: | 950 | if key in SMARTOS_ATTRIB_MAP: |
4874 | 821 | keyname, strip = SMARTOS_ATTRIB_MAP[key] | 951 | keyname, strip = SMARTOS_ATTRIB_MAP[key] |
4875 | diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py | |||
4876 | index df0b374..90d7457 100644 | |||
4877 | --- a/cloudinit/sources/__init__.py | |||
4878 | +++ b/cloudinit/sources/__init__.py | |||
4879 | @@ -9,6 +9,7 @@ | |||
4880 | 9 | # This file is part of cloud-init. See LICENSE file for license information. | 9 | # This file is part of cloud-init. See LICENSE file for license information. |
4881 | 10 | 10 | ||
4882 | 11 | import abc | 11 | import abc |
4883 | 12 | from collections import namedtuple | ||
4884 | 12 | import copy | 13 | import copy |
4885 | 13 | import json | 14 | import json |
4886 | 14 | import os | 15 | import os |
4887 | @@ -17,6 +18,7 @@ import six | |||
4888 | 17 | from cloudinit.atomic_helper import write_json | 18 | from cloudinit.atomic_helper import write_json |
4889 | 18 | from cloudinit import importer | 19 | from cloudinit import importer |
4890 | 19 | from cloudinit import log as logging | 20 | from cloudinit import log as logging |
4891 | 21 | from cloudinit import net | ||
4892 | 20 | from cloudinit import type_utils | 22 | from cloudinit import type_utils |
4893 | 21 | from cloudinit import user_data as ud | 23 | from cloudinit import user_data as ud |
4894 | 22 | from cloudinit import util | 24 | from cloudinit import util |
4895 | @@ -41,6 +43,8 @@ INSTANCE_JSON_FILE = 'instance-data.json' | |||
4896 | 41 | # Key which can be provide a cloud's official product name to cloud-init | 43 | # Key which can be provide a cloud's official product name to cloud-init |
4897 | 42 | METADATA_CLOUD_NAME_KEY = 'cloud-name' | 44 | METADATA_CLOUD_NAME_KEY = 'cloud-name' |
4898 | 43 | 45 | ||
4899 | 46 | UNSET = "_unset" | ||
4900 | 47 | |||
4901 | 44 | LOG = logging.getLogger(__name__) | 48 | LOG = logging.getLogger(__name__) |
4902 | 45 | 49 | ||
4903 | 46 | 50 | ||
4904 | @@ -48,6 +52,11 @@ class DataSourceNotFoundException(Exception): | |||
4905 | 48 | pass | 52 | pass |
4906 | 49 | 53 | ||
4907 | 50 | 54 | ||
4908 | 55 | class InvalidMetaDataException(Exception): | ||
4909 | 56 | """Raised when metadata is broken, unavailable or disabled.""" | ||
4910 | 57 | pass | ||
4911 | 58 | |||
4912 | 59 | |||
4913 | 51 | def process_base64_metadata(metadata, key_path=''): | 60 | def process_base64_metadata(metadata, key_path=''): |
4914 | 52 | """Strip ci-b64 prefix and return metadata with base64-encoded-keys set.""" | 61 | """Strip ci-b64 prefix and return metadata with base64-encoded-keys set.""" |
4915 | 53 | md_copy = copy.deepcopy(metadata) | 62 | md_copy = copy.deepcopy(metadata) |
4916 | @@ -68,6 +77,10 @@ def process_base64_metadata(metadata, key_path=''): | |||
4917 | 68 | return md_copy | 77 | return md_copy |
4918 | 69 | 78 | ||
4919 | 70 | 79 | ||
4920 | 80 | URLParams = namedtuple( | ||
4921 | 81 | 'URLParms', ['max_wait_seconds', 'timeout_seconds', 'num_retries']) | ||
4922 | 82 | |||
4923 | 83 | |||
4924 | 71 | @six.add_metaclass(abc.ABCMeta) | 84 | @six.add_metaclass(abc.ABCMeta) |
4925 | 72 | class DataSource(object): | 85 | class DataSource(object): |
4926 | 73 | 86 | ||
4927 | @@ -81,6 +94,14 @@ class DataSource(object): | |||
4928 | 81 | # Cached cloud_name as determined by _get_cloud_name | 94 | # Cached cloud_name as determined by _get_cloud_name |
4929 | 82 | _cloud_name = None | 95 | _cloud_name = None |
4930 | 83 | 96 | ||
4931 | 97 | # Track the discovered fallback nic for use in configuration generation. | ||
4932 | 98 | _fallback_interface = None | ||
4933 | 99 | |||
4934 | 100 | # read_url_params | ||
4935 | 101 | url_max_wait = -1 # max_wait < 0 means do not wait | ||
4936 | 102 | url_timeout = 10 # timeout for each metadata url read attempt | ||
4937 | 103 | url_retries = 5 # number of times to retry url upon 404 | ||
4938 | 104 | |||
4939 | 84 | def __init__(self, sys_cfg, distro, paths, ud_proc=None): | 105 | def __init__(self, sys_cfg, distro, paths, ud_proc=None): |
4940 | 85 | self.sys_cfg = sys_cfg | 106 | self.sys_cfg = sys_cfg |
4941 | 86 | self.distro = distro | 107 | self.distro = distro |
4942 | @@ -128,6 +149,14 @@ class DataSource(object): | |||
4943 | 128 | 'meta-data': self.metadata, | 149 | 'meta-data': self.metadata, |
4944 | 129 | 'user-data': self.get_userdata_raw(), | 150 | 'user-data': self.get_userdata_raw(), |
4945 | 130 | 'vendor-data': self.get_vendordata_raw()}} | 151 | 'vendor-data': self.get_vendordata_raw()}} |
4946 | 152 | if hasattr(self, 'network_json'): | ||
4947 | 153 | network_json = getattr(self, 'network_json') | ||
4948 | 154 | if network_json != UNSET: | ||
4949 | 155 | instance_data['ds']['network_json'] = network_json | ||
4950 | 156 | if hasattr(self, 'ec2_metadata'): | ||
4951 | 157 | ec2_metadata = getattr(self, 'ec2_metadata') | ||
4952 | 158 | if ec2_metadata != UNSET: | ||
4953 | 159 | instance_data['ds']['ec2_metadata'] = ec2_metadata | ||
4954 | 131 | instance_data.update( | 160 | instance_data.update( |
4955 | 132 | self._get_standardized_metadata()) | 161 | self._get_standardized_metadata()) |
4956 | 133 | try: | 162 | try: |
4957 | @@ -149,6 +178,42 @@ class DataSource(object): | |||
4958 | 149 | 'Subclasses of DataSource must implement _get_data which' | 178 | 'Subclasses of DataSource must implement _get_data which' |
4959 | 150 | ' sets self.metadata, vendordata_raw and userdata_raw.') | 179 | ' sets self.metadata, vendordata_raw and userdata_raw.') |
4960 | 151 | 180 | ||
4961 | 181 | def get_url_params(self): | ||
4962 | 182 | """Return the Datasource's prefered url_read parameters. | ||
4963 | 183 | |||
4964 | 184 | Subclasses may override url_max_wait, url_timeout, url_retries. | ||
4965 | 185 | |||
4966 | 186 | @return: A URLParams object with max_wait_seconds, timeout_seconds, | ||
4967 | 187 | num_retries. | ||
4968 | 188 | """ | ||
4969 | 189 | max_wait = self.url_max_wait | ||
4970 | 190 | try: | ||
4971 | 191 | max_wait = int(self.ds_cfg.get("max_wait", self.url_max_wait)) | ||
4972 | 192 | except ValueError: | ||
4973 | 193 | util.logexc( | ||
4974 | 194 | LOG, "Config max_wait '%s' is not an int, using default '%s'", | ||
4975 | 195 | self.ds_cfg.get("max_wait"), max_wait) | ||
4976 | 196 | |||
4977 | 197 | timeout = self.url_timeout | ||
4978 | 198 | try: | ||
4979 | 199 | timeout = max( | ||
4980 | 200 | 0, int(self.ds_cfg.get("timeout", self.url_timeout))) | ||
4981 | 201 | except ValueError: | ||
4982 | 202 | timeout = self.url_timeout | ||
4983 | 203 | util.logexc( | ||
4984 | 204 | LOG, "Config timeout '%s' is not an int, using default '%s'", | ||
4985 | 205 | self.ds_cfg.get('timeout'), timeout) | ||
4986 | 206 | |||
4987 | 207 | retries = self.url_retries | ||
4988 | 208 | try: | ||
4989 | 209 | retries = int(self.ds_cfg.get("retries", self.url_retries)) | ||
4990 | 210 | except Exception: | ||
4991 | 211 | util.logexc( | ||
4992 | 212 | LOG, "Config retries '%s' is not an int, using default '%s'", | ||
4993 | 213 | self.ds_cfg.get('retries'), retries) | ||
4994 | 214 | |||
4995 | 215 | return URLParams(max_wait, timeout, retries) | ||
4996 | 216 | |||
4997 | 152 | def get_userdata(self, apply_filter=False): | 217 | def get_userdata(self, apply_filter=False): |
4998 | 153 | if self.userdata is None: | 218 | if self.userdata is None: |
4999 | 154 | self.userdata = self.ud_proc.process(self.get_userdata_raw()) | 219 | self.userdata = self.ud_proc.process(self.get_userdata_raw()) |
5000 | @@ -162,6 +227,17 @@ class DataSource(object): |
The diff has been truncated for viewing.
FAILED: Continuous integration, rev:0bb961d0481 e0158c0ef115f71 8a0f4ebcafdcea /jenkins. ubuntu. com/server/ job/cloud- init-ci/ 122/
https:/
Executed test runs:
SUCCESS: Checkout
SUCCESS: Unit & Style Tests
FAILED: Ubuntu LTS: Build
Click here to trigger a rebuild: /jenkins. ubuntu. com/server/ job/cloud- init-ci/ 122/rebuild
https:/