Merge ~chad.smith/cloud-init:ubuntu/xenial into cloud-init:ubuntu/xenial
- Git
- lp:~chad.smith/cloud-init
- ubuntu/xenial
- Merge into ubuntu/xenial
Proposed by
Chad Smith
Status: Merged
Approved by: Scott Moser
Approved revision: 04b240a3e24e9813314a2159d0c4999a876f0d18
Merged at revision: a48cab85b23b542f4bfe9072282b573aa59987ab
Proposed branch: ~chad.smith/cloud-init:ubuntu/xenial
Merge into: cloud-init:ubuntu/xenial
Diff against target:
Diff against target: |
10355 lines (+5108/-1157) 144 files modified
.gitignore (+1/-0) .pylintrc (+2/-2) ChangeLog (+85/-0) HACKING.rst (+8/-0) cloudinit/analyze/__main__.py (+3/-1) cloudinit/analyze/dump.py (+1/-7) cloudinit/cmd/clean.py (+103/-0) cloudinit/cmd/main.py (+37/-7) cloudinit/cmd/status.py (+160/-0) cloudinit/cmd/tests/__init__.py (+0/-0) cloudinit/cmd/tests/test_clean.py (+176/-0) cloudinit/cmd/tests/test_status.py (+368/-0) cloudinit/config/cc_apt_configure.py (+3/-2) cloudinit/config/cc_disk_setup.py (+5/-3) cloudinit/config/cc_landscape.py (+4/-4) cloudinit/config/cc_ntp.py (+5/-5) cloudinit/config/cc_power_state_change.py (+1/-0) cloudinit/config/cc_resizefs.py (+11/-1) cloudinit/config/cc_rh_subscription.py (+2/-3) cloudinit/config/cc_rsyslog.py (+5/-5) cloudinit/config/cc_seed_random.py (+2/-1) cloudinit/config/cc_snap_config.py (+5/-2) cloudinit/distros/__init__.py (+18/-13) cloudinit/distros/freebsd.py (+3/-8) cloudinit/ec2_utils.py (+30/-9) cloudinit/net/__init__.py (+2/-2) cloudinit/net/cmdline.py (+5/-4) cloudinit/net/dhcp.py (+42/-1) cloudinit/net/network_state.py (+17/-3) cloudinit/sources/DataSourceAliYun.py (+1/-0) cloudinit/sources/DataSourceAltCloud.py (+5/-2) cloudinit/sources/DataSourceAzure.py (+150/-21) cloudinit/sources/DataSourceBigstep.py (+4/-1) cloudinit/sources/DataSourceCloudSigma.py (+4/-1) cloudinit/sources/DataSourceCloudStack.py (+4/-1) cloudinit/sources/DataSourceConfigDrive.py (+6/-3) cloudinit/sources/DataSourceDigitalOcean.py (+4/-1) cloudinit/sources/DataSourceEc2.py (+41/-24) cloudinit/sources/DataSourceGCE.py (+99/-40) cloudinit/sources/DataSourceMAAS.py (+44/-15) cloudinit/sources/DataSourceNoCloud.py (+4/-1) cloudinit/sources/DataSourceNone.py (+4/-1) cloudinit/sources/DataSourceOVF.py (+92/-38) cloudinit/sources/DataSourceOpenNebula.py (+66/-56) cloudinit/sources/DataSourceOpenStack.py (+4/-1) cloudinit/sources/DataSourceScaleway.py (+3/-1) cloudinit/sources/DataSourceSmartOS.py (+4/-1) cloudinit/sources/__init__.py (+117/-14) cloudinit/sources/helpers/azure.py (+16/-9) 
cloudinit/sources/helpers/vmware/imc/config.py (+4/-0) cloudinit/sources/helpers/vmware/imc/config_custom_script.py (+153/-0) cloudinit/sources/helpers/vmware/imc/config_nic.py (+1/-1) cloudinit/sources/tests/__init__.py (+0/-0) cloudinit/sources/tests/test_init.py (+202/-0) cloudinit/temp_utils.py (+8/-3) cloudinit/tests/helpers.py (+35/-7) cloudinit/tests/test_util.py (+46/-0) cloudinit/url_helper.py (+20/-9) cloudinit/util.py (+129/-64) cloudinit/version.py (+1/-1) debian/changelog (+55/-3) dev/null (+0/-172) doc/rtd/topics/boot.rst (+10/-3) doc/rtd/topics/capabilities.rst (+153/-7) doc/rtd/topics/debugging.rst (+1/-0) doc/rtd/topics/modules.rst (+2/-0) doc/rtd/topics/network-config-format-v1.rst (+1/-1) doc/rtd/topics/tests.rst (+32/-6) integration-requirements.txt (+20/-0) setup.py (+24/-1) systemd/cloud-init-local.service.tmpl (+0/-6) tests/cloud_tests/__init__.py (+6/-0) tests/cloud_tests/bddeb.py (+4/-4) tests/cloud_tests/collect.py (+28/-16) tests/cloud_tests/config.py (+3/-1) tests/cloud_tests/platforms.yaml (+6/-5) tests/cloud_tests/platforms/__init__.py (+20/-2) tests/cloud_tests/platforms/ec2/image.py (+99/-0) tests/cloud_tests/platforms/ec2/instance.py (+132/-0) tests/cloud_tests/platforms/ec2/platform.py (+258/-0) tests/cloud_tests/platforms/ec2/snapshot.py (+66/-0) tests/cloud_tests/platforms/images.py (+2/-1) tests/cloud_tests/platforms/instances.py (+69/-1) tests/cloud_tests/platforms/lxd/image.py (+5/-6) tests/cloud_tests/platforms/lxd/instance.py (+22/-27) tests/cloud_tests/platforms/lxd/platform.py (+7/-7) tests/cloud_tests/platforms/lxd/snapshot.py (+2/-2) tests/cloud_tests/platforms/nocloudkvm/image.py (+5/-16) tests/cloud_tests/platforms/nocloudkvm/instance.py (+72/-59) tests/cloud_tests/platforms/nocloudkvm/platform.py (+11/-9) tests/cloud_tests/platforms/nocloudkvm/snapshot.py (+2/-22) tests/cloud_tests/platforms/platforms.py (+96/-0) tests/cloud_tests/platforms/snapshots.py (+0/-0) tests/cloud_tests/releases.yaml (+10/-22) 
tests/cloud_tests/setup_image.py (+0/-18) tests/cloud_tests/testcases.yaml (+21/-6) tests/cloud_tests/testcases/base.py (+6/-3) tests/cloud_tests/testcases/modules/apt_configure_sources_list.py (+5/-0) tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml (+6/-0) tests/cloud_tests/testcases/modules/ntp_pools.yaml (+1/-1) tests/cloud_tests/testcases/modules/ntp_servers.yaml (+1/-1) tests/cloud_tests/testcases/modules/set_hostname_fqdn.py (+1/-1) tests/cloud_tests/util.py (+16/-3) tests/cloud_tests/verify.py (+1/-1) tests/unittests/test_cli.py (+99/-6) tests/unittests/test_cs_util.py (+1/-0) tests/unittests/test_datasource/test_aliyun.py (+17/-1) tests/unittests/test_datasource/test_altcloud.py (+13/-9) tests/unittests/test_datasource/test_azure.py (+204/-40) tests/unittests/test_datasource/test_cloudsigma.py (+9/-4) tests/unittests/test_datasource/test_cloudstack.py (+13/-6) tests/unittests/test_datasource/test_configdrive.py (+25/-37) tests/unittests/test_datasource/test_digitalocean.py (+13/-7) tests/unittests/test_datasource/test_ec2.py (+5/-3) tests/unittests/test_datasource/test_gce.py (+174/-22) tests/unittests/test_datasource/test_maas.py (+46/-7) tests/unittests/test_datasource/test_nocloud.py (+6/-8) tests/unittests/test_datasource/test_opennebula.py (+182/-53) tests/unittests/test_datasource/test_openstack.py (+8/-4) tests/unittests/test_datasource/test_ovf.py (+107/-4) tests/unittests/test_datasource/test_scaleway.py (+9/-4) tests/unittests/test_datasource/test_smartos.py (+2/-1) tests/unittests/test_distros/test_create_users.py (+5/-2) tests/unittests/test_distros/test_netconfig.py (+46/-6) tests/unittests/test_ds_identify.py (+130/-3) tests/unittests/test_handler/test_handler_lxd.py (+0/-3) tests/unittests/test_handler/test_handler_power_state.py (+0/-3) tests/unittests/test_handler/test_handler_resizefs.py (+21/-1) tests/unittests/test_handler/test_handler_yum_add_repo.py (+2/-8) 
tests/unittests/test_handler/test_handler_zypper_add_repo.py (+1/-6) tests/unittests/test_net.py (+15/-3) tests/unittests/test_reporting.py (+1/-1) tests/unittests/test_runs/test_merge_run.py (+1/-0) tests/unittests/test_runs/test_simple_run.py (+2/-1) tests/unittests/test_templating.py (+1/-1) tests/unittests/test_util.py (+64/-3) tests/unittests/test_vmware/__init__.py (+0/-0) tests/unittests/test_vmware/test_custom_script.py (+99/-0) tests/unittests/test_vmware_config_file.py (+9/-1) tools/ds-identify (+81/-35) tools/make-mime.py (+1/-1) tools/mock-meta.py (+21/-24) tools/read-version (+14/-1) tox.ini (+5/-6) |
Related bugs: (none listed)

Reviewer | Review Type | Date Requested | Status
---|---|---|---
Server Team CI bot | continuous-integration | | Needs Fixing
Scott Moser | | | Pending

Review via email: mp+337098@code.launchpad.net
Commit message
Description of the change
Sync snapshot of master into xenial per SRU.
LP: #1747059
To post a comment you must log in.
Revision history for this message
Server Team CI bot (server-team-bot) wrote:
review:
Needs Fixing
(continuous-integration)
There was an error fetching revisions from git servers. Please try again in a few minutes. If the problem persists, contact Launchpad support.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | diff --git a/.gitignore b/.gitignore | |||
2 | index b0500a6..75565ed 100644 | |||
3 | --- a/.gitignore | |||
4 | +++ b/.gitignore | |||
5 | @@ -10,3 +10,4 @@ parts | |||
6 | 10 | prime | 10 | prime |
7 | 11 | stage | 11 | stage |
8 | 12 | *.snap | 12 | *.snap |
9 | 13 | *.cover | ||
10 | diff --git a/.pylintrc b/.pylintrc | |||
11 | index b160ce7..05a086d 100644 | |||
12 | --- a/.pylintrc | |||
13 | +++ b/.pylintrc | |||
14 | @@ -46,7 +46,7 @@ reports=no | |||
15 | 46 | # (useful for modules/projects where namespaces are manipulated during runtime | 46 | # (useful for modules/projects where namespaces are manipulated during runtime |
16 | 47 | # and thus existing member attributes cannot be deduced by static analysis. It | 47 | # and thus existing member attributes cannot be deduced by static analysis. It |
17 | 48 | # supports qualified module names, as well as Unix pattern matching. | 48 | # supports qualified module names, as well as Unix pattern matching. |
19 | 49 | ignored-modules=six.moves,pkg_resources,httplib,http.client | 49 | ignored-modules=six.moves,pkg_resources,httplib,http.client,paramiko,simplestreams |
20 | 50 | 50 | ||
21 | 51 | # List of class names for which member attributes should not be checked (useful | 51 | # List of class names for which member attributes should not be checked (useful |
22 | 52 | # for classes with dynamically set attributes). This supports the use of | 52 | # for classes with dynamically set attributes). This supports the use of |
23 | @@ -56,5 +56,5 @@ ignored-classes=optparse.Values,thread._local | |||
24 | 56 | # List of members which are set dynamically and missed by pylint inference | 56 | # List of members which are set dynamically and missed by pylint inference |
25 | 57 | # system, and so shouldn't trigger E1101 when accessed. Python regular | 57 | # system, and so shouldn't trigger E1101 when accessed. Python regular |
26 | 58 | # expressions are accepted. | 58 | # expressions are accepted. |
28 | 59 | generated-members=types,http.client,command_handlers | 59 | generated-members=types,http.client,command_handlers,m_.* |
29 | 60 | 60 | ||
30 | diff --git a/ChangeLog b/ChangeLog | |||
31 | index 0260c57..31c2dcb 100644 | |||
32 | --- a/ChangeLog | |||
33 | +++ b/ChangeLog | |||
34 | @@ -1,3 +1,88 @@ | |||
35 | 1 | 17.2: | ||
36 | 2 | - ds-identify: failure in NoCloud due to unset variable usage. | ||
37 | 3 | (LP: #1737704) | ||
38 | 4 | - tests: fix collect_console when not implemented [Joshua Powers] | ||
39 | 5 | - ec2: Use instance-identity doc for region and instance-id | ||
40 | 6 | [Andrew Jorgensen] | ||
41 | 7 | - tests: remove leaked tmp files in config drive tests. | ||
42 | 8 | - setup.py: Do not include rendered files in SOURCES.txt | ||
43 | 9 | - SUSE: remove delta in systemd local template for SUSE [Robert Schweikert] | ||
44 | 10 | - tests: move to using tox 1.7.5 | ||
45 | 11 | - OVF: improve ds-identify to support finding OVF iso transport. | ||
46 | 12 | (LP: #1731868) | ||
47 | 13 | - VMware: Support for user provided pre and post-customization scripts | ||
48 | 14 | [Maitreyee Saikia] | ||
49 | 15 | - citest: In NoCloudKVM provide keys via metadata not userdata. | ||
50 | 16 | - pylint: Update pylint to 1.7.1, run on tests/ and tools and fix | ||
51 | 17 | complaints. | ||
52 | 18 | - Datasources: Formalize DataSource get_data and related properties. | ||
53 | 19 | - cli: Add clean and status subcommands | ||
54 | 20 | - tests: consolidate platforms into specific dirs | ||
55 | 21 | - ec2: Fix sandboxed dhclient background process cleanup. (LP: #1735331) | ||
56 | 22 | - tests: NoCloudKVMImage do not modify the original local cache image. | ||
57 | 23 | - tests: Enable bionic in integration tests. [Joshua Powers] | ||
58 | 24 | - tests: Use apt-get to install a deb so that depends get resolved. | ||
59 | 25 | - sysconfig: Correctly render dns and dns search info. | ||
60 | 26 | [Ryan McCabe] (LP: #1705804) | ||
61 | 27 | - integration test: replace curtin test ppa with cloud-init test ppa. | ||
62 | 28 | - EC2: Fix bug using fallback_nic and metadata when restoring from cache. | ||
63 | 29 | (LP: #1732917) | ||
64 | 30 | - EC2: Kill dhclient process used in sandbox dhclient. (LP: #1732964) | ||
65 | 31 | - ntp: fix configuration template rendering for openSUSE and SLES | ||
66 | 32 | (LP: #1726572) | ||
67 | 33 | - centos: Provide the failed #include url in error messages | ||
68 | 34 | - Catch UrlError when #include'ing URLs [Andrew Jorgensen] | ||
69 | 35 | - hosts: Fix openSUSE and SLES setup for /etc/hosts and clarify docs. | ||
70 | 36 | [Robert Schweikert] (LP: #1731022) | ||
71 | 37 | - rh_subscription: Perform null checks for enabled and disabled repos. | ||
72 | 38 | [Dave Mulford] | ||
73 | 39 | - Improve warning message when a template is not found. | ||
74 | 40 | [Robert Schweikert] (LP: #1731035) | ||
75 | 41 | - Replace the temporary i9n.brickies.net with i9n.cloud-init.io. | ||
76 | 42 | - Azure: don't generate network configuration for SRIOV devices | ||
77 | 43 | (LP: #1721579) | ||
78 | 44 | - tests: address some minor feedback missed in last merge. | ||
79 | 45 | - tests: integration test cleanup and full pass of nocloud-kvm. | ||
80 | 46 | - Gentoo: chmod +x on all files in sysvinit/gentoo/ | ||
81 | 47 | [ckonstanski] (LP: #1727126) | ||
82 | 48 | - EC2: Limit network config to fallback nic, fix local-ipv4 only | ||
83 | 49 | instances. (LP: #1728152) | ||
84 | 50 | - Gentoo: Use "rc-service" rather than "service". | ||
85 | 51 | [Carlos Konstanski] (LP: #1727121) | ||
86 | 52 | - resizefs: Fix regression when system booted with root=PARTUUID= | ||
87 | 53 | (LP: #1725067) | ||
88 | 54 | - tools: make yum package installation more reliable | ||
89 | 55 | - citest: fix remaining warnings raised by integration tests. | ||
90 | 56 | - citest: show the class actual class name in results. | ||
91 | 57 | - ntp: fix config module schema to allow empty ntp config (LP: #1724951) | ||
92 | 58 | - tools: disable fastestmirror if using proxy [Joshua Powers] | ||
93 | 59 | - schema: Log debug instead of warning when jsonschema is not available. | ||
94 | 60 | (LP: #1724354) | ||
95 | 61 | - simpletable: Fix get_string method to return table-formatted string | ||
96 | 62 | (LP: #1722566) | ||
97 | 63 | - net: Handle bridge stp values of 0 and convert to boolean type | ||
98 | 64 | - tools: Give specific --abbrev=8 to "git describe" | ||
99 | 65 | - network: bridge_stp value not always correct (LP: #1721157) | ||
100 | 66 | - tests: re-enable tox with nocloud-kvm support [Joshua Powers] | ||
101 | 67 | - systemd: remove limit on tasks created by cloud-init-final.service. | ||
102 | 68 | [Robert Schweikert] (LP: #1717969) | ||
103 | 69 | - suse: Support addition of zypper repos via cloud-config. | ||
104 | 70 | [Robert Schweikert] (LP: #1718675) | ||
105 | 71 | - tests: Combine integration configs and testcases [Joshua Powers] | ||
106 | 72 | - Azure, CloudStack: Support reading dhcp options from systemd-networkd. | ||
107 | 73 | [Dimitri John Ledkov] (LP: #1718029) | ||
108 | 74 | - packages/debian/copyright: remove mention of boto and MIT license | ||
109 | 75 | - systemd: only mention Before=apt-daily.service on debian based distros. | ||
110 | 76 | [Robert Schweikert] | ||
111 | 77 | - Add missing simpletable and simpletable tests for failed merge | ||
112 | 78 | - Remove prettytable dependency, introduce simpletable [Andrew Jorgensen] | ||
113 | 79 | - debian/copyright: dep5 updates, reorganize, add Apache 2.0 license. | ||
114 | 80 | [Joshua Powers] (LP: #1718681) | ||
115 | 81 | - tests: remove dependency on shlex [Joshua Powers] | ||
116 | 82 | - AltCloud: Trust PATH for udevadm and modprobe. | ||
117 | 83 | - DataSourceOVF: use util.find_devs_with(TYPE=iso9660) (LP: #1718287) | ||
118 | 84 | - tests: remove a temp file used in bootcmd tests. | ||
119 | 85 | |||
120 | 1 | 17.1: | 86 | 17.1: |
121 | 2 | - doc: document GCE datasource. [Arnd Hannemann] | 87 | - doc: document GCE datasource. [Arnd Hannemann] |
122 | 3 | - suse: updates to templates to support openSUSE and SLES. | 88 | - suse: updates to templates to support openSUSE and SLES. |
123 | diff --git a/HACKING.rst b/HACKING.rst | |||
124 | index 93e3f42..3bb555c 100644 | |||
125 | --- a/HACKING.rst | |||
126 | +++ b/HACKING.rst | |||
127 | @@ -16,6 +16,14 @@ Do these things once | |||
128 | 16 | When prompted for 'Project contact' or 'Canonical Project Manager' enter | 16 | When prompted for 'Project contact' or 'Canonical Project Manager' enter |
129 | 17 | 'Scott Moser'. | 17 | 'Scott Moser'. |
130 | 18 | 18 | ||
131 | 19 | * Configure git with your email and name for commit messages. | ||
132 | 20 | |||
133 | 21 | Your name will appear in commit messages and will also be used in | ||
134 | 22 | changelogs or release notes. Give yourself credit!:: | ||
135 | 23 | |||
136 | 24 | git config user.name "Your Name" | ||
137 | 25 | git config user.email "Your Email" | ||
138 | 26 | |||
139 | 19 | * Clone the upstream `repository`_ on Launchpad:: | 27 | * Clone the upstream `repository`_ on Launchpad:: |
140 | 20 | 28 | ||
141 | 21 | git clone https://git.launchpad.net/cloud-init | 29 | git clone https://git.launchpad.net/cloud-init |
142 | diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py | |||
143 | index 69b9e43..3ba5903 100644 | |||
144 | --- a/cloudinit/analyze/__main__.py | |||
145 | +++ b/cloudinit/analyze/__main__.py | |||
146 | @@ -6,6 +6,8 @@ import argparse | |||
147 | 6 | import re | 6 | import re |
148 | 7 | import sys | 7 | import sys |
149 | 8 | 8 | ||
150 | 9 | from cloudinit.util import json_dumps | ||
151 | 10 | |||
152 | 9 | from . import dump | 11 | from . import dump |
153 | 10 | from . import show | 12 | from . import show |
154 | 11 | 13 | ||
155 | @@ -112,7 +114,7 @@ def analyze_show(name, args): | |||
156 | 112 | def analyze_dump(name, args): | 114 | def analyze_dump(name, args): |
157 | 113 | """Dump cloud-init events in json format""" | 115 | """Dump cloud-init events in json format""" |
158 | 114 | (infh, outfh) = configure_io(args) | 116 | (infh, outfh) = configure_io(args) |
160 | 115 | outfh.write(dump.json_dumps(_get_events(infh)) + '\n') | 117 | outfh.write(json_dumps(_get_events(infh)) + '\n') |
161 | 116 | 118 | ||
162 | 117 | 119 | ||
163 | 118 | def _get_events(infile): | 120 | def _get_events(infile): |
164 | diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py | |||
165 | index ca4da49..b071aa1 100644 | |||
166 | --- a/cloudinit/analyze/dump.py | |||
167 | +++ b/cloudinit/analyze/dump.py | |||
168 | @@ -2,7 +2,6 @@ | |||
169 | 2 | 2 | ||
170 | 3 | import calendar | 3 | import calendar |
171 | 4 | from datetime import datetime | 4 | from datetime import datetime |
172 | 5 | import json | ||
173 | 6 | import sys | 5 | import sys |
174 | 7 | 6 | ||
175 | 8 | from cloudinit import util | 7 | from cloudinit import util |
176 | @@ -132,11 +131,6 @@ def parse_ci_logline(line): | |||
177 | 132 | return event | 131 | return event |
178 | 133 | 132 | ||
179 | 134 | 133 | ||
180 | 135 | def json_dumps(data): | ||
181 | 136 | return json.dumps(data, indent=1, sort_keys=True, | ||
182 | 137 | separators=(',', ': ')) | ||
183 | 138 | |||
184 | 139 | |||
185 | 140 | def dump_events(cisource=None, rawdata=None): | 134 | def dump_events(cisource=None, rawdata=None): |
186 | 141 | events = [] | 135 | events = [] |
187 | 142 | event = None | 136 | event = None |
188 | @@ -169,7 +163,7 @@ def main(): | |||
189 | 169 | else: | 163 | else: |
190 | 170 | cisource = sys.stdin | 164 | cisource = sys.stdin |
191 | 171 | 165 | ||
193 | 172 | return json_dumps(dump_events(cisource)) | 166 | return util.json_dumps(dump_events(cisource)) |
194 | 173 | 167 | ||
195 | 174 | 168 | ||
196 | 175 | if __name__ == "__main__": | 169 | if __name__ == "__main__": |
197 | diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py | |||
198 | 176 | new file mode 100644 | 170 | new file mode 100644 |
199 | index 0000000..de22f7f | |||
200 | --- /dev/null | |||
201 | +++ b/cloudinit/cmd/clean.py | |||
202 | @@ -0,0 +1,103 @@ | |||
203 | 1 | # Copyright (C) 2017 Canonical Ltd. | ||
204 | 2 | # | ||
205 | 3 | # This file is part of cloud-init. See LICENSE file for license information. | ||
206 | 4 | |||
207 | 5 | """Define 'clean' utility and handler as part of cloud-init commandline.""" | ||
208 | 6 | |||
209 | 7 | import argparse | ||
210 | 8 | import os | ||
211 | 9 | import sys | ||
212 | 10 | |||
213 | 11 | from cloudinit.stages import Init | ||
214 | 12 | from cloudinit.util import ( | ||
215 | 13 | ProcessExecutionError, chdir, del_dir, del_file, get_config_logfiles, | ||
216 | 14 | is_link, subp) | ||
217 | 15 | |||
218 | 16 | |||
219 | 17 | def error(msg): | ||
220 | 18 | sys.stderr.write("ERROR: " + msg + "\n") | ||
221 | 19 | |||
222 | 20 | |||
223 | 21 | def get_parser(parser=None): | ||
224 | 22 | """Build or extend an arg parser for clean utility. | ||
225 | 23 | |||
226 | 24 | @param parser: Optional existing ArgumentParser instance representing the | ||
227 | 25 | clean subcommand which will be extended to support the args of | ||
228 | 26 | this utility. | ||
229 | 27 | |||
230 | 28 | @returns: ArgumentParser with proper argument configuration. | ||
231 | 29 | """ | ||
232 | 30 | if not parser: | ||
233 | 31 | parser = argparse.ArgumentParser( | ||
234 | 32 | prog='clean', | ||
235 | 33 | description=('Remove logs and artifacts so cloud-init re-runs on ' | ||
236 | 34 | 'a clean system')) | ||
237 | 35 | parser.add_argument( | ||
238 | 36 | '-l', '--logs', action='store_true', default=False, dest='remove_logs', | ||
239 | 37 | help='Remove cloud-init logs.') | ||
240 | 38 | parser.add_argument( | ||
241 | 39 | '-r', '--reboot', action='store_true', default=False, | ||
242 | 40 | help='Reboot system after logs are cleaned so cloud-init re-runs.') | ||
243 | 41 | parser.add_argument( | ||
244 | 42 | '-s', '--seed', action='store_true', default=False, dest='remove_seed', | ||
245 | 43 | help='Remove cloud-init seed directory /var/lib/cloud/seed.') | ||
246 | 44 | return parser | ||
247 | 45 | |||
248 | 46 | |||
249 | 47 | def remove_artifacts(remove_logs, remove_seed=False): | ||
250 | 48 | """Helper which removes artifacts dir and optionally log files. | ||
251 | 49 | |||
252 | 50 | @param: remove_logs: Boolean. Set True to delete the cloud_dir path. False | ||
253 | 51 | preserves them. | ||
254 | 52 | @param: remove_seed: Boolean. Set True to also delete seed subdir in | ||
255 | 53 | paths.cloud_dir. | ||
256 | 54 | @returns: 0 on success, 1 otherwise. | ||
257 | 55 | """ | ||
258 | 56 | init = Init(ds_deps=[]) | ||
259 | 57 | init.read_cfg() | ||
260 | 58 | if remove_logs: | ||
261 | 59 | for log_file in get_config_logfiles(init.cfg): | ||
262 | 60 | del_file(log_file) | ||
263 | 61 | |||
264 | 62 | if not os.path.isdir(init.paths.cloud_dir): | ||
265 | 63 | return 0 # Artifacts dir already cleaned | ||
266 | 64 | with chdir(init.paths.cloud_dir): | ||
267 | 65 | for path in os.listdir('.'): | ||
268 | 66 | if path == 'seed' and not remove_seed: | ||
269 | 67 | continue | ||
270 | 68 | try: | ||
271 | 69 | if os.path.isdir(path) and not is_link(path): | ||
272 | 70 | del_dir(path) | ||
273 | 71 | else: | ||
274 | 72 | del_file(path) | ||
275 | 73 | except OSError as e: | ||
276 | 74 | error('Could not remove {0}: {1}'.format(path, str(e))) | ||
277 | 75 | return 1 | ||
278 | 76 | return 0 | ||
279 | 77 | |||
280 | 78 | |||
281 | 79 | def handle_clean_args(name, args): | ||
282 | 80 | """Handle calls to 'cloud-init clean' as a subcommand.""" | ||
283 | 81 | exit_code = remove_artifacts(args.remove_logs, args.remove_seed) | ||
284 | 82 | if exit_code == 0 and args.reboot: | ||
285 | 83 | cmd = ['shutdown', '-r', 'now'] | ||
286 | 84 | try: | ||
287 | 85 | subp(cmd, capture=False) | ||
288 | 86 | except ProcessExecutionError as e: | ||
289 | 87 | error( | ||
290 | 88 | 'Could not reboot this system using "{0}": {1}'.format( | ||
291 | 89 | cmd, str(e))) | ||
292 | 90 | exit_code = 1 | ||
293 | 91 | return exit_code | ||
294 | 92 | |||
295 | 93 | |||
296 | 94 | def main(): | ||
297 | 95 | """Tool to collect and tar all cloud-init related logs.""" | ||
298 | 96 | parser = get_parser() | ||
299 | 97 | sys.exit(handle_clean_args('clean', parser.parse_args())) | ||
300 | 98 | |||
301 | 99 | |||
302 | 100 | if __name__ == '__main__': | ||
303 | 101 | main() | ||
304 | 102 | |||
305 | 103 | # vi: ts=4 expandtab | ||
306 | diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py | |||
307 | index 6fb9d9e..d2f1b77 100644 | |||
308 | --- a/cloudinit/cmd/main.py | |||
309 | +++ b/cloudinit/cmd/main.py | |||
310 | @@ -421,7 +421,13 @@ def di_report_warn(datasource, cfg): | |||
311 | 421 | LOG.debug("no di_report found in config.") | 421 | LOG.debug("no di_report found in config.") |
312 | 422 | return | 422 | return |
313 | 423 | 423 | ||
315 | 424 | dicfg = cfg.get('di_report', {}) | 424 | dicfg = cfg['di_report'] |
316 | 425 | if dicfg is None: | ||
317 | 426 | # ds-identify may write 'di_report:\n #comment\n' | ||
318 | 427 | # which reads as {'di_report': None} | ||
319 | 428 | LOG.debug("di_report was None.") | ||
320 | 429 | return | ||
321 | 430 | |||
322 | 425 | if not isinstance(dicfg, dict): | 431 | if not isinstance(dicfg, dict): |
323 | 426 | LOG.warning("di_report config not a dictionary: %s", dicfg) | 432 | LOG.warning("di_report config not a dictionary: %s", dicfg) |
324 | 427 | return | 433 | return |
325 | @@ -603,7 +609,11 @@ def status_wrapper(name, args, data_d=None, link_d=None): | |||
326 | 603 | else: | 609 | else: |
327 | 604 | raise ValueError("unknown name: %s" % name) | 610 | raise ValueError("unknown name: %s" % name) |
328 | 605 | 611 | ||
330 | 606 | modes = ('init', 'init-local', 'modules-config', 'modules-final') | 612 | modes = ('init', 'init-local', 'modules-init', 'modules-config', |
331 | 613 | 'modules-final') | ||
332 | 614 | if mode not in modes: | ||
333 | 615 | raise ValueError( | ||
334 | 616 | "Invalid cloud init mode specified '{0}'".format(mode)) | ||
335 | 607 | 617 | ||
336 | 608 | status = None | 618 | status = None |
337 | 609 | if mode == 'init-local': | 619 | if mode == 'init-local': |
338 | @@ -615,16 +625,18 @@ def status_wrapper(name, args, data_d=None, link_d=None): | |||
339 | 615 | except Exception: | 625 | except Exception: |
340 | 616 | pass | 626 | pass |
341 | 617 | 627 | ||
342 | 628 | nullstatus = { | ||
343 | 629 | 'errors': [], | ||
344 | 630 | 'start': None, | ||
345 | 631 | 'finished': None, | ||
346 | 632 | } | ||
347 | 618 | if status is None: | 633 | if status is None: |
348 | 619 | nullstatus = { | ||
349 | 620 | 'errors': [], | ||
350 | 621 | 'start': None, | ||
351 | 622 | 'finished': None, | ||
352 | 623 | } | ||
353 | 624 | status = {'v1': {}} | 634 | status = {'v1': {}} |
354 | 625 | for m in modes: | 635 | for m in modes: |
355 | 626 | status['v1'][m] = nullstatus.copy() | 636 | status['v1'][m] = nullstatus.copy() |
356 | 627 | status['v1']['datasource'] = None | 637 | status['v1']['datasource'] = None |
357 | 638 | elif mode not in status['v1']: | ||
358 | 639 | status['v1'][mode] = nullstatus.copy() | ||
359 | 628 | 640 | ||
360 | 629 | v1 = status['v1'] | 641 | v1 = status['v1'] |
361 | 630 | v1['stage'] = mode | 642 | v1['stage'] = mode |
362 | @@ -767,6 +779,12 @@ def main(sysv_args=None): | |||
363 | 767 | parser_collect_logs = subparsers.add_parser( | 779 | parser_collect_logs = subparsers.add_parser( |
364 | 768 | 'collect-logs', help='Collect and tar all cloud-init debug info') | 780 | 'collect-logs', help='Collect and tar all cloud-init debug info') |
365 | 769 | 781 | ||
366 | 782 | parser_clean = subparsers.add_parser( | ||
367 | 783 | 'clean', help='Remove logs and artifacts so cloud-init can re-run.') | ||
368 | 784 | |||
369 | 785 | parser_status = subparsers.add_parser( | ||
370 | 786 | 'status', help='Report cloud-init status or wait on completion.') | ||
371 | 787 | |||
372 | 770 | if sysv_args: | 788 | if sysv_args: |
373 | 771 | # Only load subparsers if subcommand is specified to avoid load cost | 789 | # Only load subparsers if subcommand is specified to avoid load cost |
374 | 772 | if sysv_args[0] == 'analyze': | 790 | if sysv_args[0] == 'analyze': |
375 | @@ -783,6 +801,18 @@ def main(sysv_args=None): | |||
376 | 783 | logs_parser(parser_collect_logs) | 801 | logs_parser(parser_collect_logs) |
377 | 784 | parser_collect_logs.set_defaults( | 802 | parser_collect_logs.set_defaults( |
378 | 785 | action=('collect-logs', handle_collect_logs_args)) | 803 | action=('collect-logs', handle_collect_logs_args)) |
379 | 804 | elif sysv_args[0] == 'clean': | ||
380 | 805 | from cloudinit.cmd.clean import ( | ||
381 | 806 | get_parser as clean_parser, handle_clean_args) | ||
382 | 807 | clean_parser(parser_clean) | ||
383 | 808 | parser_clean.set_defaults( | ||
384 | 809 | action=('clean', handle_clean_args)) | ||
385 | 810 | elif sysv_args[0] == 'status': | ||
386 | 811 | from cloudinit.cmd.status import ( | ||
387 | 812 | get_parser as status_parser, handle_status_args) | ||
388 | 813 | status_parser(parser_status) | ||
389 | 814 | parser_status.set_defaults( | ||
390 | 815 | action=('status', handle_status_args)) | ||
391 | 786 | 816 | ||
392 | 787 | args = parser.parse_args(args=sysv_args) | 817 | args = parser.parse_args(args=sysv_args) |
393 | 788 | 818 | ||
394 | diff --git a/cloudinit/cmd/status.py b/cloudinit/cmd/status.py | |||
395 | 789 | new file mode 100644 | 819 | new file mode 100644 |
396 | index 0000000..d7aaee9 | |||
397 | --- /dev/null | |||
398 | +++ b/cloudinit/cmd/status.py | |||
399 | @@ -0,0 +1,160 @@ | |||
400 | 1 | # Copyright (C) 2017 Canonical Ltd. | ||
401 | 2 | # | ||
402 | 3 | # This file is part of cloud-init. See LICENSE file for license information. | ||
403 | 4 | |||
404 | 5 | """Define 'status' utility and handler as part of cloud-init commandline.""" | ||
405 | 6 | |||
406 | 7 | import argparse | ||
407 | 8 | import os | ||
408 | 9 | import sys | ||
409 | 10 | from time import gmtime, strftime, sleep | ||
410 | 11 | |||
411 | 12 | from cloudinit.distros import uses_systemd | ||
412 | 13 | from cloudinit.stages import Init | ||
413 | 14 | from cloudinit.util import get_cmdline, load_file, load_json | ||
414 | 15 | |||
415 | 16 | CLOUDINIT_DISABLED_FILE = '/etc/cloud/cloud-init.disabled' | ||
416 | 17 | |||
417 | 18 | # customer visible status messages | ||
418 | 19 | STATUS_ENABLED_NOT_RUN = 'not run' | ||
419 | 20 | STATUS_RUNNING = 'running' | ||
420 | 21 | STATUS_DONE = 'done' | ||
421 | 22 | STATUS_ERROR = 'error' | ||
422 | 23 | STATUS_DISABLED = 'disabled' | ||
423 | 24 | |||
424 | 25 | |||
425 | 26 | def get_parser(parser=None): | ||
426 | 27 | """Build or extend an arg parser for status utility. | ||
427 | 28 | |||
428 | 29 | @param parser: Optional existing ArgumentParser instance representing the | ||
429 | 30 | status subcommand which will be extended to support the args of | ||
430 | 31 | this utility. | ||
431 | 32 | |||
432 | 33 | @returns: ArgumentParser with proper argument configuration. | ||
433 | 34 | """ | ||
434 | 35 | if not parser: | ||
435 | 36 | parser = argparse.ArgumentParser( | ||
436 | 37 | prog='status', | ||
437 | 38 | description='Report run status of cloud init') | ||
438 | 39 | parser.add_argument( | ||
439 | 40 | '-l', '--long', action='store_true', default=False, | ||
440 | 41 | help=('Report long format of statuses including run stage name and' | ||
441 | 42 | ' error messages')) | ||
442 | 43 | parser.add_argument( | ||
443 | 44 | '-w', '--wait', action='store_true', default=False, | ||
444 | 45 | help='Block waiting on cloud-init to complete') | ||
445 | 46 | return parser | ||
446 | 47 | |||
447 | 48 | |||
def handle_status_args(name, args):
    """Handle calls to 'cloud-init status' as a subcommand.

    @param name: Name of the invoked subcommand (unused).
    @param args: Parsed argparse namespace providing 'long' and 'wait' flags.
    @returns: 1 when cloud-init is in the error state, otherwise 0.
    """
    # Read configured paths
    init = Init(ds_deps=[])
    init.read_cfg()

    state, detail, last_event = _get_status_details(init.paths)
    if args.wait:
        # Poll every quarter second, emitting a progress dot, until
        # cloud-init leaves any pending/running state.
        while state in (STATUS_ENABLED_NOT_RUN, STATUS_RUNNING):
            sys.stdout.write('.')
            sys.stdout.flush()
            state, detail, last_event = _get_status_details(init.paths)
            sleep(0.25)
        sys.stdout.write('\n')
    print('status: {0}'.format(state))
    if args.long:
        if last_event:
            print('time: {0}'.format(last_event))
        print('detail:\n{0}'.format(detail))
    return 1 if state == STATUS_ERROR else 0
470 | 71 | |||
471 | 72 | |||
def _is_cloudinit_disabled(disable_file, paths):
    """Report whether cloud-init is disabled.

    @param disable_file: The path to the cloud-init disable file.
    @param paths: An initialized cloudinit.helpers.Paths object.
    @returns: A tuple containing (bool, reason) about cloud-init's status and
        why.
    """
    kernel_args = get_cmdline().split()
    # Precedence (first match wins): sysvinit always enabled; kernel
    # cloud-init=enabled beats the disable file; the disable file beats
    # kernel cloud-init=disabled; finally the generator's 'enabled' marker.
    if not uses_systemd():
        return (False, 'Cloud-init enabled on sysvinit')
    if 'cloud-init=enabled' in kernel_args:
        return (
            False,
            'Cloud-init enabled by kernel command line cloud-init=enabled')
    if os.path.exists(disable_file):
        return (True, 'Cloud-init disabled by {0}'.format(disable_file))
    if 'cloud-init=disabled' in kernel_args:
        return (
            True,
            'Cloud-init disabled by kernel parameter cloud-init=disabled')
    if not os.path.exists(os.path.join(paths.run_dir, 'enabled')):
        return (True, 'Cloud-init disabled by cloud-init-generator')
    return (False, 'Cloud-init enabled by systemd cloud-init-generator')
498 | 99 | |||
499 | 100 | |||
def _get_status_details(paths):
    """Return a 3-tuple of status, status_details and time of last event.

    @param paths: An initialized cloudinit.helpers.paths object.

    Values are obtained from parsing paths.run_dir/status.json.
    """

    status = STATUS_ENABLED_NOT_RUN
    status_detail = ''
    status_v1 = {}

    status_file = os.path.join(paths.run_dir, 'status.json')

    (is_disabled, reason) = _is_cloudinit_disabled(
        CLOUDINIT_DISABLED_FILE, paths)
    if is_disabled:
        status = STATUS_DISABLED
        status_detail = reason
    if os.path.exists(status_file):
        status_v1 = load_json(load_file(status_file)).get('v1', {})
    errors = []
    latest_event = 0
    # Keys are visited in sorted order; in the observed status.json layouts
    # 'stage' sorts after the per-stage dicts, so a non-empty 'stage' value
    # ends up winning status_detail below.
    for key, value in sorted(status_v1.items()):
        if key == 'stage':
            if value:
                status_detail = 'Running in stage: {0}'.format(value)
        elif key == 'datasource':
            status_detail = value
        elif isinstance(value, dict):
            # Per-stage record (e.g. 'init', 'init-local') with optional
            # 'errors', 'start' and 'finished' timestamps.
            errors.extend(value.get('errors', []))
            start = value.get('start') or 0
            finished = value.get('finished') or 0
            # A stage with a start time but no finish time is still running.
            if finished == 0 and start != 0:
                status = STATUS_RUNNING
            event_time = max(start, finished)
            if event_time > latest_event:
                latest_event = event_time
    # Errors trump everything; otherwise any recorded activity while still
    # in the default 'not run' state means cloud-init completed.
    if errors:
        status = STATUS_ERROR
        status_detail = '\n'.join(errors)
    elif status == STATUS_ENABLED_NOT_RUN and latest_event > 0:
        status = STATUS_DONE
    if latest_event:
        time = strftime('%a, %d %b %Y %H:%M:%S %z', gmtime(latest_event))
    else:
        time = ''
    return status, status_detail, time
548 | 149 | |||
549 | 150 | |||
def main():
    """Tool to report status of cloud-init."""
    # Parse CLI args and exit with handle_status_args' return code.
    sys.exit(handle_status_args('status', get_parser().parse_args()))
554 | 155 | |||
555 | 156 | |||
# Allow direct invocation as a standalone script.
if __name__ == '__main__':
    main()
558 | 159 | |||
559 | 160 | # vi: ts=4 expandtab | ||
560 | diff --git a/cloudinit/cmd/tests/__init__.py b/cloudinit/cmd/tests/__init__.py | |||
561 | 0 | new file mode 100644 | 161 | new file mode 100644 |
562 | index 0000000..e69de29 | |||
563 | --- /dev/null | |||
564 | +++ b/cloudinit/cmd/tests/__init__.py | |||
565 | diff --git a/cloudinit/cmd/tests/test_clean.py b/cloudinit/cmd/tests/test_clean.py | |||
566 | 1 | new file mode 100644 | 162 | new file mode 100644 |
567 | index 0000000..6713af4 | |||
568 | --- /dev/null | |||
569 | +++ b/cloudinit/cmd/tests/test_clean.py | |||
570 | @@ -0,0 +1,176 @@ | |||
571 | 1 | # This file is part of cloud-init. See LICENSE file for license information. | ||
572 | 2 | |||
573 | 3 | from cloudinit.cmd import clean | ||
574 | 4 | from cloudinit.util import ensure_dir, sym_link, write_file | ||
575 | 5 | from cloudinit.tests.helpers import CiTestCase, wrap_and_call, mock | ||
576 | 6 | from collections import namedtuple | ||
577 | 7 | import os | ||
578 | 8 | from six import StringIO | ||
579 | 9 | |||
# Lightweight stand-in for cloudinit.helpers.Paths; only cloud_dir is used.
mypaths = namedtuple('MyPaths', 'cloud_dir')
581 | 11 | |||
582 | 12 | |||
class TestClean(CiTestCase):
    """Tests for cloudinit.cmd.clean (the 'cloud-init clean' subcommand)."""

    def setUp(self):
        super(TestClean, self).setUp()
        self.new_root = self.tmp_dir()
        self.artifact_dir = self.tmp_path('artifacts', self.new_root)
        self.log1 = self.tmp_path('cloud-init.log', self.new_root)
        self.log2 = self.tmp_path('cloud-init-output.log', self.new_root)

        class FakeInit(object):
            # The class body executes inside setUp, so it can close over
            # self to point cfg and paths at this test's temp locations.
            cfg = {'def_log_file': self.log1,
                   'output': {'all': '|tee -a {0}'.format(self.log2)}}
            paths = mypaths(cloud_dir=self.artifact_dir)

            def __init__(self, ds_deps):
                pass

            def read_cfg(self):
                pass

        self.init_class = FakeInit

    def test_remove_artifacts_removes_logs(self):
        """remove_artifacts removes logs when remove_logs is True."""
        write_file(self.log1, 'cloud-init-log')
        write_file(self.log2, 'cloud-init-output-log')

        self.assertFalse(
            os.path.exists(self.artifact_dir), 'Unexpected artifacts dir')
        retcode = wrap_and_call(
            'cloudinit.cmd.clean',
            {'Init': {'side_effect': self.init_class}},
            clean.remove_artifacts, remove_logs=True)
        self.assertFalse(os.path.exists(self.log1), 'Unexpected file')
        self.assertFalse(os.path.exists(self.log2), 'Unexpected file')
        self.assertEqual(0, retcode)

    def test_remove_artifacts_preserves_logs(self):
        """remove_artifacts leaves logs when remove_logs is False."""
        write_file(self.log1, 'cloud-init-log')
        write_file(self.log2, 'cloud-init-output-log')

        retcode = wrap_and_call(
            'cloudinit.cmd.clean',
            {'Init': {'side_effect': self.init_class}},
            clean.remove_artifacts, remove_logs=False)
        self.assertTrue(os.path.exists(self.log1), 'Missing expected file')
        self.assertTrue(os.path.exists(self.log2), 'Missing expected file')
        self.assertEqual(0, retcode)

    def test_remove_artifacts_removes_unlinks_symlinks(self):
        """remove_artifacts cleans artifacts dir unlinking any symlinks."""
        dir1 = os.path.join(self.artifact_dir, 'dir1')
        ensure_dir(dir1)
        symlink = os.path.join(self.artifact_dir, 'mylink')
        sym_link(dir1, symlink)

        retcode = wrap_and_call(
            'cloudinit.cmd.clean',
            {'Init': {'side_effect': self.init_class}},
            clean.remove_artifacts, remove_logs=False)
        self.assertEqual(0, retcode)
        # Both the real dir and the symlink pointing at it must be gone.
        for path in (dir1, symlink):
            self.assertFalse(
                os.path.exists(path),
                'Unexpected {0} dir'.format(path))

    def test_remove_artifacts_removes_artifacts_skipping_seed(self):
        """remove_artifacts cleans artifacts dir with exception of seed dir."""
        dirs = [
            self.artifact_dir,
            os.path.join(self.artifact_dir, 'seed'),
            os.path.join(self.artifact_dir, 'dir1'),
            os.path.join(self.artifact_dir, 'dir2')]
        for _dir in dirs:
            ensure_dir(_dir)

        retcode = wrap_and_call(
            'cloudinit.cmd.clean',
            {'Init': {'side_effect': self.init_class}},
            clean.remove_artifacts, remove_logs=False)
        self.assertEqual(0, retcode)
        # The artifact dir itself and its seed subdir must survive.
        for expected_dir in dirs[:2]:
            self.assertTrue(
                os.path.exists(expected_dir),
                'Missing {0} dir'.format(expected_dir))
        for deleted_dir in dirs[2:]:
            self.assertFalse(
                os.path.exists(deleted_dir),
                'Unexpected {0} dir'.format(deleted_dir))

    def test_remove_artifacts_removes_artifacts_removes_seed(self):
        """remove_artifacts removes seed dir when remove_seed is True."""
        dirs = [
            self.artifact_dir,
            os.path.join(self.artifact_dir, 'seed'),
            os.path.join(self.artifact_dir, 'dir1'),
            os.path.join(self.artifact_dir, 'dir2')]
        for _dir in dirs:
            ensure_dir(_dir)

        retcode = wrap_and_call(
            'cloudinit.cmd.clean',
            {'Init': {'side_effect': self.init_class}},
            clean.remove_artifacts, remove_logs=False, remove_seed=True)
        self.assertEqual(0, retcode)
        self.assertTrue(
            os.path.exists(self.artifact_dir), 'Missing artifact dir')
        # With remove_seed=True everything under the artifact dir is removed.
        for deleted_dir in dirs[1:]:
            self.assertFalse(
                os.path.exists(deleted_dir),
                'Unexpected {0} dir'.format(deleted_dir))

    def test_remove_artifacts_returns_one_on_errors(self):
        """remove_artifacts returns non-zero on failure and prints an error."""
        ensure_dir(self.artifact_dir)
        ensure_dir(os.path.join(self.artifact_dir, 'dir1'))

        with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
            retcode = wrap_and_call(
                'cloudinit.cmd.clean',
                {'del_dir': {'side_effect': OSError('oops')},
                 'Init': {'side_effect': self.init_class}},
                clean.remove_artifacts, remove_logs=False)
        self.assertEqual(1, retcode)
        self.assertEqual(
            'ERROR: Could not remove dir1: oops\n', m_stderr.getvalue())

    def test_handle_clean_args_reboots(self):
        """handle_clean_args reboots the system when the reboot arg is set."""

        called_cmds = []

        def fake_subp(cmd, capture):
            # Record the command instead of executing it.
            called_cmds.append((cmd, capture))
            return '', ''

        myargs = namedtuple('MyArgs', 'remove_logs remove_seed reboot')
        cmdargs = myargs(remove_logs=False, remove_seed=False, reboot=True)
        retcode = wrap_and_call(
            'cloudinit.cmd.clean',
            {'subp': {'side_effect': fake_subp},
             'Init': {'side_effect': self.init_class}},
            clean.handle_clean_args, name='does not matter', args=cmdargs)
        self.assertEqual(0, retcode)
        self.assertEqual(
            [(['shutdown', '-r', 'now'], False)], called_cmds)

    def test_status_main(self):
        '''clean.main can be run as a standalone script.'''
        write_file(self.log1, 'cloud-init-log')
        with self.assertRaises(SystemExit) as context_manager:
            wrap_and_call(
                'cloudinit.cmd.clean',
                {'Init': {'side_effect': self.init_class},
                 'sys.argv': {'new': ['clean', '--logs']}},
                clean.main)

        self.assertRaisesCodeEqual(0, context_manager.exception.code)
        self.assertFalse(
            os.path.exists(self.log1), 'Unexpected log {0}'.format(self.log1))
744 | 174 | |||
745 | 175 | |||
746 | 176 | # vi: ts=4 expandtab syntax=python | ||
747 | diff --git a/cloudinit/cmd/tests/test_status.py b/cloudinit/cmd/tests/test_status.py | |||
748 | 0 | new file mode 100644 | 177 | new file mode 100644 |
749 | index 0000000..a7c0a91 | |||
750 | --- /dev/null | |||
751 | +++ b/cloudinit/cmd/tests/test_status.py | |||
752 | @@ -0,0 +1,368 @@ | |||
753 | 1 | # This file is part of cloud-init. See LICENSE file for license information. | ||
754 | 2 | |||
755 | 3 | from collections import namedtuple | ||
756 | 4 | import os | ||
757 | 5 | from six import StringIO | ||
758 | 6 | from textwrap import dedent | ||
759 | 7 | |||
760 | 8 | from cloudinit.atomic_helper import write_json | ||
761 | 9 | from cloudinit.cmd import status | ||
762 | 10 | from cloudinit.util import write_file | ||
763 | 11 | from cloudinit.tests.helpers import CiTestCase, wrap_and_call, mock | ||
764 | 12 | |||
# Minimal stand-ins for cloudinit.helpers.Paths and parsed CLI arguments.
mypaths = namedtuple('MyPaths', 'run_dir')
myargs = namedtuple('MyArgs', 'long wait')
767 | 15 | |||
768 | 16 | |||
class TestStatus(CiTestCase):
    """Tests for cloudinit.cmd.status (the 'cloud-init status' subcommand)."""

    def setUp(self):
        super(TestStatus, self).setUp()
        self.new_root = self.tmp_dir()
        self.status_file = self.tmp_path('status.json', self.new_root)
        self.disable_file = self.tmp_path('cloudinit-disable', self.new_root)
        self.paths = mypaths(run_dir=self.new_root)

        class FakeInit(object):
            # Minimal stand-in for cloudinit.stages.Init exposing only the
            # paths attribute that the status helpers read.
            paths = self.paths

            def __init__(self, ds_deps):
                pass

            def read_cfg(self):
                pass

        self.init_class = FakeInit

    def test__is_cloudinit_disabled_false_on_sysvinit(self):
        '''When not in an environment using systemd, return False.'''
        write_file(self.disable_file, '')  # Create the ignored disable file
        (is_disabled, reason) = wrap_and_call(
            'cloudinit.cmd.status',
            {'uses_systemd': False},
            status._is_cloudinit_disabled, self.disable_file, self.paths)
        self.assertFalse(
            is_disabled, 'expected enabled cloud-init on sysvinit')
        self.assertEqual('Cloud-init enabled on sysvinit', reason)

    def test__is_cloudinit_disabled_true_on_disable_file(self):
        '''When using systemd and disable_file is present return disabled.'''
        write_file(self.disable_file, '')  # Create observed disable file
        (is_disabled, reason) = wrap_and_call(
            'cloudinit.cmd.status',
            {'uses_systemd': True},
            status._is_cloudinit_disabled, self.disable_file, self.paths)
        self.assertTrue(is_disabled, 'expected disabled cloud-init')
        self.assertEqual(
            'Cloud-init disabled by {0}'.format(self.disable_file), reason)

    def test__is_cloudinit_disabled_false_on_kernel_cmdline_enable(self):
        '''Not disabled when using systemd and enabled via commandline.'''
        write_file(self.disable_file, '')  # Create ignored disable file
        (is_disabled, reason) = wrap_and_call(
            'cloudinit.cmd.status',
            {'uses_systemd': True,
             'get_cmdline': 'something cloud-init=enabled else'},
            status._is_cloudinit_disabled, self.disable_file, self.paths)
        self.assertFalse(is_disabled, 'expected enabled cloud-init')
        self.assertEqual(
            'Cloud-init enabled by kernel command line cloud-init=enabled',
            reason)

    def test__is_cloudinit_disabled_true_on_kernel_cmdline(self):
        '''Disabled when kernel cmdline contains cloud-init=disabled.'''
        (is_disabled, reason) = wrap_and_call(
            'cloudinit.cmd.status',
            {'uses_systemd': True,
             'get_cmdline': 'something cloud-init=disabled else'},
            status._is_cloudinit_disabled, self.disable_file, self.paths)
        self.assertTrue(is_disabled, 'expected disabled cloud-init')
        self.assertEqual(
            'Cloud-init disabled by kernel parameter cloud-init=disabled',
            reason)

    def test__is_cloudinit_disabled_true_when_generator_disables(self):
        '''When cloud-init-generator doesn't write enabled file return True.'''
        enabled_file = os.path.join(self.paths.run_dir, 'enabled')
        self.assertFalse(os.path.exists(enabled_file))
        (is_disabled, reason) = wrap_and_call(
            'cloudinit.cmd.status',
            {'uses_systemd': True,
             'get_cmdline': 'something'},
            status._is_cloudinit_disabled, self.disable_file, self.paths)
        self.assertTrue(is_disabled, 'expected disabled cloud-init')
        self.assertEqual('Cloud-init disabled by cloud-init-generator', reason)

    def test__is_cloudinit_disabled_false_when_enabled_in_systemd(self):
        '''Report enabled when systemd generator creates the enabled file.'''
        enabled_file = os.path.join(self.paths.run_dir, 'enabled')
        write_file(enabled_file, '')
        (is_disabled, reason) = wrap_and_call(
            'cloudinit.cmd.status',
            {'uses_systemd': True,
             'get_cmdline': 'something ignored'},
            status._is_cloudinit_disabled, self.disable_file, self.paths)
        self.assertFalse(is_disabled, 'expected enabled cloud-init')
        self.assertEqual(
            'Cloud-init enabled by systemd cloud-init-generator', reason)

    def test_status_returns_not_run(self):
        '''When status.json does not exist yet, return 'not run'.'''
        self.assertFalse(
            os.path.exists(self.status_file), 'Unexpected status.json found')
        cmdargs = myargs(long=False, wait=False)
        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
            retcode = wrap_and_call(
                'cloudinit.cmd.status',
                {'_is_cloudinit_disabled': (False, ''),
                 'Init': {'side_effect': self.init_class}},
                status.handle_status_args, 'ignored', cmdargs)
        self.assertEqual(0, retcode)
        self.assertEqual('status: not run\n', m_stdout.getvalue())

    def test_status_returns_disabled_long_on_presence_of_disable_file(self):
        '''When cloudinit is disabled, return disabled reason.'''

        checked_files = []

        def fakeexists(filepath):
            # Record every existence check; pretend status.json is the only
            # absent file so the disabled path short-circuits further checks.
            checked_files.append(filepath)
            status_file = os.path.join(self.paths.run_dir, 'status.json')
            return bool(not filepath == status_file)

        cmdargs = myargs(long=True, wait=False)
        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
            retcode = wrap_and_call(
                'cloudinit.cmd.status',
                {'os.path.exists': {'side_effect': fakeexists},
                 '_is_cloudinit_disabled': (True, 'disabled for some reason'),
                 'Init': {'side_effect': self.init_class}},
                status.handle_status_args, 'ignored', cmdargs)
        self.assertEqual(0, retcode)
        # Only status.json should ever have been stat'd.
        self.assertEqual(
            [os.path.join(self.paths.run_dir, 'status.json')],
            checked_files)
        expected = dedent('''\
            status: disabled
            detail:
            disabled for some reason
            ''')
        self.assertEqual(expected, m_stdout.getvalue())

    def test_status_returns_running(self):
        '''Report running when status exists with an unfinished stage.'''
        write_json(self.status_file,
                   {'v1': {'init': {'start': 1, 'finished': None}}})
        cmdargs = myargs(long=False, wait=False)
        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
            retcode = wrap_and_call(
                'cloudinit.cmd.status',
                {'_is_cloudinit_disabled': (False, ''),
                 'Init': {'side_effect': self.init_class}},
                status.handle_status_args, 'ignored', cmdargs)
        self.assertEqual(0, retcode)
        self.assertEqual('status: running\n', m_stdout.getvalue())

    def test_status_returns_done(self):
        '''Reports done when stage is None and all stages are finished.'''
        write_json(
            self.status_file,
            {'v1': {'stage': None,
                    'datasource': (
                        'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]'
                        '[dsmode=net]'),
                    'blah': {'finished': 123.456},
                    'init': {'errors': [], 'start': 124.567,
                             'finished': 125.678},
                    'init-local': {'start': 123.45, 'finished': 123.46}}})
        cmdargs = myargs(long=False, wait=False)
        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
            retcode = wrap_and_call(
                'cloudinit.cmd.status',
                {'_is_cloudinit_disabled': (False, ''),
                 'Init': {'side_effect': self.init_class}},
                status.handle_status_args, 'ignored', cmdargs)
        self.assertEqual(0, retcode)
        self.assertEqual('status: done\n', m_stdout.getvalue())

    def test_status_returns_done_long(self):
        '''Long format of done status includes datasource info.'''
        write_json(
            self.status_file,
            {'v1': {'stage': None,
                    'datasource': (
                        'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]'
                        '[dsmode=net]'),
                    'init': {'start': 124.567, 'finished': 125.678},
                    'init-local': {'start': 123.45, 'finished': 123.46}}})
        cmdargs = myargs(long=True, wait=False)
        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
            retcode = wrap_and_call(
                'cloudinit.cmd.status',
                {'_is_cloudinit_disabled': (False, ''),
                 'Init': {'side_effect': self.init_class}},
                status.handle_status_args, 'ignored', cmdargs)
        self.assertEqual(0, retcode)
        expected = dedent('''\
            status: done
            time: Thu, 01 Jan 1970 00:02:05 +0000
            detail:
            DataSourceNoCloud [seed=/var/.../seed/nocloud-net][dsmode=net]
            ''')
        self.assertEqual(expected, m_stdout.getvalue())

    def test_status_on_errors(self):
        '''Reports error when any stage has errors.'''
        write_json(
            self.status_file,
            {'v1': {'stage': None,
                    'blah': {'errors': [], 'finished': 123.456},
                    'init': {'errors': ['error1'], 'start': 124.567,
                             'finished': 125.678},
                    'init-local': {'start': 123.45, 'finished': 123.46}}})
        cmdargs = myargs(long=False, wait=False)
        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
            retcode = wrap_and_call(
                'cloudinit.cmd.status',
                {'_is_cloudinit_disabled': (False, ''),
                 'Init': {'side_effect': self.init_class}},
                status.handle_status_args, 'ignored', cmdargs)
        self.assertEqual(1, retcode)
        self.assertEqual('status: error\n', m_stdout.getvalue())

    def test_status_on_errors_long(self):
        '''Long format of error status includes all error messages.'''
        write_json(
            self.status_file,
            {'v1': {'stage': None,
                    'datasource': (
                        'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]'
                        '[dsmode=net]'),
                    'init': {'errors': ['error1'], 'start': 124.567,
                             'finished': 125.678},
                    'init-local': {'errors': ['error2', 'error3'],
                                   'start': 123.45, 'finished': 123.46}}})
        cmdargs = myargs(long=True, wait=False)
        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
            retcode = wrap_and_call(
                'cloudinit.cmd.status',
                {'_is_cloudinit_disabled': (False, ''),
                 'Init': {'side_effect': self.init_class}},
                status.handle_status_args, 'ignored', cmdargs)
        self.assertEqual(1, retcode)
        expected = dedent('''\
            status: error
            time: Thu, 01 Jan 1970 00:02:05 +0000
            detail:
            error1
            error2
            error3
            ''')
        self.assertEqual(expected, m_stdout.getvalue())

    def test_status_returns_running_long_format(self):
        '''Long format reports the stage in which we are running.'''
        write_json(
            self.status_file,
            {'v1': {'stage': 'init',
                    'init': {'start': 124.456, 'finished': None},
                    'init-local': {'start': 123.45, 'finished': 123.46}}})
        cmdargs = myargs(long=True, wait=False)
        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
            retcode = wrap_and_call(
                'cloudinit.cmd.status',
                {'_is_cloudinit_disabled': (False, ''),
                 'Init': {'side_effect': self.init_class}},
                status.handle_status_args, 'ignored', cmdargs)
        self.assertEqual(0, retcode)
        expected = dedent('''\
            status: running
            time: Thu, 01 Jan 1970 00:02:04 +0000
            detail:
            Running in stage: init
            ''')
        self.assertEqual(expected, m_stdout.getvalue())

    def test_status_wait_blocks_until_done(self):
        '''Specifying wait will poll every 1/4 second until done state.'''
        running_json = {
            'v1': {'stage': 'init',
                   'init': {'start': 124.456, 'finished': None},
                   'init-local': {'start': 123.45, 'finished': 123.46}}}
        done_json = {
            'v1': {'stage': None,
                   'init': {'start': 124.456, 'finished': 125.678},
                   'init-local': {'start': 123.45, 'finished': 123.46}}}

        self.sleep_calls = 0

        def fake_sleep(interval):
            # Flip status.json from absent -> running -> done as the
            # poll loop sleeps, instead of actually sleeping.
            self.assertEqual(0.25, interval)
            self.sleep_calls += 1
            if self.sleep_calls == 2:
                write_json(self.status_file, running_json)
            elif self.sleep_calls == 3:
                write_json(self.status_file, done_json)

        cmdargs = myargs(long=False, wait=True)
        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
            retcode = wrap_and_call(
                'cloudinit.cmd.status',
                {'sleep': {'side_effect': fake_sleep},
                 '_is_cloudinit_disabled': (False, ''),
                 'Init': {'side_effect': self.init_class}},
                status.handle_status_args, 'ignored', cmdargs)
        self.assertEqual(0, retcode)
        self.assertEqual(4, self.sleep_calls)
        self.assertEqual('....\nstatus: done\n', m_stdout.getvalue())

    def test_status_wait_blocks_until_error(self):
        '''Specifying wait will poll every 1/4 second until error state.'''
        running_json = {
            'v1': {'stage': 'init',
                   'init': {'start': 124.456, 'finished': None},
                   'init-local': {'start': 123.45, 'finished': 123.46}}}
        error_json = {
            'v1': {'stage': None,
                   'init': {'errors': ['error1'], 'start': 124.456,
                            'finished': 125.678},
                   'init-local': {'start': 123.45, 'finished': 123.46}}}

        self.sleep_calls = 0

        def fake_sleep(interval):
            # Flip status.json from absent -> running -> error as the
            # poll loop sleeps, instead of actually sleeping.
            self.assertEqual(0.25, interval)
            self.sleep_calls += 1
            if self.sleep_calls == 2:
                write_json(self.status_file, running_json)
            elif self.sleep_calls == 3:
                write_json(self.status_file, error_json)

        cmdargs = myargs(long=False, wait=True)
        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
            retcode = wrap_and_call(
                'cloudinit.cmd.status',
                {'sleep': {'side_effect': fake_sleep},
                 '_is_cloudinit_disabled': (False, ''),
                 'Init': {'side_effect': self.init_class}},
                status.handle_status_args, 'ignored', cmdargs)
        self.assertEqual(1, retcode)
        self.assertEqual(4, self.sleep_calls)
        self.assertEqual('....\nstatus: error\n', m_stdout.getvalue())

    def test_status_main(self):
        '''status.main can be run as a standalone script.'''
        write_json(self.status_file,
                   {'v1': {'init': {'start': 1, 'finished': None}}})
        with self.assertRaises(SystemExit) as context_manager:
            with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
                wrap_and_call(
                    'cloudinit.cmd.status',
                    {'sys.argv': {'new': ['status']},
                     '_is_cloudinit_disabled': (False, ''),
                     'Init': {'side_effect': self.init_class}},
                    status.main)
        self.assertRaisesCodeEqual(0, context_manager.exception.code)
        self.assertEqual('status: running\n', m_stdout.getvalue())
1119 | 367 | |||
1120 | 368 | # vi: ts=4 expandtab syntax=python | ||
1121 | diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py | |||
1122 | index 177cbcf..5b9cbca 100644 | |||
1123 | --- a/cloudinit/config/cc_apt_configure.py | |||
1124 | +++ b/cloudinit/config/cc_apt_configure.py | |||
1125 | @@ -275,8 +275,9 @@ def handle(name, ocfg, cloud, log, _): | |||
1126 | 275 | cfg = ocfg.get('apt', {}) | 275 | cfg = ocfg.get('apt', {}) |
1127 | 276 | 276 | ||
1128 | 277 | if not isinstance(cfg, dict): | 277 | if not isinstance(cfg, dict): |
1131 | 278 | raise ValueError("Expected dictionary for 'apt' config, found %s", | 278 | raise ValueError( |
1132 | 279 | type(cfg)) | 279 | "Expected dictionary for 'apt' config, found {config_type}".format( |
1133 | 280 | config_type=type(cfg))) | ||
1134 | 280 | 281 | ||
1135 | 281 | apply_debconf_selections(cfg, target) | 282 | apply_debconf_selections(cfg, target) |
1136 | 282 | apply_apt(cfg, cloud, target) | 283 | apply_apt(cfg, cloud, target) |
1137 | diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py | |||
1138 | index c2b83ae..c3e8c48 100644 | |||
1139 | --- a/cloudinit/config/cc_disk_setup.py | |||
1140 | +++ b/cloudinit/config/cc_disk_setup.py | |||
1141 | @@ -788,7 +788,8 @@ def mkpart(device, definition): | |||
1142 | 788 | # This prevents you from overwriting the device | 788 | # This prevents you from overwriting the device |
1143 | 789 | LOG.debug("Checking if device %s is a valid device", device) | 789 | LOG.debug("Checking if device %s is a valid device", device) |
1144 | 790 | if not is_device_valid(device): | 790 | if not is_device_valid(device): |
1146 | 791 | raise Exception("Device %s is not a disk device!", device) | 791 | raise Exception( |
1147 | 792 | 'Device {device} is not a disk device!'.format(device=device)) | ||
1148 | 792 | 793 | ||
1149 | 793 | # Remove the partition table entries | 794 | # Remove the partition table entries |
1150 | 794 | if isinstance(layout, str) and layout.lower() == "remove": | 795 | if isinstance(layout, str) and layout.lower() == "remove": |
1151 | @@ -945,8 +946,9 @@ def mkfs(fs_cfg): | |||
1152 | 945 | 946 | ||
1153 | 946 | # Check that we can create the FS | 947 | # Check that we can create the FS |
1154 | 947 | if not (fs_type or fs_cmd): | 948 | if not (fs_type or fs_cmd): |
1157 | 948 | raise Exception("No way to create filesystem '%s'. fs_type or fs_cmd " | 949 | raise Exception( |
1158 | 949 | "must be set.", label) | 950 | "No way to create filesystem '{label}'. fs_type or fs_cmd " |
1159 | 951 | "must be set.".format(label=label)) | ||
1160 | 950 | 952 | ||
1161 | 951 | # Create the commands | 953 | # Create the commands |
1162 | 952 | shell = False | 954 | shell = False |
1163 | diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py | |||
1164 | index 8f9f1ab..eaf1e94 100644 | |||
1165 | --- a/cloudinit/config/cc_landscape.py | |||
1166 | +++ b/cloudinit/config/cc_landscape.py | |||
1167 | @@ -94,10 +94,10 @@ def handle(_name, cfg, cloud, log, _args): | |||
1168 | 94 | ls_cloudcfg = cfg.get("landscape", {}) | 94 | ls_cloudcfg = cfg.get("landscape", {}) |
1169 | 95 | 95 | ||
1170 | 96 | if not isinstance(ls_cloudcfg, (dict)): | 96 | if not isinstance(ls_cloudcfg, (dict)): |
1175 | 97 | raise RuntimeError(("'landscape' key existed in config," | 97 | raise RuntimeError( |
1176 | 98 | " but not a dictionary type," | 98 | "'landscape' key existed in config, but not a dictionary type," |
1177 | 99 | " is a %s instead"), | 99 | " is a {_type} instead".format( |
1178 | 100 | type_utils.obj_name(ls_cloudcfg)) | 100 | _type=type_utils.obj_name(ls_cloudcfg))) |
1179 | 101 | if not ls_cloudcfg: | 101 | if not ls_cloudcfg: |
1180 | 102 | return | 102 | return |
1181 | 103 | 103 | ||
1182 | diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py | |||
1183 | index f50bcb3..cbd0237 100644 | |||
1184 | --- a/cloudinit/config/cc_ntp.py | |||
1185 | +++ b/cloudinit/config/cc_ntp.py | |||
1186 | @@ -106,9 +106,9 @@ def handle(name, cfg, cloud, log, _args): | |||
1187 | 106 | 106 | ||
1188 | 107 | # TODO drop this when validate_cloudconfig_schema is strict=True | 107 | # TODO drop this when validate_cloudconfig_schema is strict=True |
1189 | 108 | if not isinstance(ntp_cfg, (dict)): | 108 | if not isinstance(ntp_cfg, (dict)): |
1193 | 109 | raise RuntimeError(("'ntp' key existed in config," | 109 | raise RuntimeError( |
1194 | 110 | " but not a dictionary type," | 110 | "'ntp' key existed in config, but not a dictionary type," |
1195 | 111 | " is a %s %instead"), type_utils.obj_name(ntp_cfg)) | 111 | " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg))) |
1196 | 112 | 112 | ||
1197 | 113 | validate_cloudconfig_schema(cfg, schema) | 113 | validate_cloudconfig_schema(cfg, schema) |
1198 | 114 | if ntp_installable(): | 114 | if ntp_installable(): |
1199 | @@ -206,8 +206,8 @@ def write_ntp_config_template(cfg, cloud, path, template=None): | |||
1200 | 206 | if not template_fn: | 206 | if not template_fn: |
1201 | 207 | template_fn = cloud.get_template_filename('ntp.conf') | 207 | template_fn = cloud.get_template_filename('ntp.conf') |
1202 | 208 | if not template_fn: | 208 | if not template_fn: |
1205 | 209 | raise RuntimeError(("No template found, " | 209 | raise RuntimeError( |
1206 | 210 | "not rendering %s"), path) | 210 | 'No template found, not rendering {path}'.format(path=path)) |
1207 | 211 | 211 | ||
1208 | 212 | templater.render_to_file(template_fn, path, params) | 212 | templater.render_to_file(template_fn, path, params) |
1209 | 213 | 213 | ||
1210 | diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py | |||
1211 | index eba58b0..4da3a58 100644 | |||
1212 | --- a/cloudinit/config/cc_power_state_change.py | |||
1213 | +++ b/cloudinit/config/cc_power_state_change.py | |||
1214 | @@ -194,6 +194,7 @@ def doexit(sysexit): | |||
1215 | 194 | 194 | ||
1216 | 195 | 195 | ||
1217 | 196 | def execmd(exe_args, output=None, data_in=None): | 196 | def execmd(exe_args, output=None, data_in=None): |
1218 | 197 | ret = 1 | ||
1219 | 197 | try: | 198 | try: |
1220 | 198 | proc = subprocess.Popen(exe_args, stdin=subprocess.PIPE, | 199 | proc = subprocess.Popen(exe_args, stdin=subprocess.PIPE, |
1221 | 199 | stdout=output, stderr=subprocess.STDOUT) | 200 | stdout=output, stderr=subprocess.STDOUT) |
1222 | diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py | |||
1223 | index 0d282e6..cec22bb 100644 | |||
1224 | --- a/cloudinit/config/cc_resizefs.py | |||
1225 | +++ b/cloudinit/config/cc_resizefs.py | |||
1226 | @@ -59,7 +59,17 @@ __doc__ = get_schema_doc(schema) # Supplement python help() | |||
1227 | 59 | 59 | ||
1228 | 60 | 60 | ||
1229 | 61 | def _resize_btrfs(mount_point, devpth): | 61 | def _resize_btrfs(mount_point, devpth): |
1231 | 62 | return ('btrfs', 'filesystem', 'resize', 'max', mount_point) | 62 | # If "/" is ro resize will fail. However it should be allowed since resize |
1232 | 63 | # makes everything bigger and subvolumes that are not ro will benefit. | ||
1233 | 64 | # Use a subvolume that is not ro to trick the resize operation to do the | ||
1234 | 65 | # "right" thing. The use of ".snapshot" is specific to "snapper" a generic | ||
1235 | 66 | # solution would be walk the subvolumes and find a rw mounted subvolume. | ||
1236 | 67 | if (not util.mount_is_read_write(mount_point) and | ||
1237 | 68 | os.path.isdir("%s/.snapshots" % mount_point)): | ||
1238 | 69 | return ('btrfs', 'filesystem', 'resize', 'max', | ||
1239 | 70 | '%s/.snapshots' % mount_point) | ||
1240 | 71 | else: | ||
1241 | 72 | return ('btrfs', 'filesystem', 'resize', 'max', mount_point) | ||
1242 | 63 | 73 | ||
1243 | 64 | 74 | ||
1244 | 65 | def _resize_ext(mount_point, devpth): | 75 | def _resize_ext(mount_point, devpth): |
1245 | diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py | |||
1246 | index a9d21e7..530808c 100644 | |||
1247 | --- a/cloudinit/config/cc_rh_subscription.py | |||
1248 | +++ b/cloudinit/config/cc_rh_subscription.py | |||
1249 | @@ -276,9 +276,8 @@ class SubscriptionManager(object): | |||
1250 | 276 | cmd = ['attach', '--auto'] | 276 | cmd = ['attach', '--auto'] |
1251 | 277 | try: | 277 | try: |
1252 | 278 | return_out, return_err = self._sub_man_cli(cmd) | 278 | return_out, return_err = self._sub_man_cli(cmd) |
1256 | 279 | except util.ProcessExecutionError: | 279 | except util.ProcessExecutionError as e: |
1257 | 280 | self.log_warn("Auto-attach failed with: " | 280 | self.log_warn("Auto-attach failed with: {0}".format(e)) |
1255 | 281 | "{0}]".format(return_err.strip())) | ||
1258 | 282 | return False | 281 | return False |
1259 | 283 | for line in return_out.split("\n"): | 282 | for line in return_out.split("\n"): |
1260 | 284 | if line is not "": | 283 | if line is not "": |
1261 | diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py | |||
1262 | index 50ff9e3..af08788 100644 | |||
1263 | --- a/cloudinit/config/cc_rsyslog.py | |||
1264 | +++ b/cloudinit/config/cc_rsyslog.py | |||
1265 | @@ -20,15 +20,15 @@ which defaults to ``20-cloud-config.conf``. The rsyslog config directory to | |||
1266 | 20 | write config files to may be specified in ``config_dir``, which defaults to | 20 | write config files to may be specified in ``config_dir``, which defaults to |
1267 | 21 | ``/etc/rsyslog.d``. | 21 | ``/etc/rsyslog.d``. |
1268 | 22 | 22 | ||
1272 | 23 | A list of configurations for for rsyslog can be specified under the ``configs`` | 23 | A list of configurations for rsyslog can be specified under the ``configs`` key |
1273 | 24 | key in the ``rsyslog`` config. Each entry in ``configs`` is either a string or | 24 | in the ``rsyslog`` config. Each entry in ``configs`` is either a string or a |
1274 | 25 | a dictionary. Each config entry contains a configuration string and a file to | 25 | dictionary. Each config entry contains a configuration string and a file to |
1275 | 26 | write it to. For config entries that are a dictionary, ``filename`` sets the | 26 | write it to. For config entries that are a dictionary, ``filename`` sets the |
1276 | 27 | target filename and ``content`` specifies the config string to write. For | 27 | target filename and ``content`` specifies the config string to write. For |
1277 | 28 | config entries that are only a string, the string is used as the config string | 28 | config entries that are only a string, the string is used as the config string |
1278 | 29 | to write. If the filename to write the config to is not specified, the value of | 29 | to write. If the filename to write the config to is not specified, the value of |
1281 | 30 | the ``config_filename`` key is used. A file with the selected filename will | 30 | the ``config_filename`` key is used. A file with the selected filename will be |
1282 | 31 | be written inside the directory specified by ``config_dir``. | 31 | written inside the directory specified by ``config_dir``. |
1283 | 32 | 32 | ||
1284 | 33 | The command to use to reload the rsyslog service after the config has been | 33 | The command to use to reload the rsyslog service after the config has been |
1285 | 34 | updated can be specified in ``service_reload_command``. If this is set to | 34 | updated can be specified in ``service_reload_command``. If this is set to |
1286 | diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py | |||
1287 | index e76b9c0..65f6e77 100644 | |||
1288 | --- a/cloudinit/config/cc_seed_random.py | |||
1289 | +++ b/cloudinit/config/cc_seed_random.py | |||
1290 | @@ -95,7 +95,8 @@ def handle_random_seed_command(command, required, env=None): | |||
1291 | 95 | cmd = command[0] | 95 | cmd = command[0] |
1292 | 96 | if not util.which(cmd): | 96 | if not util.which(cmd): |
1293 | 97 | if required: | 97 | if required: |
1295 | 98 | raise ValueError("command '%s' not found but required=true", cmd) | 98 | raise ValueError( |
1296 | 99 | "command '{cmd}' not found but required=true".format(cmd=cmd)) | ||
1297 | 99 | else: | 100 | else: |
1298 | 100 | LOG.debug("command '%s' not found for seed_command", cmd) | 101 | LOG.debug("command '%s' not found for seed_command", cmd) |
1299 | 101 | return | 102 | return |
1300 | diff --git a/cloudinit/config/cc_snap_config.py b/cloudinit/config/cc_snap_config.py | |||
1301 | index fe0cc73..e82c081 100644 | |||
1302 | --- a/cloudinit/config/cc_snap_config.py | |||
1303 | +++ b/cloudinit/config/cc_snap_config.py | |||
1304 | @@ -87,7 +87,9 @@ def add_assertions(assertions=None): | |||
1305 | 87 | assertions = [] | 87 | assertions = [] |
1306 | 88 | 88 | ||
1307 | 89 | if not isinstance(assertions, list): | 89 | if not isinstance(assertions, list): |
1309 | 90 | raise ValueError('assertion parameter was not a list: %s', assertions) | 90 | raise ValueError( |
1310 | 91 | 'assertion parameter was not a list: {assertions}'.format( | ||
1311 | 92 | assertions=assertions)) | ||
1312 | 91 | 93 | ||
1313 | 92 | snap_cmd = [SNAPPY_CMD, 'ack'] | 94 | snap_cmd = [SNAPPY_CMD, 'ack'] |
1314 | 93 | combined = "\n".join(assertions) | 95 | combined = "\n".join(assertions) |
1315 | @@ -115,7 +117,8 @@ def add_snap_user(cfg=None): | |||
1316 | 115 | cfg = {} | 117 | cfg = {} |
1317 | 116 | 118 | ||
1318 | 117 | if not isinstance(cfg, dict): | 119 | if not isinstance(cfg, dict): |
1320 | 118 | raise ValueError('configuration parameter was not a dict: %s', cfg) | 120 | raise ValueError( |
1321 | 121 | 'configuration parameter was not a dict: {cfg}'.format(cfg=cfg)) | ||
1322 | 119 | 122 | ||
1323 | 120 | snapuser = cfg.get('email', None) | 123 | snapuser = cfg.get('email', None) |
1324 | 121 | if not snapuser: | 124 | if not snapuser: |
1325 | diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py | |||
1326 | index d5becd1..55260ea 100755 | |||
1327 | --- a/cloudinit/distros/__init__.py | |||
1328 | +++ b/cloudinit/distros/__init__.py | |||
1329 | @@ -45,6 +45,10 @@ OSFAMILIES = { | |||
1330 | 45 | 45 | ||
1331 | 46 | LOG = logging.getLogger(__name__) | 46 | LOG = logging.getLogger(__name__) |
1332 | 47 | 47 | ||
1333 | 48 | # This is a best guess regex, based on current EC2 AZs on 2017-12-11. | ||
1334 | 49 | # It could break when Amazon adds new regions and new AZs. | ||
1335 | 50 | _EC2_AZ_RE = re.compile('^[a-z][a-z]-(?:[a-z]+-)+[0-9][a-z]$') | ||
1336 | 51 | |||
1337 | 48 | 52 | ||
1338 | 49 | @six.add_metaclass(abc.ABCMeta) | 53 | @six.add_metaclass(abc.ABCMeta) |
1339 | 50 | class Distro(object): | 54 | class Distro(object): |
1340 | @@ -102,11 +106,8 @@ class Distro(object): | |||
1341 | 102 | self._apply_hostname(writeable_hostname) | 106 | self._apply_hostname(writeable_hostname) |
1342 | 103 | 107 | ||
1343 | 104 | def uses_systemd(self): | 108 | def uses_systemd(self): |
1349 | 105 | try: | 109 | """Wrapper to report whether this distro uses systemd or sysvinit.""" |
1350 | 106 | res = os.lstat('/run/systemd/system') | 110 | return uses_systemd() |
1346 | 107 | return stat.S_ISDIR(res.st_mode) | ||
1347 | 108 | except Exception: | ||
1348 | 109 | return False | ||
1351 | 110 | 111 | ||
1352 | 111 | @abc.abstractmethod | 112 | @abc.abstractmethod |
1353 | 112 | def package_command(self, cmd, args=None, pkgs=None): | 113 | def package_command(self, cmd, args=None, pkgs=None): |
1354 | @@ -686,18 +687,13 @@ def _get_package_mirror_info(mirror_info, data_source=None, | |||
1355 | 686 | if not mirror_info: | 687 | if not mirror_info: |
1356 | 687 | mirror_info = {} | 688 | mirror_info = {} |
1357 | 688 | 689 | ||
1358 | 689 | # ec2 availability zones are named cc-direction-[0-9][a-d] (us-east-1b) | ||
1359 | 690 | # the region is us-east-1. so region = az[0:-1] | ||
1360 | 691 | directions_re = '|'.join([ | ||
1361 | 692 | 'central', 'east', 'north', 'northeast', 'northwest', | ||
1362 | 693 | 'south', 'southeast', 'southwest', 'west']) | ||
1363 | 694 | ec2_az_re = ("^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" % directions_re) | ||
1364 | 695 | |||
1365 | 696 | subst = {} | 690 | subst = {} |
1366 | 697 | if data_source and data_source.availability_zone: | 691 | if data_source and data_source.availability_zone: |
1367 | 698 | subst['availability_zone'] = data_source.availability_zone | 692 | subst['availability_zone'] = data_source.availability_zone |
1368 | 699 | 693 | ||
1370 | 700 | if re.match(ec2_az_re, data_source.availability_zone): | 694 | # ec2 availability zones are named cc-direction-[0-9][a-d] (us-east-1b) |
1371 | 695 | # the region is us-east-1. so region = az[0:-1] | ||
1372 | 696 | if _EC2_AZ_RE.match(data_source.availability_zone): | ||
1373 | 701 | subst['ec2_region'] = "%s" % data_source.availability_zone[0:-1] | 697 | subst['ec2_region'] = "%s" % data_source.availability_zone[0:-1] |
1374 | 702 | 698 | ||
1375 | 703 | if data_source and data_source.region: | 699 | if data_source and data_source.region: |
1376 | @@ -761,4 +757,13 @@ def set_etc_timezone(tz, tz_file=None, tz_conf="/etc/timezone", | |||
1377 | 761 | util.copy(tz_file, tz_local) | 757 | util.copy(tz_file, tz_local) |
1378 | 762 | return | 758 | return |
1379 | 763 | 759 | ||
1380 | 760 | |||
1381 | 761 | def uses_systemd(): | ||
1382 | 762 | try: | ||
1383 | 763 | res = os.lstat('/run/systemd/system') | ||
1384 | 764 | return stat.S_ISDIR(res.st_mode) | ||
1385 | 765 | except Exception: | ||
1386 | 766 | return False | ||
1387 | 767 | |||
1388 | 768 | |||
1389 | 764 | # vi: ts=4 expandtab | 769 | # vi: ts=4 expandtab |
1390 | diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py | |||
1391 | index bad112f..aa468bc 100644 | |||
1392 | --- a/cloudinit/distros/freebsd.py | |||
1393 | +++ b/cloudinit/distros/freebsd.py | |||
1394 | @@ -116,6 +116,7 @@ class Distro(distros.Distro): | |||
1395 | 116 | (out, err) = util.subp(['ifconfig', '-a']) | 116 | (out, err) = util.subp(['ifconfig', '-a']) |
1396 | 117 | ifconfigoutput = [x for x in (out.strip()).splitlines() | 117 | ifconfigoutput = [x for x in (out.strip()).splitlines() |
1397 | 118 | if len(x.split()) > 0] | 118 | if len(x.split()) > 0] |
1398 | 119 | bsddev = 'NOT_FOUND' | ||
1399 | 119 | for line in ifconfigoutput: | 120 | for line in ifconfigoutput: |
1400 | 120 | m = re.match('^\w+', line) | 121 | m = re.match('^\w+', line) |
1401 | 121 | if m: | 122 | if m: |
1402 | @@ -347,15 +348,9 @@ class Distro(distros.Distro): | |||
1403 | 347 | bymac[Distro.get_interface_mac(n)] = { | 348 | bymac[Distro.get_interface_mac(n)] = { |
1404 | 348 | 'name': n, 'up': self.is_up(n), 'downable': None} | 349 | 'name': n, 'up': self.is_up(n), 'downable': None} |
1405 | 349 | 350 | ||
1406 | 351 | nics_with_addresses = set() | ||
1407 | 350 | if check_downable: | 352 | if check_downable: |
1416 | 351 | nics_with_addresses = set() | 353 | nics_with_addresses = set(self.get_ipv4() + self.get_ipv6()) |
1409 | 352 | ipv6 = self.get_ipv6() | ||
1410 | 353 | ipv4 = self.get_ipv4() | ||
1411 | 354 | for bytes_out in (ipv6, ipv4): | ||
1412 | 355 | for i in ipv6: | ||
1413 | 356 | nics_with_addresses.update(i) | ||
1414 | 357 | for i in ipv4: | ||
1415 | 358 | nics_with_addresses.update(i) | ||
1417 | 359 | 354 | ||
1418 | 360 | for d in bymac.values(): | 355 | for d in bymac.values(): |
1419 | 361 | d['downable'] = (d['up'] is False or | 356 | d['downable'] = (d['up'] is False or |
1420 | diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py | |||
1421 | index 723d6bd..d6c61e4 100644 | |||
1422 | --- a/cloudinit/ec2_utils.py | |||
1423 | +++ b/cloudinit/ec2_utils.py | |||
1424 | @@ -1,6 +1,8 @@ | |||
1425 | 1 | # Copyright (C) 2012 Yahoo! Inc. | 1 | # Copyright (C) 2012 Yahoo! Inc. |
1426 | 2 | # Copyright (C) 2014 Amazon.com, Inc. or its affiliates. | ||
1427 | 2 | # | 3 | # |
1428 | 3 | # Author: Joshua Harlow <harlowja@yahoo-inc.com> | 4 | # Author: Joshua Harlow <harlowja@yahoo-inc.com> |
1429 | 5 | # Author: Andrew Jorgensen <ajorgens@amazon.com> | ||
1430 | 4 | # | 6 | # |
1431 | 5 | # This file is part of cloud-init. See LICENSE file for license information. | 7 | # This file is part of cloud-init. See LICENSE file for license information. |
1432 | 6 | 8 | ||
1433 | @@ -164,14 +166,11 @@ def get_instance_userdata(api_version='latest', | |||
1434 | 164 | return user_data | 166 | return user_data |
1435 | 165 | 167 | ||
1436 | 166 | 168 | ||
1445 | 167 | def get_instance_metadata(api_version='latest', | 169 | def _get_instance_metadata(tree, api_version='latest', |
1446 | 168 | metadata_address='http://169.254.169.254', | 170 | metadata_address='http://169.254.169.254', |
1447 | 169 | ssl_details=None, timeout=5, retries=5, | 171 | ssl_details=None, timeout=5, retries=5, |
1448 | 170 | leaf_decoder=None): | 172 | leaf_decoder=None): |
1449 | 171 | md_url = url_helper.combine_url(metadata_address, api_version) | 173 | md_url = url_helper.combine_url(metadata_address, api_version, tree) |
1442 | 172 | # Note, 'meta-data' explicitly has trailing /. | ||
1443 | 173 | # this is required for CloudStack (LP: #1356855) | ||
1444 | 174 | md_url = url_helper.combine_url(md_url, 'meta-data/') | ||
1450 | 175 | caller = functools.partial(util.read_file_or_url, | 174 | caller = functools.partial(util.read_file_or_url, |
1451 | 176 | ssl_details=ssl_details, timeout=timeout, | 175 | ssl_details=ssl_details, timeout=timeout, |
1452 | 177 | retries=retries) | 176 | retries=retries) |
1453 | @@ -189,7 +188,29 @@ def get_instance_metadata(api_version='latest', | |||
1454 | 189 | md = {} | 188 | md = {} |
1455 | 190 | return md | 189 | return md |
1456 | 191 | except Exception: | 190 | except Exception: |
1458 | 192 | util.logexc(LOG, "Failed fetching metadata from url %s", md_url) | 191 | util.logexc(LOG, "Failed fetching %s from url %s", tree, md_url) |
1459 | 193 | return {} | 192 | return {} |
1460 | 194 | 193 | ||
1461 | 194 | |||
1462 | 195 | def get_instance_metadata(api_version='latest', | ||
1463 | 196 | metadata_address='http://169.254.169.254', | ||
1464 | 197 | ssl_details=None, timeout=5, retries=5, | ||
1465 | 198 | leaf_decoder=None): | ||
1466 | 199 | # Note, 'meta-data' explicitly has trailing /. | ||
1467 | 200 | # this is required for CloudStack (LP: #1356855) | ||
1468 | 201 | return _get_instance_metadata(tree='meta-data/', api_version=api_version, | ||
1469 | 202 | metadata_address=metadata_address, | ||
1470 | 203 | ssl_details=ssl_details, timeout=timeout, | ||
1471 | 204 | retries=retries, leaf_decoder=leaf_decoder) | ||
1472 | 205 | |||
1473 | 206 | |||
1474 | 207 | def get_instance_identity(api_version='latest', | ||
1475 | 208 | metadata_address='http://169.254.169.254', | ||
1476 | 209 | ssl_details=None, timeout=5, retries=5, | ||
1477 | 210 | leaf_decoder=None): | ||
1478 | 211 | return _get_instance_metadata(tree='dynamic/instance-identity', | ||
1479 | 212 | api_version=api_version, | ||
1480 | 213 | metadata_address=metadata_address, | ||
1481 | 214 | ssl_details=ssl_details, timeout=timeout, | ||
1482 | 215 | retries=retries, leaf_decoder=leaf_decoder) | ||
1483 | 195 | # vi: ts=4 expandtab | 216 | # vi: ts=4 expandtab |
1484 | diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py | |||
1485 | index a1b0db1..c015e79 100644 | |||
1486 | --- a/cloudinit/net/__init__.py | |||
1487 | +++ b/cloudinit/net/__init__.py | |||
1488 | @@ -18,7 +18,7 @@ SYS_CLASS_NET = "/sys/class/net/" | |||
1489 | 18 | DEFAULT_PRIMARY_INTERFACE = 'eth0' | 18 | DEFAULT_PRIMARY_INTERFACE = 'eth0' |
1490 | 19 | 19 | ||
1491 | 20 | 20 | ||
1493 | 21 | def _natural_sort_key(s, _nsre=re.compile('([0-9]+)')): | 21 | def natural_sort_key(s, _nsre=re.compile('([0-9]+)')): |
1494 | 22 | """Sorting for Humans: natural sort order. Can be use as the key to sort | 22 | """Sorting for Humans: natural sort order. Can be use as the key to sort |
1495 | 23 | functions. | 23 | functions. |
1496 | 24 | This will sort ['eth0', 'ens3', 'ens10', 'ens12', 'ens8', 'ens0'] as | 24 | This will sort ['eth0', 'ens3', 'ens10', 'ens12', 'ens8', 'ens0'] as |
1497 | @@ -224,7 +224,7 @@ def find_fallback_nic(blacklist_drivers=None): | |||
1498 | 224 | 224 | ||
1499 | 225 | # if eth0 exists use it above anything else, otherwise get the interface | 225 | # if eth0 exists use it above anything else, otherwise get the interface |
1500 | 226 | # that we can read 'first' (using the sorted defintion of first). | 226 | # that we can read 'first' (using the sorted defintion of first). |
1502 | 227 | names = list(sorted(potential_interfaces, key=_natural_sort_key)) | 227 | names = list(sorted(potential_interfaces, key=natural_sort_key)) |
1503 | 228 | if DEFAULT_PRIMARY_INTERFACE in names: | 228 | if DEFAULT_PRIMARY_INTERFACE in names: |
1504 | 229 | names.remove(DEFAULT_PRIMARY_INTERFACE) | 229 | names.remove(DEFAULT_PRIMARY_INTERFACE) |
1505 | 230 | names.insert(0, DEFAULT_PRIMARY_INTERFACE) | 230 | names.insert(0, DEFAULT_PRIMARY_INTERFACE) |
1506 | diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py | |||
1507 | index 38b27a5..7b2cc9d 100755 | |||
1508 | --- a/cloudinit/net/cmdline.py | |||
1509 | +++ b/cloudinit/net/cmdline.py | |||
1510 | @@ -116,10 +116,11 @@ def config_from_klibc_net_cfg(files=None, mac_addrs=None): | |||
1511 | 116 | prev = names[name]['entry'] | 116 | prev = names[name]['entry'] |
1512 | 117 | if prev.get('mac_address') != entry.get('mac_address'): | 117 | if prev.get('mac_address') != entry.get('mac_address'): |
1513 | 118 | raise ValueError( | 118 | raise ValueError( |
1518 | 119 | "device '%s' was defined multiple times (%s)" | 119 | "device '{name}' was defined multiple times ({files})" |
1519 | 120 | " but had differing mac addresses: %s -> %s.", | 120 | " but had differing mac addresses: {old} -> {new}.".format( |
1520 | 121 | (name, ' '.join(names[name]['files']), | 121 | name=name, files=' '.join(names[name]['files']), |
1521 | 122 | prev.get('mac_address'), entry.get('mac_address'))) | 122 | old=prev.get('mac_address'), |
1522 | 123 | new=entry.get('mac_address'))) | ||
1523 | 123 | prev['subnets'].extend(entry['subnets']) | 124 | prev['subnets'].extend(entry['subnets']) |
1524 | 124 | names[name]['files'].append(cfg_file) | 125 | names[name]['files'].append(cfg_file) |
1525 | 125 | else: | 126 | else: |
1526 | diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py | |||
1527 | index 875a460..087c0c0 100644 | |||
1528 | --- a/cloudinit/net/dhcp.py | |||
1529 | +++ b/cloudinit/net/dhcp.py | |||
1530 | @@ -10,7 +10,9 @@ import os | |||
1531 | 10 | import re | 10 | import re |
1532 | 11 | import signal | 11 | import signal |
1533 | 12 | 12 | ||
1535 | 13 | from cloudinit.net import find_fallback_nic, get_devicelist | 13 | from cloudinit.net import ( |
1536 | 14 | EphemeralIPv4Network, find_fallback_nic, get_devicelist) | ||
1537 | 15 | from cloudinit.net.network_state import mask_and_ipv4_to_bcast_addr as bcip | ||
1538 | 14 | from cloudinit import temp_utils | 16 | from cloudinit import temp_utils |
1539 | 15 | from cloudinit import util | 17 | from cloudinit import util |
1540 | 16 | from six import StringIO | 18 | from six import StringIO |
1541 | @@ -29,6 +31,45 @@ class InvalidDHCPLeaseFileError(Exception): | |||
1542 | 29 | pass | 31 | pass |
1543 | 30 | 32 | ||
1544 | 31 | 33 | ||
1545 | 34 | class NoDHCPLeaseError(Exception): | ||
1546 | 35 | """Raised when unable to get a DHCP lease.""" | ||
1547 | 36 | pass | ||
1548 | 37 | |||
1549 | 38 | |||
1550 | 39 | class EphemeralDHCPv4(object): | ||
1551 | 40 | def __init__(self, iface=None): | ||
1552 | 41 | self.iface = iface | ||
1553 | 42 | self._ephipv4 = None | ||
1554 | 43 | |||
1555 | 44 | def __enter__(self): | ||
1556 | 45 | try: | ||
1557 | 46 | leases = maybe_perform_dhcp_discovery(self.iface) | ||
1558 | 47 | except InvalidDHCPLeaseFileError: | ||
1559 | 48 | raise NoDHCPLeaseError() | ||
1560 | 49 | if not leases: | ||
1561 | 50 | raise NoDHCPLeaseError() | ||
1562 | 51 | lease = leases[-1] | ||
1563 | 52 | LOG.debug("Received dhcp lease on %s for %s/%s", | ||
1564 | 53 | lease['interface'], lease['fixed-address'], | ||
1565 | 54 | lease['subnet-mask']) | ||
1566 | 55 | nmap = {'interface': 'interface', 'ip': 'fixed-address', | ||
1567 | 56 | 'prefix_or_mask': 'subnet-mask', | ||
1568 | 57 | 'broadcast': 'broadcast-address', | ||
1569 | 58 | 'router': 'routers'} | ||
1570 | 59 | kwargs = dict([(k, lease.get(v)) for k, v in nmap.items()]) | ||
1571 | 60 | if not kwargs['broadcast']: | ||
1572 | 61 | kwargs['broadcast'] = bcip(kwargs['prefix_or_mask'], kwargs['ip']) | ||
1573 | 62 | ephipv4 = EphemeralIPv4Network(**kwargs) | ||
1574 | 63 | ephipv4.__enter__() | ||
1575 | 64 | self._ephipv4 = ephipv4 | ||
1576 | 65 | return lease | ||
1577 | 66 | |||
1578 | 67 | def __exit__(self, excp_type, excp_value, excp_traceback): | ||
1579 | 68 | if not self._ephipv4: | ||
1580 | 69 | return | ||
1581 | 70 | self._ephipv4.__exit__(excp_type, excp_value, excp_traceback) | ||
1582 | 71 | |||
1583 | 72 | |||
1584 | 32 | def maybe_perform_dhcp_discovery(nic=None): | 73 | def maybe_perform_dhcp_discovery(nic=None): |
1585 | 33 | """Perform dhcp discovery if nic valid and dhclient command exists. | 74 | """Perform dhcp discovery if nic valid and dhclient command exists. |
1586 | 34 | 75 | ||
1587 | diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py | |||
1588 | index e9e2cf4..fe667d8 100644 | |||
1589 | --- a/cloudinit/net/network_state.py | |||
1590 | +++ b/cloudinit/net/network_state.py | |||
1591 | @@ -474,8 +474,9 @@ class NetworkStateInterpreter(object): | |||
1592 | 474 | elif bridge_stp in ['off', '0', 0]: | 474 | elif bridge_stp in ['off', '0', 0]: |
1593 | 475 | bridge_stp = False | 475 | bridge_stp = False |
1594 | 476 | else: | 476 | else: |
1597 | 477 | raise ValueError("Cannot convert bridge_stp value" | 477 | raise ValueError( |
1598 | 478 | "(%s) to boolean", bridge_stp) | 478 | 'Cannot convert bridge_stp value ({stp}) to' |
1599 | 479 | ' boolean'.format(stp=bridge_stp)) | ||
1600 | 479 | iface.update({'bridge_stp': bridge_stp}) | 480 | iface.update({'bridge_stp': bridge_stp}) |
1601 | 480 | 481 | ||
1602 | 481 | interfaces.update({iface['name']: iface}) | 482 | interfaces.update({iface['name']: iface}) |
1603 | @@ -692,7 +693,8 @@ class NetworkStateInterpreter(object): | |||
1604 | 692 | elif cmd_type == "bond": | 693 | elif cmd_type == "bond": |
1605 | 693 | self.handle_bond(v1_cmd) | 694 | self.handle_bond(v1_cmd) |
1606 | 694 | else: | 695 | else: |
1608 | 695 | raise ValueError('Unknown command type: %s', cmd_type) | 696 | raise ValueError('Unknown command type: {cmd_type}'.format( |
1609 | 697 | cmd_type=cmd_type)) | ||
1610 | 696 | 698 | ||
1611 | 697 | def _v2_to_v1_ipcfg(self, cfg): | 699 | def _v2_to_v1_ipcfg(self, cfg): |
1612 | 698 | """Common ipconfig extraction from v2 to v1 subnets array.""" | 700 | """Common ipconfig extraction from v2 to v1 subnets array.""" |
1613 | @@ -959,4 +961,16 @@ def mask_to_net_prefix(mask): | |||
1614 | 959 | return ipv4_mask_to_net_prefix(mask) | 961 | return ipv4_mask_to_net_prefix(mask) |
1615 | 960 | 962 | ||
1616 | 961 | 963 | ||
1617 | 964 | def mask_and_ipv4_to_bcast_addr(mask, ip): | ||
1618 | 965 | """Calculate the broadcast address from the subnet mask and ip addr. | ||
1619 | 966 | |||
1620 | 967 | Supports ipv4 only.""" | ||
1621 | 968 | ip_bin = int(''.join([bin(int(x) + 256)[3:] for x in ip.split('.')]), 2) | ||
1622 | 969 | mask_dec = ipv4_mask_to_net_prefix(mask) | ||
1623 | 970 | bcast_bin = ip_bin | (2**(32 - mask_dec) - 1) | ||
1624 | 971 | bcast_str = '.'.join([str(bcast_bin >> (i << 3) & 0xFF) | ||
1625 | 972 | for i in range(4)[::-1]]) | ||
1626 | 973 | return bcast_str | ||
1627 | 974 | |||
1628 | 975 | |||
1629 | 962 | # vi: ts=4 expandtab | 976 | # vi: ts=4 expandtab |
1630 | diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py | |||
1631 | index 43a7e42..7ac8288 100644 | |||
1632 | --- a/cloudinit/sources/DataSourceAliYun.py | |||
1633 | +++ b/cloudinit/sources/DataSourceAliYun.py | |||
1634 | @@ -11,6 +11,7 @@ ALIYUN_PRODUCT = "Alibaba Cloud ECS" | |||
1635 | 11 | 11 | ||
1636 | 12 | class DataSourceAliYun(EC2.DataSourceEc2): | 12 | class DataSourceAliYun(EC2.DataSourceEc2): |
1637 | 13 | 13 | ||
1638 | 14 | dsname = 'AliYun' | ||
1639 | 14 | metadata_urls = ['http://100.100.100.200'] | 15 | metadata_urls = ['http://100.100.100.200'] |
1640 | 15 | 16 | ||
1641 | 16 | # The minimum supported metadata_version from the ec2 metadata apis | 17 | # The minimum supported metadata_version from the ec2 metadata apis |
1642 | diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py | |||
1643 | index c78ad9e..e1d0055 100644 | |||
1644 | --- a/cloudinit/sources/DataSourceAltCloud.py | |||
1645 | +++ b/cloudinit/sources/DataSourceAltCloud.py | |||
1646 | @@ -74,6 +74,9 @@ def read_user_data_callback(mount_dir): | |||
1647 | 74 | 74 | ||
1648 | 75 | 75 | ||
1649 | 76 | class DataSourceAltCloud(sources.DataSource): | 76 | class DataSourceAltCloud(sources.DataSource): |
1650 | 77 | |||
1651 | 78 | dsname = 'AltCloud' | ||
1652 | 79 | |||
1653 | 77 | def __init__(self, sys_cfg, distro, paths): | 80 | def __init__(self, sys_cfg, distro, paths): |
1654 | 78 | sources.DataSource.__init__(self, sys_cfg, distro, paths) | 81 | sources.DataSource.__init__(self, sys_cfg, distro, paths) |
1655 | 79 | self.seed = None | 82 | self.seed = None |
1656 | @@ -112,7 +115,7 @@ class DataSourceAltCloud(sources.DataSource): | |||
1657 | 112 | 115 | ||
1658 | 113 | return 'UNKNOWN' | 116 | return 'UNKNOWN' |
1659 | 114 | 117 | ||
1661 | 115 | def get_data(self): | 118 | def _get_data(self): |
1662 | 116 | ''' | 119 | ''' |
1663 | 117 | Description: | 120 | Description: |
1664 | 118 | User Data is passed to the launching instance which | 121 | User Data is passed to the launching instance which |
1665 | @@ -142,7 +145,7 @@ class DataSourceAltCloud(sources.DataSource): | |||
1666 | 142 | else: | 145 | else: |
1667 | 143 | cloud_type = self.get_cloud_type() | 146 | cloud_type = self.get_cloud_type() |
1668 | 144 | 147 | ||
1670 | 145 | LOG.debug('cloud_type: ' + str(cloud_type)) | 148 | LOG.debug('cloud_type: %s', str(cloud_type)) |
1671 | 146 | 149 | ||
1672 | 147 | if 'RHEV' in cloud_type: | 150 | if 'RHEV' in cloud_type: |
1673 | 148 | if self.user_data_rhevm(): | 151 | if self.user_data_rhevm(): |
1674 | diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py | |||
1675 | index 14367e9..4bcbf3a 100644 | |||
1676 | --- a/cloudinit/sources/DataSourceAzure.py | |||
1677 | +++ b/cloudinit/sources/DataSourceAzure.py | |||
1678 | @@ -11,13 +11,16 @@ from functools import partial | |||
1679 | 11 | import os | 11 | import os |
1680 | 12 | import os.path | 12 | import os.path |
1681 | 13 | import re | 13 | import re |
1682 | 14 | from time import time | ||
1683 | 14 | from xml.dom import minidom | 15 | from xml.dom import minidom |
1684 | 15 | import xml.etree.ElementTree as ET | 16 | import xml.etree.ElementTree as ET |
1685 | 16 | 17 | ||
1686 | 17 | from cloudinit import log as logging | 18 | from cloudinit import log as logging |
1687 | 18 | from cloudinit import net | 19 | from cloudinit import net |
1688 | 20 | from cloudinit.net.dhcp import EphemeralDHCPv4 | ||
1689 | 19 | from cloudinit import sources | 21 | from cloudinit import sources |
1690 | 20 | from cloudinit.sources.helpers.azure import get_metadata_from_fabric | 22 | from cloudinit.sources.helpers.azure import get_metadata_from_fabric |
1691 | 23 | from cloudinit.url_helper import readurl, wait_for_url, UrlError | ||
1692 | 21 | from cloudinit import util | 24 | from cloudinit import util |
1693 | 22 | 25 | ||
1694 | 23 | LOG = logging.getLogger(__name__) | 26 | LOG = logging.getLogger(__name__) |
1695 | @@ -26,10 +29,16 @@ DS_NAME = 'Azure' | |||
1696 | 26 | DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"} | 29 | DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"} |
1697 | 27 | AGENT_START = ['service', 'walinuxagent', 'start'] | 30 | AGENT_START = ['service', 'walinuxagent', 'start'] |
1698 | 28 | AGENT_START_BUILTIN = "__builtin__" | 31 | AGENT_START_BUILTIN = "__builtin__" |
1700 | 29 | BOUNCE_COMMAND = [ | 32 | BOUNCE_COMMAND_IFUP = [ |
1701 | 30 | 'sh', '-xc', | 33 | 'sh', '-xc', |
1702 | 31 | "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x" | 34 | "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x" |
1703 | 32 | ] | 35 | ] |
1704 | 36 | BOUNCE_COMMAND_FREEBSD = [ | ||
1705 | 37 | 'sh', '-xc', | ||
1706 | 38 | ("i=$interface; x=0; ifconfig down $i || x=$?; " | ||
1707 | 39 | "ifconfig up $i || x=$?; exit $x") | ||
1708 | 40 | ] | ||
1709 | 41 | |||
1710 | 33 | # azure systems will always have a resource disk, and 66-azure-ephemeral.rules | 42 | # azure systems will always have a resource disk, and 66-azure-ephemeral.rules |
1711 | 34 | # ensures that it gets linked to this path. | 43 | # ensures that it gets linked to this path. |
1712 | 35 | RESOURCE_DISK_PATH = '/dev/disk/cloud/azure_resource' | 44 | RESOURCE_DISK_PATH = '/dev/disk/cloud/azure_resource' |
1713 | @@ -38,6 +47,9 @@ LEASE_FILE = '/var/lib/dhcp/dhclient.eth0.leases' | |||
1714 | 38 | DEFAULT_FS = 'ext4' | 47 | DEFAULT_FS = 'ext4' |
1715 | 39 | # DMI chassis-asset-tag is set static for all azure instances | 48 | # DMI chassis-asset-tag is set static for all azure instances |
1716 | 40 | AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77' | 49 | AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77' |
1717 | 50 | REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds" | ||
1718 | 51 | IMDS_URL = "http://169.254.169.254/metadata/reprovisiondata" | ||
1719 | 52 | IMDS_RETRIES = 5 | ||
1720 | 41 | 53 | ||
1721 | 42 | 54 | ||
1722 | 43 | def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid): | 55 | def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid): |
1723 | @@ -177,11 +189,6 @@ if util.is_FreeBSD(): | |||
1724 | 177 | RESOURCE_DISK_PATH = "/dev/" + res_disk | 189 | RESOURCE_DISK_PATH = "/dev/" + res_disk |
1725 | 178 | else: | 190 | else: |
1726 | 179 | LOG.debug("resource disk is None") | 191 | LOG.debug("resource disk is None") |
1727 | 180 | BOUNCE_COMMAND = [ | ||
1728 | 181 | 'sh', '-xc', | ||
1729 | 182 | ("i=$interface; x=0; ifconfig down $i || x=$?; " | ||
1730 | 183 | "ifconfig up $i || x=$?; exit $x") | ||
1731 | 184 | ] | ||
1732 | 185 | 192 | ||
1733 | 186 | BUILTIN_DS_CONFIG = { | 193 | BUILTIN_DS_CONFIG = { |
1734 | 187 | 'agent_command': AGENT_START_BUILTIN, | 194 | 'agent_command': AGENT_START_BUILTIN, |
1735 | @@ -190,7 +197,7 @@ BUILTIN_DS_CONFIG = { | |||
1736 | 190 | 'hostname_bounce': { | 197 | 'hostname_bounce': { |
1737 | 191 | 'interface': DEFAULT_PRIMARY_NIC, | 198 | 'interface': DEFAULT_PRIMARY_NIC, |
1738 | 192 | 'policy': True, | 199 | 'policy': True, |
1740 | 193 | 'command': BOUNCE_COMMAND, | 200 | 'command': 'builtin', |
1741 | 194 | 'hostname_command': 'hostname', | 201 | 'hostname_command': 'hostname', |
1742 | 195 | }, | 202 | }, |
1743 | 196 | 'disk_aliases': {'ephemeral0': RESOURCE_DISK_PATH}, | 203 | 'disk_aliases': {'ephemeral0': RESOURCE_DISK_PATH}, |
1744 | @@ -246,6 +253,8 @@ def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'): | |||
1745 | 246 | 253 | ||
1746 | 247 | 254 | ||
1747 | 248 | class DataSourceAzure(sources.DataSource): | 255 | class DataSourceAzure(sources.DataSource): |
1748 | 256 | |||
1749 | 257 | dsname = 'Azure' | ||
1750 | 249 | _negotiated = False | 258 | _negotiated = False |
1751 | 250 | 259 | ||
1752 | 251 | def __init__(self, sys_cfg, distro, paths): | 260 | def __init__(self, sys_cfg, distro, paths): |
1753 | @@ -273,19 +282,20 @@ class DataSourceAzure(sources.DataSource): | |||
1754 | 273 | 282 | ||
1755 | 274 | with temporary_hostname(azure_hostname, self.ds_cfg, | 283 | with temporary_hostname(azure_hostname, self.ds_cfg, |
1756 | 275 | hostname_command=hostname_command) \ | 284 | hostname_command=hostname_command) \ |
1759 | 276 | as previous_hostname: | 285 | as previous_hn: |
1760 | 277 | if (previous_hostname is not None and | 286 | if (previous_hn is not None and |
1761 | 278 | util.is_true(self.ds_cfg.get('set_hostname'))): | 287 | util.is_true(self.ds_cfg.get('set_hostname'))): |
1762 | 279 | cfg = self.ds_cfg['hostname_bounce'] | 288 | cfg = self.ds_cfg['hostname_bounce'] |
1763 | 280 | 289 | ||
1764 | 281 | # "Bouncing" the network | 290 | # "Bouncing" the network |
1765 | 282 | try: | 291 | try: |
1769 | 283 | perform_hostname_bounce(hostname=azure_hostname, | 292 | return perform_hostname_bounce(hostname=azure_hostname, |
1770 | 284 | cfg=cfg, | 293 | cfg=cfg, |
1771 | 285 | prev_hostname=previous_hostname) | 294 | prev_hostname=previous_hn) |
1772 | 286 | except Exception as e: | 295 | except Exception as e: |
1773 | 287 | LOG.warning("Failed publishing hostname: %s", e) | 296 | LOG.warning("Failed publishing hostname: %s", e) |
1774 | 288 | util.logexc(LOG, "handling set_hostname failed") | 297 | util.logexc(LOG, "handling set_hostname failed") |
1775 | 298 | return False | ||
1776 | 289 | 299 | ||
1777 | 290 | def get_metadata_from_agent(self): | 300 | def get_metadata_from_agent(self): |
1778 | 291 | temp_hostname = self.metadata.get('local-hostname') | 301 | temp_hostname = self.metadata.get('local-hostname') |
1779 | @@ -330,7 +340,7 @@ class DataSourceAzure(sources.DataSource): | |||
1780 | 330 | metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files) | 340 | metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files) |
1781 | 331 | return metadata | 341 | return metadata |
1782 | 332 | 342 | ||
1784 | 333 | def get_data(self): | 343 | def _get_data(self): |
1785 | 334 | # azure removes/ejects the cdrom containing the ovf-env.xml | 344 | # azure removes/ejects the cdrom containing the ovf-env.xml |
1786 | 335 | # file on reboot. So, in order to successfully reboot we | 345 | # file on reboot. So, in order to successfully reboot we |
1787 | 336 | # need to look in the datadir and consider that valid | 346 | # need to look in the datadir and consider that valid |
1788 | @@ -342,15 +352,20 @@ class DataSourceAzure(sources.DataSource): | |||
1789 | 342 | ddir = self.ds_cfg['data_dir'] | 352 | ddir = self.ds_cfg['data_dir'] |
1790 | 343 | 353 | ||
1791 | 344 | candidates = [self.seed_dir] | 354 | candidates = [self.seed_dir] |
1792 | 355 | if os.path.isfile(REPROVISION_MARKER_FILE): | ||
1793 | 356 | candidates.insert(0, "IMDS") | ||
1794 | 345 | candidates.extend(list_possible_azure_ds_devs()) | 357 | candidates.extend(list_possible_azure_ds_devs()) |
1795 | 346 | if ddir: | 358 | if ddir: |
1796 | 347 | candidates.append(ddir) | 359 | candidates.append(ddir) |
1797 | 348 | 360 | ||
1798 | 349 | found = None | 361 | found = None |
1800 | 350 | 362 | reprovision = False | |
1801 | 351 | for cdev in candidates: | 363 | for cdev in candidates: |
1802 | 352 | try: | 364 | try: |
1804 | 353 | if cdev.startswith("/dev/"): | 365 | if cdev == "IMDS": |
1805 | 366 | ret = None | ||
1806 | 367 | reprovision = True | ||
1807 | 368 | elif cdev.startswith("/dev/"): | ||
1808 | 354 | if util.is_FreeBSD(): | 369 | if util.is_FreeBSD(): |
1809 | 355 | ret = util.mount_cb(cdev, load_azure_ds_dir, | 370 | ret = util.mount_cb(cdev, load_azure_ds_dir, |
1810 | 356 | mtype="udf", sync=False) | 371 | mtype="udf", sync=False) |
1811 | @@ -367,6 +382,8 @@ class DataSourceAzure(sources.DataSource): | |||
1812 | 367 | LOG.warning("%s was not mountable", cdev) | 382 | LOG.warning("%s was not mountable", cdev) |
1813 | 368 | continue | 383 | continue |
1814 | 369 | 384 | ||
1815 | 385 | if reprovision or self._should_reprovision(ret): | ||
1816 | 386 | ret = self._reprovision() | ||
1817 | 370 | (md, self.userdata_raw, cfg, files) = ret | 387 | (md, self.userdata_raw, cfg, files) = ret |
1818 | 371 | self.seed = cdev | 388 | self.seed = cdev |
1819 | 372 | self.metadata = util.mergemanydict([md, DEFAULT_METADATA]) | 389 | self.metadata = util.mergemanydict([md, DEFAULT_METADATA]) |
1820 | @@ -425,6 +442,83 @@ class DataSourceAzure(sources.DataSource): | |||
1821 | 425 | LOG.debug("negotiating already done for %s", | 442 | LOG.debug("negotiating already done for %s", |
1822 | 426 | self.get_instance_id()) | 443 | self.get_instance_id()) |
1823 | 427 | 444 | ||
1824 | 445 | def _poll_imds(self, report_ready=True): | ||
1825 | 446 | """Poll IMDS for the new provisioning data until we get a valid | ||
1826 | 447 | response. Then return the returned JSON object.""" | ||
1827 | 448 | url = IMDS_URL + "?api-version=2017-04-02" | ||
1828 | 449 | headers = {"Metadata": "true"} | ||
1829 | 450 | LOG.debug("Start polling IMDS") | ||
1830 | 451 | |||
1831 | 452 | def sleep_cb(response, loop_n): | ||
1832 | 453 | return 1 | ||
1833 | 454 | |||
1834 | 455 | def exception_cb(msg, exception): | ||
1835 | 456 | if isinstance(exception, UrlError) and exception.code == 404: | ||
1836 | 457 | return | ||
1837 | 458 | LOG.warning("Exception during polling. Will try DHCP.", | ||
1838 | 459 | exc_info=True) | ||
1839 | 460 | |||
1840 | 461 | # If we get an exception while trying to call IMDS, we | ||
1841 | 462 | # call DHCP and setup the ephemeral network to acquire the new IP. | ||
1842 | 463 | raise exception | ||
1843 | 464 | |||
1844 | 465 | need_report = report_ready | ||
1845 | 466 | for i in range(IMDS_RETRIES): | ||
1846 | 467 | try: | ||
1847 | 468 | with EphemeralDHCPv4() as lease: | ||
1848 | 469 | if need_report: | ||
1849 | 470 | self._report_ready(lease=lease) | ||
1850 | 471 | need_report = False | ||
1851 | 472 | wait_for_url([url], max_wait=None, timeout=60, | ||
1852 | 473 | status_cb=LOG.info, | ||
1853 | 474 | headers_cb=lambda url: headers, sleep_time=1, | ||
1854 | 475 | exception_cb=exception_cb, | ||
1855 | 476 | sleep_time_cb=sleep_cb) | ||
1856 | 477 | return str(readurl(url, headers=headers)) | ||
1857 | 478 | except Exception: | ||
1858 | 479 | LOG.debug("Exception during polling-retrying dhcp" + | ||
1859 | 480 | " %d more time(s).", (IMDS_RETRIES - i), | ||
1860 | 481 | exc_info=True) | ||
1861 | 482 | |||
1862 | 483 | def _report_ready(self, lease): | ||
1863 | 484 | """Tells the fabric provisioning has completed | ||
1864 | 485 | before we go into our polling loop.""" | ||
1865 | 486 | try: | ||
1866 | 487 | get_metadata_from_fabric(None, lease['unknown-245']) | ||
1867 | 488 | except Exception as exc: | ||
1868 | 489 | LOG.warning( | ||
1869 | 490 | "Error communicating with Azure fabric; You may experience." | ||
1870 | 491 | "connectivity issues.", exc_info=True) | ||
1871 | 492 | |||
1872 | 493 | def _should_reprovision(self, ret): | ||
1873 | 494 | """Whether or not we should poll IMDS for reprovisioning data. | ||
1874 | 495 | Also sets a marker file to poll IMDS. | ||
1875 | 496 | |||
1876 | 497 | The marker file is used for the following scenario: the VM boots into | ||
1877 | 498 | this polling loop, which we expect to be proceeding infinitely until | ||
1878 | 499 | the VM is picked. If for whatever reason the platform moves us to a | ||
1879 | 500 | new host (for instance a hardware issue), we need to keep polling. | ||
1880 | 501 | However, since the VM reports ready to the Fabric, we will not attach | ||
1881 | 502 | the ISO, thus cloud-init needs to have a way of knowing that it should | ||
1882 | 503 | jump back into the polling loop in order to retrieve the ovf_env.""" | ||
1883 | 504 | if not ret: | ||
1884 | 505 | return False | ||
1885 | 506 | (md, self.userdata_raw, cfg, files) = ret | ||
1886 | 507 | path = REPROVISION_MARKER_FILE | ||
1887 | 508 | if (cfg.get('PreprovisionedVm') is True or | ||
1888 | 509 | os.path.isfile(path)): | ||
1889 | 510 | if not os.path.isfile(path): | ||
1890 | 511 | LOG.info("Creating a marker file to poll imds") | ||
1891 | 512 | util.write_file(path, "%s: %s\n" % (os.getpid(), time())) | ||
1892 | 513 | return True | ||
1893 | 514 | return False | ||
1894 | 515 | |||
1895 | 516 | def _reprovision(self): | ||
1896 | 517 | """Initiate the reprovisioning workflow.""" | ||
1897 | 518 | contents = self._poll_imds() | ||
1898 | 519 | md, ud, cfg = read_azure_ovf(contents) | ||
1899 | 520 | return (md, ud, cfg, {'ovf-env.xml': contents}) | ||
1900 | 521 | |||
1901 | 428 | def _negotiate(self): | 522 | def _negotiate(self): |
1902 | 429 | """Negotiate with fabric and return data from it. | 523 | """Negotiate with fabric and return data from it. |
1903 | 430 | 524 | ||
1904 | @@ -450,7 +544,7 @@ class DataSourceAzure(sources.DataSource): | |||
1905 | 450 | "Error communicating with Azure fabric; You may experience." | 544 | "Error communicating with Azure fabric; You may experience." |
1906 | 451 | "connectivity issues.", exc_info=True) | 545 | "connectivity issues.", exc_info=True) |
1907 | 452 | return False | 546 | return False |
1909 | 453 | 547 | util.del_file(REPROVISION_MARKER_FILE) | |
1910 | 454 | return fabric_data | 548 | return fabric_data |
1911 | 455 | 549 | ||
1912 | 456 | def activate(self, cfg, is_new_instance): | 550 | def activate(self, cfg, is_new_instance): |
1913 | @@ -580,18 +674,19 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, | |||
1914 | 580 | if os.path.exists(sempath): | 674 | if os.path.exists(sempath): |
1915 | 581 | try: | 675 | try: |
1916 | 582 | os.unlink(sempath) | 676 | os.unlink(sempath) |
1918 | 583 | LOG.debug(bmsg + " removed.") | 677 | LOG.debug('%s removed.', bmsg) |
1919 | 584 | except Exception as e: | 678 | except Exception as e: |
1920 | 585 | # python3 throws FileNotFoundError, python2 throws OSError | 679 | # python3 throws FileNotFoundError, python2 throws OSError |
1922 | 586 | LOG.warning(bmsg + ": remove failed! (%s)", e) | 680 | LOG.warning('%s: remove failed! (%s)', bmsg, e) |
1923 | 587 | else: | 681 | else: |
1925 | 588 | LOG.debug(bmsg + " did not exist.") | 682 | LOG.debug('%s did not exist.', bmsg) |
1926 | 589 | return | 683 | return |
1927 | 590 | 684 | ||
1928 | 591 | 685 | ||
1929 | 592 | def perform_hostname_bounce(hostname, cfg, prev_hostname): | 686 | def perform_hostname_bounce(hostname, cfg, prev_hostname): |
1930 | 593 | # set the hostname to 'hostname' if it is not already set to that. | 687 | # set the hostname to 'hostname' if it is not already set to that. |
1931 | 594 | # then, if policy is not off, bounce the interface using command | 688 | # then, if policy is not off, bounce the interface using command |
1932 | 689 | # Returns True if the network was bounced, False otherwise. | ||
1933 | 595 | command = cfg['command'] | 690 | command = cfg['command'] |
1934 | 596 | interface = cfg['interface'] | 691 | interface = cfg['interface'] |
1935 | 597 | policy = cfg['policy'] | 692 | policy = cfg['policy'] |
1936 | @@ -604,8 +699,15 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname): | |||
1937 | 604 | env['old_hostname'] = prev_hostname | 699 | env['old_hostname'] = prev_hostname |
1938 | 605 | 700 | ||
1939 | 606 | if command == "builtin": | 701 | if command == "builtin": |
1942 | 607 | command = BOUNCE_COMMAND | 702 | if util.is_FreeBSD(): |
1943 | 608 | 703 | command = BOUNCE_COMMAND_FREEBSD | |
1944 | 704 | elif util.which('ifup'): | ||
1945 | 705 | command = BOUNCE_COMMAND_IFUP | ||
1946 | 706 | else: | ||
1947 | 707 | LOG.debug( | ||
1948 | 708 | "Skipping network bounce: ifupdown utils aren't present.") | ||
1949 | 709 | # Don't bounce as networkd handles hostname DDNS updates | ||
1950 | 710 | return False | ||
1951 | 609 | LOG.debug("pubhname: publishing hostname [%s]", msg) | 711 | LOG.debug("pubhname: publishing hostname [%s]", msg) |
1952 | 610 | shell = not isinstance(command, (list, tuple)) | 712 | shell = not isinstance(command, (list, tuple)) |
1953 | 611 | # capture=False, see comments in bug 1202758 and bug 1206164. | 713 | # capture=False, see comments in bug 1202758 and bug 1206164. |
1954 | @@ -613,6 +715,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname): | |||
1955 | 613 | get_uptime=True, func=util.subp, | 715 | get_uptime=True, func=util.subp, |
1956 | 614 | kwargs={'args': command, 'shell': shell, 'capture': False, | 716 | kwargs={'args': command, 'shell': shell, 'capture': False, |
1957 | 615 | 'env': env}) | 717 | 'env': env}) |
1958 | 718 | return True | ||
1959 | 616 | 719 | ||
1960 | 617 | 720 | ||
1961 | 618 | def crtfile_to_pubkey(fname, data=None): | 721 | def crtfile_to_pubkey(fname, data=None): |
1962 | @@ -829,9 +932,35 @@ def read_azure_ovf(contents): | |||
1963 | 829 | if 'ssh_pwauth' not in cfg and password: | 932 | if 'ssh_pwauth' not in cfg and password: |
1964 | 830 | cfg['ssh_pwauth'] = True | 933 | cfg['ssh_pwauth'] = True |
1965 | 831 | 934 | ||
1966 | 935 | cfg['PreprovisionedVm'] = _extract_preprovisioned_vm_setting(dom) | ||
1967 | 936 | |||
1968 | 832 | return (md, ud, cfg) | 937 | return (md, ud, cfg) |
1969 | 833 | 938 | ||
1970 | 834 | 939 | ||
1971 | 940 | def _extract_preprovisioned_vm_setting(dom): | ||
1972 | 941 | """Read the preprovision flag from the ovf. It should not | ||
1973 | 942 | exist unless true.""" | ||
1974 | 943 | platform_settings_section = find_child( | ||
1975 | 944 | dom.documentElement, | ||
1976 | 945 | lambda n: n.localName == "PlatformSettingsSection") | ||
1977 | 946 | if not platform_settings_section or len(platform_settings_section) == 0: | ||
1978 | 947 | LOG.debug("PlatformSettingsSection not found") | ||
1979 | 948 | return False | ||
1980 | 949 | platform_settings = find_child( | ||
1981 | 950 | platform_settings_section[0], | ||
1982 | 951 | lambda n: n.localName == "PlatformSettings") | ||
1983 | 952 | if not platform_settings or len(platform_settings) == 0: | ||
1984 | 953 | LOG.debug("PlatformSettings not found") | ||
1985 | 954 | return False | ||
1986 | 955 | preprovisionedVm = find_child( | ||
1987 | 956 | platform_settings[0], | ||
1988 | 957 | lambda n: n.localName == "PreprovisionedVm") | ||
1989 | 958 | if not preprovisionedVm or len(preprovisionedVm) == 0: | ||
1990 | 959 | LOG.debug("PreprovisionedVm not found") | ||
1991 | 960 | return False | ||
1992 | 961 | return util.translate_bool(preprovisionedVm[0].firstChild.nodeValue) | ||
1993 | 962 | |||
1994 | 963 | |||
1995 | 835 | def encrypt_pass(password, salt_id="$6$"): | 964 | def encrypt_pass(password, salt_id="$6$"): |
1996 | 836 | return crypt.crypt(password, salt_id + util.rand_str(strlen=16)) | 965 | return crypt.crypt(password, salt_id + util.rand_str(strlen=16)) |
1997 | 837 | 966 | ||
1998 | diff --git a/cloudinit/sources/DataSourceBigstep.py b/cloudinit/sources/DataSourceBigstep.py | |||
1999 | index d7fcd45..699a85b 100644 | |||
2000 | --- a/cloudinit/sources/DataSourceBigstep.py | |||
2001 | +++ b/cloudinit/sources/DataSourceBigstep.py | |||
2002 | @@ -16,13 +16,16 @@ LOG = logging.getLogger(__name__) | |||
2003 | 16 | 16 | ||
2004 | 17 | 17 | ||
2005 | 18 | class DataSourceBigstep(sources.DataSource): | 18 | class DataSourceBigstep(sources.DataSource): |
2006 | 19 | |||
2007 | 20 | dsname = 'Bigstep' | ||
2008 | 21 | |||
2009 | 19 | def __init__(self, sys_cfg, distro, paths): | 22 | def __init__(self, sys_cfg, distro, paths): |
2010 | 20 | sources.DataSource.__init__(self, sys_cfg, distro, paths) | 23 | sources.DataSource.__init__(self, sys_cfg, distro, paths) |
2011 | 21 | self.metadata = {} | 24 | self.metadata = {} |
2012 | 22 | self.vendordata_raw = "" | 25 | self.vendordata_raw = "" |
2013 | 23 | self.userdata_raw = "" | 26 | self.userdata_raw = "" |
2014 | 24 | 27 | ||
2016 | 25 | def get_data(self, apply_filter=False): | 28 | def _get_data(self, apply_filter=False): |
2017 | 26 | url = get_url_from_file() | 29 | url = get_url_from_file() |
2018 | 27 | if url is None: | 30 | if url is None: |
2019 | 28 | return False | 31 | return False |
2020 | diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py | |||
2021 | index 19df16b..4eaad47 100644 | |||
2022 | --- a/cloudinit/sources/DataSourceCloudSigma.py | |||
2023 | +++ b/cloudinit/sources/DataSourceCloudSigma.py | |||
2024 | @@ -23,6 +23,9 @@ class DataSourceCloudSigma(sources.DataSource): | |||
2025 | 23 | For more information about CloudSigma's Server Context: | 23 | For more information about CloudSigma's Server Context: |
2026 | 24 | http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html | 24 | http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html |
2027 | 25 | """ | 25 | """ |
2028 | 26 | |||
2029 | 27 | dsname = 'CloudSigma' | ||
2030 | 28 | |||
2031 | 26 | def __init__(self, sys_cfg, distro, paths): | 29 | def __init__(self, sys_cfg, distro, paths): |
2032 | 27 | self.cepko = Cepko() | 30 | self.cepko = Cepko() |
2033 | 28 | self.ssh_public_key = '' | 31 | self.ssh_public_key = '' |
2034 | @@ -46,7 +49,7 @@ class DataSourceCloudSigma(sources.DataSource): | |||
2035 | 46 | LOG.warning("failed to query dmi data for system product name") | 49 | LOG.warning("failed to query dmi data for system product name") |
2036 | 47 | return False | 50 | return False |
2037 | 48 | 51 | ||
2039 | 49 | def get_data(self): | 52 | def _get_data(self): |
2040 | 50 | """ | 53 | """ |
2041 | 51 | Metadata is the whole server context and /meta/cloud-config is used | 54 | Metadata is the whole server context and /meta/cloud-config is used |
2042 | 52 | as userdata. | 55 | as userdata. |
2043 | diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py | |||
2044 | index 9dc473f..0df545f 100644 | |||
2045 | --- a/cloudinit/sources/DataSourceCloudStack.py | |||
2046 | +++ b/cloudinit/sources/DataSourceCloudStack.py | |||
2047 | @@ -65,6 +65,9 @@ class CloudStackPasswordServerClient(object): | |||
2048 | 65 | 65 | ||
2049 | 66 | 66 | ||
2050 | 67 | class DataSourceCloudStack(sources.DataSource): | 67 | class DataSourceCloudStack(sources.DataSource): |
2051 | 68 | |||
2052 | 69 | dsname = 'CloudStack' | ||
2053 | 70 | |||
2054 | 68 | def __init__(self, sys_cfg, distro, paths): | 71 | def __init__(self, sys_cfg, distro, paths): |
2055 | 69 | sources.DataSource.__init__(self, sys_cfg, distro, paths) | 72 | sources.DataSource.__init__(self, sys_cfg, distro, paths) |
2056 | 70 | self.seed_dir = os.path.join(paths.seed_dir, 'cs') | 73 | self.seed_dir = os.path.join(paths.seed_dir, 'cs') |
2057 | @@ -117,7 +120,7 @@ class DataSourceCloudStack(sources.DataSource): | |||
2058 | 117 | def get_config_obj(self): | 120 | def get_config_obj(self): |
2059 | 118 | return self.cfg | 121 | return self.cfg |
2060 | 119 | 122 | ||
2062 | 120 | def get_data(self): | 123 | def _get_data(self): |
2063 | 121 | seed_ret = {} | 124 | seed_ret = {} |
2064 | 122 | if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")): | 125 | if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")): |
2065 | 123 | self.userdata_raw = seed_ret['user-data'] | 126 | self.userdata_raw = seed_ret['user-data'] |
2066 | diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py | |||
2067 | index ef374f3..b8db626 100644 | |||
2068 | --- a/cloudinit/sources/DataSourceConfigDrive.py | |||
2069 | +++ b/cloudinit/sources/DataSourceConfigDrive.py | |||
2070 | @@ -25,13 +25,16 @@ DEFAULT_METADATA = { | |||
2071 | 25 | "instance-id": DEFAULT_IID, | 25 | "instance-id": DEFAULT_IID, |
2072 | 26 | } | 26 | } |
2073 | 27 | FS_TYPES = ('vfat', 'iso9660') | 27 | FS_TYPES = ('vfat', 'iso9660') |
2075 | 28 | LABEL_TYPES = ('config-2',) | 28 | LABEL_TYPES = ('config-2', 'CONFIG-2') |
2076 | 29 | POSSIBLE_MOUNTS = ('sr', 'cd') | 29 | POSSIBLE_MOUNTS = ('sr', 'cd') |
2077 | 30 | OPTICAL_DEVICES = tuple(('/dev/%s%s' % (z, i) for z in POSSIBLE_MOUNTS | 30 | OPTICAL_DEVICES = tuple(('/dev/%s%s' % (z, i) for z in POSSIBLE_MOUNTS |
2078 | 31 | for i in range(0, 2))) | 31 | for i in range(0, 2))) |
2079 | 32 | 32 | ||
2080 | 33 | 33 | ||
2081 | 34 | class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): | 34 | class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): |
2082 | 35 | |||
2083 | 36 | dsname = 'ConfigDrive' | ||
2084 | 37 | |||
2085 | 35 | def __init__(self, sys_cfg, distro, paths): | 38 | def __init__(self, sys_cfg, distro, paths): |
2086 | 36 | super(DataSourceConfigDrive, self).__init__(sys_cfg, distro, paths) | 39 | super(DataSourceConfigDrive, self).__init__(sys_cfg, distro, paths) |
2087 | 37 | self.source = None | 40 | self.source = None |
2088 | @@ -50,7 +53,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): | |||
2089 | 50 | mstr += "[source=%s]" % (self.source) | 53 | mstr += "[source=%s]" % (self.source) |
2090 | 51 | return mstr | 54 | return mstr |
2091 | 52 | 55 | ||
2093 | 53 | def get_data(self): | 56 | def _get_data(self): |
2094 | 54 | found = None | 57 | found = None |
2095 | 55 | md = {} | 58 | md = {} |
2096 | 56 | results = {} | 59 | results = {} |
2097 | @@ -221,7 +224,7 @@ def find_candidate_devs(probe_optical=True): | |||
2098 | 221 | config drive v2: | 224 | config drive v2: |
2099 | 222 | Disk should be: | 225 | Disk should be: |
2100 | 223 | * either vfat or iso9660 formated | 226 | * either vfat or iso9660 formated |
2102 | 224 | * labeled with 'config-2' | 227 | * labeled with 'config-2' or 'CONFIG-2' |
2103 | 225 | """ | 228 | """ |
2104 | 226 | # query optical drive to get it in blkid cache for 2.6 kernels | 229 | # query optical drive to get it in blkid cache for 2.6 kernels |
2105 | 227 | if probe_optical: | 230 | if probe_optical: |
2106 | diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py | |||
2107 | index 5e7e66b..e0ef665 100644 | |||
2108 | --- a/cloudinit/sources/DataSourceDigitalOcean.py | |||
2109 | +++ b/cloudinit/sources/DataSourceDigitalOcean.py | |||
2110 | @@ -27,6 +27,9 @@ MD_USE_IPV4LL = True | |||
2111 | 27 | 27 | ||
2112 | 28 | 28 | ||
2113 | 29 | class DataSourceDigitalOcean(sources.DataSource): | 29 | class DataSourceDigitalOcean(sources.DataSource): |
2114 | 30 | |||
2115 | 31 | dsname = 'DigitalOcean' | ||
2116 | 32 | |||
2117 | 30 | def __init__(self, sys_cfg, distro, paths): | 33 | def __init__(self, sys_cfg, distro, paths): |
2118 | 31 | sources.DataSource.__init__(self, sys_cfg, distro, paths) | 34 | sources.DataSource.__init__(self, sys_cfg, distro, paths) |
2119 | 32 | self.distro = distro | 35 | self.distro = distro |
2120 | @@ -44,7 +47,7 @@ class DataSourceDigitalOcean(sources.DataSource): | |||
2121 | 44 | def _get_sysinfo(self): | 47 | def _get_sysinfo(self): |
2122 | 45 | return do_helper.read_sysinfo() | 48 | return do_helper.read_sysinfo() |
2123 | 46 | 49 | ||
2125 | 47 | def get_data(self): | 50 | def _get_data(self): |
2126 | 48 | (is_do, droplet_id) = self._get_sysinfo() | 51 | (is_do, droplet_id) = self._get_sysinfo() |
2127 | 49 | 52 | ||
2128 | 50 | # only proceed if we know we are on DigitalOcean | 53 | # only proceed if we know we are on DigitalOcean |
2129 | diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py | |||
2130 | index 7bbbfb6..e14553b 100644 | |||
2131 | --- a/cloudinit/sources/DataSourceEc2.py | |||
2132 | +++ b/cloudinit/sources/DataSourceEc2.py | |||
2133 | @@ -14,7 +14,7 @@ import time | |||
2134 | 14 | from cloudinit import ec2_utils as ec2 | 14 | from cloudinit import ec2_utils as ec2 |
2135 | 15 | from cloudinit import log as logging | 15 | from cloudinit import log as logging |
2136 | 16 | from cloudinit import net | 16 | from cloudinit import net |
2138 | 17 | from cloudinit.net import dhcp | 17 | from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError |
2139 | 18 | from cloudinit import sources | 18 | from cloudinit import sources |
2140 | 19 | from cloudinit import url_helper as uhelp | 19 | from cloudinit import url_helper as uhelp |
2141 | 20 | from cloudinit import util | 20 | from cloudinit import util |
2142 | @@ -31,6 +31,7 @@ _unset = "_unset" | |||
2143 | 31 | 31 | ||
2144 | 32 | 32 | ||
2145 | 33 | class Platforms(object): | 33 | class Platforms(object): |
2146 | 34 | # TODO Rename and move to cloudinit.cloud.CloudNames | ||
2147 | 34 | ALIYUN = "AliYun" | 35 | ALIYUN = "AliYun" |
2148 | 35 | AWS = "AWS" | 36 | AWS = "AWS" |
2149 | 36 | BRIGHTBOX = "Brightbox" | 37 | BRIGHTBOX = "Brightbox" |
2150 | @@ -45,6 +46,7 @@ class Platforms(object): | |||
2151 | 45 | 46 | ||
2152 | 46 | class DataSourceEc2(sources.DataSource): | 47 | class DataSourceEc2(sources.DataSource): |
2153 | 47 | 48 | ||
2154 | 49 | dsname = 'Ec2' | ||
2155 | 48 | # Default metadata urls that will be used if none are provided | 50 | # Default metadata urls that will be used if none are provided |
2156 | 49 | # They will be checked for 'resolveability' and some of the | 51 | # They will be checked for 'resolveability' and some of the |
2157 | 50 | # following may be discarded if they do not resolve | 52 | # following may be discarded if they do not resolve |
2158 | @@ -68,11 +70,15 @@ class DataSourceEc2(sources.DataSource): | |||
2159 | 68 | _fallback_interface = None | 70 | _fallback_interface = None |
2160 | 69 | 71 | ||
2161 | 70 | def __init__(self, sys_cfg, distro, paths): | 72 | def __init__(self, sys_cfg, distro, paths): |
2163 | 71 | sources.DataSource.__init__(self, sys_cfg, distro, paths) | 73 | super(DataSourceEc2, self).__init__(sys_cfg, distro, paths) |
2164 | 72 | self.metadata_address = None | 74 | self.metadata_address = None |
2165 | 73 | self.seed_dir = os.path.join(paths.seed_dir, "ec2") | 75 | self.seed_dir = os.path.join(paths.seed_dir, "ec2") |
2166 | 74 | 76 | ||
2168 | 75 | def get_data(self): | 77 | def _get_cloud_name(self): |
2169 | 78 | """Return the cloud name as identified during _get_data.""" | ||
2170 | 79 | return self.cloud_platform | ||
2171 | 80 | |||
2172 | 81 | def _get_data(self): | ||
2173 | 76 | seed_ret = {} | 82 | seed_ret = {} |
2174 | 77 | if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")): | 83 | if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")): |
2175 | 78 | self.userdata_raw = seed_ret['user-data'] | 84 | self.userdata_raw = seed_ret['user-data'] |
2176 | @@ -96,22 +102,13 @@ class DataSourceEc2(sources.DataSource): | |||
2177 | 96 | if util.is_FreeBSD(): | 102 | if util.is_FreeBSD(): |
2178 | 97 | LOG.debug("FreeBSD doesn't support running dhclient with -sf") | 103 | LOG.debug("FreeBSD doesn't support running dhclient with -sf") |
2179 | 98 | return False | 104 | return False |
2185 | 99 | dhcp_leases = dhcp.maybe_perform_dhcp_discovery( | 105 | try: |
2186 | 100 | self.fallback_interface) | 106 | with EphemeralDHCPv4(self.fallback_interface): |
2187 | 101 | if not dhcp_leases: | 107 | return util.log_time( |
2188 | 102 | # DataSourceEc2Local failed in init-local stage. DataSourceEc2 | 108 | logfunc=LOG.debug, msg='Crawl of metadata service', |
2189 | 103 | # will still run in init-network stage. | 109 | func=self._crawl_metadata) |
2190 | 110 | except NoDHCPLeaseError: | ||
2191 | 104 | return False | 111 | return False |
2192 | 105 | dhcp_opts = dhcp_leases[-1] | ||
2193 | 106 | net_params = {'interface': dhcp_opts.get('interface'), | ||
2194 | 107 | 'ip': dhcp_opts.get('fixed-address'), | ||
2195 | 108 | 'prefix_or_mask': dhcp_opts.get('subnet-mask'), | ||
2196 | 109 | 'broadcast': dhcp_opts.get('broadcast-address'), | ||
2197 | 110 | 'router': dhcp_opts.get('routers')} | ||
2198 | 111 | with net.EphemeralIPv4Network(**net_params): | ||
2199 | 112 | return util.log_time( | ||
2200 | 113 | logfunc=LOG.debug, msg='Crawl of metadata service', | ||
2201 | 114 | func=self._crawl_metadata) | ||
2202 | 115 | else: | 112 | else: |
2203 | 116 | return self._crawl_metadata() | 113 | return self._crawl_metadata() |
2204 | 117 | 114 | ||
2205 | @@ -148,7 +145,12 @@ class DataSourceEc2(sources.DataSource): | |||
2206 | 148 | return self.min_metadata_version | 145 | return self.min_metadata_version |
2207 | 149 | 146 | ||
2208 | 150 | def get_instance_id(self): | 147 | def get_instance_id(self): |
2210 | 151 | return self.metadata['instance-id'] | 148 | if self.cloud_platform == Platforms.AWS: |
2211 | 149 | # Prefer the ID from the instance identity document, but fall back | ||
2212 | 150 | return self.identity.get( | ||
2213 | 151 | 'instanceId', self.metadata['instance-id']) | ||
2214 | 152 | else: | ||
2215 | 153 | return self.metadata['instance-id'] | ||
2216 | 152 | 154 | ||
2217 | 153 | def _get_url_settings(self): | 155 | def _get_url_settings(self): |
2218 | 154 | mcfg = self.ds_cfg | 156 | mcfg = self.ds_cfg |
2219 | @@ -262,19 +264,31 @@ class DataSourceEc2(sources.DataSource): | |||
2220 | 262 | @property | 264 | @property |
2221 | 263 | def availability_zone(self): | 265 | def availability_zone(self): |
2222 | 264 | try: | 266 | try: |
2224 | 265 | return self.metadata['placement']['availability-zone'] | 267 | if self.cloud_platform == Platforms.AWS: |
2225 | 268 | return self.identity.get( | ||
2226 | 269 | 'availabilityZone', | ||
2227 | 270 | self.metadata['placement']['availability-zone']) | ||
2228 | 271 | else: | ||
2229 | 272 | return self.metadata['placement']['availability-zone'] | ||
2230 | 266 | except KeyError: | 273 | except KeyError: |
2231 | 267 | return None | 274 | return None |
2232 | 268 | 275 | ||
2233 | 269 | @property | 276 | @property |
2234 | 270 | def region(self): | 277 | def region(self): |
2238 | 271 | az = self.availability_zone | 278 | if self.cloud_platform == Platforms.AWS: |
2239 | 272 | if az is not None: | 279 | region = self.identity.get('region') |
2240 | 273 | return az[:-1] | 280 | # Fallback to trimming the availability zone if region is missing |
2241 | 281 | if self.availability_zone and not region: | ||
2242 | 282 | region = self.availability_zone[:-1] | ||
2243 | 283 | return region | ||
2244 | 284 | else: | ||
2245 | 285 | az = self.availability_zone | ||
2246 | 286 | if az is not None: | ||
2247 | 287 | return az[:-1] | ||
2248 | 274 | return None | 288 | return None |
2249 | 275 | 289 | ||
2250 | 276 | @property | 290 | @property |
2252 | 277 | def cloud_platform(self): | 291 | def cloud_platform(self): # TODO rename cloud_name |
2253 | 278 | if self._cloud_platform is None: | 292 | if self._cloud_platform is None: |
2254 | 279 | self._cloud_platform = identify_platform() | 293 | self._cloud_platform = identify_platform() |
2255 | 280 | return self._cloud_platform | 294 | return self._cloud_platform |
2256 | @@ -351,6 +365,9 @@ class DataSourceEc2(sources.DataSource): | |||
2257 | 351 | api_version, self.metadata_address) | 365 | api_version, self.metadata_address) |
2258 | 352 | self.metadata = ec2.get_instance_metadata( | 366 | self.metadata = ec2.get_instance_metadata( |
2259 | 353 | api_version, self.metadata_address) | 367 | api_version, self.metadata_address) |
2260 | 368 | if self.cloud_platform == Platforms.AWS: | ||
2261 | 369 | self.identity = ec2.get_instance_identity( | ||
2262 | 370 | api_version, self.metadata_address).get('document', {}) | ||
2263 | 354 | except Exception: | 371 | except Exception: |
2264 | 355 | util.logexc( | 372 | util.logexc( |
2265 | 356 | LOG, "Failed reading from metadata address %s", | 373 | LOG, "Failed reading from metadata address %s", |
2266 | diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py | |||
2267 | index ccae420..2da34a9 100644 | |||
2268 | --- a/cloudinit/sources/DataSourceGCE.py | |||
2269 | +++ b/cloudinit/sources/DataSourceGCE.py | |||
2270 | @@ -2,8 +2,12 @@ | |||
2271 | 2 | # | 2 | # |
2272 | 3 | # This file is part of cloud-init. See LICENSE file for license information. | 3 | # This file is part of cloud-init. See LICENSE file for license information. |
2273 | 4 | 4 | ||
2274 | 5 | import datetime | ||
2275 | 6 | import json | ||
2276 | 7 | |||
2277 | 5 | from base64 import b64decode | 8 | from base64 import b64decode |
2278 | 6 | 9 | ||
2279 | 10 | from cloudinit.distros import ug_util | ||
2280 | 7 | from cloudinit import log as logging | 11 | from cloudinit import log as logging |
2281 | 8 | from cloudinit import sources | 12 | from cloudinit import sources |
2282 | 9 | from cloudinit import url_helper | 13 | from cloudinit import url_helper |
2283 | @@ -17,16 +21,18 @@ REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname') | |||
2284 | 17 | 21 | ||
2285 | 18 | 22 | ||
2286 | 19 | class GoogleMetadataFetcher(object): | 23 | class GoogleMetadataFetcher(object): |
2288 | 20 | headers = {'X-Google-Metadata-Request': 'True'} | 24 | headers = {'Metadata-Flavor': 'Google'} |
2289 | 21 | 25 | ||
2290 | 22 | def __init__(self, metadata_address): | 26 | def __init__(self, metadata_address): |
2291 | 23 | self.metadata_address = metadata_address | 27 | self.metadata_address = metadata_address |
2292 | 24 | 28 | ||
2294 | 25 | def get_value(self, path, is_text): | 29 | def get_value(self, path, is_text, is_recursive=False): |
2295 | 26 | value = None | 30 | value = None |
2296 | 27 | try: | 31 | try: |
2299 | 28 | resp = url_helper.readurl(url=self.metadata_address + path, | 32 | url = self.metadata_address + path |
2300 | 29 | headers=self.headers) | 33 | if is_recursive: |
2301 | 34 | url += '/?recursive=True' | ||
2302 | 35 | resp = url_helper.readurl(url=url, headers=self.headers) | ||
2303 | 30 | except url_helper.UrlError as exc: | 36 | except url_helper.UrlError as exc: |
2304 | 31 | msg = "url %s raised exception %s" | 37 | msg = "url %s raised exception %s" |
2305 | 32 | LOG.debug(msg, path, exc) | 38 | LOG.debug(msg, path, exc) |
2306 | @@ -35,22 +41,29 @@ class GoogleMetadataFetcher(object): | |||
2307 | 35 | if is_text: | 41 | if is_text: |
2308 | 36 | value = util.decode_binary(resp.contents) | 42 | value = util.decode_binary(resp.contents) |
2309 | 37 | else: | 43 | else: |
2311 | 38 | value = resp.contents | 44 | value = resp.contents.decode('utf-8') |
2312 | 39 | else: | 45 | else: |
2313 | 40 | LOG.debug("url %s returned code %s", path, resp.code) | 46 | LOG.debug("url %s returned code %s", path, resp.code) |
2314 | 41 | return value | 47 | return value |
2315 | 42 | 48 | ||
2316 | 43 | 49 | ||
2317 | 44 | class DataSourceGCE(sources.DataSource): | 50 | class DataSourceGCE(sources.DataSource): |
2318 | 51 | |||
2319 | 52 | dsname = 'GCE' | ||
2320 | 53 | |||
2321 | 45 | def __init__(self, sys_cfg, distro, paths): | 54 | def __init__(self, sys_cfg, distro, paths): |
2322 | 46 | sources.DataSource.__init__(self, sys_cfg, distro, paths) | 55 | sources.DataSource.__init__(self, sys_cfg, distro, paths) |
2323 | 56 | self.default_user = None | ||
2324 | 57 | if distro: | ||
2325 | 58 | (users, _groups) = ug_util.normalize_users_groups(sys_cfg, distro) | ||
2326 | 59 | (self.default_user, _user_config) = ug_util.extract_default(users) | ||
2327 | 47 | self.metadata = dict() | 60 | self.metadata = dict() |
2328 | 48 | self.ds_cfg = util.mergemanydict([ | 61 | self.ds_cfg = util.mergemanydict([ |
2329 | 49 | util.get_cfg_by_path(sys_cfg, ["datasource", "GCE"], {}), | 62 | util.get_cfg_by_path(sys_cfg, ["datasource", "GCE"], {}), |
2330 | 50 | BUILTIN_DS_CONFIG]) | 63 | BUILTIN_DS_CONFIG]) |
2331 | 51 | self.metadata_address = self.ds_cfg['metadata_url'] | 64 | self.metadata_address = self.ds_cfg['metadata_url'] |
2332 | 52 | 65 | ||
2334 | 53 | def get_data(self): | 66 | def _get_data(self): |
2335 | 54 | ret = util.log_time( | 67 | ret = util.log_time( |
2336 | 55 | LOG.debug, 'Crawl of GCE metadata service', | 68 | LOG.debug, 'Crawl of GCE metadata service', |
2337 | 56 | read_md, kwargs={'address': self.metadata_address}) | 69 | read_md, kwargs={'address': self.metadata_address}) |
2338 | @@ -67,17 +80,18 @@ class DataSourceGCE(sources.DataSource): | |||
2339 | 67 | 80 | ||
2340 | 68 | @property | 81 | @property |
2341 | 69 | def launch_index(self): | 82 | def launch_index(self): |
2343 | 70 | # GCE does not provide launch_index property | 83 | # GCE does not provide launch_index property. |
2344 | 71 | return None | 84 | return None |
2345 | 72 | 85 | ||
2346 | 73 | def get_instance_id(self): | 86 | def get_instance_id(self): |
2347 | 74 | return self.metadata['instance-id'] | 87 | return self.metadata['instance-id'] |
2348 | 75 | 88 | ||
2349 | 76 | def get_public_ssh_keys(self): | 89 | def get_public_ssh_keys(self): |
2351 | 77 | return self.metadata['public-keys'] | 90 | public_keys_data = self.metadata['public-keys-data'] |
2352 | 91 | return _parse_public_keys(public_keys_data, self.default_user) | ||
2353 | 78 | 92 | ||
2354 | 79 | def get_hostname(self, fqdn=False, resolve_ip=False): | 93 | def get_hostname(self, fqdn=False, resolve_ip=False): |
2356 | 80 | # GCE has long FQDNs and has asked for short hostnames | 94 | # GCE has long FQDNs and has asked for short hostnames. |
2357 | 81 | return self.metadata['local-hostname'].split('.')[0] | 95 | return self.metadata['local-hostname'].split('.')[0] |
2358 | 82 | 96 | ||
2359 | 83 | @property | 97 | @property |
2360 | @@ -89,15 +103,58 @@ class DataSourceGCE(sources.DataSource): | |||
2361 | 89 | return self.availability_zone.rsplit('-', 1)[0] | 103 | return self.availability_zone.rsplit('-', 1)[0] |
2362 | 90 | 104 | ||
2363 | 91 | 105 | ||
2367 | 92 | def _trim_key(public_key): | 106 | def _has_expired(public_key): |
2368 | 93 | # GCE takes sshKeys attribute in the format of '<user>:<public_key>' | 107 | # Check whether an SSH key is expired. Public key input is a single SSH |
2369 | 94 | # so we have to trim each key to remove the username part | 108 | # public key in the GCE specific key format documented here: |
2370 | 109 | # https://cloud.google.com/compute/docs/instances/adding-removing-ssh-keys#sshkeyformat | ||
2371 | 110 | try: | ||
2372 | 111 | # Check for the Google-specific schema identifier. | ||
2373 | 112 | schema, json_str = public_key.split(None, 3)[2:] | ||
2374 | 113 | except (ValueError, AttributeError): | ||
2375 | 114 | return False | ||
2376 | 115 | |||
2377 | 116 | # Do not expire keys if they do not have the expected schema identifier. | ||
2378 | 117 | if schema != 'google-ssh': | ||
2379 | 118 | return False | ||
2380 | 119 | |||
2381 | 120 | try: | ||
2382 | 121 | json_obj = json.loads(json_str) | ||
2383 | 122 | except ValueError: | ||
2384 | 123 | return False | ||
2385 | 124 | |||
2386 | 125 | # Do not expire keys if there is no expiration timestamp. | ||
2387 | 126 | if 'expireOn' not in json_obj: | ||
2388 | 127 | return False | ||
2389 | 128 | |||
2390 | 129 | expire_str = json_obj['expireOn'] | ||
2391 | 130 | format_str = '%Y-%m-%dT%H:%M:%S+0000' | ||
2392 | 95 | try: | 131 | try: |
2398 | 96 | index = public_key.index(':') | 132 | expire_time = datetime.datetime.strptime(expire_str, format_str) |
2399 | 97 | if index > 0: | 133 | except ValueError: |
2400 | 98 | return public_key[(index + 1):] | 134 | return False |
2401 | 99 | except Exception: | 135 | |
2402 | 100 | return public_key | 136 | # Expire the key if and only if we have exceeded the expiration timestamp. |
2403 | 137 | return datetime.datetime.utcnow() > expire_time | ||
2404 | 138 | |||
2405 | 139 | |||
2406 | 140 | def _parse_public_keys(public_keys_data, default_user=None): | ||
2407 | 141 | # Parse the SSH key data for the default user account. Public keys input is | ||
2408 | 142 | # a list containing SSH public keys in the GCE specific key format | ||
2409 | 143 | # documented here: | ||
2410 | 144 | # https://cloud.google.com/compute/docs/instances/adding-removing-ssh-keys#sshkeyformat | ||
2411 | 145 | public_keys = [] | ||
2412 | 146 | if not public_keys_data: | ||
2413 | 147 | return public_keys | ||
2414 | 148 | for public_key in public_keys_data: | ||
2415 | 149 | if not public_key or not all(ord(c) < 128 for c in public_key): | ||
2416 | 150 | continue | ||
2417 | 151 | split_public_key = public_key.split(':', 1) | ||
2418 | 152 | if len(split_public_key) != 2: | ||
2419 | 153 | continue | ||
2420 | 154 | user, key = split_public_key | ||
2421 | 155 | if user in ('cloudinit', default_user) and not _has_expired(key): | ||
2422 | 156 | public_keys.append(key) | ||
2423 | 157 | return public_keys | ||
2424 | 101 | 158 | ||
2425 | 102 | 159 | ||
2426 | 103 | def read_md(address=None, platform_check=True): | 160 | def read_md(address=None, platform_check=True): |
2427 | @@ -113,31 +170,28 @@ def read_md(address=None, platform_check=True): | |||
2428 | 113 | ret['reason'] = "Not running on GCE." | 170 | ret['reason'] = "Not running on GCE." |
2429 | 114 | return ret | 171 | return ret |
2430 | 115 | 172 | ||
2432 | 116 | # if we cannot resolve the metadata server, then no point in trying | 173 | # If we cannot resolve the metadata server, then no point in trying. |
2433 | 117 | if not util.is_resolvable_url(address): | 174 | if not util.is_resolvable_url(address): |
2434 | 118 | LOG.debug("%s is not resolvable", address) | 175 | LOG.debug("%s is not resolvable", address) |
2435 | 119 | ret['reason'] = 'address "%s" is not resolvable' % address | 176 | ret['reason'] = 'address "%s" is not resolvable' % address |
2436 | 120 | return ret | 177 | return ret |
2437 | 121 | 178 | ||
2439 | 122 | # url_map: (our-key, path, required, is_text) | 179 | # url_map: (our-key, path, required, is_text, is_recursive) |
2440 | 123 | url_map = [ | 180 | url_map = [ |
2449 | 124 | ('instance-id', ('instance/id',), True, True), | 181 | ('instance-id', ('instance/id',), True, True, False), |
2450 | 125 | ('availability-zone', ('instance/zone',), True, True), | 182 | ('availability-zone', ('instance/zone',), True, True, False), |
2451 | 126 | ('local-hostname', ('instance/hostname',), True, True), | 183 | ('local-hostname', ('instance/hostname',), True, True, False), |
2452 | 127 | ('public-keys', ('project/attributes/sshKeys', | 184 | ('instance-data', ('instance/attributes',), False, False, True), |
2453 | 128 | 'instance/attributes/ssh-keys'), False, True), | 185 | ('project-data', ('project/attributes',), False, False, True), |
2446 | 129 | ('user-data', ('instance/attributes/user-data',), False, False), | ||
2447 | 130 | ('user-data-encoding', ('instance/attributes/user-data-encoding',), | ||
2448 | 131 | False, True), | ||
2454 | 132 | ] | 186 | ] |
2455 | 133 | 187 | ||
2456 | 134 | metadata_fetcher = GoogleMetadataFetcher(address) | 188 | metadata_fetcher = GoogleMetadataFetcher(address) |
2457 | 135 | md = {} | 189 | md = {} |
2460 | 136 | # iterate over url_map keys to get metadata items | 190 | # Iterate over url_map keys to get metadata items. |
2461 | 137 | for (mkey, paths, required, is_text) in url_map: | 191 | for (mkey, paths, required, is_text, is_recursive) in url_map: |
2462 | 138 | value = None | 192 | value = None |
2463 | 139 | for path in paths: | 193 | for path in paths: |
2465 | 140 | new_value = metadata_fetcher.get_value(path, is_text) | 194 | new_value = metadata_fetcher.get_value(path, is_text, is_recursive) |
2466 | 141 | if new_value is not None: | 195 | if new_value is not None: |
2467 | 142 | value = new_value | 196 | value = new_value |
2468 | 143 | if required and value is None: | 197 | if required and value is None: |
2469 | @@ -146,17 +200,23 @@ def read_md(address=None, platform_check=True): | |||
2470 | 146 | return ret | 200 | return ret |
2471 | 147 | md[mkey] = value | 201 | md[mkey] = value |
2472 | 148 | 202 | ||
2476 | 149 | if md['public-keys']: | 203 | instance_data = json.loads(md['instance-data'] or '{}') |
2477 | 150 | lines = md['public-keys'].splitlines() | 204 | project_data = json.loads(md['project-data'] or '{}') |
2478 | 151 | md['public-keys'] = [_trim_key(k) for k in lines] | 205 | valid_keys = [instance_data.get('sshKeys'), instance_data.get('ssh-keys')] |
2479 | 206 | block_project = instance_data.get('block-project-ssh-keys', '').lower() | ||
2480 | 207 | if block_project != 'true' and not instance_data.get('sshKeys'): | ||
2481 | 208 | valid_keys.append(project_data.get('ssh-keys')) | ||
2482 | 209 | valid_keys.append(project_data.get('sshKeys')) | ||
2483 | 210 | public_keys_data = '\n'.join([key for key in valid_keys if key]) | ||
2484 | 211 | md['public-keys-data'] = public_keys_data.splitlines() | ||
2485 | 152 | 212 | ||
2486 | 153 | if md['availability-zone']: | 213 | if md['availability-zone']: |
2487 | 154 | md['availability-zone'] = md['availability-zone'].split('/')[-1] | 214 | md['availability-zone'] = md['availability-zone'].split('/')[-1] |
2488 | 155 | 215 | ||
2490 | 156 | encoding = md.get('user-data-encoding') | 216 | encoding = instance_data.get('user-data-encoding') |
2491 | 157 | if encoding: | 217 | if encoding: |
2492 | 158 | if encoding == 'base64': | 218 | if encoding == 'base64': |
2494 | 159 | md['user-data'] = b64decode(md['user-data']) | 219 | md['user-data'] = b64decode(instance_data.get('user-data')) |
2495 | 160 | else: | 220 | else: |
2496 | 161 | LOG.warning('unknown user-data-encoding: %s, ignoring', encoding) | 221 | LOG.warning('unknown user-data-encoding: %s, ignoring', encoding) |
2497 | 162 | 222 | ||
2498 | @@ -185,20 +245,19 @@ def platform_reports_gce(): | |||
2499 | 185 | return False | 245 | return False |
2500 | 186 | 246 | ||
2501 | 187 | 247 | ||
2503 | 188 | # Used to match classes to dependencies | 248 | # Used to match classes to dependencies. |
2504 | 189 | datasources = [ | 249 | datasources = [ |
2505 | 190 | (DataSourceGCE, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), | 250 | (DataSourceGCE, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), |
2506 | 191 | ] | 251 | ] |
2507 | 192 | 252 | ||
2508 | 193 | 253 | ||
2510 | 194 | # Return a list of data sources that match this set of dependencies | 254 | # Return a list of data sources that match this set of dependencies. |
2511 | 195 | def get_datasource_list(depends): | 255 | def get_datasource_list(depends): |
2512 | 196 | return sources.list_from_depends(depends, datasources) | 256 | return sources.list_from_depends(depends, datasources) |
2513 | 197 | 257 | ||
2514 | 198 | 258 | ||
2515 | 199 | if __name__ == "__main__": | 259 | if __name__ == "__main__": |
2516 | 200 | import argparse | 260 | import argparse |
2517 | 201 | import json | ||
2518 | 202 | import sys | 261 | import sys |
2519 | 203 | 262 | ||
2520 | 204 | from base64 import b64encode | 263 | from base64 import b64encode |
2521 | @@ -214,7 +273,7 @@ if __name__ == "__main__": | |||
2522 | 214 | data = read_md(address=args.endpoint, platform_check=args.platform_check) | 273 | data = read_md(address=args.endpoint, platform_check=args.platform_check) |
2523 | 215 | if 'user-data' in data: | 274 | if 'user-data' in data: |
2524 | 216 | # user-data is bytes not string like other things. Handle it specially. | 275 | # user-data is bytes not string like other things. Handle it specially. |
2526 | 217 | # if it can be represented as utf-8 then do so. Otherwise print base64 | 276 | # If it can be represented as utf-8 then do so. Otherwise print base64 |
2527 | 218 | # encoded value in the key user-data-b64. | 277 | # encoded value in the key user-data-b64. |
2528 | 219 | try: | 278 | try: |
2529 | 220 | data['user-data'] = data['user-data'].decode() | 279 | data['user-data'] = data['user-data'].decode() |
2530 | @@ -222,7 +281,7 @@ if __name__ == "__main__": | |||
2531 | 222 | sys.stderr.write("User-data cannot be decoded. " | 281 | sys.stderr.write("User-data cannot be decoded. " |
2532 | 223 | "Writing as base64\n") | 282 | "Writing as base64\n") |
2533 | 224 | del data['user-data'] | 283 | del data['user-data'] |
2535 | 225 | # b64encode returns a bytes value. decode to get the string. | 284 | # b64encode returns a bytes value. Decode to get the string. |
2536 | 226 | data['user-data-b64'] = b64encode(data['user-data']).decode() | 285 | data['user-data-b64'] = b64encode(data['user-data']).decode() |
2537 | 227 | 286 | ||
2538 | 228 | print(json.dumps(data, indent=1, sort_keys=True, separators=(',', ': '))) | 287 | print(json.dumps(data, indent=1, sort_keys=True, separators=(',', ': '))) |
2539 | diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py | |||
2540 | index 77df5a5..6ac8863 100644 | |||
2541 | --- a/cloudinit/sources/DataSourceMAAS.py | |||
2542 | +++ b/cloudinit/sources/DataSourceMAAS.py | |||
2543 | @@ -8,6 +8,7 @@ | |||
2544 | 8 | 8 | ||
2545 | 9 | from __future__ import print_function | 9 | from __future__ import print_function |
2546 | 10 | 10 | ||
2547 | 11 | import hashlib | ||
2548 | 11 | import os | 12 | import os |
2549 | 12 | import time | 13 | import time |
2550 | 13 | 14 | ||
2551 | @@ -39,30 +40,28 @@ class DataSourceMAAS(sources.DataSource): | |||
2552 | 39 | hostname | 40 | hostname |
2553 | 40 | vendor-data | 41 | vendor-data |
2554 | 41 | """ | 42 | """ |
2555 | 43 | |||
2556 | 44 | dsname = "MAAS" | ||
2557 | 45 | id_hash = None | ||
2558 | 46 | _oauth_helper = None | ||
2559 | 47 | |||
2560 | 42 | def __init__(self, sys_cfg, distro, paths): | 48 | def __init__(self, sys_cfg, distro, paths): |
2561 | 43 | sources.DataSource.__init__(self, sys_cfg, distro, paths) | 49 | sources.DataSource.__init__(self, sys_cfg, distro, paths) |
2562 | 44 | self.base_url = None | 50 | self.base_url = None |
2563 | 45 | self.seed_dir = os.path.join(paths.seed_dir, 'maas') | 51 | self.seed_dir = os.path.join(paths.seed_dir, 'maas') |
2573 | 46 | self.oauth_helper = self._get_helper() | 52 | self.id_hash = get_id_from_ds_cfg(self.ds_cfg) |
2565 | 47 | |||
2566 | 48 | def _get_helper(self): | ||
2567 | 49 | mcfg = self.ds_cfg | ||
2568 | 50 | # If we are missing token_key, token_secret or consumer_key | ||
2569 | 51 | # then just do non-authed requests | ||
2570 | 52 | for required in ('token_key', 'token_secret', 'consumer_key'): | ||
2571 | 53 | if required not in mcfg: | ||
2572 | 54 | return url_helper.OauthUrlHelper() | ||
2574 | 55 | 53 | ||
2579 | 56 | return url_helper.OauthUrlHelper( | 54 | @property |
2580 | 57 | consumer_key=mcfg['consumer_key'], token_key=mcfg['token_key'], | 55 | def oauth_helper(self): |
2581 | 58 | token_secret=mcfg['token_secret'], | 56 | if not self._oauth_helper: |
2582 | 59 | consumer_secret=mcfg.get('consumer_secret')) | 57 | self._oauth_helper = get_oauth_helper(self.ds_cfg) |
2583 | 58 | return self._oauth_helper | ||
2584 | 60 | 59 | ||
2585 | 61 | def __str__(self): | 60 | def __str__(self): |
2586 | 62 | root = sources.DataSource.__str__(self) | 61 | root = sources.DataSource.__str__(self) |
2587 | 63 | return "%s [%s]" % (root, self.base_url) | 62 | return "%s [%s]" % (root, self.base_url) |
2588 | 64 | 63 | ||
2590 | 65 | def get_data(self): | 64 | def _get_data(self): |
2591 | 66 | mcfg = self.ds_cfg | 65 | mcfg = self.ds_cfg |
2592 | 67 | 66 | ||
2593 | 68 | try: | 67 | try: |
2594 | @@ -144,6 +143,36 @@ class DataSourceMAAS(sources.DataSource): | |||
2595 | 144 | 143 | ||
2596 | 145 | return bool(url) | 144 | return bool(url) |
2597 | 146 | 145 | ||
2598 | 146 | def check_instance_id(self, sys_cfg): | ||
2599 | 147 | """locally check if the current system is the same instance. | ||
2600 | 148 | |||
2601 | 149 | MAAS doesn't provide a real instance-id, and if it did, it is | ||
2602 | 150 | still only available over the network. We need to check based | ||
2603 | 151 | only on local resources. So compute a hash based on Oauth tokens.""" | ||
2604 | 152 | if self.id_hash is None: | ||
2605 | 153 | return False | ||
2606 | 154 | ncfg = util.get_cfg_by_path(sys_cfg, ("datasource", self.dsname), {}) | ||
2607 | 155 | return (self.id_hash == get_id_from_ds_cfg(ncfg)) | ||
2608 | 156 | |||
2609 | 157 | |||
2610 | 158 | def get_oauth_helper(cfg): | ||
2611 | 159 | """Return an oauth helper instance for values in cfg. | ||
2612 | 160 | |||
2613 | 161 | @raises ValueError from OauthUrlHelper if some required fields have | ||
2614 | 162 | true-ish values but others do not.""" | ||
2615 | 163 | keys = ('consumer_key', 'consumer_secret', 'token_key', 'token_secret') | ||
2616 | 164 | kwargs = dict([(r, cfg.get(r)) for r in keys]) | ||
2617 | 165 | return url_helper.OauthUrlHelper(**kwargs) | ||
2618 | 166 | |||
2619 | 167 | |||
2620 | 168 | def get_id_from_ds_cfg(ds_cfg): | ||
2621 | 169 | """Given a config, generate a unique identifier for this node.""" | ||
2622 | 170 | fields = ('consumer_key', 'token_key', 'token_secret') | ||
2623 | 171 | idstr = '\0'.join([ds_cfg.get(f, "") for f in fields]) | ||
2624 | 172 | # store the encoding version as part of the hash in the event | ||
2625 | 173 | # that it ever changed we can compute older versions. | ||
2626 | 174 | return 'v1:' + hashlib.sha256(idstr.encode('utf-8')).hexdigest() | ||
2627 | 175 | |||
2628 | 147 | 176 | ||
2629 | 148 | def read_maas_seed_dir(seed_d): | 177 | def read_maas_seed_dir(seed_d): |
2630 | 149 | if seed_d.startswith("file://"): | 178 | if seed_d.startswith("file://"): |
2631 | @@ -319,7 +348,7 @@ if __name__ == "__main__": | |||
2632 | 319 | sys.stderr.write("Must provide a url or a config with url.\n") | 348 | sys.stderr.write("Must provide a url or a config with url.\n") |
2633 | 320 | sys.exit(1) | 349 | sys.exit(1) |
2634 | 321 | 350 | ||
2636 | 322 | oauth_helper = url_helper.OauthUrlHelper(**creds) | 351 | oauth_helper = get_oauth_helper(creds) |
2637 | 323 | 352 | ||
2638 | 324 | def geturl(url): | 353 | def geturl(url): |
2639 | 325 | # the retry is to ensure that oauth timestamp gets fixed | 354 | # the retry is to ensure that oauth timestamp gets fixed |
2640 | diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py | |||
2641 | index e641244..5d3a8dd 100644 | |||
2642 | --- a/cloudinit/sources/DataSourceNoCloud.py | |||
2643 | +++ b/cloudinit/sources/DataSourceNoCloud.py | |||
2644 | @@ -20,6 +20,9 @@ LOG = logging.getLogger(__name__) | |||
2645 | 20 | 20 | ||
2646 | 21 | 21 | ||
2647 | 22 | class DataSourceNoCloud(sources.DataSource): | 22 | class DataSourceNoCloud(sources.DataSource): |
2648 | 23 | |||
2649 | 24 | dsname = "NoCloud" | ||
2650 | 25 | |||
2651 | 23 | def __init__(self, sys_cfg, distro, paths): | 26 | def __init__(self, sys_cfg, distro, paths): |
2652 | 24 | sources.DataSource.__init__(self, sys_cfg, distro, paths) | 27 | sources.DataSource.__init__(self, sys_cfg, distro, paths) |
2653 | 25 | self.seed = None | 28 | self.seed = None |
2654 | @@ -32,7 +35,7 @@ class DataSourceNoCloud(sources.DataSource): | |||
2655 | 32 | root = sources.DataSource.__str__(self) | 35 | root = sources.DataSource.__str__(self) |
2656 | 33 | return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode) | 36 | return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode) |
2657 | 34 | 37 | ||
2659 | 35 | def get_data(self): | 38 | def _get_data(self): |
2660 | 36 | defaults = { | 39 | defaults = { |
2661 | 37 | "instance-id": "nocloud", | 40 | "instance-id": "nocloud", |
2662 | 38 | "dsmode": self.dsmode, | 41 | "dsmode": self.dsmode, |
2663 | diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py | |||
2664 | index 906bb27..e63a7e3 100644 | |||
2665 | --- a/cloudinit/sources/DataSourceNone.py | |||
2666 | +++ b/cloudinit/sources/DataSourceNone.py | |||
2667 | @@ -11,12 +11,15 @@ LOG = logging.getLogger(__name__) | |||
2668 | 11 | 11 | ||
2669 | 12 | 12 | ||
2670 | 13 | class DataSourceNone(sources.DataSource): | 13 | class DataSourceNone(sources.DataSource): |
2671 | 14 | |||
2672 | 15 | dsname = "None" | ||
2673 | 16 | |||
2674 | 14 | def __init__(self, sys_cfg, distro, paths, ud_proc=None): | 17 | def __init__(self, sys_cfg, distro, paths, ud_proc=None): |
2675 | 15 | sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc) | 18 | sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc) |
2676 | 16 | self.metadata = {} | 19 | self.metadata = {} |
2677 | 17 | self.userdata_raw = '' | 20 | self.userdata_raw = '' |
2678 | 18 | 21 | ||
2680 | 19 | def get_data(self): | 22 | def _get_data(self): |
2681 | 20 | # If the datasource config has any provided 'fallback' | 23 | # If the datasource config has any provided 'fallback' |
2682 | 21 | # userdata or metadata, use it... | 24 | # userdata or metadata, use it... |
2683 | 22 | if 'userdata_raw' in self.ds_cfg: | 25 | if 'userdata_raw' in self.ds_cfg: |
2684 | diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py | |||
2685 | index ccebf11..6e62f98 100644 | |||
2686 | --- a/cloudinit/sources/DataSourceOVF.py | |||
2687 | +++ b/cloudinit/sources/DataSourceOVF.py | |||
2688 | @@ -21,6 +21,8 @@ from cloudinit import util | |||
2689 | 21 | 21 | ||
2690 | 22 | from cloudinit.sources.helpers.vmware.imc.config \ | 22 | from cloudinit.sources.helpers.vmware.imc.config \ |
2691 | 23 | import Config | 23 | import Config |
2692 | 24 | from cloudinit.sources.helpers.vmware.imc.config_custom_script \ | ||
2693 | 25 | import PreCustomScript, PostCustomScript | ||
2694 | 24 | from cloudinit.sources.helpers.vmware.imc.config_file \ | 26 | from cloudinit.sources.helpers.vmware.imc.config_file \ |
2695 | 25 | import ConfigFile | 27 | import ConfigFile |
2696 | 26 | from cloudinit.sources.helpers.vmware.imc.config_nic \ | 28 | from cloudinit.sources.helpers.vmware.imc.config_nic \ |
2697 | @@ -30,7 +32,7 @@ from cloudinit.sources.helpers.vmware.imc.config_passwd \ | |||
2698 | 30 | from cloudinit.sources.helpers.vmware.imc.guestcust_error \ | 32 | from cloudinit.sources.helpers.vmware.imc.guestcust_error \ |
2699 | 31 | import GuestCustErrorEnum | 33 | import GuestCustErrorEnum |
2700 | 32 | from cloudinit.sources.helpers.vmware.imc.guestcust_event \ | 34 | from cloudinit.sources.helpers.vmware.imc.guestcust_event \ |
2702 | 33 | import GuestCustEventEnum | 35 | import GuestCustEventEnum as GuestCustEvent |
2703 | 34 | from cloudinit.sources.helpers.vmware.imc.guestcust_state \ | 36 | from cloudinit.sources.helpers.vmware.imc.guestcust_state \ |
2704 | 35 | import GuestCustStateEnum | 37 | import GuestCustStateEnum |
2705 | 36 | from cloudinit.sources.helpers.vmware.imc.guestcust_util import ( | 38 | from cloudinit.sources.helpers.vmware.imc.guestcust_util import ( |
2706 | @@ -43,6 +45,9 @@ LOG = logging.getLogger(__name__) | |||
2707 | 43 | 45 | ||
2708 | 44 | 46 | ||
2709 | 45 | class DataSourceOVF(sources.DataSource): | 47 | class DataSourceOVF(sources.DataSource): |
2710 | 48 | |||
2711 | 49 | dsname = "OVF" | ||
2712 | 50 | |||
2713 | 46 | def __init__(self, sys_cfg, distro, paths): | 51 | def __init__(self, sys_cfg, distro, paths): |
2714 | 47 | sources.DataSource.__init__(self, sys_cfg, distro, paths) | 52 | sources.DataSource.__init__(self, sys_cfg, distro, paths) |
2715 | 48 | self.seed = None | 53 | self.seed = None |
2716 | @@ -60,7 +65,7 @@ class DataSourceOVF(sources.DataSource): | |||
2717 | 60 | root = sources.DataSource.__str__(self) | 65 | root = sources.DataSource.__str__(self) |
2718 | 61 | return "%s [seed=%s]" % (root, self.seed) | 66 | return "%s [seed=%s]" % (root, self.seed) |
2719 | 62 | 67 | ||
2721 | 63 | def get_data(self): | 68 | def _get_data(self): |
2722 | 64 | found = [] | 69 | found = [] |
2723 | 65 | md = {} | 70 | md = {} |
2724 | 66 | ud = "" | 71 | ud = "" |
2725 | @@ -124,17 +129,31 @@ class DataSourceOVF(sources.DataSource): | |||
2726 | 124 | self._vmware_cust_conf = Config(cf) | 129 | self._vmware_cust_conf = Config(cf) |
2727 | 125 | (md, ud, cfg) = read_vmware_imc(self._vmware_cust_conf) | 130 | (md, ud, cfg) = read_vmware_imc(self._vmware_cust_conf) |
2728 | 126 | self._vmware_nics_to_enable = get_nics_to_enable(nicspath) | 131 | self._vmware_nics_to_enable = get_nics_to_enable(nicspath) |
2731 | 127 | markerid = self._vmware_cust_conf.marker_id | 132 | imcdirpath = os.path.dirname(vmwareImcConfigFilePath) |
2732 | 128 | markerexists = check_marker_exists(markerid) | 133 | product_marker = self._vmware_cust_conf.marker_id |
2733 | 134 | hasmarkerfile = check_marker_exists( | ||
2734 | 135 | product_marker, os.path.join(self.paths.cloud_dir, 'data')) | ||
2735 | 136 | special_customization = product_marker and not hasmarkerfile | ||
2736 | 137 | customscript = self._vmware_cust_conf.custom_script_name | ||
2737 | 129 | except Exception as e: | 138 | except Exception as e: |
2746 | 130 | LOG.debug("Error parsing the customization Config File") | 139 | _raise_error_status( |
2747 | 131 | LOG.exception(e) | 140 | "Error parsing the customization Config File", |
2748 | 132 | set_customization_status( | 141 | e, |
2749 | 133 | GuestCustStateEnum.GUESTCUST_STATE_RUNNING, | 142 | GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, |
2750 | 134 | GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED) | 143 | vmwareImcConfigFilePath) |
2751 | 135 | raise e | 144 | |
2752 | 136 | finally: | 145 | if special_customization: |
2753 | 137 | util.del_dir(os.path.dirname(vmwareImcConfigFilePath)) | 146 | if customscript: |
2754 | 147 | try: | ||
2755 | 148 | precust = PreCustomScript(customscript, imcdirpath) | ||
2756 | 149 | precust.execute() | ||
2757 | 150 | except Exception as e: | ||
2758 | 151 | _raise_error_status( | ||
2759 | 152 | "Error executing pre-customization script", | ||
2760 | 153 | e, | ||
2761 | 154 | GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, | ||
2762 | 155 | vmwareImcConfigFilePath) | ||
2763 | 156 | |||
2764 | 138 | try: | 157 | try: |
2765 | 139 | LOG.debug("Preparing the Network configuration") | 158 | LOG.debug("Preparing the Network configuration") |
2766 | 140 | self._network_config = get_network_config_from_conf( | 159 | self._network_config = get_network_config_from_conf( |
2767 | @@ -143,13 +162,13 @@ class DataSourceOVF(sources.DataSource): | |||
2768 | 143 | True, | 162 | True, |
2769 | 144 | self.distro.osfamily) | 163 | self.distro.osfamily) |
2770 | 145 | except Exception as e: | 164 | except Exception as e: |
2776 | 146 | LOG.exception(e) | 165 | _raise_error_status( |
2777 | 147 | set_customization_status( | 166 | "Error preparing Network Configuration", |
2778 | 148 | GuestCustStateEnum.GUESTCUST_STATE_RUNNING, | 167 | e, |
2779 | 149 | GuestCustEventEnum.GUESTCUST_EVENT_NETWORK_SETUP_FAILED) | 168 | GuestCustEvent.GUESTCUST_EVENT_NETWORK_SETUP_FAILED, |
2780 | 150 | raise e | 169 | vmwareImcConfigFilePath) |
2781 | 151 | 170 | ||
2783 | 152 | if markerid and not markerexists: | 171 | if special_customization: |
2784 | 153 | LOG.debug("Applying password customization") | 172 | LOG.debug("Applying password customization") |
2785 | 154 | pwdConfigurator = PasswordConfigurator() | 173 | pwdConfigurator = PasswordConfigurator() |
2786 | 155 | adminpwd = self._vmware_cust_conf.admin_password | 174 | adminpwd = self._vmware_cust_conf.admin_password |
2787 | @@ -161,27 +180,41 @@ class DataSourceOVF(sources.DataSource): | |||
2788 | 161 | else: | 180 | else: |
2789 | 162 | LOG.debug("Changing password is not needed") | 181 | LOG.debug("Changing password is not needed") |
2790 | 163 | except Exception as e: | 182 | except Exception as e: |
2798 | 164 | LOG.debug("Error applying Password Configuration: %s", e) | 183 | _raise_error_status( |
2799 | 165 | set_customization_status( | 184 | "Error applying Password Configuration", |
2800 | 166 | GuestCustStateEnum.GUESTCUST_STATE_RUNNING, | 185 | e, |
2801 | 167 | GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED) | 186 | GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, |
2802 | 168 | return False | 187 | vmwareImcConfigFilePath) |
2803 | 169 | if markerid: | 188 | |
2804 | 170 | LOG.debug("Handle marker creation") | 189 | if customscript: |
2805 | 190 | try: | ||
2806 | 191 | postcust = PostCustomScript(customscript, imcdirpath) | ||
2807 | 192 | postcust.execute() | ||
2808 | 193 | except Exception as e: | ||
2809 | 194 | _raise_error_status( | ||
2810 | 195 | "Error executing post-customization script", | ||
2811 | 196 | e, | ||
2812 | 197 | GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, | ||
2813 | 198 | vmwareImcConfigFilePath) | ||
2814 | 199 | |||
2815 | 200 | if product_marker: | ||
2816 | 171 | try: | 201 | try: |
2818 | 172 | setup_marker_files(markerid) | 202 | setup_marker_files( |
2819 | 203 | product_marker, | ||
2820 | 204 | os.path.join(self.paths.cloud_dir, 'data')) | ||
2821 | 173 | except Exception as e: | 205 | except Exception as e: |
2827 | 174 | LOG.debug("Error creating marker files: %s", e) | 206 | _raise_error_status( |
2828 | 175 | set_customization_status( | 207 | "Error creating marker files", |
2829 | 176 | GuestCustStateEnum.GUESTCUST_STATE_RUNNING, | 208 | e, |
2830 | 177 | GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED) | 209 | GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, |
2831 | 178 | return False | 210 | vmwareImcConfigFilePath) |
2832 | 179 | 211 | ||
2833 | 180 | self._vmware_cust_found = True | 212 | self._vmware_cust_found = True |
2834 | 181 | found.append('vmware-tools') | 213 | found.append('vmware-tools') |
2835 | 182 | 214 | ||
2836 | 183 | # TODO: Need to set the status to DONE only when the | 215 | # TODO: Need to set the status to DONE only when the |
2837 | 184 | # customization is done successfully. | 216 | # customization is done successfully. |
2838 | 217 | util.del_dir(os.path.dirname(vmwareImcConfigFilePath)) | ||
2839 | 185 | enable_nics(self._vmware_nics_to_enable) | 218 | enable_nics(self._vmware_nics_to_enable) |
2840 | 186 | set_customization_status( | 219 | set_customization_status( |
2841 | 187 | GuestCustStateEnum.GUESTCUST_STATE_DONE, | 220 | GuestCustStateEnum.GUESTCUST_STATE_DONE, |
2842 | @@ -536,31 +569,52 @@ def get_datasource_list(depends): | |||
2843 | 536 | 569 | ||
2844 | 537 | 570 | ||
2845 | 538 | # To check if marker file exists | 571 | # To check if marker file exists |
2847 | 539 | def check_marker_exists(markerid): | 572 | def check_marker_exists(markerid, marker_dir): |
2848 | 540 | """ | 573 | """ |
2849 | 541 | Check the existence of a marker file. | 574 | Check the existence of a marker file. |
2850 | 542 | Presence of marker file determines whether a certain code path is to be | 575 | Presence of marker file determines whether a certain code path is to be |
2851 | 543 | executed. It is needed for partial guest customization in VMware. | 576 | executed. It is needed for partial guest customization in VMware. |
2852 | 577 | @param markerid: is an unique string representing a particular product | ||
2853 | 578 | marker. | ||
2854 | 579 | @param: marker_dir: The directory in which markers exist. | ||
2855 | 544 | """ | 580 | """ |
2856 | 545 | if not markerid: | 581 | if not markerid: |
2857 | 546 | return False | 582 | return False |
2859 | 547 | markerfile = "/.markerfile-" + markerid | 583 | markerfile = os.path.join(marker_dir, ".markerfile-" + markerid + ".txt") |
2860 | 548 | if os.path.exists(markerfile): | 584 | if os.path.exists(markerfile): |
2861 | 549 | return True | 585 | return True |
2862 | 550 | return False | 586 | return False |
2863 | 551 | 587 | ||
2864 | 552 | 588 | ||
2865 | 553 | # Create a marker file | 589 | # Create a marker file |
2867 | 554 | def setup_marker_files(markerid): | 590 | def setup_marker_files(markerid, marker_dir): |
2868 | 555 | """ | 591 | """ |
2869 | 556 | Create a new marker file. | 592 | Create a new marker file. |
2870 | 557 | Marker files are unique to a full customization workflow in VMware | 593 | Marker files are unique to a full customization workflow in VMware |
2871 | 558 | environment. | 594 | environment. |
2872 | 595 | @param markerid: is an unique string representing a particular product | ||
2873 | 596 | marker. | ||
2874 | 597 | @param: marker_dir: The directory in which markers exist. | ||
2875 | 598 | |||
2876 | 559 | """ | 599 | """ |
2881 | 560 | if not markerid: | 600 | LOG.debug("Handle marker creation") |
2882 | 561 | return | 601 | markerfile = os.path.join(marker_dir, ".markerfile-" + markerid + ".txt") |
2883 | 562 | markerfile = "/.markerfile-" + markerid | 602 | for fname in os.listdir(marker_dir): |
2884 | 563 | util.del_file("/.markerfile-*.txt") | 603 | if fname.startswith(".markerfile"): |
2885 | 604 | util.del_file(os.path.join(marker_dir, fname)) | ||
2886 | 564 | open(markerfile, 'w').close() | 605 | open(markerfile, 'w').close() |
2887 | 565 | 606 | ||
2888 | 607 | |||
2889 | 608 | def _raise_error_status(prefix, error, event, config_file): | ||
2890 | 609 | """ | ||
2891 | 610 | Raise error and send customization status to the underlying VMware | ||
2892 | 611 | Virtualization Platform. Also, cleanup the imc directory. | ||
2893 | 612 | """ | ||
2894 | 613 | LOG.debug('%s: %s', prefix, error) | ||
2895 | 614 | set_customization_status( | ||
2896 | 615 | GuestCustStateEnum.GUESTCUST_STATE_RUNNING, | ||
2897 | 616 | event) | ||
2898 | 617 | util.del_dir(os.path.dirname(config_file)) | ||
2899 | 618 | raise error | ||
2900 | 619 | |||
2901 | 566 | # vi: ts=4 expandtab | 620 | # vi: ts=4 expandtab |
2902 | diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py | |||
2903 | index 5fdac19..ce47b6b 100644 | |||
2904 | --- a/cloudinit/sources/DataSourceOpenNebula.py | |||
2905 | +++ b/cloudinit/sources/DataSourceOpenNebula.py | |||
2906 | @@ -12,6 +12,7 @@ | |||
2907 | 12 | # | 12 | # |
2908 | 13 | # This file is part of cloud-init. See LICENSE file for license information. | 13 | # This file is part of cloud-init. See LICENSE file for license information. |
2909 | 14 | 14 | ||
2910 | 15 | import collections | ||
2911 | 15 | import os | 16 | import os |
2912 | 16 | import pwd | 17 | import pwd |
2913 | 17 | import re | 18 | import re |
2914 | @@ -19,6 +20,7 @@ import string | |||
2915 | 19 | 20 | ||
2916 | 20 | from cloudinit import log as logging | 21 | from cloudinit import log as logging |
2917 | 21 | from cloudinit import net | 22 | from cloudinit import net |
2918 | 23 | from cloudinit.net import eni | ||
2919 | 22 | from cloudinit import sources | 24 | from cloudinit import sources |
2920 | 23 | from cloudinit import util | 25 | from cloudinit import util |
2921 | 24 | 26 | ||
2922 | @@ -31,6 +33,9 @@ CONTEXT_DISK_FILES = ["context.sh"] | |||
2923 | 31 | 33 | ||
2924 | 32 | 34 | ||
2925 | 33 | class DataSourceOpenNebula(sources.DataSource): | 35 | class DataSourceOpenNebula(sources.DataSource): |
2926 | 36 | |||
2927 | 37 | dsname = "OpenNebula" | ||
2928 | 38 | |||
2929 | 34 | def __init__(self, sys_cfg, distro, paths): | 39 | def __init__(self, sys_cfg, distro, paths): |
2930 | 35 | sources.DataSource.__init__(self, sys_cfg, distro, paths) | 40 | sources.DataSource.__init__(self, sys_cfg, distro, paths) |
2931 | 36 | self.seed = None | 41 | self.seed = None |
2932 | @@ -40,7 +45,7 @@ class DataSourceOpenNebula(sources.DataSource): | |||
2933 | 40 | root = sources.DataSource.__str__(self) | 45 | root = sources.DataSource.__str__(self) |
2934 | 41 | return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode) | 46 | return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode) |
2935 | 42 | 47 | ||
2937 | 43 | def get_data(self): | 48 | def _get_data(self): |
2938 | 44 | defaults = {"instance-id": DEFAULT_IID} | 49 | defaults = {"instance-id": DEFAULT_IID} |
2939 | 45 | results = None | 50 | results = None |
2940 | 46 | seed = None | 51 | seed = None |
2941 | @@ -86,11 +91,18 @@ class DataSourceOpenNebula(sources.DataSource): | |||
2942 | 86 | return False | 91 | return False |
2943 | 87 | 92 | ||
2944 | 88 | self.seed = seed | 93 | self.seed = seed |
2946 | 89 | self.network_eni = results.get("network_config") | 94 | self.network_eni = results.get('network-interfaces') |
2947 | 90 | self.metadata = md | 95 | self.metadata = md |
2948 | 91 | self.userdata_raw = results.get('userdata') | 96 | self.userdata_raw = results.get('userdata') |
2949 | 92 | return True | 97 | return True |
2950 | 93 | 98 | ||
2951 | 99 | @property | ||
2952 | 100 | def network_config(self): | ||
2953 | 101 | if self.network_eni is not None: | ||
2954 | 102 | return eni.convert_eni_data(self.network_eni) | ||
2955 | 103 | else: | ||
2956 | 104 | return None | ||
2957 | 105 | |||
2958 | 94 | def get_hostname(self, fqdn=False, resolve_ip=None): | 106 | def get_hostname(self, fqdn=False, resolve_ip=None): |
2959 | 95 | if resolve_ip is None: | 107 | if resolve_ip is None: |
2960 | 96 | if self.dsmode == sources.DSMODE_NETWORK: | 108 | if self.dsmode == sources.DSMODE_NETWORK: |
2961 | @@ -113,58 +125,53 @@ class OpenNebulaNetwork(object): | |||
2962 | 113 | self.context = context | 125 | self.context = context |
2963 | 114 | if system_nics_by_mac is None: | 126 | if system_nics_by_mac is None: |
2964 | 115 | system_nics_by_mac = get_physical_nics_by_mac() | 127 | system_nics_by_mac = get_physical_nics_by_mac() |
2966 | 116 | self.ifaces = system_nics_by_mac | 128 | self.ifaces = collections.OrderedDict( |
2967 | 129 | [k for k in sorted(system_nics_by_mac.items(), | ||
2968 | 130 | key=lambda k: net.natural_sort_key(k[1]))]) | ||
2969 | 131 | |||
2970 | 132 | # OpenNebula 4.14+ provide macaddr for ETHX in variable ETH_MAC. | ||
2971 | 133 | # context_devname provides {mac.lower():ETHX, mac2.lower():ETHX} | ||
2972 | 134 | self.context_devname = {} | ||
2973 | 135 | for k, v in context.items(): | ||
2974 | 136 | m = re.match(r'^(.+)_MAC$', k) | ||
2975 | 137 | if m: | ||
2976 | 138 | self.context_devname[v.lower()] = m.group(1) | ||
2977 | 117 | 139 | ||
2978 | 118 | def mac2ip(self, mac): | 140 | def mac2ip(self, mac): |
2981 | 119 | components = mac.split(':')[2:] | 141 | return '.'.join([str(int(c, 16)) for c in mac.split(':')[2:]]) |
2980 | 120 | return [str(int(c, 16)) for c in components] | ||
2982 | 121 | 142 | ||
2989 | 122 | def get_ip(self, dev, components): | 143 | def mac2network(self, mac): |
2990 | 123 | var_name = dev.upper() + '_IP' | 144 | return self.mac2ip(mac).rpartition(".")[0] + ".0" |
2985 | 124 | if var_name in self.context: | ||
2986 | 125 | return self.context[var_name] | ||
2987 | 126 | else: | ||
2988 | 127 | return '.'.join(components) | ||
2991 | 128 | 145 | ||
2998 | 129 | def get_mask(self, dev): | 146 | def get_dns(self, dev): |
2999 | 130 | var_name = dev.upper() + '_MASK' | 147 | return self.get_field(dev, "dns", "").split() |
2994 | 131 | if var_name in self.context: | ||
2995 | 132 | return self.context[var_name] | ||
2996 | 133 | else: | ||
2997 | 134 | return '255.255.255.0' | ||
3000 | 135 | 148 | ||
3007 | 136 | def get_network(self, dev, components): | 149 | def get_domain(self, dev): |
3008 | 137 | var_name = dev.upper() + '_NETWORK' | 150 | return self.get_field(dev, "domain") |
3009 | 138 | if var_name in self.context: | 151 | |
3010 | 139 | return self.context[var_name] | 152 | def get_ip(self, dev, mac): |
3011 | 140 | else: | 153 | return self.get_field(dev, "ip", self.mac2ip(mac)) |
3006 | 141 | return '.'.join(components[:-1]) + '.0' | ||
3012 | 142 | 154 | ||
3013 | 143 | def get_gateway(self, dev): | 155 | def get_gateway(self, dev): |
3019 | 144 | var_name = dev.upper() + '_GATEWAY' | 156 | return self.get_field(dev, "gateway") |
3015 | 145 | if var_name in self.context: | ||
3016 | 146 | return self.context[var_name] | ||
3017 | 147 | else: | ||
3018 | 148 | return None | ||
3020 | 149 | 157 | ||
3027 | 150 | def get_dns(self, dev): | 158 | def get_mask(self, dev): |
3028 | 151 | var_name = dev.upper() + '_DNS' | 159 | return self.get_field(dev, "mask", "255.255.255.0") |
3023 | 152 | if var_name in self.context: | ||
3024 | 153 | return self.context[var_name] | ||
3025 | 154 | else: | ||
3026 | 155 | return None | ||
3029 | 156 | 160 | ||
3036 | 157 | def get_domain(self, dev): | 161 | def get_network(self, dev, mac): |
3037 | 158 | var_name = dev.upper() + '_DOMAIN' | 162 | return self.get_field(dev, "network", self.mac2network(mac)) |
3038 | 159 | if var_name in self.context: | 163 | |
3039 | 160 | return self.context[var_name] | 164 | def get_field(self, dev, name, default=None): |
3040 | 161 | else: | 165 | """return the field name in context for device dev. |
3041 | 162 | return None | 166 | |
3042 | 167 | context stores <dev>_<NAME> (example: eth0_DOMAIN). | ||
3043 | 168 | an empty string for value will return default.""" | ||
3044 | 169 | val = self.context.get('_'.join((dev, name,)).upper()) | ||
3045 | 170 | # allow empty string to return the default. | ||
3046 | 171 | return default if val in (None, "") else val | ||
3047 | 163 | 172 | ||
3048 | 164 | def gen_conf(self): | 173 | def gen_conf(self): |
3052 | 165 | global_dns = [] | 174 | global_dns = self.context.get('DNS', "").split() |
3050 | 166 | if 'DNS' in self.context: | ||
3051 | 167 | global_dns.append(self.context['DNS']) | ||
3053 | 168 | 175 | ||
3054 | 169 | conf = [] | 176 | conf = [] |
3055 | 170 | conf.append('auto lo') | 177 | conf.append('auto lo') |
3056 | @@ -172,29 +179,31 @@ class OpenNebulaNetwork(object): | |||
3057 | 172 | conf.append('') | 179 | conf.append('') |
3058 | 173 | 180 | ||
3059 | 174 | for mac, dev in self.ifaces.items(): | 181 | for mac, dev in self.ifaces.items(): |
3061 | 175 | ip_components = self.mac2ip(mac) | 182 | mac = mac.lower() |
3062 | 183 | |||
3063 | 184 | # c_dev stores name in context 'ETHX' for this device. | ||
3064 | 185 | # dev stores the current system name. | ||
3065 | 186 | c_dev = self.context_devname.get(mac, dev) | ||
3066 | 176 | 187 | ||
3067 | 177 | conf.append('auto ' + dev) | 188 | conf.append('auto ' + dev) |
3068 | 178 | conf.append('iface ' + dev + ' inet static') | 189 | conf.append('iface ' + dev + ' inet static') |
3072 | 179 | conf.append(' address ' + self.get_ip(dev, ip_components)) | 190 | conf.append(' #hwaddress %s' % mac) |
3073 | 180 | conf.append(' network ' + self.get_network(dev, ip_components)) | 191 | conf.append(' address ' + self.get_ip(c_dev, mac)) |
3074 | 181 | conf.append(' netmask ' + self.get_mask(dev)) | 192 | conf.append(' network ' + self.get_network(c_dev, mac)) |
3075 | 193 | conf.append(' netmask ' + self.get_mask(c_dev)) | ||
3076 | 182 | 194 | ||
3078 | 183 | gateway = self.get_gateway(dev) | 195 | gateway = self.get_gateway(c_dev) |
3079 | 184 | if gateway: | 196 | if gateway: |
3080 | 185 | conf.append(' gateway ' + gateway) | 197 | conf.append(' gateway ' + gateway) |
3081 | 186 | 198 | ||
3083 | 187 | domain = self.get_domain(dev) | 199 | domain = self.get_domain(c_dev) |
3084 | 188 | if domain: | 200 | if domain: |
3085 | 189 | conf.append(' dns-search ' + domain) | 201 | conf.append(' dns-search ' + domain) |
3086 | 190 | 202 | ||
3087 | 191 | # add global DNS servers to all interfaces | 203 | # add global DNS servers to all interfaces |
3089 | 192 | dns = self.get_dns(dev) | 204 | dns = self.get_dns(c_dev) |
3090 | 193 | if global_dns or dns: | 205 | if global_dns or dns: |
3095 | 194 | all_dns = global_dns | 206 | conf.append(' dns-nameservers ' + ' '.join(global_dns + dns)) |
3092 | 195 | if dns: | ||
3093 | 196 | all_dns.append(dns) | ||
3094 | 197 | conf.append(' dns-nameservers ' + ' '.join(all_dns)) | ||
3096 | 198 | 207 | ||
3097 | 199 | conf.append('') | 208 | conf.append('') |
3098 | 200 | 209 | ||
3099 | @@ -329,8 +338,9 @@ def read_context_disk_dir(source_dir, asuser=None): | |||
3100 | 329 | try: | 338 | try: |
3101 | 330 | pwd.getpwnam(asuser) | 339 | pwd.getpwnam(asuser) |
3102 | 331 | except KeyError as e: | 340 | except KeyError as e: |
3105 | 332 | raise BrokenContextDiskDir("configured user '%s' " | 341 | raise BrokenContextDiskDir( |
3106 | 333 | "does not exist", asuser) | 342 | "configured user '{user}' does not exist".format( |
3107 | 343 | user=asuser)) | ||
3108 | 334 | try: | 344 | try: |
3109 | 335 | path = os.path.join(source_dir, 'context.sh') | 345 | path = os.path.join(source_dir, 'context.sh') |
3110 | 336 | content = util.load_file(path) | 346 | content = util.load_file(path) |
3111 | diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py | |||
3112 | index b64a7f2..e55a763 100644 | |||
3113 | --- a/cloudinit/sources/DataSourceOpenStack.py | |||
3114 | +++ b/cloudinit/sources/DataSourceOpenStack.py | |||
3115 | @@ -24,6 +24,9 @@ DEFAULT_METADATA = { | |||
3116 | 24 | 24 | ||
3117 | 25 | 25 | ||
3118 | 26 | class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): | 26 | class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): |
3119 | 27 | |||
3120 | 28 | dsname = "OpenStack" | ||
3121 | 29 | |||
3122 | 27 | def __init__(self, sys_cfg, distro, paths): | 30 | def __init__(self, sys_cfg, distro, paths): |
3123 | 28 | super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths) | 31 | super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths) |
3124 | 29 | self.metadata_address = None | 32 | self.metadata_address = None |
3125 | @@ -96,7 +99,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): | |||
3126 | 96 | self.metadata_address = url2base.get(avail_url) | 99 | self.metadata_address = url2base.get(avail_url) |
3127 | 97 | return bool(avail_url) | 100 | return bool(avail_url) |
3128 | 98 | 101 | ||
3130 | 99 | def get_data(self): | 102 | def _get_data(self): |
3131 | 100 | try: | 103 | try: |
3132 | 101 | if not self.wait_for_metadata_service(): | 104 | if not self.wait_for_metadata_service(): |
3133 | 102 | return False | 105 | return False |
3134 | diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py | |||
3135 | index 3a8a8e8..b0b19c9 100644 | |||
3136 | --- a/cloudinit/sources/DataSourceScaleway.py | |||
3137 | +++ b/cloudinit/sources/DataSourceScaleway.py | |||
3138 | @@ -169,6 +169,8 @@ def query_data_api(api_type, api_address, retries, timeout): | |||
3139 | 169 | 169 | ||
3140 | 170 | class DataSourceScaleway(sources.DataSource): | 170 | class DataSourceScaleway(sources.DataSource): |
3141 | 171 | 171 | ||
3142 | 172 | dsname = "Scaleway" | ||
3143 | 173 | |||
3144 | 172 | def __init__(self, sys_cfg, distro, paths): | 174 | def __init__(self, sys_cfg, distro, paths): |
3145 | 173 | super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths) | 175 | super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths) |
3146 | 174 | 176 | ||
3147 | @@ -184,7 +186,7 @@ class DataSourceScaleway(sources.DataSource): | |||
3148 | 184 | self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES)) | 186 | self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES)) |
3149 | 185 | self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT)) | 187 | self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT)) |
3150 | 186 | 188 | ||
3152 | 187 | def get_data(self): | 189 | def _get_data(self): |
3153 | 188 | if not on_scaleway(): | 190 | if not on_scaleway(): |
3154 | 189 | return False | 191 | return False |
3155 | 190 | 192 | ||
3156 | diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py | |||
3157 | index 6c6902f..86bfa5d 100644 | |||
3158 | --- a/cloudinit/sources/DataSourceSmartOS.py | |||
3159 | +++ b/cloudinit/sources/DataSourceSmartOS.py | |||
3160 | @@ -159,6 +159,9 @@ LEGACY_USER_D = "/var/db" | |||
3161 | 159 | 159 | ||
3162 | 160 | 160 | ||
3163 | 161 | class DataSourceSmartOS(sources.DataSource): | 161 | class DataSourceSmartOS(sources.DataSource): |
3164 | 162 | |||
3165 | 163 | dsname = "Joyent" | ||
3166 | 164 | |||
3167 | 162 | _unset = "_unset" | 165 | _unset = "_unset" |
3168 | 163 | smartos_type = _unset | 166 | smartos_type = _unset |
3169 | 164 | md_client = _unset | 167 | md_client = _unset |
3170 | @@ -211,7 +214,7 @@ class DataSourceSmartOS(sources.DataSource): | |||
3171 | 211 | os.rename('/'.join([svc_path, 'provisioning']), | 214 | os.rename('/'.join([svc_path, 'provisioning']), |
3172 | 212 | '/'.join([svc_path, 'provision_success'])) | 215 | '/'.join([svc_path, 'provision_success'])) |
3173 | 213 | 216 | ||
3175 | 214 | def get_data(self): | 217 | def _get_data(self): |
3176 | 215 | self._init() | 218 | self._init() |
3177 | 216 | 219 | ||
3178 | 217 | md = {} | 220 | md = {} |
3179 | diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py | |||
3180 | index 9a43fbe..a05ca2f 100644 | |||
3181 | --- a/cloudinit/sources/__init__.py | |||
3182 | +++ b/cloudinit/sources/__init__.py | |||
3183 | @@ -10,9 +10,11 @@ | |||
3184 | 10 | 10 | ||
3185 | 11 | import abc | 11 | import abc |
3186 | 12 | import copy | 12 | import copy |
3187 | 13 | import json | ||
3188 | 13 | import os | 14 | import os |
3189 | 14 | import six | 15 | import six |
3190 | 15 | 16 | ||
3191 | 17 | from cloudinit.atomic_helper import write_json | ||
3192 | 16 | from cloudinit import importer | 18 | from cloudinit import importer |
3193 | 17 | from cloudinit import log as logging | 19 | from cloudinit import log as logging |
3194 | 18 | from cloudinit import type_utils | 20 | from cloudinit import type_utils |
3195 | @@ -33,6 +35,12 @@ DEP_FILESYSTEM = "FILESYSTEM" | |||
3196 | 33 | DEP_NETWORK = "NETWORK" | 35 | DEP_NETWORK = "NETWORK" |
3197 | 34 | DS_PREFIX = 'DataSource' | 36 | DS_PREFIX = 'DataSource' |
3198 | 35 | 37 | ||
3199 | 38 | # File in which instance meta-data, user-data and vendor-data is written | ||
3200 | 39 | INSTANCE_JSON_FILE = 'instance-data.json' | ||
3201 | 40 | |||
3202 | 41 | # Key which can be provide a cloud's official product name to cloud-init | ||
3203 | 42 | METADATA_CLOUD_NAME_KEY = 'cloud-name' | ||
3204 | 43 | |||
3205 | 36 | LOG = logging.getLogger(__name__) | 44 | LOG = logging.getLogger(__name__) |
3206 | 37 | 45 | ||
3207 | 38 | 46 | ||
3208 | @@ -40,12 +48,39 @@ class DataSourceNotFoundException(Exception): | |||
3209 | 40 | pass | 48 | pass |
3210 | 41 | 49 | ||
3211 | 42 | 50 | ||
3212 | 51 | def process_base64_metadata(metadata, key_path=''): | ||
3213 | 52 | """Strip ci-b64 prefix and return metadata with base64-encoded-keys set.""" | ||
3214 | 53 | md_copy = copy.deepcopy(metadata) | ||
3215 | 54 | md_copy['base64-encoded-keys'] = [] | ||
3216 | 55 | for key, val in metadata.items(): | ||
3217 | 56 | if key_path: | ||
3218 | 57 | sub_key_path = key_path + '/' + key | ||
3219 | 58 | else: | ||
3220 | 59 | sub_key_path = key | ||
3221 | 60 | if isinstance(val, str) and val.startswith('ci-b64:'): | ||
3222 | 61 | md_copy['base64-encoded-keys'].append(sub_key_path) | ||
3223 | 62 | md_copy[key] = val.replace('ci-b64:', '') | ||
3224 | 63 | if isinstance(val, dict): | ||
3225 | 64 | return_val = process_base64_metadata(val, sub_key_path) | ||
3226 | 65 | md_copy['base64-encoded-keys'].extend( | ||
3227 | 66 | return_val.pop('base64-encoded-keys')) | ||
3228 | 67 | md_copy[key] = return_val | ||
3229 | 68 | return md_copy | ||
3230 | 69 | |||
3231 | 70 | |||
3232 | 43 | @six.add_metaclass(abc.ABCMeta) | 71 | @six.add_metaclass(abc.ABCMeta) |
3233 | 44 | class DataSource(object): | 72 | class DataSource(object): |
3234 | 45 | 73 | ||
3235 | 46 | dsmode = DSMODE_NETWORK | 74 | dsmode = DSMODE_NETWORK |
3236 | 47 | default_locale = 'en_US.UTF-8' | 75 | default_locale = 'en_US.UTF-8' |
3237 | 48 | 76 | ||
3238 | 77 | # Datasource name needs to be set by subclasses to determine which | ||
3239 | 78 | # cloud-config datasource key is loaded | ||
3240 | 79 | dsname = '_undef' | ||
3241 | 80 | |||
3242 | 81 | # Cached cloud_name as determined by _get_cloud_name | ||
3243 | 82 | _cloud_name = None | ||
3244 | 83 | |||
3245 | 49 | def __init__(self, sys_cfg, distro, paths, ud_proc=None): | 84 | def __init__(self, sys_cfg, distro, paths, ud_proc=None): |
3246 | 50 | self.sys_cfg = sys_cfg | 85 | self.sys_cfg = sys_cfg |
3247 | 51 | self.distro = distro | 86 | self.distro = distro |
3248 | @@ -56,17 +91,8 @@ class DataSource(object): | |||
3249 | 56 | self.vendordata = None | 91 | self.vendordata = None |
3250 | 57 | self.vendordata_raw = None | 92 | self.vendordata_raw = None |
3251 | 58 | 93 | ||
3263 | 59 | # find the datasource config name. | 94 | self.ds_cfg = util.get_cfg_by_path( |
3264 | 60 | # remove 'DataSource' from classname on front, and remove 'Net' on end. | 95 | self.sys_cfg, ("datasource", self.dsname), {}) |
3254 | 61 | # Both Foo and FooNet sources expect config in cfg['sources']['Foo'] | ||
3255 | 62 | name = type_utils.obj_name(self) | ||
3256 | 63 | if name.startswith(DS_PREFIX): | ||
3257 | 64 | name = name[len(DS_PREFIX):] | ||
3258 | 65 | if name.endswith('Net'): | ||
3259 | 66 | name = name[0:-3] | ||
3260 | 67 | |||
3261 | 68 | self.ds_cfg = util.get_cfg_by_path(self.sys_cfg, | ||
3262 | 69 | ("datasource", name), {}) | ||
3265 | 70 | if not self.ds_cfg: | 96 | if not self.ds_cfg: |
3266 | 71 | self.ds_cfg = {} | 97 | self.ds_cfg = {} |
3267 | 72 | 98 | ||
3268 | @@ -78,6 +104,51 @@ class DataSource(object): | |||
3269 | 78 | def __str__(self): | 104 | def __str__(self): |
3270 | 79 | return type_utils.obj_name(self) | 105 | return type_utils.obj_name(self) |
3271 | 80 | 106 | ||
3272 | 107 | def _get_standardized_metadata(self): | ||
3273 | 108 | """Return a dictionary of standardized metadata keys.""" | ||
3274 | 109 | return {'v1': { | ||
3275 | 110 | 'local-hostname': self.get_hostname(), | ||
3276 | 111 | 'instance-id': self.get_instance_id(), | ||
3277 | 112 | 'cloud-name': self.cloud_name, | ||
3278 | 113 | 'region': self.region, | ||
3279 | 114 | 'availability-zone': self.availability_zone}} | ||
3280 | 115 | |||
3281 | 116 | def get_data(self): | ||
3282 | 117 | """Datasources implement _get_data to setup metadata and userdata_raw. | ||
3283 | 118 | |||
3284 | 119 | Minimally, the datasource should return a boolean True on success. | ||
3285 | 120 | """ | ||
3286 | 121 | return_value = self._get_data() | ||
3287 | 122 | json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE) | ||
3288 | 123 | if not return_value: | ||
3289 | 124 | return return_value | ||
3290 | 125 | |||
3291 | 126 | instance_data = { | ||
3292 | 127 | 'ds': { | ||
3293 | 128 | 'meta-data': self.metadata, | ||
3294 | 129 | 'user-data': self.get_userdata_raw(), | ||
3295 | 130 | 'vendor-data': self.get_vendordata_raw()}} | ||
3296 | 131 | instance_data.update( | ||
3297 | 132 | self._get_standardized_metadata()) | ||
3298 | 133 | try: | ||
3299 | 134 | # Process content base64encoding unserializable values | ||
3300 | 135 | content = util.json_dumps(instance_data) | ||
3301 | 136 | # Strip base64: prefix and return base64-encoded-keys | ||
3302 | 137 | processed_data = process_base64_metadata(json.loads(content)) | ||
3303 | 138 | except TypeError as e: | ||
3304 | 139 | LOG.warning('Error persisting instance-data.json: %s', str(e)) | ||
3305 | 140 | return return_value | ||
3306 | 141 | except UnicodeDecodeError as e: | ||
3307 | 142 | LOG.warning('Error persisting instance-data.json: %s', str(e)) | ||
3308 | 143 | return return_value | ||
3309 | 144 | write_json(json_file, processed_data, mode=0o600) | ||
3310 | 145 | return return_value | ||
3311 | 146 | |||
3312 | 147 | def _get_data(self): | ||
3313 | 148 | raise NotImplementedError( | ||
3314 | 149 | 'Subclasses of DataSource must implement _get_data which' | ||
3315 | 150 | ' sets self.metadata, vendordata_raw and userdata_raw.') | ||
3316 | 151 | |||
3317 | 81 | def get_userdata(self, apply_filter=False): | 152 | def get_userdata(self, apply_filter=False): |
3318 | 82 | if self.userdata is None: | 153 | if self.userdata is None: |
3319 | 83 | self.userdata = self.ud_proc.process(self.get_userdata_raw()) | 154 | self.userdata = self.ud_proc.process(self.get_userdata_raw()) |
3320 | @@ -91,6 +162,34 @@ class DataSource(object): | |||
3321 | 91 | return self.vendordata | 162 | return self.vendordata |
3322 | 92 | 163 | ||
3323 | 93 | @property | 164 | @property |
3324 | 165 | def cloud_name(self): | ||
3325 | 166 | """Return lowercase cloud name as determined by the datasource. | ||
3326 | 167 | |||
3327 | 168 | Datasource can determine or define its own cloud product name in | ||
3328 | 169 | metadata. | ||
3329 | 170 | """ | ||
3330 | 171 | if self._cloud_name: | ||
3331 | 172 | return self._cloud_name | ||
3332 | 173 | if self.metadata and self.metadata.get(METADATA_CLOUD_NAME_KEY): | ||
3333 | 174 | cloud_name = self.metadata.get(METADATA_CLOUD_NAME_KEY) | ||
3334 | 175 | if isinstance(cloud_name, six.string_types): | ||
3335 | 176 | self._cloud_name = cloud_name.lower() | ||
3336 | 177 | LOG.debug( | ||
3337 | 178 | 'Ignoring metadata provided key %s: non-string type %s', | ||
3338 | 179 | METADATA_CLOUD_NAME_KEY, type(cloud_name)) | ||
3339 | 180 | else: | ||
3340 | 181 | self._cloud_name = self._get_cloud_name().lower() | ||
3341 | 182 | return self._cloud_name | ||
3342 | 183 | |||
3343 | 184 | def _get_cloud_name(self): | ||
3344 | 185 | """Return the datasource name as it frequently matches cloud name. | ||
3345 | 186 | |||
3346 | 187 | Should be overridden in subclasses which can run on multiple | ||
3347 | 188 | cloud names, such as DatasourceEc2. | ||
3348 | 189 | """ | ||
3349 | 190 | return self.dsname | ||
3350 | 191 | |||
3351 | 192 | @property | ||
3352 | 94 | def launch_index(self): | 193 | def launch_index(self): |
3353 | 95 | if not self.metadata: | 194 | if not self.metadata: |
3354 | 96 | return None | 195 | return None |
3355 | @@ -161,8 +260,11 @@ class DataSource(object): | |||
3356 | 161 | 260 | ||
3357 | 162 | @property | 261 | @property |
3358 | 163 | def availability_zone(self): | 262 | def availability_zone(self): |
3361 | 164 | return self.metadata.get('availability-zone', | 263 | top_level_az = self.metadata.get( |
3362 | 165 | self.metadata.get('availability_zone')) | 264 | 'availability-zone', self.metadata.get('availability_zone')) |
3363 | 265 | if top_level_az: | ||
3364 | 266 | return top_level_az | ||
3365 | 267 | return self.metadata.get('placement', {}).get('availability-zone') | ||
3366 | 166 | 268 | ||
3367 | 167 | @property | 269 | @property |
3368 | 168 | def region(self): | 270 | def region(self): |
3369 | @@ -346,7 +448,7 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter): | |||
3370 | 346 | # Return an ordered list of classes that match (if any) | 448 | # Return an ordered list of classes that match (if any) |
3371 | 347 | def list_sources(cfg_list, depends, pkg_list): | 449 | def list_sources(cfg_list, depends, pkg_list): |
3372 | 348 | src_list = [] | 450 | src_list = [] |
3374 | 349 | LOG.debug(("Looking for for data source in: %s," | 451 | LOG.debug(("Looking for data source in: %s," |
3375 | 350 | " via packages %s that matches dependencies %s"), | 452 | " via packages %s that matches dependencies %s"), |
3376 | 351 | cfg_list, pkg_list, depends) | 453 | cfg_list, pkg_list, depends) |
3377 | 352 | for ds_name in cfg_list: | 454 | for ds_name in cfg_list: |
3378 | @@ -417,4 +519,5 @@ def list_from_depends(depends, ds_list): | |||
3379 | 417 | ret_list.append(cls) | 519 | ret_list.append(cls) |
3380 | 418 | return ret_list | 520 | return ret_list |
3381 | 419 | 521 | ||
3382 | 522 | |||
3383 | 420 | # vi: ts=4 expandtab | 523 | # vi: ts=4 expandtab |
3384 | diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py | |||
3385 | index 959b1bd..90c12df 100644 | |||
3386 | --- a/cloudinit/sources/helpers/azure.py | |||
3387 | +++ b/cloudinit/sources/helpers/azure.py | |||
3388 | @@ -199,10 +199,10 @@ class WALinuxAgentShim(object): | |||
3389 | 199 | ' </Container>', | 199 | ' </Container>', |
3390 | 200 | '</Health>']) | 200 | '</Health>']) |
3391 | 201 | 201 | ||
3393 | 202 | def __init__(self, fallback_lease_file=None): | 202 | def __init__(self, fallback_lease_file=None, dhcp_options=None): |
3394 | 203 | LOG.debug('WALinuxAgentShim instantiated, fallback_lease_file=%s', | 203 | LOG.debug('WALinuxAgentShim instantiated, fallback_lease_file=%s', |
3395 | 204 | fallback_lease_file) | 204 | fallback_lease_file) |
3397 | 205 | self.dhcpoptions = None | 205 | self.dhcpoptions = dhcp_options |
3398 | 206 | self._endpoint = None | 206 | self._endpoint = None |
3399 | 207 | self.openssl_manager = None | 207 | self.openssl_manager = None |
3400 | 208 | self.values = {} | 208 | self.values = {} |
3401 | @@ -220,7 +220,8 @@ class WALinuxAgentShim(object): | |||
3402 | 220 | @property | 220 | @property |
3403 | 221 | def endpoint(self): | 221 | def endpoint(self): |
3404 | 222 | if self._endpoint is None: | 222 | if self._endpoint is None: |
3406 | 223 | self._endpoint = self.find_endpoint(self.lease_file) | 223 | self._endpoint = self.find_endpoint(self.lease_file, |
3407 | 224 | self.dhcpoptions) | ||
3408 | 224 | return self._endpoint | 225 | return self._endpoint |
3409 | 225 | 226 | ||
3410 | 226 | @staticmethod | 227 | @staticmethod |
3411 | @@ -274,7 +275,8 @@ class WALinuxAgentShim(object): | |||
3412 | 274 | name = os.path.basename(hook_file).replace('.json', '') | 275 | name = os.path.basename(hook_file).replace('.json', '') |
3413 | 275 | dhcp_options[name] = json.loads(util.load_file((hook_file))) | 276 | dhcp_options[name] = json.loads(util.load_file((hook_file))) |
3414 | 276 | except ValueError: | 277 | except ValueError: |
3416 | 277 | raise ValueError("%s is not valid JSON data", hook_file) | 278 | raise ValueError( |
3417 | 279 | '{_file} is not valid JSON data'.format(_file=hook_file)) | ||
3418 | 278 | return dhcp_options | 280 | return dhcp_options |
3419 | 279 | 281 | ||
3420 | 280 | @staticmethod | 282 | @staticmethod |
3421 | @@ -291,10 +293,14 @@ class WALinuxAgentShim(object): | |||
3422 | 291 | return _value | 293 | return _value |
3423 | 292 | 294 | ||
3424 | 293 | @staticmethod | 295 | @staticmethod |
3426 | 294 | def find_endpoint(fallback_lease_file=None): | 296 | def find_endpoint(fallback_lease_file=None, dhcp245=None): |
3427 | 295 | value = None | 297 | value = None |
3430 | 296 | LOG.debug('Finding Azure endpoint from networkd...') | 298 | if dhcp245 is not None: |
3431 | 297 | value = WALinuxAgentShim._networkd_get_value_from_leases() | 299 | value = dhcp245 |
3432 | 300 | LOG.debug("Using Azure Endpoint from dhcp options") | ||
3433 | 301 | if value is None: | ||
3434 | 302 | LOG.debug('Finding Azure endpoint from networkd...') | ||
3435 | 303 | value = WALinuxAgentShim._networkd_get_value_from_leases() | ||
3436 | 298 | if value is None: | 304 | if value is None: |
3437 | 299 | # Option-245 stored in /run/cloud-init/dhclient.hooks/<ifc>.json | 305 | # Option-245 stored in /run/cloud-init/dhclient.hooks/<ifc>.json |
3438 | 300 | # a dhclient exit hook that calls cloud-init-dhclient-hook | 306 | # a dhclient exit hook that calls cloud-init-dhclient-hook |
3439 | @@ -366,8 +372,9 @@ class WALinuxAgentShim(object): | |||
3440 | 366 | LOG.info('Reported ready to Azure fabric.') | 372 | LOG.info('Reported ready to Azure fabric.') |
3441 | 367 | 373 | ||
3442 | 368 | 374 | ||
3445 | 369 | def get_metadata_from_fabric(fallback_lease_file=None): | 375 | def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None): |
3446 | 370 | shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file) | 376 | shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file, |
3447 | 377 | dhcp_options=dhcp_opts) | ||
3448 | 371 | try: | 378 | try: |
3449 | 372 | return shim.register_with_azure_and_fetch_data() | 379 | return shim.register_with_azure_and_fetch_data() |
3450 | 373 | finally: | 380 | finally: |
3451 | diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py | |||
3452 | index 49d441d..2eaeff3 100644 | |||
3453 | --- a/cloudinit/sources/helpers/vmware/imc/config.py | |||
3454 | +++ b/cloudinit/sources/helpers/vmware/imc/config.py | |||
3455 | @@ -100,4 +100,8 @@ class Config(object): | |||
3456 | 100 | """Returns marker id.""" | 100 | """Returns marker id.""" |
3457 | 101 | return self._configFile.get(Config.MARKERID, None) | 101 | return self._configFile.get(Config.MARKERID, None) |
3458 | 102 | 102 | ||
3459 | 103 | @property | ||
3460 | 104 | def custom_script_name(self): | ||
3461 | 105 | """Return the name of custom (pre/post) script.""" | ||
3462 | 106 | return self._configFile.get(Config.CUSTOM_SCRIPT, None) | ||
3463 | 103 | # vi: ts=4 expandtab | 107 | # vi: ts=4 expandtab |
3464 | diff --git a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py | |||
3465 | 104 | new file mode 100644 | 108 | new file mode 100644 |
3466 | index 0000000..a7d4ad9 | |||
3467 | --- /dev/null | |||
3468 | +++ b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py | |||
3469 | @@ -0,0 +1,153 @@ | |||
3470 | 1 | # Copyright (C) 2017 Canonical Ltd. | ||
3471 | 2 | # Copyright (C) 2017 VMware Inc. | ||
3472 | 3 | # | ||
3473 | 4 | # Author: Maitreyee Saikia <msaikia@vmware.com> | ||
3474 | 5 | # | ||
3475 | 6 | # This file is part of cloud-init. See LICENSE file for license information. | ||
3476 | 7 | |||
3477 | 8 | import logging | ||
3478 | 9 | import os | ||
3479 | 10 | import stat | ||
3480 | 11 | from textwrap import dedent | ||
3481 | 12 | |||
3482 | 13 | from cloudinit import util | ||
3483 | 14 | |||
3484 | 15 | LOG = logging.getLogger(__name__) | ||
3485 | 16 | |||
3486 | 17 | |||
3487 | 18 | class CustomScriptNotFound(Exception): | ||
3488 | 19 | pass | ||
3489 | 20 | |||
3490 | 21 | |||
3491 | 22 | class CustomScriptConstant(object): | ||
3492 | 23 | RC_LOCAL = "/etc/rc.local" | ||
3493 | 24 | POST_CUST_TMP_DIR = "/root/.customization" | ||
3494 | 25 | POST_CUST_RUN_SCRIPT_NAME = "post-customize-guest.sh" | ||
3495 | 26 | POST_CUST_RUN_SCRIPT = os.path.join(POST_CUST_TMP_DIR, | ||
3496 | 27 | POST_CUST_RUN_SCRIPT_NAME) | ||
3497 | 28 | POST_REBOOT_PENDING_MARKER = "/.guest-customization-post-reboot-pending" | ||
3498 | 29 | |||
3499 | 30 | |||
3500 | 31 | class RunCustomScript(object): | ||
3501 | 32 | def __init__(self, scriptname, directory): | ||
3502 | 33 | self.scriptname = scriptname | ||
3503 | 34 | self.directory = directory | ||
3504 | 35 | self.scriptpath = os.path.join(directory, scriptname) | ||
3505 | 36 | |||
3506 | 37 | def prepare_script(self): | ||
3507 | 38 | if not os.path.exists(self.scriptpath): | ||
3508 | 39 | raise CustomScriptNotFound("Script %s not found!! " | ||
3509 | 40 | "Cannot execute custom script!" | ||
3510 | 41 | % self.scriptpath) | ||
3511 | 42 | # Strip any CR characters from the decoded script | ||
3512 | 43 | util.load_file(self.scriptpath).replace("\r", "") | ||
3513 | 44 | st = os.stat(self.scriptpath) | ||
3514 | 45 | os.chmod(self.scriptpath, st.st_mode | stat.S_IEXEC) | ||
3515 | 46 | |||
3516 | 47 | |||
3517 | 48 | class PreCustomScript(RunCustomScript): | ||
3518 | 49 | def execute(self): | ||
3519 | 50 | """Executing custom script with precustomization argument.""" | ||
3520 | 51 | LOG.debug("Executing pre-customization script") | ||
3521 | 52 | self.prepare_script() | ||
3522 | 53 | util.subp(["/bin/sh", self.scriptpath, "precustomization"]) | ||
3523 | 54 | |||
3524 | 55 | |||
3525 | 56 | class PostCustomScript(RunCustomScript): | ||
3526 | 57 | def __init__(self, scriptname, directory): | ||
3527 | 58 | super(PostCustomScript, self).__init__(scriptname, directory) | ||
3528 | 59 | # Determine when to run custom script. When postreboot is True, | ||
3529 | 60 | # the user uploaded script will run as part of rc.local after | ||
3530 | 61 | # the machine reboots. This is determined by presence of rclocal. | ||
3531 | 62 | # When postreboot is False, script will run as part of cloud-init. | ||
3532 | 63 | self.postreboot = False | ||
3533 | 64 | |||
3534 | 65 | def _install_post_reboot_agent(self, rclocal): | ||
3535 | 66 | """ | ||
3536 | 67 | Install post-reboot agent for running custom script after reboot. | ||
3537 | 68 | As part of this process, we are editing the rclocal file to run a | ||
3538 | 69 | VMware script, which in turn is resposible for handling the user | ||
3539 | 70 | script. | ||
3540 | 71 | @param: path to rc local. | ||
3541 | 72 | """ | ||
3542 | 73 | LOG.debug("Installing post-reboot customization from %s to %s", | ||
3543 | 74 | self.directory, rclocal) | ||
3544 | 75 | if not self.has_previous_agent(rclocal): | ||
3545 | 76 | LOG.info("Adding post-reboot customization agent to rc.local") | ||
3546 | 77 | new_content = dedent(""" | ||
3547 | 78 | # Run post-reboot guest customization | ||
3548 | 79 | /bin/sh %s | ||
3549 | 80 | exit 0 | ||
3550 | 81 | """) % CustomScriptConstant.POST_CUST_RUN_SCRIPT | ||
3551 | 82 | existing_rclocal = util.load_file(rclocal).replace('exit 0\n', '') | ||
3552 | 83 | st = os.stat(rclocal) | ||
3553 | 84 | # "x" flag should be set | ||
3554 | 85 | mode = st.st_mode | stat.S_IEXEC | ||
3555 | 86 | util.write_file(rclocal, existing_rclocal + new_content, mode) | ||
3556 | 87 | |||
3557 | 88 | else: | ||
3558 | 89 | # We don't need to update rclocal file everytime a customization | ||
3559 | 90 | # is requested. It just needs to be done for the first time. | ||
3560 | 91 | LOG.info("Post-reboot guest customization agent is already " | ||
3561 | 92 | "registered in rc.local") | ||
3562 | 93 | LOG.debug("Installing post-reboot customization agent finished: %s", | ||
3563 | 94 | self.postreboot) | ||
3564 | 95 | |||
3565 | 96 | def has_previous_agent(self, rclocal): | ||
3566 | 97 | searchstring = "# Run post-reboot guest customization" | ||
3567 | 98 | if searchstring in open(rclocal).read(): | ||
3568 | 99 | return True | ||
3569 | 100 | return False | ||
3570 | 101 | |||
3571 | 102 | def find_rc_local(self): | ||
3572 | 103 | """ | ||
3573 | 104 | Determine if rc local is present. | ||
3574 | 105 | """ | ||
3575 | 106 | rclocal = "" | ||
3576 | 107 | if os.path.exists(CustomScriptConstant.RC_LOCAL): | ||
3577 | 108 | LOG.debug("rc.local detected.") | ||
3578 | 109 | # resolving in case of symlink | ||
3579 | 110 | rclocal = os.path.realpath(CustomScriptConstant.RC_LOCAL) | ||
3580 | 111 | LOG.debug("rc.local resolved to %s", rclocal) | ||
3581 | 112 | else: | ||
3582 | 113 | LOG.warning("Can't find rc.local, post-customization " | ||
3583 | 114 | "will be run before reboot") | ||
3584 | 115 | return rclocal | ||
3585 | 116 | |||
3586 | 117 | def install_agent(self): | ||
3587 | 118 | rclocal = self.find_rc_local() | ||
3588 | 119 | if rclocal: | ||
3589 | 120 | self._install_post_reboot_agent(rclocal) | ||
3590 | 121 | self.postreboot = True | ||
3591 | 122 | |||
3592 | 123 | def execute(self): | ||
3593 | 124 | """ | ||
3594 | 125 | This method executes post-customization script before or after reboot | ||
3595 | 126 | based on the presence of rc local. | ||
3596 | 127 | """ | ||
3597 | 128 | self.prepare_script() | ||
3598 | 129 | self.install_agent() | ||
3599 | 130 | if not self.postreboot: | ||
3600 | 131 | LOG.warning("Executing post-customization script inline") | ||
3601 | 132 | util.subp(["/bin/sh", self.scriptpath, "postcustomization"]) | ||
3602 | 133 | else: | ||
3603 | 134 | LOG.debug("Scheduling custom script to run post reboot") | ||
3604 | 135 | if not os.path.isdir(CustomScriptConstant.POST_CUST_TMP_DIR): | ||
3605 | 136 | os.mkdir(CustomScriptConstant.POST_CUST_TMP_DIR) | ||
3606 | 137 | # Script "post-customize-guest.sh" and user uploaded script are | ||
3607 | 138 | # are present in the same directory and needs to copied to a temp | ||
3608 | 139 | # directory to be executed post reboot. User uploaded script is | ||
3609 | 140 | # saved as customize.sh in the temp directory. | ||
3610 | 141 | # post-customize-guest.sh excutes customize.sh after reboot. | ||
3611 | 142 | LOG.debug("Copying post-customization script") | ||
3612 | 143 | util.copy(self.scriptpath, | ||
3613 | 144 | CustomScriptConstant.POST_CUST_TMP_DIR + "/customize.sh") | ||
3614 | 145 | LOG.debug("Copying script to run post-customization script") | ||
3615 | 146 | util.copy( | ||
3616 | 147 | os.path.join(self.directory, | ||
3617 | 148 | CustomScriptConstant.POST_CUST_RUN_SCRIPT_NAME), | ||
3618 | 149 | CustomScriptConstant.POST_CUST_RUN_SCRIPT) | ||
3619 | 150 | LOG.info("Creating post-reboot pending marker") | ||
3620 | 151 | util.ensure_file(CustomScriptConstant.POST_REBOOT_PENDING_MARKER) | ||
3621 | 152 | |||
3622 | 153 | # vi: ts=4 expandtab | ||
3623 | diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py | |||
3624 | index 2fb07c5..2d8900e 100644 | |||
3625 | --- a/cloudinit/sources/helpers/vmware/imc/config_nic.py | |||
3626 | +++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py | |||
3627 | @@ -161,7 +161,7 @@ class NicConfigurator(object): | |||
3628 | 161 | if nic.primary and v4.gateways: | 161 | if nic.primary and v4.gateways: |
3629 | 162 | self.ipv4PrimaryGateway = v4.gateways[0] | 162 | self.ipv4PrimaryGateway = v4.gateways[0] |
3630 | 163 | subnet.update({'gateway': self.ipv4PrimaryGateway}) | 163 | subnet.update({'gateway': self.ipv4PrimaryGateway}) |
3632 | 164 | return [subnet] | 164 | return ([subnet], route_list) |
3633 | 165 | 165 | ||
3634 | 166 | # Add routes if there is no primary nic | 166 | # Add routes if there is no primary nic |
3635 | 167 | if not self._primaryNic: | 167 | if not self._primaryNic: |
3636 | diff --git a/cloudinit/sources/tests/__init__.py b/cloudinit/sources/tests/__init__.py | |||
3637 | 168 | new file mode 100644 | 168 | new file mode 100644 |
3638 | index 0000000..e69de29 | |||
3639 | --- /dev/null | |||
3640 | +++ b/cloudinit/sources/tests/__init__.py | |||
3641 | diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py | |||
3642 | 169 | new file mode 100644 | 169 | new file mode 100644 |
3643 | index 0000000..af15115 | |||
3644 | --- /dev/null | |||
3645 | +++ b/cloudinit/sources/tests/test_init.py | |||
3646 | @@ -0,0 +1,202 @@ | |||
3647 | 1 | # This file is part of cloud-init. See LICENSE file for license information. | ||
3648 | 2 | |||
3649 | 3 | import os | ||
3650 | 4 | import six | ||
3651 | 5 | import stat | ||
3652 | 6 | |||
3653 | 7 | from cloudinit.helpers import Paths | ||
3654 | 8 | from cloudinit.sources import ( | ||
3655 | 9 | INSTANCE_JSON_FILE, DataSource) | ||
3656 | 10 | from cloudinit.tests.helpers import CiTestCase, skipIf | ||
3657 | 11 | from cloudinit.user_data import UserDataProcessor | ||
3658 | 12 | from cloudinit import util | ||
3659 | 13 | |||
3660 | 14 | |||
3661 | 15 | class DataSourceTestSubclassNet(DataSource): | ||
3662 | 16 | |||
3663 | 17 | dsname = 'MyTestSubclass' | ||
3664 | 18 | |||
3665 | 19 | def __init__(self, sys_cfg, distro, paths, custom_userdata=None): | ||
3666 | 20 | super(DataSourceTestSubclassNet, self).__init__( | ||
3667 | 21 | sys_cfg, distro, paths) | ||
3668 | 22 | self._custom_userdata = custom_userdata | ||
3669 | 23 | |||
3670 | 24 | def _get_cloud_name(self): | ||
3671 | 25 | return 'SubclassCloudName' | ||
3672 | 26 | |||
3673 | 27 | def _get_data(self): | ||
3674 | 28 | self.metadata = {'availability_zone': 'myaz', | ||
3675 | 29 | 'local-hostname': 'test-subclass-hostname', | ||
3676 | 30 | 'region': 'myregion'} | ||
3677 | 31 | if self._custom_userdata: | ||
3678 | 32 | self.userdata_raw = self._custom_userdata | ||
3679 | 33 | else: | ||
3680 | 34 | self.userdata_raw = 'userdata_raw' | ||
3681 | 35 | self.vendordata_raw = 'vendordata_raw' | ||
3682 | 36 | return True | ||
3683 | 37 | |||
3684 | 38 | |||
3685 | 39 | class InvalidDataSourceTestSubclassNet(DataSource): | ||
3686 | 40 | pass | ||
3687 | 41 | |||
3688 | 42 | |||
3689 | 43 | class TestDataSource(CiTestCase): | ||
3690 | 44 | |||
3691 | 45 | with_logs = True | ||
3692 | 46 | |||
3693 | 47 | def setUp(self): | ||
3694 | 48 | super(TestDataSource, self).setUp() | ||
3695 | 49 | self.sys_cfg = {'datasource': {'_undef': {'key1': False}}} | ||
3696 | 50 | self.distro = 'distrotest' # generally should be a Distro object | ||
3697 | 51 | self.paths = Paths({}) | ||
3698 | 52 | self.datasource = DataSource(self.sys_cfg, self.distro, self.paths) | ||
3699 | 53 | |||
3700 | 54 | def test_datasource_init(self): | ||
3701 | 55 | """DataSource initializes metadata attributes, ds_cfg and ud_proc.""" | ||
3702 | 56 | self.assertEqual(self.paths, self.datasource.paths) | ||
3703 | 57 | self.assertEqual(self.sys_cfg, self.datasource.sys_cfg) | ||
3704 | 58 | self.assertEqual(self.distro, self.datasource.distro) | ||
3705 | 59 | self.assertIsNone(self.datasource.userdata) | ||
3706 | 60 | self.assertEqual({}, self.datasource.metadata) | ||
3707 | 61 | self.assertIsNone(self.datasource.userdata_raw) | ||
3708 | 62 | self.assertIsNone(self.datasource.vendordata) | ||
3709 | 63 | self.assertIsNone(self.datasource.vendordata_raw) | ||
3710 | 64 | self.assertEqual({'key1': False}, self.datasource.ds_cfg) | ||
3711 | 65 | self.assertIsInstance(self.datasource.ud_proc, UserDataProcessor) | ||
3712 | 66 | |||
3713 | 67 | def test_datasource_init_gets_ds_cfg_using_dsname(self): | ||
3714 | 68 | """Init uses DataSource.dsname for sourcing ds_cfg.""" | ||
3715 | 69 | sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}} | ||
3716 | 70 | distro = 'distrotest' # generally should be a Distro object | ||
3717 | 71 | paths = Paths({}) | ||
3718 | 72 | datasource = DataSourceTestSubclassNet(sys_cfg, distro, paths) | ||
3719 | 73 | self.assertEqual({'key2': False}, datasource.ds_cfg) | ||
3720 | 74 | |||
3721 | 75 | def test_str_is_classname(self): | ||
3722 | 76 | """The string representation of the datasource is the classname.""" | ||
3723 | 77 | self.assertEqual('DataSource', str(self.datasource)) | ||
3724 | 78 | self.assertEqual( | ||
3725 | 79 | 'DataSourceTestSubclassNet', | ||
3726 | 80 | str(DataSourceTestSubclassNet('', '', self.paths))) | ||
3727 | 81 | |||
3728 | 82 | def test__get_data_unimplemented(self): | ||
3729 | 83 | """Raise an error when _get_data is not implemented.""" | ||
3730 | 84 | with self.assertRaises(NotImplementedError) as context_manager: | ||
3731 | 85 | self.datasource.get_data() | ||
3732 | 86 | self.assertIn( | ||
3733 | 87 | 'Subclasses of DataSource must implement _get_data', | ||
3734 | 88 | str(context_manager.exception)) | ||
3735 | 89 | datasource2 = InvalidDataSourceTestSubclassNet( | ||
3736 | 90 | self.sys_cfg, self.distro, self.paths) | ||
3737 | 91 | with self.assertRaises(NotImplementedError) as context_manager: | ||
3738 | 92 | datasource2.get_data() | ||
3739 | 93 | self.assertIn( | ||
3740 | 94 | 'Subclasses of DataSource must implement _get_data', | ||
3741 | 95 | str(context_manager.exception)) | ||
3742 | 96 | |||
3743 | 97 | def test_get_data_calls_subclass__get_data(self): | ||
3744 | 98 | """Datasource.get_data uses the subclass' version of _get_data.""" | ||
3745 | 99 | tmp = self.tmp_dir() | ||
3746 | 100 | datasource = DataSourceTestSubclassNet( | ||
3747 | 101 | self.sys_cfg, self.distro, Paths({'run_dir': tmp})) | ||
3748 | 102 | self.assertTrue(datasource.get_data()) | ||
3749 | 103 | self.assertEqual( | ||
3750 | 104 | {'availability_zone': 'myaz', | ||
3751 | 105 | 'local-hostname': 'test-subclass-hostname', | ||
3752 | 106 | 'region': 'myregion'}, | ||
3753 | 107 | datasource.metadata) | ||
3754 | 108 | self.assertEqual('userdata_raw', datasource.userdata_raw) | ||
3755 | 109 | self.assertEqual('vendordata_raw', datasource.vendordata_raw) | ||
3756 | 110 | |||
3757 | 111 | def test_get_data_write_json_instance_data(self): | ||
3758 | 112 | """get_data writes INSTANCE_JSON_FILE to run_dir as readonly root.""" | ||
3759 | 113 | tmp = self.tmp_dir() | ||
3760 | 114 | datasource = DataSourceTestSubclassNet( | ||
3761 | 115 | self.sys_cfg, self.distro, Paths({'run_dir': tmp})) | ||
3762 | 116 | datasource.get_data() | ||
3763 | 117 | json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) | ||
3764 | 118 | content = util.load_file(json_file) | ||
3765 | 119 | expected = { | ||
3766 | 120 | 'base64-encoded-keys': [], | ||
3767 | 121 | 'v1': { | ||
3768 | 122 | 'availability-zone': 'myaz', | ||
3769 | 123 | 'cloud-name': 'subclasscloudname', | ||
3770 | 124 | 'instance-id': 'iid-datasource', | ||
3771 | 125 | 'local-hostname': 'test-subclass-hostname', | ||
3772 | 126 | 'region': 'myregion'}, | ||
3773 | 127 | 'ds': { | ||
3774 | 128 | 'meta-data': {'availability_zone': 'myaz', | ||
3775 | 129 | 'local-hostname': 'test-subclass-hostname', | ||
3776 | 130 | 'region': 'myregion'}, | ||
3777 | 131 | 'user-data': 'userdata_raw', | ||
3778 | 132 | 'vendor-data': 'vendordata_raw'}} | ||
3779 | 133 | self.assertEqual(expected, util.load_json(content)) | ||
3780 | 134 | file_stat = os.stat(json_file) | ||
3781 | 135 | self.assertEqual(0o600, stat.S_IMODE(file_stat.st_mode)) | ||
3782 | 136 | |||
3783 | 137 | def test_get_data_handles_redacted_unserializable_content(self): | ||
3784 | 138 | """get_data warns unserializable content in INSTANCE_JSON_FILE.""" | ||
3785 | 139 | tmp = self.tmp_dir() | ||
3786 | 140 | datasource = DataSourceTestSubclassNet( | ||
3787 | 141 | self.sys_cfg, self.distro, Paths({'run_dir': tmp}), | ||
3788 | 142 | custom_userdata={'key1': 'val1', 'key2': {'key2.1': self.paths}}) | ||
3789 | 143 | self.assertTrue(datasource.get_data()) | ||
3790 | 144 | json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) | ||
3791 | 145 | content = util.load_file(json_file) | ||
3792 | 146 | expected_userdata = { | ||
3793 | 147 | 'key1': 'val1', | ||
3794 | 148 | 'key2': { | ||
3795 | 149 | 'key2.1': "Warning: redacted unserializable type <class" | ||
3796 | 150 | " 'cloudinit.helpers.Paths'>"}} | ||
3797 | 151 | instance_json = util.load_json(content) | ||
3798 | 152 | self.assertEqual( | ||
3799 | 153 | expected_userdata, instance_json['ds']['user-data']) | ||
3800 | 154 | |||
3801 | 155 | @skipIf(not six.PY3, "json serialization on <= py2.7 handles bytes") | ||
3802 | 156 | def test_get_data_base64encodes_unserializable_bytes(self): | ||
3803 | 157 | """On py3, get_data base64encodes any unserializable content.""" | ||
3804 | 158 | tmp = self.tmp_dir() | ||
3805 | 159 | datasource = DataSourceTestSubclassNet( | ||
3806 | 160 | self.sys_cfg, self.distro, Paths({'run_dir': tmp}), | ||
3807 | 161 | custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}}) | ||
3808 | 162 | self.assertTrue(datasource.get_data()) | ||
3809 | 163 | json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) | ||
3810 | 164 | content = util.load_file(json_file) | ||
3811 | 165 | instance_json = util.load_json(content) | ||
3812 | 166 | self.assertEqual( | ||
3813 | 167 | ['ds/user-data/key2/key2.1'], | ||
3814 | 168 | instance_json['base64-encoded-keys']) | ||
3815 | 169 | self.assertEqual( | ||
3816 | 170 | {'key1': 'val1', 'key2': {'key2.1': 'EjM='}}, | ||
3817 | 171 | instance_json['ds']['user-data']) | ||
3818 | 172 | |||
3819 | 173 | @skipIf(not six.PY2, "json serialization on <= py2.7 handles bytes") | ||
3820 | 174 | def test_get_data_handles_bytes_values(self): | ||
3821 | 175 | """On py2 get_data handles bytes values without having to b64encode.""" | ||
3822 | 176 | tmp = self.tmp_dir() | ||
3823 | 177 | datasource = DataSourceTestSubclassNet( | ||
3824 | 178 | self.sys_cfg, self.distro, Paths({'run_dir': tmp}), | ||
3825 | 179 | custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}}) | ||
3826 | 180 | self.assertTrue(datasource.get_data()) | ||
3827 | 181 | json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) | ||
3828 | 182 | content = util.load_file(json_file) | ||
3829 | 183 | instance_json = util.load_json(content) | ||
3830 | 184 | self.assertEqual([], instance_json['base64-encoded-keys']) | ||
3831 | 185 | self.assertEqual( | ||
3832 | 186 | {'key1': 'val1', 'key2': {'key2.1': '\x123'}}, | ||
3833 | 187 | instance_json['ds']['user-data']) | ||
3834 | 188 | |||
3835 | 189 | @skipIf(not six.PY2, "Only python2 hits UnicodeDecodeErrors on non-utf8") | ||
3836 | 190 | def test_non_utf8_encoding_logs_warning(self): | ||
3837 | 191 | """When non-utf-8 values exist in py2 instance-data is not written.""" | ||
3838 | 192 | tmp = self.tmp_dir() | ||
3839 | 193 | datasource = DataSourceTestSubclassNet( | ||
3840 | 194 | self.sys_cfg, self.distro, Paths({'run_dir': tmp}), | ||
3841 | 195 | custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'ab\xaadef'}}) | ||
3842 | 196 | self.assertTrue(datasource.get_data()) | ||
3843 | 197 | json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) | ||
3844 | 198 | self.assertFalse(os.path.exists(json_file)) | ||
3845 | 199 | self.assertIn( | ||
3846 | 200 | "WARNING: Error persisting instance-data.json: 'utf8' codec can't" | ||
3847 | 201 | " decode byte 0xaa in position 2: invalid start byte", | ||
3848 | 202 | self.logs.getvalue()) | ||
3849 | diff --git a/cloudinit/temp_utils.py b/cloudinit/temp_utils.py | |||
3850 | index 5d7adf7..c98a1b5 100644 | |||
3851 | --- a/cloudinit/temp_utils.py | |||
3852 | +++ b/cloudinit/temp_utils.py | |||
3853 | @@ -28,13 +28,18 @@ def _tempfile_dir_arg(odir=None, needs_exe=False): | |||
3854 | 28 | if odir is not None: | 28 | if odir is not None: |
3855 | 29 | return odir | 29 | return odir |
3856 | 30 | 30 | ||
3857 | 31 | if needs_exe: | ||
3858 | 32 | tdir = _EXE_ROOT_TMPDIR | ||
3859 | 33 | if not os.path.isdir(tdir): | ||
3860 | 34 | os.makedirs(tdir) | ||
3861 | 35 | os.chmod(tdir, 0o1777) | ||
3862 | 36 | return tdir | ||
3863 | 37 | |||
3864 | 31 | global _TMPDIR | 38 | global _TMPDIR |
3865 | 32 | if _TMPDIR: | 39 | if _TMPDIR: |
3866 | 33 | return _TMPDIR | 40 | return _TMPDIR |
3867 | 34 | 41 | ||
3871 | 35 | if needs_exe: | 42 | if os.getuid() == 0: |
3869 | 36 | tdir = _EXE_ROOT_TMPDIR | ||
3870 | 37 | elif os.getuid() == 0: | ||
3872 | 38 | tdir = _ROOT_TMPDIR | 43 | tdir = _ROOT_TMPDIR |
3873 | 39 | else: | 44 | else: |
3874 | 40 | tdir = os.environ.get('TMPDIR', '/tmp') | 45 | tdir = os.environ.get('TMPDIR', '/tmp') |
3875 | diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py | |||
3876 | index 6f88a5b..0080c72 100644 | |||
3877 | --- a/cloudinit/tests/helpers.py | |||
3878 | +++ b/cloudinit/tests/helpers.py | |||
3879 | @@ -3,7 +3,6 @@ | |||
3880 | 3 | from __future__ import print_function | 3 | from __future__ import print_function |
3881 | 4 | 4 | ||
3882 | 5 | import functools | 5 | import functools |
3883 | 6 | import json | ||
3884 | 7 | import logging | 6 | import logging |
3885 | 8 | import os | 7 | import os |
3886 | 9 | import shutil | 8 | import shutil |
3887 | @@ -20,6 +19,11 @@ try: | |||
3888 | 20 | except ImportError: | 19 | except ImportError: |
3889 | 21 | from contextlib2 import ExitStack | 20 | from contextlib2 import ExitStack |
3890 | 22 | 21 | ||
3891 | 22 | try: | ||
3892 | 23 | from configparser import ConfigParser | ||
3893 | 24 | except ImportError: | ||
3894 | 25 | from ConfigParser import ConfigParser | ||
3895 | 26 | |||
3896 | 23 | from cloudinit import helpers as ch | 27 | from cloudinit import helpers as ch |
3897 | 24 | from cloudinit import util | 28 | from cloudinit import util |
3898 | 25 | 29 | ||
3899 | @@ -114,6 +118,16 @@ class TestCase(unittest2.TestCase): | |||
3900 | 114 | self.addCleanup(m.stop) | 118 | self.addCleanup(m.stop) |
3901 | 115 | setattr(self, attr, p) | 119 | setattr(self, attr, p) |
3902 | 116 | 120 | ||
3903 | 121 | # prefer python3 read_file over readfp but allow fallback | ||
3904 | 122 | def parse_and_read(self, contents): | ||
3905 | 123 | parser = ConfigParser() | ||
3906 | 124 | if hasattr(parser, 'read_file'): | ||
3907 | 125 | parser.read_file(contents) | ||
3908 | 126 | elif hasattr(parser, 'readfp'): | ||
3909 | 127 | # pylint: disable=W1505 | ||
3910 | 128 | parser.readfp(contents) | ||
3911 | 129 | return parser | ||
3912 | 130 | |||
3913 | 117 | 131 | ||
3914 | 118 | class CiTestCase(TestCase): | 132 | class CiTestCase(TestCase): |
3915 | 119 | """This is the preferred test case base class unless user | 133 | """This is the preferred test case base class unless user |
3916 | @@ -159,6 +173,18 @@ class CiTestCase(TestCase): | |||
3917 | 159 | dir = self.tmp_dir() | 173 | dir = self.tmp_dir() |
3918 | 160 | return os.path.normpath(os.path.abspath(os.path.join(dir, path))) | 174 | return os.path.normpath(os.path.abspath(os.path.join(dir, path))) |
3919 | 161 | 175 | ||
3920 | 176 | def assertRaisesCodeEqual(self, expected, found): | ||
3921 | 177 | """Handle centos6 having different context manager for assertRaises. | ||
3922 | 178 | with assertRaises(Exception) as e: | ||
3923 | 179 | raise Exception("BOO") | ||
3924 | 180 | |||
3925 | 181 | centos6 will have e.exception as an integer. | ||
3926 | 182 | anything nwere will have it as something with a '.code'""" | ||
3927 | 183 | if isinstance(found, int): | ||
3928 | 184 | self.assertEqual(expected, found) | ||
3929 | 185 | else: | ||
3930 | 186 | self.assertEqual(expected, found.code) | ||
3931 | 187 | |||
3932 | 162 | 188 | ||
3933 | 163 | class ResourceUsingTestCase(CiTestCase): | 189 | class ResourceUsingTestCase(CiTestCase): |
3934 | 164 | 190 | ||
3935 | @@ -337,12 +363,6 @@ def dir2dict(startdir, prefix=None): | |||
3936 | 337 | return flist | 363 | return flist |
3937 | 338 | 364 | ||
3938 | 339 | 365 | ||
3939 | 340 | def json_dumps(data): | ||
3940 | 341 | # print data in nicely formatted json. | ||
3941 | 342 | return json.dumps(data, indent=1, sort_keys=True, | ||
3942 | 343 | separators=(',', ': ')) | ||
3943 | 344 | |||
3944 | 345 | |||
3945 | 346 | def wrap_and_call(prefix, mocks, func, *args, **kwargs): | 366 | def wrap_and_call(prefix, mocks, func, *args, **kwargs): |
3946 | 347 | """ | 367 | """ |
3947 | 348 | call func(args, **kwargs) with mocks applied, then unapplies mocks | 368 | call func(args, **kwargs) with mocks applied, then unapplies mocks |
3948 | @@ -402,4 +422,12 @@ if not hasattr(mock.Mock, 'assert_not_called'): | |||
3949 | 402 | mock.Mock.assert_not_called = __mock_assert_not_called | 422 | mock.Mock.assert_not_called = __mock_assert_not_called |
3950 | 403 | 423 | ||
3951 | 404 | 424 | ||
3952 | 425 | # older unittest2.TestCase (centos6) do not have assertRaisesRegex | ||
3953 | 426 | # And setting assertRaisesRegex to assertRaisesRegexp causes | ||
3954 | 427 | # https://github.com/PyCQA/pylint/issues/1653 . So the workaround. | ||
3955 | 428 | if not hasattr(unittest2.TestCase, 'assertRaisesRegex'): | ||
3956 | 429 | def _tricky(*args, **kwargs): | ||
3957 | 430 | return unittest2.TestCase.assertRaisesRegexp | ||
3958 | 431 | unittest2.TestCase.assertRaisesRegex = _tricky | ||
3959 | 432 | |||
3960 | 405 | # vi: ts=4 expandtab | 433 | # vi: ts=4 expandtab |
3961 | diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py | |||
3962 | 406 | new file mode 100644 | 434 | new file mode 100644 |
3963 | index 0000000..ba6bf69 | |||
3964 | --- /dev/null | |||
3965 | +++ b/cloudinit/tests/test_util.py | |||
3966 | @@ -0,0 +1,46 @@ | |||
3967 | 1 | # This file is part of cloud-init. See LICENSE file for license information. | ||
3968 | 2 | |||
3969 | 3 | """Tests for cloudinit.util""" | ||
3970 | 4 | |||
3971 | 5 | import logging | ||
3972 | 6 | |||
3973 | 7 | import cloudinit.util as util | ||
3974 | 8 | |||
3975 | 9 | from cloudinit.tests.helpers import CiTestCase, mock | ||
3976 | 10 | |||
3977 | 11 | LOG = logging.getLogger(__name__) | ||
3978 | 12 | |||
3979 | 13 | MOUNT_INFO = [ | ||
3980 | 14 | '68 0 8:3 / / ro,relatime shared:1 - btrfs /dev/sda1 ro,attr2,inode64', | ||
3981 | 15 | '153 68 254:0 / /home rw,relatime shared:101 - xfs /dev/sda2 rw,attr2' | ||
3982 | 16 | ] | ||
3983 | 17 | |||
3984 | 18 | |||
3985 | 19 | class TestUtil(CiTestCase): | ||
3986 | 20 | |||
3987 | 21 | def test_parse_mount_info_no_opts_no_arg(self): | ||
3988 | 22 | result = util.parse_mount_info('/home', MOUNT_INFO, LOG) | ||
3989 | 23 | self.assertEqual(('/dev/sda2', 'xfs', '/home'), result) | ||
3990 | 24 | |||
3991 | 25 | def test_parse_mount_info_no_opts_arg(self): | ||
3992 | 26 | result = util.parse_mount_info('/home', MOUNT_INFO, LOG, False) | ||
3993 | 27 | self.assertEqual(('/dev/sda2', 'xfs', '/home'), result) | ||
3994 | 28 | |||
3995 | 29 | def test_parse_mount_info_with_opts(self): | ||
3996 | 30 | result = util.parse_mount_info('/', MOUNT_INFO, LOG, True) | ||
3997 | 31 | self.assertEqual( | ||
3998 | 32 | ('/dev/sda1', 'btrfs', '/', 'ro,relatime'), | ||
3999 | 33 | result | ||
4000 | 34 | ) | ||
4001 | 35 | |||
4002 | 36 | @mock.patch('cloudinit.util.get_mount_info') | ||
4003 | 37 | def test_mount_is_rw(self, m_mount_info): | ||
4004 | 38 | m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'rw,relatime') | ||
4005 | 39 | is_rw = util.mount_is_read_write('/') | ||
4006 | 40 | self.assertEqual(is_rw, True) | ||
4007 | 41 | |||
4008 | 42 | @mock.patch('cloudinit.util.get_mount_info') | ||
4009 | 43 | def test_mount_is_ro(self, m_mount_info): | ||
4010 | 44 | m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'ro,relatime') | ||
4011 | 45 | is_rw = util.mount_is_read_write('/') | ||
4012 | 46 | self.assertEqual(is_rw, False) | ||
4013 | diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py | |||
4014 | index 0e0f5b4..0a5be0b 100644 | |||
4015 | --- a/cloudinit/url_helper.py | |||
4016 | +++ b/cloudinit/url_helper.py | |||
4017 | @@ -273,7 +273,7 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, | |||
4018 | 273 | 273 | ||
4019 | 274 | def wait_for_url(urls, max_wait=None, timeout=None, | 274 | def wait_for_url(urls, max_wait=None, timeout=None, |
4020 | 275 | status_cb=None, headers_cb=None, sleep_time=1, | 275 | status_cb=None, headers_cb=None, sleep_time=1, |
4022 | 276 | exception_cb=None): | 276 | exception_cb=None, sleep_time_cb=None): |
4023 | 277 | """ | 277 | """ |
4024 | 278 | urls: a list of urls to try | 278 | urls: a list of urls to try |
4025 | 279 | max_wait: roughly the maximum time to wait before giving up | 279 | max_wait: roughly the maximum time to wait before giving up |
4026 | @@ -286,6 +286,8 @@ def wait_for_url(urls, max_wait=None, timeout=None, | |||
4027 | 286 | for request. | 286 | for request. |
4028 | 287 | exception_cb: call method with 2 arguments 'msg' (per status_cb) and | 287 | exception_cb: call method with 2 arguments 'msg' (per status_cb) and |
4029 | 288 | 'exception', the exception that occurred. | 288 | 'exception', the exception that occurred. |
4030 | 289 | sleep_time_cb: call method with 2 arguments (response, loop_n) that | ||
4031 | 290 | generates the next sleep time. | ||
4032 | 289 | 291 | ||
4033 | 290 | the idea of this routine is to wait for the EC2 metdata service to | 292 | the idea of this routine is to wait for the EC2 metdata service to |
4034 | 291 | come up. On both Eucalyptus and EC2 we have seen the case where | 293 | come up. On both Eucalyptus and EC2 we have seen the case where |
4035 | @@ -301,6 +303,8 @@ def wait_for_url(urls, max_wait=None, timeout=None, | |||
4036 | 301 | service but is not going to find one. It is possible that the instance | 303 | service but is not going to find one. It is possible that the instance |
4037 | 302 | data host (169.254.169.254) may be firewalled off Entirely for a sytem, | 304 | data host (169.254.169.254) may be firewalled off Entirely for a sytem, |
4038 | 303 | meaning that the connection will block forever unless a timeout is set. | 305 | meaning that the connection will block forever unless a timeout is set. |
4039 | 306 | |||
4040 | 307 | A value of None for max_wait will retry indefinitely. | ||
4041 | 304 | """ | 308 | """ |
4042 | 305 | start_time = time.time() | 309 | start_time = time.time() |
4043 | 306 | 310 | ||
4044 | @@ -311,18 +315,24 @@ def wait_for_url(urls, max_wait=None, timeout=None, | |||
4045 | 311 | status_cb = log_status_cb | 315 | status_cb = log_status_cb |
4046 | 312 | 316 | ||
4047 | 313 | def timeup(max_wait, start_time): | 317 | def timeup(max_wait, start_time): |
4050 | 314 | return ((max_wait <= 0 or max_wait is None) or | 318 | if (max_wait is None): |
4051 | 315 | (time.time() - start_time > max_wait)) | 319 | return False |
4052 | 320 | return ((max_wait <= 0) or (time.time() - start_time > max_wait)) | ||
4053 | 316 | 321 | ||
4054 | 317 | loop_n = 0 | 322 | loop_n = 0 |
4055 | 323 | response = None | ||
4056 | 318 | while True: | 324 | while True: |
4058 | 319 | sleep_time = int(loop_n / 5) + 1 | 325 | if sleep_time_cb is not None: |
4059 | 326 | sleep_time = sleep_time_cb(response, loop_n) | ||
4060 | 327 | else: | ||
4061 | 328 | sleep_time = int(loop_n / 5) + 1 | ||
4062 | 320 | for url in urls: | 329 | for url in urls: |
4063 | 321 | now = time.time() | 330 | now = time.time() |
4064 | 322 | if loop_n != 0: | 331 | if loop_n != 0: |
4065 | 323 | if timeup(max_wait, start_time): | 332 | if timeup(max_wait, start_time): |
4066 | 324 | break | 333 | break |
4068 | 325 | if timeout and (now + timeout > (start_time + max_wait)): | 334 | if (max_wait is not None and |
4069 | 335 | timeout and (now + timeout > (start_time + max_wait))): | ||
4070 | 326 | # shorten timeout to not run way over max_time | 336 | # shorten timeout to not run way over max_time |
4071 | 327 | timeout = int((start_time + max_wait) - now) | 337 | timeout = int((start_time + max_wait) - now) |
4072 | 328 | 338 | ||
4073 | @@ -354,10 +364,11 @@ def wait_for_url(urls, max_wait=None, timeout=None, | |||
4074 | 354 | url_exc = e | 364 | url_exc = e |
4075 | 355 | 365 | ||
4076 | 356 | time_taken = int(time.time() - start_time) | 366 | time_taken = int(time.time() - start_time) |
4081 | 357 | status_msg = "Calling '%s' failed [%s/%ss]: %s" % (url, | 367 | max_wait_str = "%ss" % max_wait if max_wait else "unlimited" |
4082 | 358 | time_taken, | 368 | status_msg = "Calling '%s' failed [%s/%s]: %s" % (url, |
4083 | 359 | max_wait, | 369 | time_taken, |
4084 | 360 | reason) | 370 | max_wait_str, |
4085 | 371 | reason) | ||
4086 | 361 | status_cb(status_msg) | 372 | status_cb(status_msg) |
4087 | 362 | if exception_cb: | 373 | if exception_cb: |
4088 | 363 | # This can be used to alter the headers that will be sent | 374 | # This can be used to alter the headers that will be sent |
4089 | diff --git a/cloudinit/util.py b/cloudinit/util.py | |||
4090 | index 6c014ba..338fb97 100644 | |||
4091 | --- a/cloudinit/util.py | |||
4092 | +++ b/cloudinit/util.py | |||
4093 | @@ -253,12 +253,18 @@ class ProcessExecutionError(IOError): | |||
4094 | 253 | self.exit_code = exit_code | 253 | self.exit_code = exit_code |
4095 | 254 | 254 | ||
4096 | 255 | if not stderr: | 255 | if not stderr: |
4098 | 256 | self.stderr = self.empty_attr | 256 | if stderr is None: |
4099 | 257 | self.stderr = self.empty_attr | ||
4100 | 258 | else: | ||
4101 | 259 | self.stderr = stderr | ||
4102 | 257 | else: | 260 | else: |
4103 | 258 | self.stderr = self._indent_text(stderr) | 261 | self.stderr = self._indent_text(stderr) |
4104 | 259 | 262 | ||
4105 | 260 | if not stdout: | 263 | if not stdout: |
4107 | 261 | self.stdout = self.empty_attr | 264 | if stdout is None: |
4108 | 265 | self.stdout = self.empty_attr | ||
4109 | 266 | else: | ||
4110 | 267 | self.stdout = stdout | ||
4111 | 262 | else: | 268 | else: |
4112 | 263 | self.stdout = self._indent_text(stdout) | 269 | self.stdout = self._indent_text(stdout) |
4113 | 264 | 270 | ||
4114 | @@ -533,15 +539,6 @@ def multi_log(text, console=True, stderr=True, | |||
4115 | 533 | log.log(log_level, text) | 539 | log.log(log_level, text) |
4116 | 534 | 540 | ||
4117 | 535 | 541 | ||
4118 | 536 | def load_json(text, root_types=(dict,)): | ||
4119 | 537 | decoded = json.loads(decode_binary(text)) | ||
4120 | 538 | if not isinstance(decoded, tuple(root_types)): | ||
4121 | 539 | expected_types = ", ".join([str(t) for t in root_types]) | ||
4122 | 540 | raise TypeError("(%s) root types expected, got %s instead" | ||
4123 | 541 | % (expected_types, type(decoded))) | ||
4124 | 542 | return decoded | ||
4125 | 543 | |||
4126 | 544 | |||
4127 | 545 | def is_ipv4(instr): | 542 | def is_ipv4(instr): |
4128 | 546 | """determine if input string is a ipv4 address. return boolean.""" | 543 | """determine if input string is a ipv4 address. return boolean.""" |
4129 | 547 | toks = instr.split('.') | 544 | toks = instr.split('.') |
4130 | @@ -900,17 +897,17 @@ def load_yaml(blob, default=None, allowed=(dict,)): | |||
4131 | 900 | "of length %s with allowed root types %s", | 897 | "of length %s with allowed root types %s", |
4132 | 901 | len(blob), allowed) | 898 | len(blob), allowed) |
4133 | 902 | converted = safeyaml.load(blob) | 899 | converted = safeyaml.load(blob) |
4135 | 903 | if not isinstance(converted, allowed): | 900 | if converted is None: |
4136 | 901 | LOG.debug("loaded blob returned None, returning default.") | ||
4137 | 902 | converted = default | ||
4138 | 903 | elif not isinstance(converted, allowed): | ||
4139 | 904 | # Yes this will just be caught, but thats ok for now... | 904 | # Yes this will just be caught, but thats ok for now... |
4140 | 905 | raise TypeError(("Yaml load allows %s root types," | 905 | raise TypeError(("Yaml load allows %s root types," |
4141 | 906 | " but got %s instead") % | 906 | " but got %s instead") % |
4142 | 907 | (allowed, type_utils.obj_name(converted))) | 907 | (allowed, type_utils.obj_name(converted))) |
4143 | 908 | loaded = converted | 908 | loaded = converted |
4144 | 909 | except (yaml.YAMLError, TypeError, ValueError): | 909 | except (yaml.YAMLError, TypeError, ValueError): |
4149 | 910 | if len(blob) == 0: | 910 | logexc(LOG, "Failed loading yaml blob") |
4146 | 911 | LOG.debug("load_yaml given empty string, returning default") | ||
4147 | 912 | else: | ||
4148 | 913 | logexc(LOG, "Failed loading yaml blob") | ||
4150 | 914 | return loaded | 911 | return loaded |
4151 | 915 | 912 | ||
4152 | 916 | 913 | ||
4153 | @@ -1398,6 +1395,32 @@ def get_output_cfg(cfg, mode): | |||
4154 | 1398 | return ret | 1395 | return ret |
4155 | 1399 | 1396 | ||
4156 | 1400 | 1397 | ||
4157 | 1398 | def get_config_logfiles(cfg): | ||
4158 | 1399 | """Return a list of log file paths from the configuration dictionary. | ||
4159 | 1400 | |||
4160 | 1401 | @param cfg: The cloud-init merged configuration dictionary. | ||
4161 | 1402 | """ | ||
4162 | 1403 | logs = [] | ||
4163 | 1404 | if not cfg or not isinstance(cfg, dict): | ||
4164 | 1405 | return logs | ||
4165 | 1406 | default_log = cfg.get('def_log_file') | ||
4166 | 1407 | if default_log: | ||
4167 | 1408 | logs.append(default_log) | ||
4168 | 1409 | for fmt in get_output_cfg(cfg, None): | ||
4169 | 1410 | if not fmt: | ||
4170 | 1411 | continue | ||
4171 | 1412 | match = re.match('(?P<type>\||>+)\s*(?P<target>.*)', fmt) | ||
4172 | 1413 | if not match: | ||
4173 | 1414 | continue | ||
4174 | 1415 | target = match.group('target') | ||
4175 | 1416 | parts = target.split() | ||
4176 | 1417 | if len(parts) == 1: | ||
4177 | 1418 | logs.append(target) | ||
4178 | 1419 | elif ['tee', '-a'] == parts[:2]: | ||
4179 | 1420 | logs.append(parts[2]) | ||
4180 | 1421 | return list(set(logs)) | ||
4181 | 1422 | |||
4182 | 1423 | |||
4183 | 1401 | def logexc(log, msg, *args): | 1424 | def logexc(log, msg, *args): |
4184 | 1402 | # Setting this here allows this to change | 1425 | # Setting this here allows this to change |
4185 | 1403 | # levels easily (not always error level) | 1426 | # levels easily (not always error level) |
4186 | @@ -1454,7 +1477,31 @@ def ensure_dirs(dirlist, mode=0o755): | |||
4187 | 1454 | ensure_dir(d, mode) | 1477 | ensure_dir(d, mode) |
4188 | 1455 | 1478 | ||
4189 | 1456 | 1479 | ||
4190 | 1480 | def load_json(text, root_types=(dict,)): | ||
4191 | 1481 | decoded = json.loads(decode_binary(text)) | ||
4192 | 1482 | if not isinstance(decoded, tuple(root_types)): | ||
4193 | 1483 | expected_types = ", ".join([str(t) for t in root_types]) | ||
4194 | 1484 | raise TypeError("(%s) root types expected, got %s instead" | ||
4195 | 1485 | % (expected_types, type(decoded))) | ||
4196 | 1486 | return decoded | ||
4197 | 1487 | |||
4198 | 1488 | |||
4199 | 1489 | def json_serialize_default(_obj): | ||
4200 | 1490 | """Handler for types which aren't json serializable.""" | ||
4201 | 1491 | try: | ||
4202 | 1492 | return 'ci-b64:{0}'.format(b64e(_obj)) | ||
4203 | 1493 | except AttributeError: | ||
4204 | 1494 | return 'Warning: redacted unserializable type {0}'.format(type(_obj)) | ||
4205 | 1495 | |||
4206 | 1496 | |||
4207 | 1497 | def json_dumps(data): | ||
4208 | 1498 | """Return data in nicely formatted json.""" | ||
4209 | 1499 | return json.dumps(data, indent=1, sort_keys=True, | ||
4210 | 1500 | separators=(',', ': '), default=json_serialize_default) | ||
4211 | 1501 | |||
4212 | 1502 | |||
4213 | 1457 | def yaml_dumps(obj, explicit_start=True, explicit_end=True): | 1503 | def yaml_dumps(obj, explicit_start=True, explicit_end=True): |
4214 | 1504 | """Return data in nicely formatted yaml.""" | ||
4215 | 1458 | return yaml.safe_dump(obj, | 1505 | return yaml.safe_dump(obj, |
4216 | 1459 | line_break="\n", | 1506 | line_break="\n", |
4217 | 1460 | indent=4, | 1507 | indent=4, |
4218 | @@ -1540,6 +1587,10 @@ def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True): | |||
4219 | 1540 | mtypes = list(mtype) | 1587 | mtypes = list(mtype) |
4220 | 1541 | elif mtype is None: | 1588 | elif mtype is None: |
4221 | 1542 | mtypes = None | 1589 | mtypes = None |
4222 | 1590 | else: | ||
4223 | 1591 | raise TypeError( | ||
4224 | 1592 | 'Unsupported type provided for mtype parameter: {_type}'.format( | ||
4225 | 1593 | _type=type(mtype))) | ||
4226 | 1543 | 1594 | ||
4227 | 1544 | # clean up 'mtype' input a bit based on platform. | 1595 | # clean up 'mtype' input a bit based on platform. |
4228 | 1545 | platsys = platform.system().lower() | 1596 | platsys = platform.system().lower() |
4229 | @@ -1788,58 +1839,60 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, | |||
4230 | 1788 | env = env.copy() | 1839 | env = env.copy() |
4231 | 1789 | env.update(update_env) | 1840 | env.update(update_env) |
4232 | 1790 | 1841 | ||
4236 | 1791 | try: | 1842 | if target_path(target) != "/": |
4237 | 1792 | if target_path(target) != "/": | 1843 | args = ['chroot', target] + list(args) |
4235 | 1793 | args = ['chroot', target] + list(args) | ||
4238 | 1794 | 1844 | ||
4261 | 1795 | if not logstring: | 1845 | if not logstring: |
4262 | 1796 | LOG.debug(("Running command %s with allowed return codes %s" | 1846 | LOG.debug(("Running command %s with allowed return codes %s" |
4263 | 1797 | " (shell=%s, capture=%s)"), args, rcs, shell, capture) | 1847 | " (shell=%s, capture=%s)"), args, rcs, shell, capture) |
4264 | 1798 | else: | 1848 | else: |
4265 | 1799 | LOG.debug(("Running hidden command to protect sensitive " | 1849 | LOG.debug(("Running hidden command to protect sensitive " |
4266 | 1800 | "input/output logstring: %s"), logstring) | 1850 | "input/output logstring: %s"), logstring) |
4267 | 1801 | 1851 | ||
4268 | 1802 | stdin = None | 1852 | stdin = None |
4269 | 1803 | stdout = None | 1853 | stdout = None |
4270 | 1804 | stderr = None | 1854 | stderr = None |
4271 | 1805 | if capture: | 1855 | if capture: |
4272 | 1806 | stdout = subprocess.PIPE | 1856 | stdout = subprocess.PIPE |
4273 | 1807 | stderr = subprocess.PIPE | 1857 | stderr = subprocess.PIPE |
4274 | 1808 | if data is None: | 1858 | if data is None: |
4275 | 1809 | # using devnull assures any reads get null, rather | 1859 | # using devnull assures any reads get null, rather |
4276 | 1810 | # than possibly waiting on input. | 1860 | # than possibly waiting on input. |
4277 | 1811 | devnull_fp = open(os.devnull) | 1861 | devnull_fp = open(os.devnull) |
4278 | 1812 | stdin = devnull_fp | 1862 | stdin = devnull_fp |
4279 | 1813 | else: | 1863 | else: |
4280 | 1814 | stdin = subprocess.PIPE | 1864 | stdin = subprocess.PIPE |
4281 | 1815 | if not isinstance(data, bytes): | 1865 | if not isinstance(data, bytes): |
4282 | 1816 | data = data.encode() | 1866 | data = data.encode() |
4283 | 1817 | 1867 | ||
4284 | 1868 | try: | ||
4285 | 1818 | sp = subprocess.Popen(args, stdout=stdout, | 1869 | sp = subprocess.Popen(args, stdout=stdout, |
4286 | 1819 | stderr=stderr, stdin=stdin, | 1870 | stderr=stderr, stdin=stdin, |
4287 | 1820 | env=env, shell=shell) | 1871 | env=env, shell=shell) |
4288 | 1821 | (out, err) = sp.communicate(data) | 1872 | (out, err) = sp.communicate(data) |
4289 | 1822 | |||
4290 | 1823 | # Just ensure blank instead of none. | ||
4291 | 1824 | if not out and capture: | ||
4292 | 1825 | out = b'' | ||
4293 | 1826 | if not err and capture: | ||
4294 | 1827 | err = b'' | ||
4295 | 1828 | if decode: | ||
4296 | 1829 | def ldecode(data, m='utf-8'): | ||
4297 | 1830 | if not isinstance(data, bytes): | ||
4298 | 1831 | return data | ||
4299 | 1832 | return data.decode(m, decode) | ||
4300 | 1833 | |||
4301 | 1834 | out = ldecode(out) | ||
4302 | 1835 | err = ldecode(err) | ||
4303 | 1836 | except OSError as e: | 1873 | except OSError as e: |
4306 | 1837 | raise ProcessExecutionError(cmd=args, reason=e, | 1874 | raise ProcessExecutionError( |
4307 | 1838 | errno=e.errno) | 1875 | cmd=args, reason=e, errno=e.errno, |
4308 | 1876 | stdout="-" if decode else b"-", | ||
4309 | 1877 | stderr="-" if decode else b"-") | ||
4310 | 1839 | finally: | 1878 | finally: |
4311 | 1840 | if devnull_fp: | 1879 | if devnull_fp: |
4312 | 1841 | devnull_fp.close() | 1880 | devnull_fp.close() |
4313 | 1842 | 1881 | ||
4314 | 1882 | # Just ensure blank instead of none. | ||
4315 | 1883 | if not out and capture: | ||
4316 | 1884 | out = b'' | ||
4317 | 1885 | if not err and capture: | ||
4318 | 1886 | err = b'' | ||
4319 | 1887 | if decode: | ||
4320 | 1888 | def ldecode(data, m='utf-8'): | ||
4321 | 1889 | if not isinstance(data, bytes): | ||
4322 | 1890 | return data | ||
4323 | 1891 | return data.decode(m, decode) | ||
4324 | 1892 | |||
4325 | 1893 | out = ldecode(out) | ||
4326 | 1894 | err = ldecode(err) | ||
4327 | 1895 | |||
4328 | 1843 | rc = sp.returncode | 1896 | rc = sp.returncode |
4329 | 1844 | if rc not in rcs: | 1897 | if rc not in rcs: |
4330 | 1845 | raise ProcessExecutionError(stdout=out, stderr=err, | 1898 | raise ProcessExecutionError(stdout=out, stderr=err, |
4331 | @@ -2010,7 +2063,7 @@ def expand_package_list(version_fmt, pkgs): | |||
4332 | 2010 | return pkglist | 2063 | return pkglist |
4333 | 2011 | 2064 | ||
4334 | 2012 | 2065 | ||
4336 | 2013 | def parse_mount_info(path, mountinfo_lines, log=LOG): | 2066 | def parse_mount_info(path, mountinfo_lines, log=LOG, get_mnt_opts=False): |
4337 | 2014 | """Return the mount information for PATH given the lines from | 2067 | """Return the mount information for PATH given the lines from |
4338 | 2015 | /proc/$$/mountinfo.""" | 2068 | /proc/$$/mountinfo.""" |
4339 | 2016 | 2069 | ||
4340 | @@ -2072,11 +2125,16 @@ def parse_mount_info(path, mountinfo_lines, log=LOG): | |||
4341 | 2072 | 2125 | ||
4342 | 2073 | match_mount_point = mount_point | 2126 | match_mount_point = mount_point |
4343 | 2074 | match_mount_point_elements = mount_point_elements | 2127 | match_mount_point_elements = mount_point_elements |
4344 | 2128 | mount_options = parts[5] | ||
4345 | 2075 | 2129 | ||
4348 | 2076 | if devpth and fs_type and match_mount_point: | 2130 | if get_mnt_opts: |
4349 | 2077 | return (devpth, fs_type, match_mount_point) | 2131 | if devpth and fs_type and match_mount_point and mount_options: |
4350 | 2132 | return (devpth, fs_type, match_mount_point, mount_options) | ||
4351 | 2078 | else: | 2133 | else: |
4353 | 2079 | return None | 2134 | if devpth and fs_type and match_mount_point: |
4354 | 2135 | return (devpth, fs_type, match_mount_point) | ||
4355 | 2136 | |||
4356 | 2137 | return None | ||
4357 | 2080 | 2138 | ||
4358 | 2081 | 2139 | ||
4359 | 2082 | def parse_mtab(path): | 2140 | def parse_mtab(path): |
4360 | @@ -2146,7 +2204,7 @@ def parse_mount(path): | |||
4361 | 2146 | return None | 2204 | return None |
4362 | 2147 | 2205 | ||
4363 | 2148 | 2206 | ||
4365 | 2149 | def get_mount_info(path, log=LOG): | 2207 | def get_mount_info(path, log=LOG, get_mnt_opts=False): |
4366 | 2150 | # Use /proc/$$/mountinfo to find the device where path is mounted. | 2208 | # Use /proc/$$/mountinfo to find the device where path is mounted. |
4367 | 2151 | # This is done because with a btrfs filesystem using os.stat(path) | 2209 | # This is done because with a btrfs filesystem using os.stat(path) |
4368 | 2152 | # does not return the ID of the device. | 2210 | # does not return the ID of the device. |
4369 | @@ -2178,7 +2236,7 @@ def get_mount_info(path, log=LOG): | |||
4370 | 2178 | mountinfo_path = '/proc/%s/mountinfo' % os.getpid() | 2236 | mountinfo_path = '/proc/%s/mountinfo' % os.getpid() |
4371 | 2179 | if os.path.exists(mountinfo_path): | 2237 | if os.path.exists(mountinfo_path): |
4372 | 2180 | lines = load_file(mountinfo_path).splitlines() | 2238 | lines = load_file(mountinfo_path).splitlines() |
4374 | 2181 | return parse_mount_info(path, lines, log) | 2239 | return parse_mount_info(path, lines, log, get_mnt_opts) |
4375 | 2182 | elif os.path.exists("/etc/mtab"): | 2240 | elif os.path.exists("/etc/mtab"): |
4376 | 2183 | return parse_mtab(path) | 2241 | return parse_mtab(path) |
4377 | 2184 | else: | 2242 | else: |
4378 | @@ -2286,7 +2344,8 @@ def pathprefix2dict(base, required=None, optional=None, delim=os.path.sep): | |||
4379 | 2286 | missing.append(f) | 2344 | missing.append(f) |
4380 | 2287 | 2345 | ||
4381 | 2288 | if len(missing): | 2346 | if len(missing): |
4383 | 2289 | raise ValueError("Missing required files: %s", ','.join(missing)) | 2347 | raise ValueError( |
4384 | 2348 | 'Missing required files: {files}'.format(files=','.join(missing))) | ||
4385 | 2290 | 2349 | ||
4386 | 2291 | return ret | 2350 | return ret |
4387 | 2292 | 2351 | ||
4388 | @@ -2563,4 +2622,10 @@ def wait_for_files(flist, maxwait, naplen=.5, log_pre=""): | |||
4389 | 2563 | return need | 2622 | return need |
4390 | 2564 | 2623 | ||
4391 | 2565 | 2624 | ||
4392 | 2625 | def mount_is_read_write(mount_point): | ||
4393 | 2626 | """Check whether the given mount point is mounted rw""" | ||
4394 | 2627 | result = get_mount_info(mount_point, get_mnt_opts=True) | ||
4395 | 2628 | mount_opts = result[-1].split(',') | ||
4396 | 2629 | return mount_opts[0] == 'rw' | ||
4397 | 2630 | |||
4398 | 2566 | # vi: ts=4 expandtab | 2631 | # vi: ts=4 expandtab |
4399 | diff --git a/cloudinit/version.py b/cloudinit/version.py | |||
4400 | index 3255f39..be6262d 100644 | |||
4401 | --- a/cloudinit/version.py | |||
4402 | +++ b/cloudinit/version.py | |||
4403 | @@ -4,7 +4,7 @@ | |||
4404 | 4 | # | 4 | # |
4405 | 5 | # This file is part of cloud-init. See LICENSE file for license information. | 5 | # This file is part of cloud-init. See LICENSE file for license information. |
4406 | 6 | 6 | ||
4408 | 7 | __VERSION__ = "17.1" | 7 | __VERSION__ = "17.2" |
4409 | 8 | 8 | ||
4410 | 9 | FEATURES = [ | 9 | FEATURES = [ |
4411 | 10 | # supports network config version 1 | 10 | # supports network config version 1 |
4412 | diff --git a/debian/changelog b/debian/changelog | |||
4413 | index 03308d7..474c9ed 100644 | |||
4414 | --- a/debian/changelog | |||
4415 | +++ b/debian/changelog | |||
4416 | @@ -1,10 +1,62 @@ | |||
4418 | 1 | cloud-init (17.1-46-g7acc9e68-0ubuntu1~16.04.2) UNRELEASED; urgency=medium | 1 | cloud-init (17.2-30-gf7deaf15-0ubuntu1~16.04.1) xenial-proposed; urgency=medium |
4419 | 2 | 2 | ||
4420 | 3 | * debian/patches/ds-identify-behavior-xenial.patch: refresh patch. | 3 | * debian/patches/ds-identify-behavior-xenial.patch: refresh patch. |
4421 | 4 | * debian/grub-legacy-ec2.install: install post(inst|rm) files correctly. | 4 | * debian/grub-legacy-ec2.install: install post(inst|rm) files correctly. |
4422 | 5 | [Simon Deziel] (LP: #1581416) | 5 | [Simon Deziel] (LP: #1581416) |
4425 | 6 | 6 | * New upstream snapshot (LP: #1747059) | |
4426 | 7 | -- Scott Moser <smoser@ubuntu.com> Tue, 12 Dec 2017 14:29:46 -0500 | 7 | - docs: Update RTD content for cloud-init subcommands. |
4427 | 8 | - OVF: Extend well-known labels to include OVFENV. | ||
4428 | 9 | - Fix potential cases of uninitialized variables. | ||
4429 | 10 | - tests: Collect script output as binary, collect systemd journal, fix lxd. | ||
4430 | 11 | - HACKING.rst: mention setting user name and email via git config. | ||
4431 | 12 | - Azure VM Preprovisioning support. [Douglas Jordan] | ||
4432 | 13 | - tools/read-version: Fix read-version when in a git worktree. | ||
4433 | 14 | - docs: Fix typos in docs and one debug message. [Florian Grignon] | ||
4434 | 15 | - btrfs: support resizing if root is mounted ro. | ||
4435 | 16 | [Robert Schweikert] | ||
4436 | 17 | - OpenNebula: Improve network configuration support. | ||
4437 | 18 | [Akihiko Ota] | ||
4438 | 19 | - tests: Fix EC2 Platform to return console output as bytes. | ||
4439 | 20 | - tests: Fix attempted use of /run in a test case. | ||
4440 | 21 | - GCE: Improvements and changes to ssh key behavior for default user. | ||
4441 | 22 | [Max Illfelder] | ||
4442 | 23 | - subp: make ProcessExecutionError have expected types in stderr, stdout. | ||
4443 | 24 | - tests: when querying ntp server, do not do dns resolution. | ||
4444 | 25 | - Recognize uppercase vfat disk labels [James Penick] | ||
4445 | 26 | - tests: remove zesty as supported OS to test | ||
4446 | 27 | - Do not log warning on config files that represent None. | ||
4447 | 28 | - tests: Use git hash pip dependency format for pylxd. | ||
4448 | 29 | - tests: add integration requirements text file | ||
4449 | 30 | - MAAS: add check_instance_id based off oauth tokens. | ||
4450 | 31 | - tests: update apt sources list test | ||
4451 | 32 | - tests: clean up image properties | ||
4452 | 33 | - tests: rename test ssh keys to avoid appearance of leaking private keys. | ||
4453 | 34 | - tests: Enable AWS EC2 Integration Testing | ||
4454 | 35 | - cli: cloud-init clean handles symlinks | ||
4455 | 36 | - SUSE: Add a basic test of network config rendering. [Robert Schweikert] | ||
4456 | 37 | - Azure: Only bounce network when necessary. | ||
4457 | 38 | - lint: Fix lints seen by pylint version 1.8.1. | ||
4458 | 39 | - cli: Fix error in cloud-init modules --mode=init. | ||
4459 | 40 | - release 17.2 | ||
4460 | 41 | - ds-identify: failure in NoCloud due to unset variable usage. | ||
4461 | 42 | - tests: fix collect_console when not implemented | ||
4462 | 43 | - ec2: Use instance-identity doc for region and instance-id | ||
4463 | 44 | [Andrew Jorgensen] | ||
4464 | 45 | - tests: remove leaked tmp files in config drive tests. | ||
4465 | 46 | - setup.py: Do not include rendered files in SOURCES.txt | ||
4466 | 47 | - SUSE: remove delta in systemd local template for SUSE [Robert Schweikert] | ||
4467 | 48 | - tests: move to using tox 1.7.5 | ||
4468 | 49 | - OVF: improve ds-identify to support finding OVF iso transport. | ||
4469 | 50 | - VMware: Support for user provided pre and post-customization scripts | ||
4470 | 51 | [Maitreyee Saikia] | ||
4471 | 52 | - citest: In NoCloudKVM provide keys via metadata not userdata. | ||
4472 | 53 | - pylint: Update pylint to 1.7.1, run on tests/ and tools and fix | ||
4473 | 54 | complaints. | ||
4474 | 55 | - Datasources: Formalize DataSource get_data and related properties. | ||
4475 | 56 | - cli: Add clean and status subcommands | ||
4476 | 57 | - tests: consolidate platforms into specific dirs | ||
4477 | 58 | |||
4478 | 59 | -- Chad Smith <chad.smith@canonical.com> Fri, 02 Feb 2018 12:37:30 -0700 | ||
4479 | 8 | 60 | ||
4480 | 9 | cloud-init (17.1-46-g7acc9e68-0ubuntu1~16.04.1) xenial-proposed; urgency=medium | 61 | cloud-init (17.1-46-g7acc9e68-0ubuntu1~16.04.1) xenial-proposed; urgency=medium |
4481 | 10 | 62 | ||
4482 | diff --git a/doc/rtd/topics/boot.rst b/doc/rtd/topics/boot.rst | |||
4483 | index 859409a..f2976fd 100644 | |||
4484 | --- a/doc/rtd/topics/boot.rst | |||
4485 | +++ b/doc/rtd/topics/boot.rst | |||
4486 | @@ -1,3 +1,5 @@ | |||
4487 | 1 | .. _boot_stages: | ||
4488 | 2 | |||
4489 | 1 | *********** | 3 | *********** |
4490 | 2 | Boot Stages | 4 | Boot Stages |
4491 | 3 | *********** | 5 | *********** |
4492 | @@ -74,7 +76,7 @@ Network | |||
4493 | 74 | * **systemd service**: ``cloud-init.service`` | 76 | * **systemd service**: ``cloud-init.service`` |
4494 | 75 | * **runs**: After local stage and configured networking is up. | 77 | * **runs**: After local stage and configured networking is up. |
4495 | 76 | * **blocks**: As much of remaining boot as possible. | 78 | * **blocks**: As much of remaining boot as possible. |
4497 | 77 | * **modules**: ``init_modules`` | 79 | * **modules**: ``cloud_init_modules`` in **/etc/cloud/cloud.cfg** |
4498 | 78 | 80 | ||
4499 | 79 | This stage requires all configured networking to be online, as it will fully | 81 | This stage requires all configured networking to be online, as it will fully |
4500 | 80 | process any user-data that is found. Here, processing means: | 82 | process any user-data that is found. Here, processing means: |
4501 | @@ -104,7 +106,7 @@ Config | |||
4502 | 104 | * **systemd service**: ``cloud-config.service`` | 106 | * **systemd service**: ``cloud-config.service`` |
4503 | 105 | * **runs**: After network stage. | 107 | * **runs**: After network stage. |
4504 | 106 | * **blocks**: None. | 108 | * **blocks**: None. |
4506 | 107 | * **modules**: ``config_modules`` | 109 | * **modules**: ``cloud_config_modules`` in **/etc/cloud/cloud.cfg** |
4507 | 108 | 110 | ||
4508 | 109 | This stage runs config modules only. Modules that do not really have an | 111 | This stage runs config modules only. Modules that do not really have an |
4509 | 110 | effect on other stages of boot are run here. | 112 | effect on other stages of boot are run here. |
4510 | @@ -115,7 +117,7 @@ Final | |||
4511 | 115 | * **systemd service**: ``cloud-final.service`` | 117 | * **systemd service**: ``cloud-final.service`` |
4512 | 116 | * **runs**: As final part of boot (traditional "rc.local") | 118 | * **runs**: As final part of boot (traditional "rc.local") |
4513 | 117 | * **blocks**: None. | 119 | * **blocks**: None. |
4515 | 118 | * **modules**: ``final_modules`` | 120 | * **modules**: ``cloud_final_modules`` in **/etc/cloud/cloud.cfg** |
4516 | 119 | 121 | ||
4517 | 120 | This stage runs as late in boot as possible. Any scripts that a user is | 122 | This stage runs as late in boot as possible. Any scripts that a user is |
4518 | 121 | accustomed to running after logging into a system should run correctly here. | 123 | accustomed to running after logging into a system should run correctly here. |
4519 | @@ -125,4 +127,9 @@ Things that run here include | |||
4520 | 125 | * configuration management plugins (puppet, chef, salt-minion) | 127 | * configuration management plugins (puppet, chef, salt-minion) |
4521 | 126 | * user-scripts (including ``runcmd``). | 128 | * user-scripts (including ``runcmd``). |
4522 | 127 | 129 | ||
4523 | 130 | For scripts external to cloud-init looking to wait until cloud-init | ||
4524 | 131 | finished, the ``cloud-init status`` subcommand can help block external | ||
4525 | 132 | scripts until cloud-init is done without having to write your own systemd | ||
4526 | 133 | units dependency chains. See :ref:`cli_status` for more info. | ||
4527 | 134 | |||
4528 | 128 | .. vi: textwidth=78 | 135 | .. vi: textwidth=78 |
4529 | diff --git a/doc/rtd/topics/capabilities.rst b/doc/rtd/topics/capabilities.rst | |||
4530 | index 31eaba5..ae3a0c7 100644 | |||
4531 | --- a/doc/rtd/topics/capabilities.rst | |||
4532 | +++ b/doc/rtd/topics/capabilities.rst | |||
4533 | @@ -1,3 +1,5 @@ | |||
4534 | 1 | .. _capabilities: | ||
4535 | 2 | |||
4536 | 1 | ************ | 3 | ************ |
4537 | 2 | Capabilities | 4 | Capabilities |
4538 | 3 | ************ | 5 | ************ |
4539 | @@ -39,17 +41,19 @@ Currently defined feature names include: | |||
4540 | 39 | see :ref:`network_config_v2` documentation for examples. | 41 | see :ref:`network_config_v2` documentation for examples. |
4541 | 40 | 42 | ||
4542 | 41 | 43 | ||
4544 | 42 | CLI Interface : | 44 | CLI Interface |
4545 | 45 | ============= | ||
4546 | 43 | 46 | ||
4550 | 44 | ``cloud-init features`` will print out each feature supported. If cloud-init | 47 | The command line documentation is accessible on any cloud-init |
4551 | 45 | does not have the features subcommand, it also does not support any features | 48 | installed system: |
4549 | 46 | described in this document. | ||
4552 | 47 | 49 | ||
4553 | 48 | .. code-block:: bash | 50 | .. code-block:: bash |
4554 | 49 | 51 | ||
4555 | 50 | % cloud-init --help | 52 | % cloud-init --help |
4558 | 51 | usage: cloud-init [-h] [--version] [--file FILES] [--debug] [--force] | 53 | usage: cloud-init [-h] [--version] [--file FILES] |
4559 | 52 | {init,modules,query,single,dhclient-hook,features} ... | 54 | [--debug] [--force] |
4560 | 55 | {init,modules,single,dhclient-hook,features,analyze,devel,collect-logs,clean,status} | ||
4561 | 56 | ... | ||
4562 | 53 | 57 | ||
4563 | 54 | optional arguments: | 58 | optional arguments: |
4564 | 55 | -h, --help show this help message and exit | 59 | -h, --help show this help message and exit |
4565 | @@ -61,7 +65,7 @@ described in this document. | |||
4566 | 61 | your own risk) | 65 | your own risk) |
4567 | 62 | 66 | ||
4568 | 63 | Subcommands: | 67 | Subcommands: |
4570 | 64 | {init,modules,single,dhclient-hook,features,analyze,devel} | 68 | {init,modules,single,dhclient-hook,features,analyze,devel,collect-logs,clean,status} |
4571 | 65 | init initializes cloud-init and performs initial modules | 69 | init initializes cloud-init and performs initial modules |
4572 | 66 | modules activates modules using a given configuration key | 70 | modules activates modules using a given configuration key |
4573 | 67 | single run a single module | 71 | single run a single module |
4574 | @@ -69,11 +73,153 @@ described in this document. | |||
4575 | 69 | features list defined features | 73 | features list defined features |
4576 | 70 | analyze Devel tool: Analyze cloud-init logs and data | 74 | analyze Devel tool: Analyze cloud-init logs and data |
4577 | 71 | devel Run development tools | 75 | devel Run development tools |
4578 | 76 | collect-logs Collect and tar all cloud-init debug info | ||
4579 | 77 | clean Remove logs and artifacts so cloud-init can re-run. | ||
4580 | 78 | status Report cloud-init status or wait on completion. | ||
4581 | 79 | |||
4582 | 80 | CLI Subcommand details | ||
4583 | 81 | ====================== | ||
4584 | 82 | |||
4585 | 83 | .. _cli_features: | ||
4586 | 84 | |||
4587 | 85 | cloud-init features | ||
4588 | 86 | ------------------- | ||
4589 | 87 | Print out each feature supported. If cloud-init does not have the | ||
4590 | 88 | features subcommand, it also does not support any features described in | ||
4591 | 89 | this document. | ||
4592 | 90 | |||
4593 | 91 | .. code-block:: bash | ||
4594 | 72 | 92 | ||
4595 | 73 | % cloud-init features | 93 | % cloud-init features |
4596 | 74 | NETWORK_CONFIG_V1 | 94 | NETWORK_CONFIG_V1 |
4597 | 75 | NETWORK_CONFIG_V2 | 95 | NETWORK_CONFIG_V2 |
4598 | 76 | 96 | ||
4599 | 97 | .. _cli_status: | ||
4600 | 98 | |||
4601 | 99 | cloud-init status | ||
4602 | 100 | ----------------- | ||
4603 | 101 | Report whether cloud-init is running, done, disabled or errored. Exits | ||
4604 | 102 | non-zero if an error is detected in cloud-init. | ||
4605 | 103 | * **--long**: Detailed status information. | ||
4606 | 104 | * **--wait**: Block until cloud-init completes. | ||
4607 | 105 | |||
4608 | 106 | .. code-block:: bash | ||
4609 | 107 | |||
4610 | 108 | % cloud-init status --long | ||
4611 | 109 | status: done | ||
4612 | 110 | time: Wed, 17 Jan 2018 20:41:59 +0000 | ||
4613 | 111 | detail: | ||
4614 | 112 | DataSourceNoCloud [seed=/var/lib/cloud/seed/nocloud-net][dsmode=net] | ||
4615 | 113 | |||
4616 | 114 | # Cloud-init running still short versus long options | ||
4617 | 115 | % cloud-init status | ||
4618 | 116 | status: running | ||
4619 | 117 | % cloud-init status --long | ||
4620 | 118 | status: running | ||
4621 | 119 | time: Fri, 26 Jan 2018 21:39:43 +0000 | ||
4622 | 120 | detail: | ||
4623 | 121 | Running in stage: init-local | ||
4624 | 122 | |||
4625 | 123 | .. _cli_collect_logs: | ||
4626 | 124 | |||
4627 | 125 | cloud-init collect-logs | ||
4628 | 126 | ----------------------- | ||
4629 | 127 | Collect and tar cloud-init generated logs, data files and system | ||
4630 | 128 | information for triage. This subcommand is integrated with apport. | ||
4631 | 129 | |||
4632 | 130 | **Note**: Ubuntu users can file bugs with `ubuntu-bug cloud-init` to | ||
4633 | 131 | automatically attach these logs to a bug report. | ||
4634 | 132 | |||
4635 | 133 | Logs collected are: | ||
4636 | 134 | |||
4637 | 135 | * /var/log/cloud-init*log | ||
4638 | 136 | * /run/cloud-init | ||
4639 | 137 | * cloud-init package version | ||
4640 | 138 | * dmesg output | ||
4641 | 139 | * journalctl output | ||
4642 | 140 | * /var/lib/cloud/instance/user-data.txt | ||
4643 | 141 | |||
4644 | 142 | .. _cli_analyze: | ||
4645 | 143 | |||
4646 | 144 | cloud-init analyze | ||
4647 | 145 | ------------------ | ||
4648 | 146 | Get detailed reports of where cloud-init spends most of its time. See | ||
4649 | 147 | :ref:`boot_time_analysis` for more info. | ||
4650 | 148 | |||
4651 | 149 | * **blame** Report ordered by most costly operations. | ||
4652 | 150 | * **dump** Machine-readable JSON dump of all cloud-init tracked events. | ||
4653 | 151 | * **show** show time-ordered report of the cost of operations during each | ||
4654 | 152 | boot stage. | ||
4655 | 153 | |||
4656 | 154 | .. _cli_devel: | ||
4657 | 155 | |||
4658 | 156 | cloud-init devel | ||
4659 | 157 | ---------------- | ||
4660 | 158 | Collection of development tools under active development. These tools will | ||
4661 | 159 | likely be promoted to top-level subcommands when stable. | ||
4662 | 160 | |||
4663 | 161 | * ``cloud-init devel schema``: A **#cloud-config** format and schema | ||
4664 | 162 | validator. It accepts a cloud-config yaml file and annotates potential | ||
4665 | 163 | schema errors locally without the need for deployment. Schema | ||
4666 | 164 | validation is work in progress and supports a subset of cloud-config | ||
4667 | 165 | modules. | ||
4668 | 166 | |||
4669 | 167 | .. _cli_clean: | ||
4670 | 168 | |||
4671 | 169 | cloud-init clean | ||
4672 | 170 | ---------------- | ||
4673 | 171 | Remove cloud-init artifacts from /var/lib/cloud and optionally reboot the | ||
4674 | 172 | machine so cloud-init re-runs all stages as it did on first boot. | ||
4675 | 173 | |||
4676 | 174 | * **--logs**: Optionally remove /var/log/cloud-init*log files. | ||
4677 | 175 | * **--reboot**: Reboot the system after removing artifacts. | ||
4678 | 176 | |||
4679 | 177 | .. _cli_init: | ||
4680 | 178 | |||
4681 | 179 | cloud-init init | ||
4682 | 180 | --------------- | ||
4683 | 181 | Generally run by OS init systems to execute cloud-init's stages | ||
4684 | 182 | *init* and *init-local*. See :ref:`boot_stages` for more info. | ||
4685 | 183 | Can be run on the commandline, but is generally gated to run only once | ||
4686 | 184 | due to semaphores in **/var/lib/cloud/instance/sem/** and | ||
4687 | 185 | **/var/lib/cloud/sem**. | ||
4688 | 186 | |||
4689 | 187 | * **--local**: Run *init-local* stage instead of *init*. | ||
4690 | 188 | |||
4691 | 189 | .. _cli_modules: | ||
4692 | 190 | |||
4693 | 191 | cloud-init modules | ||
4694 | 192 | ------------------ | ||
4695 | 193 | Generally run by OS init systems to execute *modules:config* and | ||
4696 | 194 | *modules:final* boot stages. This executes cloud config :ref:`modules` | ||
4697 | 195 | configured to run in the init, config and final stages. The modules are | ||
4698 | 196 | declared to run in various boot stages in the file | ||
4699 | 197 | **/etc/cloud/cloud.cfg** under keys **cloud_init_modules**, | ||
4700 | 198 | **cloud_config_modules** and **cloud_final_modules**. Can be run on the | ||
4701 | 199 | commandline, but each module is gated to run only once due to semaphores | ||
4702 | 200 | in ``/var/lib/cloud/``. | ||
4703 | 201 | |||
4704 | 202 | * **--mode (init|config|final)**: Run *modules:init*, *modules:config* or | ||
4705 | 203 | *modules:final* cloud-init stages. See :ref:`boot_stages` for more info. | ||
4706 | 204 | |||
4707 | 205 | .. _cli_single: | ||
4708 | 206 | |||
4709 | 207 | cloud-init single | ||
4710 | 208 | ----------------- | ||
4711 | 209 | Attempt to run a single named cloud config module. The following example | ||
4712 | 210 | re-runs the cc_set_hostname module ignoring the module default frequency | ||
4713 | 211 | of once-per-instance: | ||
4714 | 212 | |||
4715 | 213 | * **--name**: The cloud-config module name to run | ||
4716 | 214 | * **--frequency**: Optionally override the declared module frequency | ||
4717 | 215 | with one of (always|once-per-instance|once) | ||
4718 | 216 | |||
4719 | 217 | .. code-block:: bash | ||
4720 | 218 | |||
4721 | 219 | % cloud-init single --name set_hostname --frequency always | ||
4722 | 220 | |||
4723 | 221 | **Note**: Mileage may vary trying to re-run each cloud-config module, as | ||
4724 | 222 | some are not idempotent. | ||
4725 | 77 | 223 | ||
4726 | 78 | .. _Cloud-init: https://launchpad.net/cloud-init | 224 | .. _Cloud-init: https://launchpad.net/cloud-init |
4727 | 79 | .. vi: textwidth=78 | 225 | .. vi: textwidth=78 |
4728 | diff --git a/doc/rtd/topics/debugging.rst b/doc/rtd/topics/debugging.rst | |||
4729 | index 4e43dd5..c2b47ed 100644 | |||
4730 | --- a/doc/rtd/topics/debugging.rst | |||
4731 | +++ b/doc/rtd/topics/debugging.rst | |||
4732 | @@ -7,6 +7,7 @@ Overview | |||
4733 | 7 | This topic will discuss general approaches for test and debug of cloud-init on | 7 | This topic will discuss general approaches for test and debug of cloud-init on |
4734 | 8 | deployed instances. | 8 | deployed instances. |
4735 | 9 | 9 | ||
4736 | 10 | .. _boot_time_analysis: | ||
4737 | 10 | 11 | ||
4738 | 11 | Boot Time Analysis - cloud-init analyze | 12 | Boot Time Analysis - cloud-init analyze |
4739 | 12 | ====================================== | 13 | ====================================== |
4740 | diff --git a/doc/rtd/topics/modules.rst b/doc/rtd/topics/modules.rst | |||
4741 | index cdb0f41..7b14675 100644 | |||
4742 | --- a/doc/rtd/topics/modules.rst | |||
4743 | +++ b/doc/rtd/topics/modules.rst | |||
4744 | @@ -1,3 +1,5 @@ | |||
4745 | 1 | .. _modules: | ||
4746 | 2 | |||
4747 | 1 | ******* | 3 | ******* |
4748 | 2 | Modules | 4 | Modules |
4749 | 3 | ******* | 5 | ******* |
4750 | diff --git a/doc/rtd/topics/network-config-format-v1.rst b/doc/rtd/topics/network-config-format-v1.rst | |||
4751 | index ce3a1bd..2f8ab54 100644 | |||
4752 | --- a/doc/rtd/topics/network-config-format-v1.rst | |||
4753 | +++ b/doc/rtd/topics/network-config-format-v1.rst | |||
4754 | @@ -349,7 +349,7 @@ For any network device (one of the Config Types) users can define a list of | |||
4755 | 349 | entries will create interface alias allowing a single interface to use | 349 | entries will create interface alias allowing a single interface to use |
4756 | 350 | different ip configurations. | 350 | different ip configurations. |
4757 | 351 | 351 | ||
4759 | 352 | Valid keys for for ``subnets`` include the following: | 352 | Valid keys for ``subnets`` include the following: |
4760 | 353 | 353 | ||
4761 | 354 | - ``type``: Specify the subnet type. | 354 | - ``type``: Specify the subnet type. |
4762 | 355 | - ``control``: Specify manual, auto or hotplug. Indicates how the interface | 355 | - ``control``: Specify manual, auto or hotplug. Indicates how the interface |
4763 | diff --git a/doc/rtd/topics/tests.rst b/doc/rtd/topics/tests.rst | |||
4764 | index d668e3f..bf04bb3 100644 | |||
4765 | --- a/doc/rtd/topics/tests.rst | |||
4766 | +++ b/doc/rtd/topics/tests.rst | |||
4767 | @@ -118,19 +118,19 @@ TreeRun and TreeCollect | |||
4768 | 118 | 118 | ||
4769 | 119 | If working on a cloud-init feature or resolving a bug, it may be useful to | 119 | If working on a cloud-init feature or resolving a bug, it may be useful to |
4770 | 120 | run the current copy of cloud-init in the integration testing environment. | 120 | run the current copy of cloud-init in the integration testing environment. |
4772 | 121 | The integration testing suite can automatically build a deb based on the | 121 | The integration testing suite can automatically build a deb based on the |
4773 | 122 | current working tree of cloud-init and run the test suite using this deb. | 122 | current working tree of cloud-init and run the test suite using this deb. |
4774 | 123 | 123 | ||
4775 | 124 | The ``tree_run`` and ``tree_collect`` commands take the same arguments as | 124 | The ``tree_run`` and ``tree_collect`` commands take the same arguments as |
4778 | 125 | the ``run`` and ``collect`` commands. These commands will build a deb and | 125 | the ``run`` and ``collect`` commands. These commands will build a deb and |
4779 | 126 | write it into a temporary file, then start the test suite and pass that deb | 126 | write it into a temporary file, then start the test suite and pass that deb |
4780 | 127 | in. To build a deb only, and not run the test suite, the ``bddeb`` command | 127 | in. To build a deb only, and not run the test suite, the ``bddeb`` command |
4781 | 128 | can be used. | 128 | can be used. |
4782 | 129 | 129 | ||
4783 | 130 | Note that code in the cloud-init working tree that has not been committed | 130 | Note that code in the cloud-init working tree that has not been committed |
4784 | 131 | when the cloud-init deb is built will still be included. To build a | 131 | when the cloud-init deb is built will still be included. To build a |
4785 | 132 | cloud-init deb from or use the ``tree_run`` command using a copy of | 132 | cloud-init deb from or use the ``tree_run`` command using a copy of |
4787 | 133 | cloud-init located in a different directory, use the option ``--cloud-init | 133 | cloud-init located in a different directory, use the option ``--cloud-init |
4788 | 134 | /path/to/cloud-init``. | 134 | /path/to/cloud-init``. |
4789 | 135 | 135 | ||
4790 | 136 | .. code-block:: bash | 136 | .. code-block:: bash |
4791 | @@ -383,7 +383,7 @@ Development Checklist | |||
4792 | 383 | * Valid unit tests validating output collected | 383 | * Valid unit tests validating output collected |
4793 | 384 | * Passes pylint & pep8 checks | 384 | * Passes pylint & pep8 checks |
4794 | 385 | * Placed in the appropriate sub-folder in the test cases directory | 385 | * Placed in the appropriate sub-folder in the test cases directory |
4796 | 386 | * Tested by running the test: | 386 | * Tested by running the test: |
4797 | 387 | 387 | ||
4798 | 388 | .. code-block:: bash | 388 | .. code-block:: bash |
4799 | 389 | 389 | ||
4800 | @@ -392,6 +392,32 @@ Development Checklist | |||
4801 | 392 | --test modules/your_test.yaml \ | 392 | --test modules/your_test.yaml \ |
4802 | 393 | [--deb <build of cloud-init>] | 393 | [--deb <build of cloud-init>] |
4803 | 394 | 394 | ||
4804 | 395 | |||
4805 | 396 | Platforms | ||
4806 | 397 | ========= | ||
4807 | 398 | |||
4808 | 399 | EC2 | ||
4809 | 400 | --- | ||
4810 | 401 | To run on the EC2 platform it is required that the user has an AWS credentials | ||
4811 | 402 | configuration file specifying his or her access keys and a default region. | ||
4812 | 403 | These configuration files are the standard that the AWS cli and other AWS | ||
4813 | 404 | tools utilize for interacting directly with AWS itself and are normally | ||
4814 | 405 | generated when running ``aws configure``: | ||
4815 | 406 | |||
4816 | 407 | .. code-block:: bash | ||
4817 | 408 | |||
4818 | 409 | $ cat $HOME/.aws/credentials | ||
4819 | 410 | [default] | ||
4820 | 411 | aws_access_key_id = <KEY HERE> | ||
4821 | 412 | aws_secret_access_key = <KEY HERE> | ||
4822 | 413 | |||
4823 | 414 | .. code-block:: bash | ||
4824 | 415 | |||
4825 | 416 | $ cat $HOME/.aws/config | ||
4826 | 417 | [default] | ||
4827 | 418 | region = us-west-2 | ||
4828 | 419 | |||
4829 | 420 | |||
4830 | 395 | Architecture | 421 | Architecture |
4831 | 396 | ============ | 422 | ============ |
4832 | 397 | 423 | ||
4833 | @@ -455,7 +481,7 @@ replace the default. If the data is a dictionary then the value will be the | |||
4834 | 455 | result of merging that dictionary from the default config and that | 481 | result of merging that dictionary from the default config and that |
4835 | 456 | dictionary from the overrides. | 482 | dictionary from the overrides. |
4836 | 457 | 483 | ||
4838 | 458 | Merging is done using the function | 484 | Merging is done using the function |
4839 | 459 | ``tests.cloud_tests.config.merge_config``, which can be examined for more | 485 | ``tests.cloud_tests.config.merge_config``, which can be examined for more |
4840 | 460 | detail on config merging behavior. | 486 | detail on config merging behavior. |
4841 | 461 | 487 | ||
4842 | diff --git a/integration-requirements.txt b/integration-requirements.txt | |||
4843 | 462 | new file mode 100644 | 488 | new file mode 100644 |
4844 | index 0000000..45baac6 | |||
4845 | --- /dev/null | |||
4846 | +++ b/integration-requirements.txt | |||
4847 | @@ -0,0 +1,20 @@ | |||
4848 | 1 | # PyPI requirements for cloud-init integration testing | ||
4849 | 2 | # https://cloudinit.readthedocs.io/en/latest/topics/tests.html | ||
4850 | 3 | # | ||
4851 | 4 | # Note: Changes to this requirements may require updates to | ||
4852 | 5 | # the packages/pkg-deps.json file as well. | ||
4853 | 6 | # | ||
4854 | 7 | |||
4855 | 8 | # ec2 backend | ||
4856 | 9 | boto3==1.5.9 | ||
4857 | 10 | |||
4858 | 11 | # ssh communication | ||
4859 | 12 | paramiko==2.4.0 | ||
4860 | 13 | |||
4861 | 14 | # lxd backend | ||
4862 | 15 | # 01/10/2018: enables use of lxd as snap support | ||
4863 | 16 | git+https://github.com/lxc/pylxd.git@0722955260a6557e6d2ffde1896bfe0707bbca27 | ||
4864 | 17 | |||
4865 | 18 | |||
4866 | 19 | # finds latest image information | ||
4867 | 20 | bzr+lp:simplestreams | ||
4868 | diff --git a/setup.py b/setup.py | |||
4869 | index bf697d7..bc3f52a 100755 | |||
4870 | --- a/setup.py | |||
4871 | +++ b/setup.py | |||
4872 | @@ -18,11 +18,14 @@ import tempfile | |||
4873 | 18 | 18 | ||
4874 | 19 | import setuptools | 19 | import setuptools |
4875 | 20 | from setuptools.command.install import install | 20 | from setuptools.command.install import install |
4876 | 21 | from setuptools.command.egg_info import egg_info | ||
4877 | 21 | 22 | ||
4878 | 22 | from distutils.errors import DistutilsArgError | 23 | from distutils.errors import DistutilsArgError |
4879 | 23 | 24 | ||
4880 | 24 | import subprocess | 25 | import subprocess |
4881 | 25 | 26 | ||
4882 | 27 | RENDERED_TMPD_PREFIX = "RENDERED_TEMPD" | ||
4883 | 28 | |||
4884 | 26 | 29 | ||
4885 | 27 | def is_f(p): | 30 | def is_f(p): |
4886 | 28 | return os.path.isfile(p) | 31 | return os.path.isfile(p) |
4887 | @@ -107,7 +110,7 @@ def render_tmpl(template): | |||
4888 | 107 | return template | 110 | return template |
4889 | 108 | 111 | ||
4890 | 109 | topdir = os.path.dirname(sys.argv[0]) | 112 | topdir = os.path.dirname(sys.argv[0]) |
4892 | 110 | tmpd = tempfile.mkdtemp(dir=topdir) | 113 | tmpd = tempfile.mkdtemp(dir=topdir, prefix=RENDERED_TMPD_PREFIX) |
4893 | 111 | atexit.register(shutil.rmtree, tmpd) | 114 | atexit.register(shutil.rmtree, tmpd) |
4894 | 112 | bname = os.path.basename(template).rstrip(tmpl_ext) | 115 | bname = os.path.basename(template).rstrip(tmpl_ext) |
4895 | 113 | fpath = os.path.join(tmpd, bname) | 116 | fpath = os.path.join(tmpd, bname) |
4896 | @@ -156,6 +159,25 @@ elif os.path.isfile('/etc/redhat-release'): | |||
4897 | 156 | USR_LIB_EXEC = "usr/libexec" | 159 | USR_LIB_EXEC = "usr/libexec" |
4898 | 157 | 160 | ||
4899 | 158 | 161 | ||
4900 | 162 | class MyEggInfo(egg_info): | ||
4901 | 163 | """This makes sure to not include the rendered files in SOURCES.txt.""" | ||
4902 | 164 | |||
4903 | 165 | def find_sources(self): | ||
4904 | 166 | ret = egg_info.find_sources(self) | ||
4905 | 167 | # update the self.filelist. | ||
4906 | 168 | self.filelist.exclude_pattern(RENDERED_TMPD_PREFIX + ".*", | ||
4907 | 169 | is_regex=True) | ||
4908 | 170 | # but since mfname is already written we have to update it also. | ||
4909 | 171 | mfname = os.path.join(self.egg_info, "SOURCES.txt") | ||
4910 | 172 | if os.path.exists(mfname): | ||
4911 | 173 | with open(mfname) as fp: | ||
4912 | 174 | files = [f for f in fp | ||
4913 | 175 | if not f.startswith(RENDERED_TMPD_PREFIX)] | ||
4914 | 176 | with open(mfname, "w") as fp: | ||
4915 | 177 | fp.write(''.join(files)) | ||
4916 | 178 | return ret | ||
4917 | 179 | |||
4918 | 180 | |||
4919 | 159 | # TODO: Is there a better way to do this?? | 181 | # TODO: Is there a better way to do this?? |
4920 | 160 | class InitsysInstallData(install): | 182 | class InitsysInstallData(install): |
4921 | 161 | init_system = None | 183 | init_system = None |
4922 | @@ -229,6 +251,7 @@ if os.uname()[0] != 'FreeBSD': | |||
4923 | 229 | # adding on the right init system configuration files | 251 | # adding on the right init system configuration files |
4924 | 230 | cmdclass = { | 252 | cmdclass = { |
4925 | 231 | 'install': InitsysInstallData, | 253 | 'install': InitsysInstallData, |
4926 | 254 | 'egg_info': MyEggInfo, | ||
4927 | 232 | } | 255 | } |
4928 | 233 | 256 | ||
4929 | 234 | requirements = read_requires() | 257 | requirements = read_requires() |
4930 | diff --git a/systemd/cloud-init-local.service.tmpl b/systemd/cloud-init-local.service.tmpl | |||
4931 | index bf6b296..ff9c644 100644 | |||
4932 | --- a/systemd/cloud-init-local.service.tmpl | |||
4933 | +++ b/systemd/cloud-init-local.service.tmpl | |||
4934 | @@ -13,12 +13,6 @@ Before=shutdown.target | |||
4935 | 13 | Before=sysinit.target | 13 | Before=sysinit.target |
4936 | 14 | Conflicts=shutdown.target | 14 | Conflicts=shutdown.target |
4937 | 15 | {% endif %} | 15 | {% endif %} |
4938 | 16 | {% if variant in ["suse"] %} | ||
4939 | 17 | # Other distros use Before=sysinit.target. There is not a clearly identified | ||
4940 | 18 | # reason for usage of basic.target instead. | ||
4941 | 19 | Before=basic.target | ||
4942 | 20 | Conflicts=shutdown.target | ||
4943 | 21 | {% endif %} | ||
4944 | 22 | RequiresMountsFor=/var/lib/cloud | 16 | RequiresMountsFor=/var/lib/cloud |
4945 | 23 | 17 | ||
4946 | 24 | [Service] | 18 | [Service] |
4947 | diff --git a/tests/cloud_tests/__init__.py b/tests/cloud_tests/__init__.py | |||
4948 | index 98c1d6c..dd43698 100644 | |||
4949 | --- a/tests/cloud_tests/__init__.py | |||
4950 | +++ b/tests/cloud_tests/__init__.py | |||
4951 | @@ -10,6 +10,12 @@ TESTCASES_DIR = os.path.join(BASE_DIR, 'testcases') | |||
4952 | 10 | TEST_CONF_DIR = os.path.join(BASE_DIR, 'testcases') | 10 | TEST_CONF_DIR = os.path.join(BASE_DIR, 'testcases') |
4953 | 11 | TREE_BASE = os.sep.join(BASE_DIR.split(os.sep)[:-2]) | 11 | TREE_BASE = os.sep.join(BASE_DIR.split(os.sep)[:-2]) |
4954 | 12 | 12 | ||
4955 | 13 | # This domain contains reverse lookups for hostnames that are used. | ||
4956 | 14 | # The primary reason is so sudo will return quickly when it attempts | ||
4957 | 15 | # to look up the hostname. i9n is just short for 'integration'. | ||
4958 | 16 | # see also bug 1730744 for why we had to do this. | ||
4959 | 17 | CI_DOMAIN = "i9n.cloud-init.io" | ||
4960 | 18 | |||
4961 | 13 | 19 | ||
4962 | 14 | def _initialize_logging(): | 20 | def _initialize_logging(): |
4963 | 15 | """Configure logging for cloud_tests.""" | 21 | """Configure logging for cloud_tests.""" |
4964 | diff --git a/tests/cloud_tests/bddeb.py b/tests/cloud_tests/bddeb.py | |||
4965 | index fba8a0c..a6d5069 100644 | |||
4966 | --- a/tests/cloud_tests/bddeb.py | |||
4967 | +++ b/tests/cloud_tests/bddeb.py | |||
4968 | @@ -8,7 +8,7 @@ import tempfile | |||
4969 | 8 | 8 | ||
4970 | 9 | from cloudinit import util as c_util | 9 | from cloudinit import util as c_util |
4971 | 10 | from tests.cloud_tests import (config, LOG) | 10 | from tests.cloud_tests import (config, LOG) |
4973 | 11 | from tests.cloud_tests import (platforms, images, snapshots, instances) | 11 | from tests.cloud_tests import platforms |
4974 | 12 | from tests.cloud_tests.stage import (PlatformComponent, run_stage, run_single) | 12 | from tests.cloud_tests.stage import (PlatformComponent, run_stage, run_single) |
4975 | 13 | 13 | ||
4976 | 14 | pre_reqs = ['devscripts', 'equivs', 'git', 'tar'] | 14 | pre_reqs = ['devscripts', 'equivs', 'git', 'tar'] |
4977 | @@ -84,18 +84,18 @@ def setup_build(args): | |||
4978 | 84 | # set up image | 84 | # set up image |
4979 | 85 | LOG.info('acquiring image for os: %s', args.build_os) | 85 | LOG.info('acquiring image for os: %s', args.build_os) |
4980 | 86 | img_conf = config.load_os_config(platform.platform_name, args.build_os) | 86 | img_conf = config.load_os_config(platform.platform_name, args.build_os) |
4982 | 87 | image_call = partial(images.get_image, platform, img_conf) | 87 | image_call = partial(platforms.get_image, platform, img_conf) |
4983 | 88 | with PlatformComponent(image_call) as image: | 88 | with PlatformComponent(image_call) as image: |
4984 | 89 | 89 | ||
4985 | 90 | # set up snapshot | 90 | # set up snapshot |
4987 | 91 | snapshot_call = partial(snapshots.get_snapshot, image) | 91 | snapshot_call = partial(platforms.get_snapshot, image) |
4988 | 92 | with PlatformComponent(snapshot_call) as snapshot: | 92 | with PlatformComponent(snapshot_call) as snapshot: |
4989 | 93 | 93 | ||
4990 | 94 | # create instance with cloud-config to set it up | 94 | # create instance with cloud-config to set it up |
4991 | 95 | LOG.info('creating instance to build deb in') | 95 | LOG.info('creating instance to build deb in') |
4992 | 96 | empty_cloud_config = "#cloud-config\n{}" | 96 | empty_cloud_config = "#cloud-config\n{}" |
4993 | 97 | instance_call = partial( | 97 | instance_call = partial( |
4995 | 98 | instances.get_instance, snapshot, empty_cloud_config, | 98 | platforms.get_instance, snapshot, empty_cloud_config, |
4996 | 99 | use_desc='build cloud-init deb') | 99 | use_desc='build cloud-init deb') |
4997 | 100 | with PlatformComponent(instance_call) as instance: | 100 | with PlatformComponent(instance_call) as instance: |
4998 | 101 | 101 | ||
4999 | diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py | |||
5000 | index 71ee764..5ea88e5 100644 |
The diff has been truncated for viewing.
FAILED: Continuous integration, rev:04b240a3e24e9813314a2159d0c4999a876f0d18
https://jenkins.ubuntu.com/server/job/cloud-init-ci/757/
Executed test runs:
SUCCESS: Checkout
SUCCESS: Unit & Style Tests
FAILED: Ubuntu LTS: Build
Click here to trigger a rebuild:
https://jenkins.ubuntu.com/server/job/cloud-init-ci/757/rebuild