Merge ~chad.smith/cloud-init:ubuntu/xenial into cloud-init:ubuntu/xenial
- Git
- lp:~chad.smith/cloud-init
- ubuntu/xenial
- Merge into ubuntu/xenial
Proposed by
Chad Smith
Status: Merged
Approved by: Scott Moser
Approved revision: 04b240a3e24e9813314a2159d0c4999a876f0d18
Merged at revision: a48cab85b23b542f4bfe9072282b573aa59987ab
Proposed branch: ~chad.smith/cloud-init:ubuntu/xenial
Merge into: cloud-init:ubuntu/xenial
Diff against target: |
10355 lines (+5108/-1157) 144 files modified
.gitignore (+1/-0) .pylintrc (+2/-2) ChangeLog (+85/-0) HACKING.rst (+8/-0) cloudinit/analyze/__main__.py (+3/-1) cloudinit/analyze/dump.py (+1/-7) cloudinit/cmd/clean.py (+103/-0) cloudinit/cmd/main.py (+37/-7) cloudinit/cmd/status.py (+160/-0) cloudinit/cmd/tests/__init__.py (+0/-0) cloudinit/cmd/tests/test_clean.py (+176/-0) cloudinit/cmd/tests/test_status.py (+368/-0) cloudinit/config/cc_apt_configure.py (+3/-2) cloudinit/config/cc_disk_setup.py (+5/-3) cloudinit/config/cc_landscape.py (+4/-4) cloudinit/config/cc_ntp.py (+5/-5) cloudinit/config/cc_power_state_change.py (+1/-0) cloudinit/config/cc_resizefs.py (+11/-1) cloudinit/config/cc_rh_subscription.py (+2/-3) cloudinit/config/cc_rsyslog.py (+5/-5) cloudinit/config/cc_seed_random.py (+2/-1) cloudinit/config/cc_snap_config.py (+5/-2) cloudinit/distros/__init__.py (+18/-13) cloudinit/distros/freebsd.py (+3/-8) cloudinit/ec2_utils.py (+30/-9) cloudinit/net/__init__.py (+2/-2) cloudinit/net/cmdline.py (+5/-4) cloudinit/net/dhcp.py (+42/-1) cloudinit/net/network_state.py (+17/-3) cloudinit/sources/DataSourceAliYun.py (+1/-0) cloudinit/sources/DataSourceAltCloud.py (+5/-2) cloudinit/sources/DataSourceAzure.py (+150/-21) cloudinit/sources/DataSourceBigstep.py (+4/-1) cloudinit/sources/DataSourceCloudSigma.py (+4/-1) cloudinit/sources/DataSourceCloudStack.py (+4/-1) cloudinit/sources/DataSourceConfigDrive.py (+6/-3) cloudinit/sources/DataSourceDigitalOcean.py (+4/-1) cloudinit/sources/DataSourceEc2.py (+41/-24) cloudinit/sources/DataSourceGCE.py (+99/-40) cloudinit/sources/DataSourceMAAS.py (+44/-15) cloudinit/sources/DataSourceNoCloud.py (+4/-1) cloudinit/sources/DataSourceNone.py (+4/-1) cloudinit/sources/DataSourceOVF.py (+92/-38) cloudinit/sources/DataSourceOpenNebula.py (+66/-56) cloudinit/sources/DataSourceOpenStack.py (+4/-1) cloudinit/sources/DataSourceScaleway.py (+3/-1) cloudinit/sources/DataSourceSmartOS.py (+4/-1) cloudinit/sources/__init__.py (+117/-14) cloudinit/sources/helpers/azure.py (+16/-9) 
cloudinit/sources/helpers/vmware/imc/config.py (+4/-0) cloudinit/sources/helpers/vmware/imc/config_custom_script.py (+153/-0) cloudinit/sources/helpers/vmware/imc/config_nic.py (+1/-1) cloudinit/sources/tests/__init__.py (+0/-0) cloudinit/sources/tests/test_init.py (+202/-0) cloudinit/temp_utils.py (+8/-3) cloudinit/tests/helpers.py (+35/-7) cloudinit/tests/test_util.py (+46/-0) cloudinit/url_helper.py (+20/-9) cloudinit/util.py (+129/-64) cloudinit/version.py (+1/-1) debian/changelog (+55/-3) dev/null (+0/-172) doc/rtd/topics/boot.rst (+10/-3) doc/rtd/topics/capabilities.rst (+153/-7) doc/rtd/topics/debugging.rst (+1/-0) doc/rtd/topics/modules.rst (+2/-0) doc/rtd/topics/network-config-format-v1.rst (+1/-1) doc/rtd/topics/tests.rst (+32/-6) integration-requirements.txt (+20/-0) setup.py (+24/-1) systemd/cloud-init-local.service.tmpl (+0/-6) tests/cloud_tests/__init__.py (+6/-0) tests/cloud_tests/bddeb.py (+4/-4) tests/cloud_tests/collect.py (+28/-16) tests/cloud_tests/config.py (+3/-1) tests/cloud_tests/platforms.yaml (+6/-5) tests/cloud_tests/platforms/__init__.py (+20/-2) tests/cloud_tests/platforms/ec2/image.py (+99/-0) tests/cloud_tests/platforms/ec2/instance.py (+132/-0) tests/cloud_tests/platforms/ec2/platform.py (+258/-0) tests/cloud_tests/platforms/ec2/snapshot.py (+66/-0) tests/cloud_tests/platforms/images.py (+2/-1) tests/cloud_tests/platforms/instances.py (+69/-1) tests/cloud_tests/platforms/lxd/image.py (+5/-6) tests/cloud_tests/platforms/lxd/instance.py (+22/-27) tests/cloud_tests/platforms/lxd/platform.py (+7/-7) tests/cloud_tests/platforms/lxd/snapshot.py (+2/-2) tests/cloud_tests/platforms/nocloudkvm/image.py (+5/-16) tests/cloud_tests/platforms/nocloudkvm/instance.py (+72/-59) tests/cloud_tests/platforms/nocloudkvm/platform.py (+11/-9) tests/cloud_tests/platforms/nocloudkvm/snapshot.py (+2/-22) tests/cloud_tests/platforms/platforms.py (+96/-0) tests/cloud_tests/platforms/snapshots.py (+0/-0) tests/cloud_tests/releases.yaml (+10/-22) 
tests/cloud_tests/setup_image.py (+0/-18) tests/cloud_tests/testcases.yaml (+21/-6) tests/cloud_tests/testcases/base.py (+6/-3) tests/cloud_tests/testcases/modules/apt_configure_sources_list.py (+5/-0) tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml (+6/-0) tests/cloud_tests/testcases/modules/ntp_pools.yaml (+1/-1) tests/cloud_tests/testcases/modules/ntp_servers.yaml (+1/-1) tests/cloud_tests/testcases/modules/set_hostname_fqdn.py (+1/-1) tests/cloud_tests/util.py (+16/-3) tests/cloud_tests/verify.py (+1/-1) tests/unittests/test_cli.py (+99/-6) tests/unittests/test_cs_util.py (+1/-0) tests/unittests/test_datasource/test_aliyun.py (+17/-1) tests/unittests/test_datasource/test_altcloud.py (+13/-9) tests/unittests/test_datasource/test_azure.py (+204/-40) tests/unittests/test_datasource/test_cloudsigma.py (+9/-4) tests/unittests/test_datasource/test_cloudstack.py (+13/-6) tests/unittests/test_datasource/test_configdrive.py (+25/-37) tests/unittests/test_datasource/test_digitalocean.py (+13/-7) tests/unittests/test_datasource/test_ec2.py (+5/-3) tests/unittests/test_datasource/test_gce.py (+174/-22) tests/unittests/test_datasource/test_maas.py (+46/-7) tests/unittests/test_datasource/test_nocloud.py (+6/-8) tests/unittests/test_datasource/test_opennebula.py (+182/-53) tests/unittests/test_datasource/test_openstack.py (+8/-4) tests/unittests/test_datasource/test_ovf.py (+107/-4) tests/unittests/test_datasource/test_scaleway.py (+9/-4) tests/unittests/test_datasource/test_smartos.py (+2/-1) tests/unittests/test_distros/test_create_users.py (+5/-2) tests/unittests/test_distros/test_netconfig.py (+46/-6) tests/unittests/test_ds_identify.py (+130/-3) tests/unittests/test_handler/test_handler_lxd.py (+0/-3) tests/unittests/test_handler/test_handler_power_state.py (+0/-3) tests/unittests/test_handler/test_handler_resizefs.py (+21/-1) tests/unittests/test_handler/test_handler_yum_add_repo.py (+2/-8) 
tests/unittests/test_handler/test_handler_zypper_add_repo.py (+1/-6) tests/unittests/test_net.py (+15/-3) tests/unittests/test_reporting.py (+1/-1) tests/unittests/test_runs/test_merge_run.py (+1/-0) tests/unittests/test_runs/test_simple_run.py (+2/-1) tests/unittests/test_templating.py (+1/-1) tests/unittests/test_util.py (+64/-3) tests/unittests/test_vmware/__init__.py (+0/-0) tests/unittests/test_vmware/test_custom_script.py (+99/-0) tests/unittests/test_vmware_config_file.py (+9/-1) tools/ds-identify (+81/-35) tools/make-mime.py (+1/-1) tools/mock-meta.py (+21/-24) tools/read-version (+14/-1) tox.ini (+5/-6) |
||||||||||||
Related bugs: |
|
Reviewer | Review Type | Date Requested | Status
Server Team CI bot | continuous-integration | | Needs Fixing
Scott Moser | | | Pending
Review via email: mp+337098@code.launchpad.net |
Commit message
Description of the change
Sync snapshot of master into xenial per SRU.
LP: #1747059
To post a comment you must log in.
Revision history for this message
Server Team CI bot (server-team-bot) wrote:
review:
Needs Fixing
(continuous-integration)
There was an error fetching revisions from git servers. Please try again in a few minutes. If the problem persists, contact Launchpad support.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | diff --git a/.gitignore b/.gitignore |
2 | index b0500a6..75565ed 100644 |
3 | --- a/.gitignore |
4 | +++ b/.gitignore |
5 | @@ -10,3 +10,4 @@ parts |
6 | prime |
7 | stage |
8 | *.snap |
9 | +*.cover |
10 | diff --git a/.pylintrc b/.pylintrc |
11 | index b160ce7..05a086d 100644 |
12 | --- a/.pylintrc |
13 | +++ b/.pylintrc |
14 | @@ -46,7 +46,7 @@ reports=no |
15 | # (useful for modules/projects where namespaces are manipulated during runtime |
16 | # and thus existing member attributes cannot be deduced by static analysis. It |
17 | # supports qualified module names, as well as Unix pattern matching. |
18 | -ignored-modules=six.moves,pkg_resources,httplib,http.client |
19 | +ignored-modules=six.moves,pkg_resources,httplib,http.client,paramiko,simplestreams |
20 | |
21 | # List of class names for which member attributes should not be checked (useful |
22 | # for classes with dynamically set attributes). This supports the use of |
23 | @@ -56,5 +56,5 @@ ignored-classes=optparse.Values,thread._local |
24 | # List of members which are set dynamically and missed by pylint inference |
25 | # system, and so shouldn't trigger E1101 when accessed. Python regular |
26 | # expressions are accepted. |
27 | -generated-members=types,http.client,command_handlers |
28 | +generated-members=types,http.client,command_handlers,m_.* |
29 | |
30 | diff --git a/ChangeLog b/ChangeLog |
31 | index 0260c57..31c2dcb 100644 |
32 | --- a/ChangeLog |
33 | +++ b/ChangeLog |
34 | @@ -1,3 +1,88 @@ |
35 | +17.2: |
36 | + - ds-identify: failure in NoCloud due to unset variable usage. |
37 | + (LP: #1737704) |
38 | + - tests: fix collect_console when not implemented [Joshua Powers] |
39 | + - ec2: Use instance-identity doc for region and instance-id |
40 | + [Andrew Jorgensen] |
41 | + - tests: remove leaked tmp files in config drive tests. |
42 | + - setup.py: Do not include rendered files in SOURCES.txt |
43 | + - SUSE: remove delta in systemd local template for SUSE [Robert Schweikert] |
44 | + - tests: move to using tox 1.7.5 |
45 | + - OVF: improve ds-identify to support finding OVF iso transport. |
46 | + (LP: #1731868) |
47 | + - VMware: Support for user provided pre and post-customization scripts |
48 | + [Maitreyee Saikia] |
49 | + - citest: In NoCloudKVM provide keys via metadata not userdata. |
50 | + - pylint: Update pylint to 1.7.1, run on tests/ and tools and fix |
51 | + complaints. |
52 | + - Datasources: Formalize DataSource get_data and related properties. |
53 | + - cli: Add clean and status subcommands |
54 | + - tests: consolidate platforms into specific dirs |
55 | + - ec2: Fix sandboxed dhclient background process cleanup. (LP: #1735331) |
56 | + - tests: NoCloudKVMImage do not modify the original local cache image. |
57 | + - tests: Enable bionic in integration tests. [Joshua Powers] |
58 | + - tests: Use apt-get to install a deb so that depends get resolved. |
59 | + - sysconfig: Correctly render dns and dns search info. |
60 | + [Ryan McCabe] (LP: #1705804) |
61 | + - integration test: replace curtin test ppa with cloud-init test ppa. |
62 | + - EC2: Fix bug using fallback_nic and metadata when restoring from cache. |
63 | + (LP: #1732917) |
64 | + - EC2: Kill dhclient process used in sandbox dhclient. (LP: #1732964) |
65 | + - ntp: fix configuration template rendering for openSUSE and SLES |
66 | + (LP: #1726572) |
67 | + - centos: Provide the failed #include url in error messages |
68 | + - Catch UrlError when #include'ing URLs [Andrew Jorgensen] |
69 | + - hosts: Fix openSUSE and SLES setup for /etc/hosts and clarify docs. |
70 | + [Robert Schweikert] (LP: #1731022) |
71 | + - rh_subscription: Perform null checks for enabled and disabled repos. |
72 | + [Dave Mulford] |
73 | + - Improve warning message when a template is not found. |
74 | + [Robert Schweikert] (LP: #1731035) |
75 | + - Replace the temporary i9n.brickies.net with i9n.cloud-init.io. |
76 | + - Azure: don't generate network configuration for SRIOV devices |
77 | + (LP: #1721579) |
78 | + - tests: address some minor feedback missed in last merge. |
79 | + - tests: integration test cleanup and full pass of nocloud-kvm. |
80 | + - Gentoo: chmod +x on all files in sysvinit/gentoo/ |
81 | + [ckonstanski] (LP: #1727126) |
82 | + - EC2: Limit network config to fallback nic, fix local-ipv4 only |
83 | + instances. (LP: #1728152) |
84 | + - Gentoo: Use "rc-service" rather than "service". |
85 | + [Carlos Konstanski] (LP: #1727121) |
86 | + - resizefs: Fix regression when system booted with root=PARTUUID= |
87 | + (LP: #1725067) |
88 | + - tools: make yum package installation more reliable |
89 | + - citest: fix remaining warnings raised by integration tests. |
90 | + - citest: show the class actual class name in results. |
91 | + - ntp: fix config module schema to allow empty ntp config (LP: #1724951) |
92 | + - tools: disable fastestmirror if using proxy [Joshua Powers] |
93 | + - schema: Log debug instead of warning when jsonschema is not available. |
94 | + (LP: #1724354) |
95 | + - simpletable: Fix get_string method to return table-formatted string |
96 | + (LP: #1722566) |
97 | + - net: Handle bridge stp values of 0 and convert to boolean type |
98 | + - tools: Give specific --abbrev=8 to "git describe" |
99 | + - network: bridge_stp value not always correct (LP: #1721157) |
100 | + - tests: re-enable tox with nocloud-kvm support [Joshua Powers] |
101 | + - systemd: remove limit on tasks created by cloud-init-final.service. |
102 | + [Robert Schweikert] (LP: #1717969) |
103 | + - suse: Support addition of zypper repos via cloud-config. |
104 | + [Robert Schweikert] (LP: #1718675) |
105 | + - tests: Combine integration configs and testcases [Joshua Powers] |
106 | + - Azure, CloudStack: Support reading dhcp options from systemd-networkd. |
107 | + [Dimitri John Ledkov] (LP: #1718029) |
108 | + - packages/debian/copyright: remove mention of boto and MIT license |
109 | + - systemd: only mention Before=apt-daily.service on debian based distros. |
110 | + [Robert Schweikert] |
111 | + - Add missing simpletable and simpletable tests for failed merge |
112 | + - Remove prettytable dependency, introduce simpletable [Andrew Jorgensen] |
113 | + - debian/copyright: dep5 updates, reorganize, add Apache 2.0 license. |
114 | + [Joshua Powers] (LP: #1718681) |
115 | + - tests: remove dependency on shlex [Joshua Powers] |
116 | + - AltCloud: Trust PATH for udevadm and modprobe. |
117 | + - DataSourceOVF: use util.find_devs_with(TYPE=iso9660) (LP: #1718287) |
118 | + - tests: remove a temp file used in bootcmd tests. |
119 | + |
120 | 17.1: |
121 | - doc: document GCE datasource. [Arnd Hannemann] |
122 | - suse: updates to templates to support openSUSE and SLES. |
123 | diff --git a/HACKING.rst b/HACKING.rst |
124 | index 93e3f42..3bb555c 100644 |
125 | --- a/HACKING.rst |
126 | +++ b/HACKING.rst |
127 | @@ -16,6 +16,14 @@ Do these things once |
128 | When prompted for 'Project contact' or 'Canonical Project Manager' enter |
129 | 'Scott Moser'. |
130 | |
131 | +* Configure git with your email and name for commit messages. |
132 | + |
133 | + Your name will appear in commit messages and will also be used in |
134 | + changelogs or release notes. Give yourself credit!:: |
135 | + |
136 | + git config user.name "Your Name" |
137 | + git config user.email "Your Email" |
138 | + |
139 | * Clone the upstream `repository`_ on Launchpad:: |
140 | |
141 | git clone https://git.launchpad.net/cloud-init |
142 | diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py |
143 | index 69b9e43..3ba5903 100644 |
144 | --- a/cloudinit/analyze/__main__.py |
145 | +++ b/cloudinit/analyze/__main__.py |
146 | @@ -6,6 +6,8 @@ import argparse |
147 | import re |
148 | import sys |
149 | |
150 | +from cloudinit.util import json_dumps |
151 | + |
152 | from . import dump |
153 | from . import show |
154 | |
155 | @@ -112,7 +114,7 @@ def analyze_show(name, args): |
156 | def analyze_dump(name, args): |
157 | """Dump cloud-init events in json format""" |
158 | (infh, outfh) = configure_io(args) |
159 | - outfh.write(dump.json_dumps(_get_events(infh)) + '\n') |
160 | + outfh.write(json_dumps(_get_events(infh)) + '\n') |
161 | |
162 | |
163 | def _get_events(infile): |
164 | diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py |
165 | index ca4da49..b071aa1 100644 |
166 | --- a/cloudinit/analyze/dump.py |
167 | +++ b/cloudinit/analyze/dump.py |
168 | @@ -2,7 +2,6 @@ |
169 | |
170 | import calendar |
171 | from datetime import datetime |
172 | -import json |
173 | import sys |
174 | |
175 | from cloudinit import util |
176 | @@ -132,11 +131,6 @@ def parse_ci_logline(line): |
177 | return event |
178 | |
179 | |
180 | -def json_dumps(data): |
181 | - return json.dumps(data, indent=1, sort_keys=True, |
182 | - separators=(',', ': ')) |
183 | - |
184 | - |
185 | def dump_events(cisource=None, rawdata=None): |
186 | events = [] |
187 | event = None |
188 | @@ -169,7 +163,7 @@ def main(): |
189 | else: |
190 | cisource = sys.stdin |
191 | |
192 | - return json_dumps(dump_events(cisource)) |
193 | + return util.json_dumps(dump_events(cisource)) |
194 | |
195 | |
196 | if __name__ == "__main__": |
197 | diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py |
198 | new file mode 100644 |
199 | index 0000000..de22f7f |
200 | --- /dev/null |
201 | +++ b/cloudinit/cmd/clean.py |
202 | @@ -0,0 +1,103 @@ |
203 | +# Copyright (C) 2017 Canonical Ltd. |
204 | +# |
205 | +# This file is part of cloud-init. See LICENSE file for license information. |
206 | + |
207 | +"""Define 'clean' utility and handler as part of cloud-init commandline.""" |
208 | + |
209 | +import argparse |
210 | +import os |
211 | +import sys |
212 | + |
213 | +from cloudinit.stages import Init |
214 | +from cloudinit.util import ( |
215 | + ProcessExecutionError, chdir, del_dir, del_file, get_config_logfiles, |
216 | + is_link, subp) |
217 | + |
218 | + |
219 | +def error(msg): |
220 | + sys.stderr.write("ERROR: " + msg + "\n") |
221 | + |
222 | + |
223 | +def get_parser(parser=None): |
224 | + """Build or extend an arg parser for clean utility. |
225 | + |
226 | + @param parser: Optional existing ArgumentParser instance representing the |
227 | + clean subcommand which will be extended to support the args of |
228 | + this utility. |
229 | + |
230 | + @returns: ArgumentParser with proper argument configuration. |
231 | + """ |
232 | + if not parser: |
233 | + parser = argparse.ArgumentParser( |
234 | + prog='clean', |
235 | + description=('Remove logs and artifacts so cloud-init re-runs on ' |
236 | + 'a clean system')) |
237 | + parser.add_argument( |
238 | + '-l', '--logs', action='store_true', default=False, dest='remove_logs', |
239 | + help='Remove cloud-init logs.') |
240 | + parser.add_argument( |
241 | + '-r', '--reboot', action='store_true', default=False, |
242 | + help='Reboot system after logs are cleaned so cloud-init re-runs.') |
243 | + parser.add_argument( |
244 | + '-s', '--seed', action='store_true', default=False, dest='remove_seed', |
245 | + help='Remove cloud-init seed directory /var/lib/cloud/seed.') |
246 | + return parser |
247 | + |
248 | + |
249 | +def remove_artifacts(remove_logs, remove_seed=False): |
250 | + """Helper which removes artifacts dir and optionally log files. |
251 | + |
252 | + @param: remove_logs: Boolean. Set True to delete the cloud_dir path. False |
253 | + preserves them. |
254 | + @param: remove_seed: Boolean. Set True to also delete seed subdir in |
255 | + paths.cloud_dir. |
256 | + @returns: 0 on success, 1 otherwise. |
257 | + """ |
258 | + init = Init(ds_deps=[]) |
259 | + init.read_cfg() |
260 | + if remove_logs: |
261 | + for log_file in get_config_logfiles(init.cfg): |
262 | + del_file(log_file) |
263 | + |
264 | + if not os.path.isdir(init.paths.cloud_dir): |
265 | + return 0 # Artifacts dir already cleaned |
266 | + with chdir(init.paths.cloud_dir): |
267 | + for path in os.listdir('.'): |
268 | + if path == 'seed' and not remove_seed: |
269 | + continue |
270 | + try: |
271 | + if os.path.isdir(path) and not is_link(path): |
272 | + del_dir(path) |
273 | + else: |
274 | + del_file(path) |
275 | + except OSError as e: |
276 | + error('Could not remove {0}: {1}'.format(path, str(e))) |
277 | + return 1 |
278 | + return 0 |
279 | + |
280 | + |
281 | +def handle_clean_args(name, args): |
282 | + """Handle calls to 'cloud-init clean' as a subcommand.""" |
283 | + exit_code = remove_artifacts(args.remove_logs, args.remove_seed) |
284 | + if exit_code == 0 and args.reboot: |
285 | + cmd = ['shutdown', '-r', 'now'] |
286 | + try: |
287 | + subp(cmd, capture=False) |
288 | + except ProcessExecutionError as e: |
289 | + error( |
290 | + 'Could not reboot this system using "{0}": {1}'.format( |
291 | + cmd, str(e))) |
292 | + exit_code = 1 |
293 | + return exit_code |
294 | + |
295 | + |
296 | +def main(): |
297 | + """Tool to collect and tar all cloud-init related logs.""" |
298 | + parser = get_parser() |
299 | + sys.exit(handle_clean_args('clean', parser.parse_args())) |
300 | + |
301 | + |
302 | +if __name__ == '__main__': |
303 | + main() |
304 | + |
305 | +# vi: ts=4 expandtab |
306 | diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py |
307 | index 6fb9d9e..d2f1b77 100644 |
308 | --- a/cloudinit/cmd/main.py |
309 | +++ b/cloudinit/cmd/main.py |
310 | @@ -421,7 +421,13 @@ def di_report_warn(datasource, cfg): |
311 | LOG.debug("no di_report found in config.") |
312 | return |
313 | |
314 | - dicfg = cfg.get('di_report', {}) |
315 | + dicfg = cfg['di_report'] |
316 | + if dicfg is None: |
317 | + # ds-identify may write 'di_report:\n #comment\n' |
318 | + # which reads as {'di_report': None} |
319 | + LOG.debug("di_report was None.") |
320 | + return |
321 | + |
322 | if not isinstance(dicfg, dict): |
323 | LOG.warning("di_report config not a dictionary: %s", dicfg) |
324 | return |
325 | @@ -603,7 +609,11 @@ def status_wrapper(name, args, data_d=None, link_d=None): |
326 | else: |
327 | raise ValueError("unknown name: %s" % name) |
328 | |
329 | - modes = ('init', 'init-local', 'modules-config', 'modules-final') |
330 | + modes = ('init', 'init-local', 'modules-init', 'modules-config', |
331 | + 'modules-final') |
332 | + if mode not in modes: |
333 | + raise ValueError( |
334 | + "Invalid cloud init mode specified '{0}'".format(mode)) |
335 | |
336 | status = None |
337 | if mode == 'init-local': |
338 | @@ -615,16 +625,18 @@ def status_wrapper(name, args, data_d=None, link_d=None): |
339 | except Exception: |
340 | pass |
341 | |
342 | + nullstatus = { |
343 | + 'errors': [], |
344 | + 'start': None, |
345 | + 'finished': None, |
346 | + } |
347 | if status is None: |
348 | - nullstatus = { |
349 | - 'errors': [], |
350 | - 'start': None, |
351 | - 'finished': None, |
352 | - } |
353 | status = {'v1': {}} |
354 | for m in modes: |
355 | status['v1'][m] = nullstatus.copy() |
356 | status['v1']['datasource'] = None |
357 | + elif mode not in status['v1']: |
358 | + status['v1'][mode] = nullstatus.copy() |
359 | |
360 | v1 = status['v1'] |
361 | v1['stage'] = mode |
362 | @@ -767,6 +779,12 @@ def main(sysv_args=None): |
363 | parser_collect_logs = subparsers.add_parser( |
364 | 'collect-logs', help='Collect and tar all cloud-init debug info') |
365 | |
366 | + parser_clean = subparsers.add_parser( |
367 | + 'clean', help='Remove logs and artifacts so cloud-init can re-run.') |
368 | + |
369 | + parser_status = subparsers.add_parser( |
370 | + 'status', help='Report cloud-init status or wait on completion.') |
371 | + |
372 | if sysv_args: |
373 | # Only load subparsers if subcommand is specified to avoid load cost |
374 | if sysv_args[0] == 'analyze': |
375 | @@ -783,6 +801,18 @@ def main(sysv_args=None): |
376 | logs_parser(parser_collect_logs) |
377 | parser_collect_logs.set_defaults( |
378 | action=('collect-logs', handle_collect_logs_args)) |
379 | + elif sysv_args[0] == 'clean': |
380 | + from cloudinit.cmd.clean import ( |
381 | + get_parser as clean_parser, handle_clean_args) |
382 | + clean_parser(parser_clean) |
383 | + parser_clean.set_defaults( |
384 | + action=('clean', handle_clean_args)) |
385 | + elif sysv_args[0] == 'status': |
386 | + from cloudinit.cmd.status import ( |
387 | + get_parser as status_parser, handle_status_args) |
388 | + status_parser(parser_status) |
389 | + parser_status.set_defaults( |
390 | + action=('status', handle_status_args)) |
391 | |
392 | args = parser.parse_args(args=sysv_args) |
393 | |
394 | diff --git a/cloudinit/cmd/status.py b/cloudinit/cmd/status.py |
395 | new file mode 100644 |
396 | index 0000000..d7aaee9 |
397 | --- /dev/null |
398 | +++ b/cloudinit/cmd/status.py |
399 | @@ -0,0 +1,160 @@ |
400 | +# Copyright (C) 2017 Canonical Ltd. |
401 | +# |
402 | +# This file is part of cloud-init. See LICENSE file for license information. |
403 | + |
404 | +"""Define 'status' utility and handler as part of cloud-init commandline.""" |
405 | + |
406 | +import argparse |
407 | +import os |
408 | +import sys |
409 | +from time import gmtime, strftime, sleep |
410 | + |
411 | +from cloudinit.distros import uses_systemd |
412 | +from cloudinit.stages import Init |
413 | +from cloudinit.util import get_cmdline, load_file, load_json |
414 | + |
415 | +CLOUDINIT_DISABLED_FILE = '/etc/cloud/cloud-init.disabled' |
416 | + |
417 | +# customer visible status messages |
418 | +STATUS_ENABLED_NOT_RUN = 'not run' |
419 | +STATUS_RUNNING = 'running' |
420 | +STATUS_DONE = 'done' |
421 | +STATUS_ERROR = 'error' |
422 | +STATUS_DISABLED = 'disabled' |
423 | + |
424 | + |
425 | +def get_parser(parser=None): |
426 | + """Build or extend an arg parser for status utility. |
427 | + |
428 | + @param parser: Optional existing ArgumentParser instance representing the |
429 | + status subcommand which will be extended to support the args of |
430 | + this utility. |
431 | + |
432 | + @returns: ArgumentParser with proper argument configuration. |
433 | + """ |
434 | + if not parser: |
435 | + parser = argparse.ArgumentParser( |
436 | + prog='status', |
437 | + description='Report run status of cloud init') |
438 | + parser.add_argument( |
439 | + '-l', '--long', action='store_true', default=False, |
440 | + help=('Report long format of statuses including run stage name and' |
441 | + ' error messages')) |
442 | + parser.add_argument( |
443 | + '-w', '--wait', action='store_true', default=False, |
444 | + help='Block waiting on cloud-init to complete') |
445 | + return parser |
446 | + |
447 | + |
448 | +def handle_status_args(name, args): |
449 | + """Handle calls to 'cloud-init status' as a subcommand.""" |
450 | + # Read configured paths |
451 | + init = Init(ds_deps=[]) |
452 | + init.read_cfg() |
453 | + |
454 | + status, status_detail, time = _get_status_details(init.paths) |
455 | + if args.wait: |
456 | + while status in (STATUS_ENABLED_NOT_RUN, STATUS_RUNNING): |
457 | + sys.stdout.write('.') |
458 | + sys.stdout.flush() |
459 | + status, status_detail, time = _get_status_details(init.paths) |
460 | + sleep(0.25) |
461 | + sys.stdout.write('\n') |
462 | + if args.long: |
463 | + print('status: {0}'.format(status)) |
464 | + if time: |
465 | + print('time: {0}'.format(time)) |
466 | + print('detail:\n{0}'.format(status_detail)) |
467 | + else: |
468 | + print('status: {0}'.format(status)) |
469 | + return 1 if status == STATUS_ERROR else 0 |
470 | + |
471 | + |
472 | +def _is_cloudinit_disabled(disable_file, paths): |
473 | + """Report whether cloud-init is disabled. |
474 | + |
475 | + @param disable_file: The path to the cloud-init disable file. |
476 | + @param paths: An initialized cloudinit.helpers.Paths object. |
477 | + @returns: A tuple containing (bool, reason) about cloud-init's status and |
478 | + why. |
479 | + """ |
480 | + is_disabled = False |
481 | + cmdline_parts = get_cmdline().split() |
482 | + if not uses_systemd(): |
483 | + reason = 'Cloud-init enabled on sysvinit' |
484 | + elif 'cloud-init=enabled' in cmdline_parts: |
485 | + reason = 'Cloud-init enabled by kernel command line cloud-init=enabled' |
486 | + elif os.path.exists(disable_file): |
487 | + is_disabled = True |
488 | + reason = 'Cloud-init disabled by {0}'.format(disable_file) |
489 | + elif 'cloud-init=disabled' in cmdline_parts: |
490 | + is_disabled = True |
491 | + reason = 'Cloud-init disabled by kernel parameter cloud-init=disabled' |
492 | + elif not os.path.exists(os.path.join(paths.run_dir, 'enabled')): |
493 | + is_disabled = True |
494 | + reason = 'Cloud-init disabled by cloud-init-generator' |
495 | + else: |
496 | + reason = 'Cloud-init enabled by systemd cloud-init-generator' |
497 | + return (is_disabled, reason) |
498 | + |
499 | + |
500 | +def _get_status_details(paths): |
501 | + """Return a 3-tuple of status, status_details and time of last event. |
502 | + |
503 | + @param paths: An initialized cloudinit.helpers.paths object. |
504 | + |
505 | + Values are obtained from parsing paths.run_dir/status.json. |
506 | + """ |
507 | + |
508 | + status = STATUS_ENABLED_NOT_RUN |
509 | + status_detail = '' |
510 | + status_v1 = {} |
511 | + |
512 | + status_file = os.path.join(paths.run_dir, 'status.json') |
513 | + |
514 | + (is_disabled, reason) = _is_cloudinit_disabled( |
515 | + CLOUDINIT_DISABLED_FILE, paths) |
516 | + if is_disabled: |
517 | + status = STATUS_DISABLED |
518 | + status_detail = reason |
519 | + if os.path.exists(status_file): |
520 | + status_v1 = load_json(load_file(status_file)).get('v1', {}) |
521 | + errors = [] |
522 | + latest_event = 0 |
523 | + for key, value in sorted(status_v1.items()): |
524 | + if key == 'stage': |
525 | + if value: |
526 | + status_detail = 'Running in stage: {0}'.format(value) |
527 | + elif key == 'datasource': |
528 | + status_detail = value |
529 | + elif isinstance(value, dict): |
530 | + errors.extend(value.get('errors', [])) |
531 | + start = value.get('start') or 0 |
532 | + finished = value.get('finished') or 0 |
533 | + if finished == 0 and start != 0: |
534 | + status = STATUS_RUNNING |
535 | + event_time = max(start, finished) |
536 | + if event_time > latest_event: |
537 | + latest_event = event_time |
538 | + if errors: |
539 | + status = STATUS_ERROR |
540 | + status_detail = '\n'.join(errors) |
541 | + elif status == STATUS_ENABLED_NOT_RUN and latest_event > 0: |
542 | + status = STATUS_DONE |
543 | + if latest_event: |
544 | + time = strftime('%a, %d %b %Y %H:%M:%S %z', gmtime(latest_event)) |
545 | + else: |
546 | + time = '' |
547 | + return status, status_detail, time |
548 | + |
549 | + |
550 | +def main(): |
551 | + """Tool to report status of cloud-init.""" |
552 | + parser = get_parser() |
553 | + sys.exit(handle_status_args('status', parser.parse_args())) |
554 | + |
555 | + |
556 | +if __name__ == '__main__': |
557 | + main() |
558 | + |
559 | +# vi: ts=4 expandtab |
560 | diff --git a/cloudinit/cmd/tests/__init__.py b/cloudinit/cmd/tests/__init__.py |
561 | new file mode 100644 |
562 | index 0000000..e69de29 |
563 | --- /dev/null |
564 | +++ b/cloudinit/cmd/tests/__init__.py |
565 | diff --git a/cloudinit/cmd/tests/test_clean.py b/cloudinit/cmd/tests/test_clean.py |
566 | new file mode 100644 |
567 | index 0000000..6713af4 |
568 | --- /dev/null |
569 | +++ b/cloudinit/cmd/tests/test_clean.py |
570 | @@ -0,0 +1,176 @@ |
571 | +# This file is part of cloud-init. See LICENSE file for license information. |
572 | + |
573 | +from cloudinit.cmd import clean |
574 | +from cloudinit.util import ensure_dir, sym_link, write_file |
575 | +from cloudinit.tests.helpers import CiTestCase, wrap_and_call, mock |
576 | +from collections import namedtuple |
577 | +import os |
578 | +from six import StringIO |
579 | + |
580 | +mypaths = namedtuple('MyPaths', 'cloud_dir') |
581 | + |
582 | + |
583 | +class TestClean(CiTestCase): |
584 | + |
585 | + def setUp(self): |
586 | + super(TestClean, self).setUp() |
587 | + self.new_root = self.tmp_dir() |
588 | + self.artifact_dir = self.tmp_path('artifacts', self.new_root) |
589 | + self.log1 = self.tmp_path('cloud-init.log', self.new_root) |
590 | + self.log2 = self.tmp_path('cloud-init-output.log', self.new_root) |
591 | + |
592 | + class FakeInit(object): |
593 | + cfg = {'def_log_file': self.log1, |
594 | + 'output': {'all': '|tee -a {0}'.format(self.log2)}} |
595 | + paths = mypaths(cloud_dir=self.artifact_dir) |
596 | + |
597 | + def __init__(self, ds_deps): |
598 | + pass |
599 | + |
600 | + def read_cfg(self): |
601 | + pass |
602 | + |
603 | + self.init_class = FakeInit |
604 | + |
605 | + def test_remove_artifacts_removes_logs(self): |
606 | + """remove_artifacts removes logs when remove_logs is True.""" |
607 | + write_file(self.log1, 'cloud-init-log') |
608 | + write_file(self.log2, 'cloud-init-output-log') |
609 | + |
610 | + self.assertFalse( |
611 | + os.path.exists(self.artifact_dir), 'Unexpected artifacts dir') |
612 | + retcode = wrap_and_call( |
613 | + 'cloudinit.cmd.clean', |
614 | + {'Init': {'side_effect': self.init_class}}, |
615 | + clean.remove_artifacts, remove_logs=True) |
616 | + self.assertFalse(os.path.exists(self.log1), 'Unexpected file') |
617 | + self.assertFalse(os.path.exists(self.log2), 'Unexpected file') |
618 | + self.assertEqual(0, retcode) |
619 | + |
620 | + def test_remove_artifacts_preserves_logs(self): |
621 | + """remove_artifacts leaves logs when remove_logs is False.""" |
622 | + write_file(self.log1, 'cloud-init-log') |
623 | + write_file(self.log2, 'cloud-init-output-log') |
624 | + |
625 | + retcode = wrap_and_call( |
626 | + 'cloudinit.cmd.clean', |
627 | + {'Init': {'side_effect': self.init_class}}, |
628 | + clean.remove_artifacts, remove_logs=False) |
629 | + self.assertTrue(os.path.exists(self.log1), 'Missing expected file') |
630 | + self.assertTrue(os.path.exists(self.log2), 'Missing expected file') |
631 | + self.assertEqual(0, retcode) |
632 | + |
633 | + def test_remove_artifacts_removes_unlinks_symlinks(self): |
634 | + """remove_artifacts cleans artifacts dir unlinking any symlinks.""" |
635 | + dir1 = os.path.join(self.artifact_dir, 'dir1') |
636 | + ensure_dir(dir1) |
637 | + symlink = os.path.join(self.artifact_dir, 'mylink') |
638 | + sym_link(dir1, symlink) |
639 | + |
640 | + retcode = wrap_and_call( |
641 | + 'cloudinit.cmd.clean', |
642 | + {'Init': {'side_effect': self.init_class}}, |
643 | + clean.remove_artifacts, remove_logs=False) |
644 | + self.assertEqual(0, retcode) |
645 | + for path in (dir1, symlink): |
646 | + self.assertFalse( |
647 | + os.path.exists(path), |
648 | + 'Unexpected {0} dir'.format(path)) |
649 | + |
650 | + def test_remove_artifacts_removes_artifacts_skipping_seed(self): |
651 | + """remove_artifacts cleans artifacts dir with exception of seed dir.""" |
652 | + dirs = [ |
653 | + self.artifact_dir, |
654 | + os.path.join(self.artifact_dir, 'seed'), |
655 | + os.path.join(self.artifact_dir, 'dir1'), |
656 | + os.path.join(self.artifact_dir, 'dir2')] |
657 | + for _dir in dirs: |
658 | + ensure_dir(_dir) |
659 | + |
660 | + retcode = wrap_and_call( |
661 | + 'cloudinit.cmd.clean', |
662 | + {'Init': {'side_effect': self.init_class}}, |
663 | + clean.remove_artifacts, remove_logs=False) |
664 | + self.assertEqual(0, retcode) |
665 | + for expected_dir in dirs[:2]: |
666 | + self.assertTrue( |
667 | + os.path.exists(expected_dir), |
668 | + 'Missing {0} dir'.format(expected_dir)) |
669 | + for deleted_dir in dirs[2:]: |
670 | + self.assertFalse( |
671 | + os.path.exists(deleted_dir), |
672 | + 'Unexpected {0} dir'.format(deleted_dir)) |
673 | + |
674 | + def test_remove_artifacts_removes_artifacts_removes_seed(self): |
675 | + """remove_artifacts removes seed dir when remove_seed is True.""" |
676 | + dirs = [ |
677 | + self.artifact_dir, |
678 | + os.path.join(self.artifact_dir, 'seed'), |
679 | + os.path.join(self.artifact_dir, 'dir1'), |
680 | + os.path.join(self.artifact_dir, 'dir2')] |
681 | + for _dir in dirs: |
682 | + ensure_dir(_dir) |
683 | + |
684 | + retcode = wrap_and_call( |
685 | + 'cloudinit.cmd.clean', |
686 | + {'Init': {'side_effect': self.init_class}}, |
687 | + clean.remove_artifacts, remove_logs=False, remove_seed=True) |
688 | + self.assertEqual(0, retcode) |
689 | + self.assertTrue( |
690 | + os.path.exists(self.artifact_dir), 'Missing artifact dir') |
691 | + for deleted_dir in dirs[1:]: |
692 | + self.assertFalse( |
693 | + os.path.exists(deleted_dir), |
694 | + 'Unexpected {0} dir'.format(deleted_dir)) |
695 | + |
696 | + def test_remove_artifacts_returns_one_on_errors(self): |
697 | + """remove_artifacts returns non-zero on failure and prints an error.""" |
698 | + ensure_dir(self.artifact_dir) |
699 | + ensure_dir(os.path.join(self.artifact_dir, 'dir1')) |
700 | + |
701 | + with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: |
702 | + retcode = wrap_and_call( |
703 | + 'cloudinit.cmd.clean', |
704 | + {'del_dir': {'side_effect': OSError('oops')}, |
705 | + 'Init': {'side_effect': self.init_class}}, |
706 | + clean.remove_artifacts, remove_logs=False) |
707 | + self.assertEqual(1, retcode) |
708 | + self.assertEqual( |
709 | + 'ERROR: Could not remove dir1: oops\n', m_stderr.getvalue()) |
710 | + |
711 | + def test_handle_clean_args_reboots(self): |
712 | + """handle_clean_args_reboots when reboot arg is provided.""" |
713 | + |
714 | + called_cmds = [] |
715 | + |
716 | + def fake_subp(cmd, capture): |
717 | + called_cmds.append((cmd, capture)) |
718 | + return '', '' |
719 | + |
720 | + myargs = namedtuple('MyArgs', 'remove_logs remove_seed reboot') |
721 | + cmdargs = myargs(remove_logs=False, remove_seed=False, reboot=True) |
722 | + retcode = wrap_and_call( |
723 | + 'cloudinit.cmd.clean', |
724 | + {'subp': {'side_effect': fake_subp}, |
725 | + 'Init': {'side_effect': self.init_class}}, |
726 | + clean.handle_clean_args, name='does not matter', args=cmdargs) |
727 | + self.assertEqual(0, retcode) |
728 | + self.assertEqual( |
729 | + [(['shutdown', '-r', 'now'], False)], called_cmds) |
730 | + |
731 | + def test_status_main(self): |
732 | + '''clean.main can be run as a standalone script.''' |
733 | + write_file(self.log1, 'cloud-init-log') |
734 | + with self.assertRaises(SystemExit) as context_manager: |
735 | + wrap_and_call( |
736 | + 'cloudinit.cmd.clean', |
737 | + {'Init': {'side_effect': self.init_class}, |
738 | + 'sys.argv': {'new': ['clean', '--logs']}}, |
739 | + clean.main) |
740 | + |
741 | + self.assertRaisesCodeEqual(0, context_manager.exception.code) |
742 | + self.assertFalse( |
743 | + os.path.exists(self.log1), 'Unexpected log {0}'.format(self.log1)) |
744 | + |
745 | + |
746 | +# vi: ts=4 expandtab syntax=python |
747 | diff --git a/cloudinit/cmd/tests/test_status.py b/cloudinit/cmd/tests/test_status.py |
748 | new file mode 100644 |
749 | index 0000000..a7c0a91 |
750 | --- /dev/null |
751 | +++ b/cloudinit/cmd/tests/test_status.py |
752 | @@ -0,0 +1,368 @@ |
753 | +# This file is part of cloud-init. See LICENSE file for license information. |
754 | + |
755 | +from collections import namedtuple |
756 | +import os |
757 | +from six import StringIO |
758 | +from textwrap import dedent |
759 | + |
760 | +from cloudinit.atomic_helper import write_json |
761 | +from cloudinit.cmd import status |
762 | +from cloudinit.util import write_file |
763 | +from cloudinit.tests.helpers import CiTestCase, wrap_and_call, mock |
764 | + |
765 | +mypaths = namedtuple('MyPaths', 'run_dir') |
766 | +myargs = namedtuple('MyArgs', 'long wait') |
767 | + |
768 | + |
769 | +class TestStatus(CiTestCase): |
770 | + |
771 | + def setUp(self): |
772 | + super(TestStatus, self).setUp() |
773 | + self.new_root = self.tmp_dir() |
774 | + self.status_file = self.tmp_path('status.json', self.new_root) |
775 | + self.disable_file = self.tmp_path('cloudinit-disable', self.new_root) |
776 | + self.paths = mypaths(run_dir=self.new_root) |
777 | + |
778 | + class FakeInit(object): |
779 | + paths = self.paths |
780 | + |
781 | + def __init__(self, ds_deps): |
782 | + pass |
783 | + |
784 | + def read_cfg(self): |
785 | + pass |
786 | + |
787 | + self.init_class = FakeInit |
788 | + |
789 | + def test__is_cloudinit_disabled_false_on_sysvinit(self): |
790 | + '''When not in an environment using systemd, return False.''' |
791 | + write_file(self.disable_file, '') # Create the ignored disable file |
792 | + (is_disabled, reason) = wrap_and_call( |
793 | + 'cloudinit.cmd.status', |
794 | + {'uses_systemd': False}, |
795 | + status._is_cloudinit_disabled, self.disable_file, self.paths) |
796 | + self.assertFalse( |
797 | + is_disabled, 'expected enabled cloud-init on sysvinit') |
798 | + self.assertEqual('Cloud-init enabled on sysvinit', reason) |
799 | + |
800 | + def test__is_cloudinit_disabled_true_on_disable_file(self): |
801 | + '''When using systemd and disable_file is present return disabled.''' |
802 | + write_file(self.disable_file, '') # Create observed disable file |
803 | + (is_disabled, reason) = wrap_and_call( |
804 | + 'cloudinit.cmd.status', |
805 | + {'uses_systemd': True}, |
806 | + status._is_cloudinit_disabled, self.disable_file, self.paths) |
807 | + self.assertTrue(is_disabled, 'expected disabled cloud-init') |
808 | + self.assertEqual( |
809 | + 'Cloud-init disabled by {0}'.format(self.disable_file), reason) |
810 | + |
811 | + def test__is_cloudinit_disabled_false_on_kernel_cmdline_enable(self): |
812 | + '''Not disabled when using systemd and enabled via commandline.''' |
813 | + write_file(self.disable_file, '') # Create ignored disable file |
814 | + (is_disabled, reason) = wrap_and_call( |
815 | + 'cloudinit.cmd.status', |
816 | + {'uses_systemd': True, |
817 | + 'get_cmdline': 'something cloud-init=enabled else'}, |
818 | + status._is_cloudinit_disabled, self.disable_file, self.paths) |
819 | + self.assertFalse(is_disabled, 'expected enabled cloud-init') |
820 | + self.assertEqual( |
821 | + 'Cloud-init enabled by kernel command line cloud-init=enabled', |
822 | + reason) |
823 | + |
824 | + def test__is_cloudinit_disabled_true_on_kernel_cmdline(self): |
825 | + '''When using systemd and the kernel cmdline has cloud-init=disabled.''' |
826 | + (is_disabled, reason) = wrap_and_call( |
827 | + 'cloudinit.cmd.status', |
828 | + {'uses_systemd': True, |
829 | + 'get_cmdline': 'something cloud-init=disabled else'}, |
830 | + status._is_cloudinit_disabled, self.disable_file, self.paths) |
831 | + self.assertTrue(is_disabled, 'expected disabled cloud-init') |
832 | + self.assertEqual( |
833 | + 'Cloud-init disabled by kernel parameter cloud-init=disabled', |
834 | + reason) |
835 | + |
836 | + def test__is_cloudinit_disabled_true_when_generator_disables(self): |
837 | + '''When cloud-init-generator doesn't write enabled file return True.''' |
838 | + enabled_file = os.path.join(self.paths.run_dir, 'enabled') |
839 | + self.assertFalse(os.path.exists(enabled_file)) |
840 | + (is_disabled, reason) = wrap_and_call( |
841 | + 'cloudinit.cmd.status', |
842 | + {'uses_systemd': True, |
843 | + 'get_cmdline': 'something'}, |
844 | + status._is_cloudinit_disabled, self.disable_file, self.paths) |
845 | + self.assertTrue(is_disabled, 'expected disabled cloud-init') |
846 | + self.assertEqual('Cloud-init disabled by cloud-init-generator', reason) |
847 | + |
848 | + def test__is_cloudinit_disabled_false_when_enabled_in_systemd(self): |
849 | + '''Report enabled when systemd generator creates the enabled file.''' |
850 | + enabled_file = os.path.join(self.paths.run_dir, 'enabled') |
851 | + write_file(enabled_file, '') |
852 | + (is_disabled, reason) = wrap_and_call( |
853 | + 'cloudinit.cmd.status', |
854 | + {'uses_systemd': True, |
855 | + 'get_cmdline': 'something ignored'}, |
856 | + status._is_cloudinit_disabled, self.disable_file, self.paths) |
857 | + self.assertFalse(is_disabled, 'expected enabled cloud-init') |
858 | + self.assertEqual( |
859 | + 'Cloud-init enabled by systemd cloud-init-generator', reason) |
860 | + |
861 | + def test_status_returns_not_run(self): |
862 | + '''When status.json does not exist yet, return 'not run'.''' |
863 | + self.assertFalse( |
864 | + os.path.exists(self.status_file), 'Unexpected status.json found') |
865 | + cmdargs = myargs(long=False, wait=False) |
866 | + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: |
867 | + retcode = wrap_and_call( |
868 | + 'cloudinit.cmd.status', |
869 | + {'_is_cloudinit_disabled': (False, ''), |
870 | + 'Init': {'side_effect': self.init_class}}, |
871 | + status.handle_status_args, 'ignored', cmdargs) |
872 | + self.assertEqual(0, retcode) |
873 | + self.assertEqual('status: not run\n', m_stdout.getvalue()) |
874 | + |
875 | + def test_status_returns_disabled_long_on_presence_of_disable_file(self): |
876 | + '''When cloudinit is disabled, return disabled reason.''' |
877 | + |
878 | + checked_files = [] |
879 | + |
880 | + def fakeexists(filepath): |
881 | + checked_files.append(filepath) |
882 | + status_file = os.path.join(self.paths.run_dir, 'status.json') |
883 | + return bool(not filepath == status_file) |
884 | + |
885 | + cmdargs = myargs(long=True, wait=False) |
886 | + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: |
887 | + retcode = wrap_and_call( |
888 | + 'cloudinit.cmd.status', |
889 | + {'os.path.exists': {'side_effect': fakeexists}, |
890 | + '_is_cloudinit_disabled': (True, 'disabled for some reason'), |
891 | + 'Init': {'side_effect': self.init_class}}, |
892 | + status.handle_status_args, 'ignored', cmdargs) |
893 | + self.assertEqual(0, retcode) |
894 | + self.assertEqual( |
895 | + [os.path.join(self.paths.run_dir, 'status.json')], |
896 | + checked_files) |
897 | + expected = dedent('''\ |
898 | + status: disabled |
899 | + detail: |
900 | + disabled for some reason |
901 | + ''') |
902 | + self.assertEqual(expected, m_stdout.getvalue()) |
903 | + |
904 | + def test_status_returns_running(self): |
905 | + '''Report running when status exists with an unfinished stage.''' |
906 | + write_json(self.status_file, |
907 | + {'v1': {'init': {'start': 1, 'finished': None}}}) |
908 | + cmdargs = myargs(long=False, wait=False) |
909 | + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: |
910 | + retcode = wrap_and_call( |
911 | + 'cloudinit.cmd.status', |
912 | + {'_is_cloudinit_disabled': (False, ''), |
913 | + 'Init': {'side_effect': self.init_class}}, |
914 | + status.handle_status_args, 'ignored', cmdargs) |
915 | + self.assertEqual(0, retcode) |
916 | + self.assertEqual('status: running\n', m_stdout.getvalue()) |
917 | + |
918 | + def test_status_returns_done(self): |
919 | + '''Reports done when stage is None and all stages are finished.''' |
920 | + write_json( |
921 | + self.status_file, |
922 | + {'v1': {'stage': None, |
923 | + 'datasource': ( |
924 | + 'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]' |
925 | + '[dsmode=net]'), |
926 | + 'blah': {'finished': 123.456}, |
927 | + 'init': {'errors': [], 'start': 124.567, |
928 | + 'finished': 125.678}, |
929 | + 'init-local': {'start': 123.45, 'finished': 123.46}}}) |
930 | + cmdargs = myargs(long=False, wait=False) |
931 | + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: |
932 | + retcode = wrap_and_call( |
933 | + 'cloudinit.cmd.status', |
934 | + {'_is_cloudinit_disabled': (False, ''), |
935 | + 'Init': {'side_effect': self.init_class}}, |
936 | + status.handle_status_args, 'ignored', cmdargs) |
937 | + self.assertEqual(0, retcode) |
938 | + self.assertEqual('status: done\n', m_stdout.getvalue()) |
939 | + |
940 | + def test_status_returns_done_long(self): |
941 | + '''Long format of done status includes datasource info.''' |
942 | + write_json( |
943 | + self.status_file, |
944 | + {'v1': {'stage': None, |
945 | + 'datasource': ( |
946 | + 'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]' |
947 | + '[dsmode=net]'), |
948 | + 'init': {'start': 124.567, 'finished': 125.678}, |
949 | + 'init-local': {'start': 123.45, 'finished': 123.46}}}) |
950 | + cmdargs = myargs(long=True, wait=False) |
951 | + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: |
952 | + retcode = wrap_and_call( |
953 | + 'cloudinit.cmd.status', |
954 | + {'_is_cloudinit_disabled': (False, ''), |
955 | + 'Init': {'side_effect': self.init_class}}, |
956 | + status.handle_status_args, 'ignored', cmdargs) |
957 | + self.assertEqual(0, retcode) |
958 | + expected = dedent('''\ |
959 | + status: done |
960 | + time: Thu, 01 Jan 1970 00:02:05 +0000 |
961 | + detail: |
962 | + DataSourceNoCloud [seed=/var/.../seed/nocloud-net][dsmode=net] |
963 | + ''') |
964 | + self.assertEqual(expected, m_stdout.getvalue()) |
965 | + |
966 | + def test_status_on_errors(self): |
967 | + '''Reports error when any stage has errors.''' |
968 | + write_json( |
969 | + self.status_file, |
970 | + {'v1': {'stage': None, |
971 | + 'blah': {'errors': [], 'finished': 123.456}, |
972 | + 'init': {'errors': ['error1'], 'start': 124.567, |
973 | + 'finished': 125.678}, |
974 | + 'init-local': {'start': 123.45, 'finished': 123.46}}}) |
975 | + cmdargs = myargs(long=False, wait=False) |
976 | + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: |
977 | + retcode = wrap_and_call( |
978 | + 'cloudinit.cmd.status', |
979 | + {'_is_cloudinit_disabled': (False, ''), |
980 | + 'Init': {'side_effect': self.init_class}}, |
981 | + status.handle_status_args, 'ignored', cmdargs) |
982 | + self.assertEqual(1, retcode) |
983 | + self.assertEqual('status: error\n', m_stdout.getvalue()) |
984 | + |
985 | + def test_status_on_errors_long(self): |
986 | + '''Long format of error status includes all error messages.''' |
987 | + write_json( |
988 | + self.status_file, |
989 | + {'v1': {'stage': None, |
990 | + 'datasource': ( |
991 | + 'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]' |
992 | + '[dsmode=net]'), |
993 | + 'init': {'errors': ['error1'], 'start': 124.567, |
994 | + 'finished': 125.678}, |
995 | + 'init-local': {'errors': ['error2', 'error3'], |
996 | + 'start': 123.45, 'finished': 123.46}}}) |
997 | + cmdargs = myargs(long=True, wait=False) |
998 | + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: |
999 | + retcode = wrap_and_call( |
1000 | + 'cloudinit.cmd.status', |
1001 | + {'_is_cloudinit_disabled': (False, ''), |
1002 | + 'Init': {'side_effect': self.init_class}}, |
1003 | + status.handle_status_args, 'ignored', cmdargs) |
1004 | + self.assertEqual(1, retcode) |
1005 | + expected = dedent('''\ |
1006 | + status: error |
1007 | + time: Thu, 01 Jan 1970 00:02:05 +0000 |
1008 | + detail: |
1009 | + error1 |
1010 | + error2 |
1011 | + error3 |
1012 | + ''') |
1013 | + self.assertEqual(expected, m_stdout.getvalue()) |
1014 | + |
1015 | + def test_status_returns_running_long_format(self): |
1016 | + '''Long format reports the stage in which we are running.''' |
1017 | + write_json( |
1018 | + self.status_file, |
1019 | + {'v1': {'stage': 'init', |
1020 | + 'init': {'start': 124.456, 'finished': None}, |
1021 | + 'init-local': {'start': 123.45, 'finished': 123.46}}}) |
1022 | + cmdargs = myargs(long=True, wait=False) |
1023 | + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: |
1024 | + retcode = wrap_and_call( |
1025 | + 'cloudinit.cmd.status', |
1026 | + {'_is_cloudinit_disabled': (False, ''), |
1027 | + 'Init': {'side_effect': self.init_class}}, |
1028 | + status.handle_status_args, 'ignored', cmdargs) |
1029 | + self.assertEqual(0, retcode) |
1030 | + expected = dedent('''\ |
1031 | + status: running |
1032 | + time: Thu, 01 Jan 1970 00:02:04 +0000 |
1033 | + detail: |
1034 | + Running in stage: init |
1035 | + ''') |
1036 | + self.assertEqual(expected, m_stdout.getvalue()) |
1037 | + |
1038 | + def test_status_wait_blocks_until_done(self): |
1039 | + '''Specifying wait will poll every 1/4 second until done state.''' |
1040 | + running_json = { |
1041 | + 'v1': {'stage': 'init', |
1042 | + 'init': {'start': 124.456, 'finished': None}, |
1043 | + 'init-local': {'start': 123.45, 'finished': 123.46}}} |
1044 | + done_json = { |
1045 | + 'v1': {'stage': None, |
1046 | + 'init': {'start': 124.456, 'finished': 125.678}, |
1047 | + 'init-local': {'start': 123.45, 'finished': 123.46}}} |
1048 | + |
1049 | + self.sleep_calls = 0 |
1050 | + |
1051 | + def fake_sleep(interval): |
1052 | + self.assertEqual(0.25, interval) |
1053 | + self.sleep_calls += 1 |
1054 | + if self.sleep_calls == 2: |
1055 | + write_json(self.status_file, running_json) |
1056 | + elif self.sleep_calls == 3: |
1057 | + write_json(self.status_file, done_json) |
1058 | + |
1059 | + cmdargs = myargs(long=False, wait=True) |
1060 | + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: |
1061 | + retcode = wrap_and_call( |
1062 | + 'cloudinit.cmd.status', |
1063 | + {'sleep': {'side_effect': fake_sleep}, |
1064 | + '_is_cloudinit_disabled': (False, ''), |
1065 | + 'Init': {'side_effect': self.init_class}}, |
1066 | + status.handle_status_args, 'ignored', cmdargs) |
1067 | + self.assertEqual(0, retcode) |
1068 | + self.assertEqual(4, self.sleep_calls) |
1069 | + self.assertEqual('....\nstatus: done\n', m_stdout.getvalue()) |
1070 | + |
1071 | + def test_status_wait_blocks_until_error(self): |
1072 | + '''Specifying wait will poll every 1/4 second until error state.''' |
1073 | + running_json = { |
1074 | + 'v1': {'stage': 'init', |
1075 | + 'init': {'start': 124.456, 'finished': None}, |
1076 | + 'init-local': {'start': 123.45, 'finished': 123.46}}} |
1077 | + error_json = { |
1078 | + 'v1': {'stage': None, |
1079 | + 'init': {'errors': ['error1'], 'start': 124.456, |
1080 | + 'finished': 125.678}, |
1081 | + 'init-local': {'start': 123.45, 'finished': 123.46}}} |
1082 | + |
1083 | + self.sleep_calls = 0 |
1084 | + |
1085 | + def fake_sleep(interval): |
1086 | + self.assertEqual(0.25, interval) |
1087 | + self.sleep_calls += 1 |
1088 | + if self.sleep_calls == 2: |
1089 | + write_json(self.status_file, running_json) |
1090 | + elif self.sleep_calls == 3: |
1091 | + write_json(self.status_file, error_json) |
1092 | + |
1093 | + cmdargs = myargs(long=False, wait=True) |
1094 | + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: |
1095 | + retcode = wrap_and_call( |
1096 | + 'cloudinit.cmd.status', |
1097 | + {'sleep': {'side_effect': fake_sleep}, |
1098 | + '_is_cloudinit_disabled': (False, ''), |
1099 | + 'Init': {'side_effect': self.init_class}}, |
1100 | + status.handle_status_args, 'ignored', cmdargs) |
1101 | + self.assertEqual(1, retcode) |
1102 | + self.assertEqual(4, self.sleep_calls) |
1103 | + self.assertEqual('....\nstatus: error\n', m_stdout.getvalue()) |
1104 | + |
1105 | + def test_status_main(self): |
1106 | + '''status.main can be run as a standalone script.''' |
1107 | + write_json(self.status_file, |
1108 | + {'v1': {'init': {'start': 1, 'finished': None}}}) |
1109 | + with self.assertRaises(SystemExit) as context_manager: |
1110 | + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: |
1111 | + wrap_and_call( |
1112 | + 'cloudinit.cmd.status', |
1113 | + {'sys.argv': {'new': ['status']}, |
1114 | + '_is_cloudinit_disabled': (False, ''), |
1115 | + 'Init': {'side_effect': self.init_class}}, |
1116 | + status.main) |
1117 | + self.assertRaisesCodeEqual(0, context_manager.exception.code) |
1118 | + self.assertEqual('status: running\n', m_stdout.getvalue()) |
1119 | + |
1120 | +# vi: ts=4 expandtab syntax=python |
1121 | diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py |
1122 | index 177cbcf..5b9cbca 100644 |
1123 | --- a/cloudinit/config/cc_apt_configure.py |
1124 | +++ b/cloudinit/config/cc_apt_configure.py |
1125 | @@ -275,8 +275,9 @@ def handle(name, ocfg, cloud, log, _): |
1126 | cfg = ocfg.get('apt', {}) |
1127 | |
1128 | if not isinstance(cfg, dict): |
1129 | - raise ValueError("Expected dictionary for 'apt' config, found %s", |
1130 | - type(cfg)) |
1131 | + raise ValueError( |
1132 | + "Expected dictionary for 'apt' config, found {config_type}".format( |
1133 | + config_type=type(cfg))) |
1134 | |
1135 | apply_debconf_selections(cfg, target) |
1136 | apply_apt(cfg, cloud, target) |
1137 | diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py |
1138 | index c2b83ae..c3e8c48 100644 |
1139 | --- a/cloudinit/config/cc_disk_setup.py |
1140 | +++ b/cloudinit/config/cc_disk_setup.py |
1141 | @@ -788,7 +788,8 @@ def mkpart(device, definition): |
1142 | # This prevents you from overwriting the device |
1143 | LOG.debug("Checking if device %s is a valid device", device) |
1144 | if not is_device_valid(device): |
1145 | - raise Exception("Device %s is not a disk device!", device) |
1146 | + raise Exception( |
1147 | + 'Device {device} is not a disk device!'.format(device=device)) |
1148 | |
1149 | # Remove the partition table entries |
1150 | if isinstance(layout, str) and layout.lower() == "remove": |
1151 | @@ -945,8 +946,9 @@ def mkfs(fs_cfg): |
1152 | |
1153 | # Check that we can create the FS |
1154 | if not (fs_type or fs_cmd): |
1155 | - raise Exception("No way to create filesystem '%s'. fs_type or fs_cmd " |
1156 | - "must be set.", label) |
1157 | + raise Exception( |
1158 | + "No way to create filesystem '{label}'. fs_type or fs_cmd " |
1159 | + "must be set.".format(label=label)) |
1160 | |
1161 | # Create the commands |
1162 | shell = False |
1163 | diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py |
1164 | index 8f9f1ab..eaf1e94 100644 |
1165 | --- a/cloudinit/config/cc_landscape.py |
1166 | +++ b/cloudinit/config/cc_landscape.py |
1167 | @@ -94,10 +94,10 @@ def handle(_name, cfg, cloud, log, _args): |
1168 | ls_cloudcfg = cfg.get("landscape", {}) |
1169 | |
1170 | if not isinstance(ls_cloudcfg, (dict)): |
1171 | - raise RuntimeError(("'landscape' key existed in config," |
1172 | - " but not a dictionary type," |
1173 | - " is a %s instead"), |
1174 | - type_utils.obj_name(ls_cloudcfg)) |
1175 | + raise RuntimeError( |
1176 | + "'landscape' key existed in config, but not a dictionary type," |
1177 | + " is a {_type} instead".format( |
1178 | + _type=type_utils.obj_name(ls_cloudcfg))) |
1179 | if not ls_cloudcfg: |
1180 | return |
1181 | |
1182 | diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py |
1183 | index f50bcb3..cbd0237 100644 |
1184 | --- a/cloudinit/config/cc_ntp.py |
1185 | +++ b/cloudinit/config/cc_ntp.py |
1186 | @@ -106,9 +106,9 @@ def handle(name, cfg, cloud, log, _args): |
1187 | |
1188 | # TODO drop this when validate_cloudconfig_schema is strict=True |
1189 | if not isinstance(ntp_cfg, (dict)): |
1190 | - raise RuntimeError(("'ntp' key existed in config," |
1191 | - " but not a dictionary type," |
1192 | - " is a %s %instead"), type_utils.obj_name(ntp_cfg)) |
1193 | + raise RuntimeError( |
1194 | + "'ntp' key existed in config, but not a dictionary type," |
1195 | + " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg))) |
1196 | |
1197 | validate_cloudconfig_schema(cfg, schema) |
1198 | if ntp_installable(): |
1199 | @@ -206,8 +206,8 @@ def write_ntp_config_template(cfg, cloud, path, template=None): |
1200 | if not template_fn: |
1201 | template_fn = cloud.get_template_filename('ntp.conf') |
1202 | if not template_fn: |
1203 | - raise RuntimeError(("No template found, " |
1204 | - "not rendering %s"), path) |
1205 | + raise RuntimeError( |
1206 | + 'No template found, not rendering {path}'.format(path=path)) |
1207 | |
1208 | templater.render_to_file(template_fn, path, params) |
1209 | |
1210 | diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py |
1211 | index eba58b0..4da3a58 100644 |
1212 | --- a/cloudinit/config/cc_power_state_change.py |
1213 | +++ b/cloudinit/config/cc_power_state_change.py |
1214 | @@ -194,6 +194,7 @@ def doexit(sysexit): |
1215 | |
1216 | |
1217 | def execmd(exe_args, output=None, data_in=None): |
1218 | + ret = 1 |
1219 | try: |
1220 | proc = subprocess.Popen(exe_args, stdin=subprocess.PIPE, |
1221 | stdout=output, stderr=subprocess.STDOUT) |
1222 | diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py |
1223 | index 0d282e6..cec22bb 100644 |
1224 | --- a/cloudinit/config/cc_resizefs.py |
1225 | +++ b/cloudinit/config/cc_resizefs.py |
1226 | @@ -59,7 +59,17 @@ __doc__ = get_schema_doc(schema) # Supplement python help() |
1227 | |
1228 | |
1229 | def _resize_btrfs(mount_point, devpth): |
1230 | - return ('btrfs', 'filesystem', 'resize', 'max', mount_point) |
1231 | + # If "/" is ro resize will fail. However it should be allowed since resize |
1232 | + # makes everything bigger and subvolumes that are not ro will benefit. |
1233 | + # Use a subvolume that is not ro to trick the resize operation to do the |
1234 | + # "right" thing. The use of ".snapshot" is specific to "snapper" a generic |
1235 | + # solution would be walk the subvolumes and find a rw mounted subvolume. |
1236 | + if (not util.mount_is_read_write(mount_point) and |
1237 | + os.path.isdir("%s/.snapshots" % mount_point)): |
1238 | + return ('btrfs', 'filesystem', 'resize', 'max', |
1239 | + '%s/.snapshots' % mount_point) |
1240 | + else: |
1241 | + return ('btrfs', 'filesystem', 'resize', 'max', mount_point) |
1242 | |
1243 | |
1244 | def _resize_ext(mount_point, devpth): |
1245 | diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py |
1246 | index a9d21e7..530808c 100644 |
1247 | --- a/cloudinit/config/cc_rh_subscription.py |
1248 | +++ b/cloudinit/config/cc_rh_subscription.py |
1249 | @@ -276,9 +276,8 @@ class SubscriptionManager(object): |
1250 | cmd = ['attach', '--auto'] |
1251 | try: |
1252 | return_out, return_err = self._sub_man_cli(cmd) |
1253 | - except util.ProcessExecutionError: |
1254 | - self.log_warn("Auto-attach failed with: " |
1255 | - "{0}]".format(return_err.strip())) |
1256 | + except util.ProcessExecutionError as e: |
1257 | + self.log_warn("Auto-attach failed with: {0}".format(e)) |
1258 | return False |
1259 | for line in return_out.split("\n"): |
1260 | if line is not "": |
1261 | diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py |
1262 | index 50ff9e3..af08788 100644 |
1263 | --- a/cloudinit/config/cc_rsyslog.py |
1264 | +++ b/cloudinit/config/cc_rsyslog.py |
1265 | @@ -20,15 +20,15 @@ which defaults to ``20-cloud-config.conf``. The rsyslog config directory to |
1266 | write config files to may be specified in ``config_dir``, which defaults to |
1267 | ``/etc/rsyslog.d``. |
1268 | |
1269 | -A list of configurations for for rsyslog can be specified under the ``configs`` |
1270 | -key in the ``rsyslog`` config. Each entry in ``configs`` is either a string or |
1271 | -a dictionary. Each config entry contains a configuration string and a file to |
1272 | +A list of configurations for rsyslog can be specified under the ``configs`` key |
1273 | +in the ``rsyslog`` config. Each entry in ``configs`` is either a string or a |
1274 | +dictionary. Each config entry contains a configuration string and a file to |
1275 | write it to. For config entries that are a dictionary, ``filename`` sets the |
1276 | target filename and ``content`` specifies the config string to write. For |
1277 | config entries that are only a string, the string is used as the config string |
1278 | to write. If the filename to write the config to is not specified, the value of |
1279 | -the ``config_filename`` key is used. A file with the selected filename will |
1280 | -be written inside the directory specified by ``config_dir``. |
1281 | +the ``config_filename`` key is used. A file with the selected filename will be |
1282 | +written inside the directory specified by ``config_dir``. |
1283 | |
1284 | The command to use to reload the rsyslog service after the config has been |
1285 | updated can be specified in ``service_reload_command``. If this is set to |
1286 | diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py |
1287 | index e76b9c0..65f6e77 100644 |
1288 | --- a/cloudinit/config/cc_seed_random.py |
1289 | +++ b/cloudinit/config/cc_seed_random.py |
1290 | @@ -95,7 +95,8 @@ def handle_random_seed_command(command, required, env=None): |
1291 | cmd = command[0] |
1292 | if not util.which(cmd): |
1293 | if required: |
1294 | - raise ValueError("command '%s' not found but required=true", cmd) |
1295 | + raise ValueError( |
1296 | + "command '{cmd}' not found but required=true".format(cmd=cmd)) |
1297 | else: |
1298 | LOG.debug("command '%s' not found for seed_command", cmd) |
1299 | return |
1300 | diff --git a/cloudinit/config/cc_snap_config.py b/cloudinit/config/cc_snap_config.py |
1301 | index fe0cc73..e82c081 100644 |
1302 | --- a/cloudinit/config/cc_snap_config.py |
1303 | +++ b/cloudinit/config/cc_snap_config.py |
1304 | @@ -87,7 +87,9 @@ def add_assertions(assertions=None): |
1305 | assertions = [] |
1306 | |
1307 | if not isinstance(assertions, list): |
1308 | - raise ValueError('assertion parameter was not a list: %s', assertions) |
1309 | + raise ValueError( |
1310 | + 'assertion parameter was not a list: {assertions}'.format( |
1311 | + assertions=assertions)) |
1312 | |
1313 | snap_cmd = [SNAPPY_CMD, 'ack'] |
1314 | combined = "\n".join(assertions) |
1315 | @@ -115,7 +117,8 @@ def add_snap_user(cfg=None): |
1316 | cfg = {} |
1317 | |
1318 | if not isinstance(cfg, dict): |
1319 | - raise ValueError('configuration parameter was not a dict: %s', cfg) |
1320 | + raise ValueError( |
1321 | + 'configuration parameter was not a dict: {cfg}'.format(cfg=cfg)) |
1322 | |
1323 | snapuser = cfg.get('email', None) |
1324 | if not snapuser: |
1325 | diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py |
1326 | index d5becd1..55260ea 100755 |
1327 | --- a/cloudinit/distros/__init__.py |
1328 | +++ b/cloudinit/distros/__init__.py |
1329 | @@ -45,6 +45,10 @@ OSFAMILIES = { |
1330 | |
1331 | LOG = logging.getLogger(__name__) |
1332 | |
1333 | +# This is a best guess regex, based on current EC2 AZs on 2017-12-11. |
1334 | +# It could break when Amazon adds new regions and new AZs. |
1335 | +_EC2_AZ_RE = re.compile('^[a-z][a-z]-(?:[a-z]+-)+[0-9][a-z]$') |
1336 | + |
1337 | |
1338 | @six.add_metaclass(abc.ABCMeta) |
1339 | class Distro(object): |
1340 | @@ -102,11 +106,8 @@ class Distro(object): |
1341 | self._apply_hostname(writeable_hostname) |
1342 | |
1343 | def uses_systemd(self): |
1344 | - try: |
1345 | - res = os.lstat('/run/systemd/system') |
1346 | - return stat.S_ISDIR(res.st_mode) |
1347 | - except Exception: |
1348 | - return False |
1349 | + """Wrapper to report whether this distro uses systemd or sysvinit.""" |
1350 | + return uses_systemd() |
1351 | |
1352 | @abc.abstractmethod |
1353 | def package_command(self, cmd, args=None, pkgs=None): |
1354 | @@ -686,18 +687,13 @@ def _get_package_mirror_info(mirror_info, data_source=None, |
1355 | if not mirror_info: |
1356 | mirror_info = {} |
1357 | |
1358 | - # ec2 availability zones are named cc-direction-[0-9][a-d] (us-east-1b) |
1359 | - # the region is us-east-1. so region = az[0:-1] |
1360 | - directions_re = '|'.join([ |
1361 | - 'central', 'east', 'north', 'northeast', 'northwest', |
1362 | - 'south', 'southeast', 'southwest', 'west']) |
1363 | - ec2_az_re = ("^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" % directions_re) |
1364 | - |
1365 | subst = {} |
1366 | if data_source and data_source.availability_zone: |
1367 | subst['availability_zone'] = data_source.availability_zone |
1368 | |
1369 | - if re.match(ec2_az_re, data_source.availability_zone): |
1370 | + # ec2 availability zones are named cc-direction-[0-9][a-d] (us-east-1b) |
1371 | + # the region is us-east-1. so region = az[0:-1] |
1372 | + if _EC2_AZ_RE.match(data_source.availability_zone): |
1373 | subst['ec2_region'] = "%s" % data_source.availability_zone[0:-1] |
1374 | |
1375 | if data_source and data_source.region: |
1376 | @@ -761,4 +757,13 @@ def set_etc_timezone(tz, tz_file=None, tz_conf="/etc/timezone", |
1377 | util.copy(tz_file, tz_local) |
1378 | return |
1379 | |
1380 | + |
1381 | +def uses_systemd(): |
1382 | + try: |
1383 | + res = os.lstat('/run/systemd/system') |
1384 | + return stat.S_ISDIR(res.st_mode) |
1385 | + except Exception: |
1386 | + return False |
1387 | + |
1388 | + |
1389 | # vi: ts=4 expandtab |
1390 | diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py |
1391 | index bad112f..aa468bc 100644 |
1392 | --- a/cloudinit/distros/freebsd.py |
1393 | +++ b/cloudinit/distros/freebsd.py |
1394 | @@ -116,6 +116,7 @@ class Distro(distros.Distro): |
1395 | (out, err) = util.subp(['ifconfig', '-a']) |
1396 | ifconfigoutput = [x for x in (out.strip()).splitlines() |
1397 | if len(x.split()) > 0] |
1398 | + bsddev = 'NOT_FOUND' |
1399 | for line in ifconfigoutput: |
1400 | m = re.match('^\w+', line) |
1401 | if m: |
1402 | @@ -347,15 +348,9 @@ class Distro(distros.Distro): |
1403 | bymac[Distro.get_interface_mac(n)] = { |
1404 | 'name': n, 'up': self.is_up(n), 'downable': None} |
1405 | |
1406 | + nics_with_addresses = set() |
1407 | if check_downable: |
1408 | - nics_with_addresses = set() |
1409 | - ipv6 = self.get_ipv6() |
1410 | - ipv4 = self.get_ipv4() |
1411 | - for bytes_out in (ipv6, ipv4): |
1412 | - for i in ipv6: |
1413 | - nics_with_addresses.update(i) |
1414 | - for i in ipv4: |
1415 | - nics_with_addresses.update(i) |
1416 | + nics_with_addresses = set(self.get_ipv4() + self.get_ipv6()) |
1417 | |
1418 | for d in bymac.values(): |
1419 | d['downable'] = (d['up'] is False or |
1420 | diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py |
1421 | index 723d6bd..d6c61e4 100644 |
1422 | --- a/cloudinit/ec2_utils.py |
1423 | +++ b/cloudinit/ec2_utils.py |
1424 | @@ -1,6 +1,8 @@ |
1425 | # Copyright (C) 2012 Yahoo! Inc. |
1426 | +# Copyright (C) 2014 Amazon.com, Inc. or its affiliates. |
1427 | # |
1428 | # Author: Joshua Harlow <harlowja@yahoo-inc.com> |
1429 | +# Author: Andrew Jorgensen <ajorgens@amazon.com> |
1430 | # |
1431 | # This file is part of cloud-init. See LICENSE file for license information. |
1432 | |
1433 | @@ -164,14 +166,11 @@ def get_instance_userdata(api_version='latest', |
1434 | return user_data |
1435 | |
1436 | |
1437 | -def get_instance_metadata(api_version='latest', |
1438 | - metadata_address='http://169.254.169.254', |
1439 | - ssl_details=None, timeout=5, retries=5, |
1440 | - leaf_decoder=None): |
1441 | - md_url = url_helper.combine_url(metadata_address, api_version) |
1442 | - # Note, 'meta-data' explicitly has trailing /. |
1443 | - # this is required for CloudStack (LP: #1356855) |
1444 | - md_url = url_helper.combine_url(md_url, 'meta-data/') |
1445 | +def _get_instance_metadata(tree, api_version='latest', |
1446 | + metadata_address='http://169.254.169.254', |
1447 | + ssl_details=None, timeout=5, retries=5, |
1448 | + leaf_decoder=None): |
1449 | + md_url = url_helper.combine_url(metadata_address, api_version, tree) |
1450 | caller = functools.partial(util.read_file_or_url, |
1451 | ssl_details=ssl_details, timeout=timeout, |
1452 | retries=retries) |
1453 | @@ -189,7 +188,29 @@ def get_instance_metadata(api_version='latest', |
1454 | md = {} |
1455 | return md |
1456 | except Exception: |
1457 | - util.logexc(LOG, "Failed fetching metadata from url %s", md_url) |
1458 | + util.logexc(LOG, "Failed fetching %s from url %s", tree, md_url) |
1459 | return {} |
1460 | |
1461 | + |
1462 | +def get_instance_metadata(api_version='latest', |
1463 | + metadata_address='http://169.254.169.254', |
1464 | + ssl_details=None, timeout=5, retries=5, |
1465 | + leaf_decoder=None): |
1466 | + # Note, 'meta-data' explicitly has trailing /. |
1467 | + # this is required for CloudStack (LP: #1356855) |
1468 | + return _get_instance_metadata(tree='meta-data/', api_version=api_version, |
1469 | + metadata_address=metadata_address, |
1470 | + ssl_details=ssl_details, timeout=timeout, |
1471 | + retries=retries, leaf_decoder=leaf_decoder) |
1472 | + |
1473 | + |
1474 | +def get_instance_identity(api_version='latest', |
1475 | + metadata_address='http://169.254.169.254', |
1476 | + ssl_details=None, timeout=5, retries=5, |
1477 | + leaf_decoder=None): |
1478 | + return _get_instance_metadata(tree='dynamic/instance-identity', |
1479 | + api_version=api_version, |
1480 | + metadata_address=metadata_address, |
1481 | + ssl_details=ssl_details, timeout=timeout, |
1482 | + retries=retries, leaf_decoder=leaf_decoder) |
1483 | # vi: ts=4 expandtab |
1484 | diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py |
1485 | index a1b0db1..c015e79 100644 |
1486 | --- a/cloudinit/net/__init__.py |
1487 | +++ b/cloudinit/net/__init__.py |
1488 | @@ -18,7 +18,7 @@ SYS_CLASS_NET = "/sys/class/net/" |
1489 | DEFAULT_PRIMARY_INTERFACE = 'eth0' |
1490 | |
1491 | |
1492 | -def _natural_sort_key(s, _nsre=re.compile('([0-9]+)')): |
1493 | +def natural_sort_key(s, _nsre=re.compile('([0-9]+)')): |
1494 | """Sorting for Humans: natural sort order. Can be use as the key to sort |
1495 | functions. |
1496 | This will sort ['eth0', 'ens3', 'ens10', 'ens12', 'ens8', 'ens0'] as |
1497 | @@ -224,7 +224,7 @@ def find_fallback_nic(blacklist_drivers=None): |
1498 | |
1499 | # if eth0 exists use it above anything else, otherwise get the interface |
1500 | # that we can read 'first' (using the sorted defintion of first). |
1501 | - names = list(sorted(potential_interfaces, key=_natural_sort_key)) |
1502 | + names = list(sorted(potential_interfaces, key=natural_sort_key)) |
1503 | if DEFAULT_PRIMARY_INTERFACE in names: |
1504 | names.remove(DEFAULT_PRIMARY_INTERFACE) |
1505 | names.insert(0, DEFAULT_PRIMARY_INTERFACE) |
1506 | diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py |
1507 | index 38b27a5..7b2cc9d 100755 |
1508 | --- a/cloudinit/net/cmdline.py |
1509 | +++ b/cloudinit/net/cmdline.py |
1510 | @@ -116,10 +116,11 @@ def config_from_klibc_net_cfg(files=None, mac_addrs=None): |
1511 | prev = names[name]['entry'] |
1512 | if prev.get('mac_address') != entry.get('mac_address'): |
1513 | raise ValueError( |
1514 | - "device '%s' was defined multiple times (%s)" |
1515 | - " but had differing mac addresses: %s -> %s.", |
1516 | - (name, ' '.join(names[name]['files']), |
1517 | - prev.get('mac_address'), entry.get('mac_address'))) |
1518 | + "device '{name}' was defined multiple times ({files})" |
1519 | + " but had differing mac addresses: {old} -> {new}.".format( |
1520 | + name=name, files=' '.join(names[name]['files']), |
1521 | + old=prev.get('mac_address'), |
1522 | + new=entry.get('mac_address'))) |
1523 | prev['subnets'].extend(entry['subnets']) |
1524 | names[name]['files'].append(cfg_file) |
1525 | else: |
1526 | diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py |
1527 | index 875a460..087c0c0 100644 |
1528 | --- a/cloudinit/net/dhcp.py |
1529 | +++ b/cloudinit/net/dhcp.py |
1530 | @@ -10,7 +10,9 @@ import os |
1531 | import re |
1532 | import signal |
1533 | |
1534 | -from cloudinit.net import find_fallback_nic, get_devicelist |
1535 | +from cloudinit.net import ( |
1536 | + EphemeralIPv4Network, find_fallback_nic, get_devicelist) |
1537 | +from cloudinit.net.network_state import mask_and_ipv4_to_bcast_addr as bcip |
1538 | from cloudinit import temp_utils |
1539 | from cloudinit import util |
1540 | from six import StringIO |
1541 | @@ -29,6 +31,45 @@ class InvalidDHCPLeaseFileError(Exception): |
1542 | pass |
1543 | |
1544 | |
1545 | +class NoDHCPLeaseError(Exception): |
1546 | + """Raised when unable to get a DHCP lease.""" |
1547 | + pass |
1548 | + |
1549 | + |
1550 | +class EphemeralDHCPv4(object): |
1551 | + def __init__(self, iface=None): |
1552 | + self.iface = iface |
1553 | + self._ephipv4 = None |
1554 | + |
1555 | + def __enter__(self): |
1556 | + try: |
1557 | + leases = maybe_perform_dhcp_discovery(self.iface) |
1558 | + except InvalidDHCPLeaseFileError: |
1559 | + raise NoDHCPLeaseError() |
1560 | + if not leases: |
1561 | + raise NoDHCPLeaseError() |
1562 | + lease = leases[-1] |
1563 | + LOG.debug("Received dhcp lease on %s for %s/%s", |
1564 | + lease['interface'], lease['fixed-address'], |
1565 | + lease['subnet-mask']) |
1566 | + nmap = {'interface': 'interface', 'ip': 'fixed-address', |
1567 | + 'prefix_or_mask': 'subnet-mask', |
1568 | + 'broadcast': 'broadcast-address', |
1569 | + 'router': 'routers'} |
1570 | + kwargs = dict([(k, lease.get(v)) for k, v in nmap.items()]) |
1571 | + if not kwargs['broadcast']: |
1572 | + kwargs['broadcast'] = bcip(kwargs['prefix_or_mask'], kwargs['ip']) |
1573 | + ephipv4 = EphemeralIPv4Network(**kwargs) |
1574 | + ephipv4.__enter__() |
1575 | + self._ephipv4 = ephipv4 |
1576 | + return lease |
1577 | + |
1578 | + def __exit__(self, excp_type, excp_value, excp_traceback): |
1579 | + if not self._ephipv4: |
1580 | + return |
1581 | + self._ephipv4.__exit__(excp_type, excp_value, excp_traceback) |
1582 | + |
1583 | + |
1584 | def maybe_perform_dhcp_discovery(nic=None): |
1585 | """Perform dhcp discovery if nic valid and dhclient command exists. |
1586 | |
1587 | diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py |
1588 | index e9e2cf4..fe667d8 100644 |
1589 | --- a/cloudinit/net/network_state.py |
1590 | +++ b/cloudinit/net/network_state.py |
1591 | @@ -474,8 +474,9 @@ class NetworkStateInterpreter(object): |
1592 | elif bridge_stp in ['off', '0', 0]: |
1593 | bridge_stp = False |
1594 | else: |
1595 | - raise ValueError("Cannot convert bridge_stp value" |
1596 | - "(%s) to boolean", bridge_stp) |
1597 | + raise ValueError( |
1598 | + 'Cannot convert bridge_stp value ({stp}) to' |
1599 | + ' boolean'.format(stp=bridge_stp)) |
1600 | iface.update({'bridge_stp': bridge_stp}) |
1601 | |
1602 | interfaces.update({iface['name']: iface}) |
1603 | @@ -692,7 +693,8 @@ class NetworkStateInterpreter(object): |
1604 | elif cmd_type == "bond": |
1605 | self.handle_bond(v1_cmd) |
1606 | else: |
1607 | - raise ValueError('Unknown command type: %s', cmd_type) |
1608 | + raise ValueError('Unknown command type: {cmd_type}'.format( |
1609 | + cmd_type=cmd_type)) |
1610 | |
1611 | def _v2_to_v1_ipcfg(self, cfg): |
1612 | """Common ipconfig extraction from v2 to v1 subnets array.""" |
1613 | @@ -959,4 +961,16 @@ def mask_to_net_prefix(mask): |
1614 | return ipv4_mask_to_net_prefix(mask) |
1615 | |
1616 | |
1617 | +def mask_and_ipv4_to_bcast_addr(mask, ip): |
1618 | + """Calculate the broadcast address from the subnet mask and ip addr. |
1619 | + |
1620 | + Supports ipv4 only.""" |
1621 | + ip_bin = int(''.join([bin(int(x) + 256)[3:] for x in ip.split('.')]), 2) |
1622 | + mask_dec = ipv4_mask_to_net_prefix(mask) |
1623 | + bcast_bin = ip_bin | (2**(32 - mask_dec) - 1) |
1624 | + bcast_str = '.'.join([str(bcast_bin >> (i << 3) & 0xFF) |
1625 | + for i in range(4)[::-1]]) |
1626 | + return bcast_str |
1627 | + |
1628 | + |
1629 | # vi: ts=4 expandtab |
1630 | diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py |
1631 | index 43a7e42..7ac8288 100644 |
1632 | --- a/cloudinit/sources/DataSourceAliYun.py |
1633 | +++ b/cloudinit/sources/DataSourceAliYun.py |
1634 | @@ -11,6 +11,7 @@ ALIYUN_PRODUCT = "Alibaba Cloud ECS" |
1635 | |
1636 | class DataSourceAliYun(EC2.DataSourceEc2): |
1637 | |
1638 | + dsname = 'AliYun' |
1639 | metadata_urls = ['http://100.100.100.200'] |
1640 | |
1641 | # The minimum supported metadata_version from the ec2 metadata apis |
1642 | diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py |
1643 | index c78ad9e..e1d0055 100644 |
1644 | --- a/cloudinit/sources/DataSourceAltCloud.py |
1645 | +++ b/cloudinit/sources/DataSourceAltCloud.py |
1646 | @@ -74,6 +74,9 @@ def read_user_data_callback(mount_dir): |
1647 | |
1648 | |
1649 | class DataSourceAltCloud(sources.DataSource): |
1650 | + |
1651 | + dsname = 'AltCloud' |
1652 | + |
1653 | def __init__(self, sys_cfg, distro, paths): |
1654 | sources.DataSource.__init__(self, sys_cfg, distro, paths) |
1655 | self.seed = None |
1656 | @@ -112,7 +115,7 @@ class DataSourceAltCloud(sources.DataSource): |
1657 | |
1658 | return 'UNKNOWN' |
1659 | |
1660 | - def get_data(self): |
1661 | + def _get_data(self): |
1662 | ''' |
1663 | Description: |
1664 | User Data is passed to the launching instance which |
1665 | @@ -142,7 +145,7 @@ class DataSourceAltCloud(sources.DataSource): |
1666 | else: |
1667 | cloud_type = self.get_cloud_type() |
1668 | |
1669 | - LOG.debug('cloud_type: ' + str(cloud_type)) |
1670 | + LOG.debug('cloud_type: %s', str(cloud_type)) |
1671 | |
1672 | if 'RHEV' in cloud_type: |
1673 | if self.user_data_rhevm(): |
1674 | diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py |
1675 | index 14367e9..4bcbf3a 100644 |
1676 | --- a/cloudinit/sources/DataSourceAzure.py |
1677 | +++ b/cloudinit/sources/DataSourceAzure.py |
1678 | @@ -11,13 +11,16 @@ from functools import partial |
1679 | import os |
1680 | import os.path |
1681 | import re |
1682 | +from time import time |
1683 | from xml.dom import minidom |
1684 | import xml.etree.ElementTree as ET |
1685 | |
1686 | from cloudinit import log as logging |
1687 | from cloudinit import net |
1688 | +from cloudinit.net.dhcp import EphemeralDHCPv4 |
1689 | from cloudinit import sources |
1690 | from cloudinit.sources.helpers.azure import get_metadata_from_fabric |
1691 | +from cloudinit.url_helper import readurl, wait_for_url, UrlError |
1692 | from cloudinit import util |
1693 | |
1694 | LOG = logging.getLogger(__name__) |
1695 | @@ -26,10 +29,16 @@ DS_NAME = 'Azure' |
1696 | DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"} |
1697 | AGENT_START = ['service', 'walinuxagent', 'start'] |
1698 | AGENT_START_BUILTIN = "__builtin__" |
1699 | -BOUNCE_COMMAND = [ |
1700 | +BOUNCE_COMMAND_IFUP = [ |
1701 | 'sh', '-xc', |
1702 | "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x" |
1703 | ] |
1704 | +BOUNCE_COMMAND_FREEBSD = [ |
1705 | + 'sh', '-xc', |
1706 | + ("i=$interface; x=0; ifconfig down $i || x=$?; " |
1707 | + "ifconfig up $i || x=$?; exit $x") |
1708 | +] |
1709 | + |
1710 | # azure systems will always have a resource disk, and 66-azure-ephemeral.rules |
1711 | # ensures that it gets linked to this path. |
1712 | RESOURCE_DISK_PATH = '/dev/disk/cloud/azure_resource' |
1713 | @@ -38,6 +47,9 @@ LEASE_FILE = '/var/lib/dhcp/dhclient.eth0.leases' |
1714 | DEFAULT_FS = 'ext4' |
1715 | # DMI chassis-asset-tag is set static for all azure instances |
1716 | AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77' |
1717 | +REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds" |
1718 | +IMDS_URL = "http://169.254.169.254/metadata/reprovisiondata" |
1719 | +IMDS_RETRIES = 5 |
1720 | |
1721 | |
1722 | def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid): |
1723 | @@ -177,11 +189,6 @@ if util.is_FreeBSD(): |
1724 | RESOURCE_DISK_PATH = "/dev/" + res_disk |
1725 | else: |
1726 | LOG.debug("resource disk is None") |
1727 | - BOUNCE_COMMAND = [ |
1728 | - 'sh', '-xc', |
1729 | - ("i=$interface; x=0; ifconfig down $i || x=$?; " |
1730 | - "ifconfig up $i || x=$?; exit $x") |
1731 | - ] |
1732 | |
1733 | BUILTIN_DS_CONFIG = { |
1734 | 'agent_command': AGENT_START_BUILTIN, |
1735 | @@ -190,7 +197,7 @@ BUILTIN_DS_CONFIG = { |
1736 | 'hostname_bounce': { |
1737 | 'interface': DEFAULT_PRIMARY_NIC, |
1738 | 'policy': True, |
1739 | - 'command': BOUNCE_COMMAND, |
1740 | + 'command': 'builtin', |
1741 | 'hostname_command': 'hostname', |
1742 | }, |
1743 | 'disk_aliases': {'ephemeral0': RESOURCE_DISK_PATH}, |
1744 | @@ -246,6 +253,8 @@ def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'): |
1745 | |
1746 | |
1747 | class DataSourceAzure(sources.DataSource): |
1748 | + |
1749 | + dsname = 'Azure' |
1750 | _negotiated = False |
1751 | |
1752 | def __init__(self, sys_cfg, distro, paths): |
1753 | @@ -273,19 +282,20 @@ class DataSourceAzure(sources.DataSource): |
1754 | |
1755 | with temporary_hostname(azure_hostname, self.ds_cfg, |
1756 | hostname_command=hostname_command) \ |
1757 | - as previous_hostname: |
1758 | - if (previous_hostname is not None and |
1759 | + as previous_hn: |
1760 | + if (previous_hn is not None and |
1761 | util.is_true(self.ds_cfg.get('set_hostname'))): |
1762 | cfg = self.ds_cfg['hostname_bounce'] |
1763 | |
1764 | # "Bouncing" the network |
1765 | try: |
1766 | - perform_hostname_bounce(hostname=azure_hostname, |
1767 | - cfg=cfg, |
1768 | - prev_hostname=previous_hostname) |
1769 | + return perform_hostname_bounce(hostname=azure_hostname, |
1770 | + cfg=cfg, |
1771 | + prev_hostname=previous_hn) |
1772 | except Exception as e: |
1773 | LOG.warning("Failed publishing hostname: %s", e) |
1774 | util.logexc(LOG, "handling set_hostname failed") |
1775 | + return False |
1776 | |
1777 | def get_metadata_from_agent(self): |
1778 | temp_hostname = self.metadata.get('local-hostname') |
1779 | @@ -330,7 +340,7 @@ class DataSourceAzure(sources.DataSource): |
1780 | metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files) |
1781 | return metadata |
1782 | |
1783 | - def get_data(self): |
1784 | + def _get_data(self): |
1785 | # azure removes/ejects the cdrom containing the ovf-env.xml |
1786 | # file on reboot. So, in order to successfully reboot we |
1787 | # need to look in the datadir and consider that valid |
1788 | @@ -342,15 +352,20 @@ class DataSourceAzure(sources.DataSource): |
1789 | ddir = self.ds_cfg['data_dir'] |
1790 | |
1791 | candidates = [self.seed_dir] |
1792 | + if os.path.isfile(REPROVISION_MARKER_FILE): |
1793 | + candidates.insert(0, "IMDS") |
1794 | candidates.extend(list_possible_azure_ds_devs()) |
1795 | if ddir: |
1796 | candidates.append(ddir) |
1797 | |
1798 | found = None |
1799 | - |
1800 | + reprovision = False |
1801 | for cdev in candidates: |
1802 | try: |
1803 | - if cdev.startswith("/dev/"): |
1804 | + if cdev == "IMDS": |
1805 | + ret = None |
1806 | + reprovision = True |
1807 | + elif cdev.startswith("/dev/"): |
1808 | if util.is_FreeBSD(): |
1809 | ret = util.mount_cb(cdev, load_azure_ds_dir, |
1810 | mtype="udf", sync=False) |
1811 | @@ -367,6 +382,8 @@ class DataSourceAzure(sources.DataSource): |
1812 | LOG.warning("%s was not mountable", cdev) |
1813 | continue |
1814 | |
1815 | + if reprovision or self._should_reprovision(ret): |
1816 | + ret = self._reprovision() |
1817 | (md, self.userdata_raw, cfg, files) = ret |
1818 | self.seed = cdev |
1819 | self.metadata = util.mergemanydict([md, DEFAULT_METADATA]) |
1820 | @@ -425,6 +442,83 @@ class DataSourceAzure(sources.DataSource): |
1821 | LOG.debug("negotiating already done for %s", |
1822 | self.get_instance_id()) |
1823 | |
1824 | + def _poll_imds(self, report_ready=True): |
1825 | + """Poll IMDS for the new provisioning data until we get a valid |
1826 | + response. Then return the returned JSON object.""" |
1827 | + url = IMDS_URL + "?api-version=2017-04-02" |
1828 | + headers = {"Metadata": "true"} |
1829 | + LOG.debug("Start polling IMDS") |
1830 | + |
1831 | + def sleep_cb(response, loop_n): |
1832 | + return 1 |
1833 | + |
1834 | + def exception_cb(msg, exception): |
1835 | + if isinstance(exception, UrlError) and exception.code == 404: |
1836 | + return |
1837 | + LOG.warning("Exception during polling. Will try DHCP.", |
1838 | + exc_info=True) |
1839 | + |
1840 | + # If we get an exception while trying to call IMDS, we |
1841 | + # call DHCP and setup the ephemeral network to acquire the new IP. |
1842 | + raise exception |
1843 | + |
1844 | + need_report = report_ready |
1845 | + for i in range(IMDS_RETRIES): |
1846 | + try: |
1847 | + with EphemeralDHCPv4() as lease: |
1848 | + if need_report: |
1849 | + self._report_ready(lease=lease) |
1850 | + need_report = False |
1851 | + wait_for_url([url], max_wait=None, timeout=60, |
1852 | + status_cb=LOG.info, |
1853 | + headers_cb=lambda url: headers, sleep_time=1, |
1854 | + exception_cb=exception_cb, |
1855 | + sleep_time_cb=sleep_cb) |
1856 | + return str(readurl(url, headers=headers)) |
1857 | + except Exception: |
1858 | + LOG.debug("Exception during polling-retrying dhcp" + |
1859 | + " %d more time(s).", (IMDS_RETRIES - i), |
1860 | + exc_info=True) |
1861 | + |
1862 | + def _report_ready(self, lease): |
1863 | + """Tells the fabric provisioning has completed |
1864 | + before we go into our polling loop.""" |
1865 | + try: |
1866 | + get_metadata_from_fabric(None, lease['unknown-245']) |
1867 | + except Exception as exc: |
1868 | + LOG.warning( |
1869 | + "Error communicating with Azure fabric; You may experience." |
1870 | + "connectivity issues.", exc_info=True) |
1871 | + |
1872 | + def _should_reprovision(self, ret): |
1873 | + """Whether or not we should poll IMDS for reprovisioning data. |
1874 | + Also sets a marker file to poll IMDS. |
1875 | + |
1876 | + The marker file is used for the following scenario: the VM boots into |
1877 | + this polling loop, which we expect to be proceeding infinitely until |
1878 | + the VM is picked. If for whatever reason the platform moves us to a |
1879 | + new host (for instance a hardware issue), we need to keep polling. |
1880 | + However, since the VM reports ready to the Fabric, we will not attach |
1881 | + the ISO, thus cloud-init needs to have a way of knowing that it should |
1882 | + jump back into the polling loop in order to retrieve the ovf_env.""" |
1883 | + if not ret: |
1884 | + return False |
1885 | + (md, self.userdata_raw, cfg, files) = ret |
1886 | + path = REPROVISION_MARKER_FILE |
1887 | + if (cfg.get('PreprovisionedVm') is True or |
1888 | + os.path.isfile(path)): |
1889 | + if not os.path.isfile(path): |
1890 | + LOG.info("Creating a marker file to poll imds") |
1891 | + util.write_file(path, "%s: %s\n" % (os.getpid(), time())) |
1892 | + return True |
1893 | + return False |
1894 | + |
1895 | + def _reprovision(self): |
1896 | + """Initiate the reprovisioning workflow.""" |
1897 | + contents = self._poll_imds() |
1898 | + md, ud, cfg = read_azure_ovf(contents) |
1899 | + return (md, ud, cfg, {'ovf-env.xml': contents}) |
1900 | + |
1901 | def _negotiate(self): |
1902 | """Negotiate with fabric and return data from it. |
1903 | |
1904 | @@ -450,7 +544,7 @@ class DataSourceAzure(sources.DataSource): |
1905 | "Error communicating with Azure fabric; You may experience." |
1906 | "connectivity issues.", exc_info=True) |
1907 | return False |
1908 | - |
1909 | + util.del_file(REPROVISION_MARKER_FILE) |
1910 | return fabric_data |
1911 | |
1912 | def activate(self, cfg, is_new_instance): |
1913 | @@ -580,18 +674,19 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, |
1914 | if os.path.exists(sempath): |
1915 | try: |
1916 | os.unlink(sempath) |
1917 | - LOG.debug(bmsg + " removed.") |
1918 | + LOG.debug('%s removed.', bmsg) |
1919 | except Exception as e: |
1920 | # python3 throws FileNotFoundError, python2 throws OSError |
1921 | - LOG.warning(bmsg + ": remove failed! (%s)", e) |
1922 | + LOG.warning('%s: remove failed! (%s)', bmsg, e) |
1923 | else: |
1924 | - LOG.debug(bmsg + " did not exist.") |
1925 | + LOG.debug('%s did not exist.', bmsg) |
1926 | return |
1927 | |
1928 | |
1929 | def perform_hostname_bounce(hostname, cfg, prev_hostname): |
1930 | # set the hostname to 'hostname' if it is not already set to that. |
1931 | # then, if policy is not off, bounce the interface using command |
1932 | + # Returns True if the network was bounced, False otherwise. |
1933 | command = cfg['command'] |
1934 | interface = cfg['interface'] |
1935 | policy = cfg['policy'] |
1936 | @@ -604,8 +699,15 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname): |
1937 | env['old_hostname'] = prev_hostname |
1938 | |
1939 | if command == "builtin": |
1940 | - command = BOUNCE_COMMAND |
1941 | - |
1942 | + if util.is_FreeBSD(): |
1943 | + command = BOUNCE_COMMAND_FREEBSD |
1944 | + elif util.which('ifup'): |
1945 | + command = BOUNCE_COMMAND_IFUP |
1946 | + else: |
1947 | + LOG.debug( |
1948 | + "Skipping network bounce: ifupdown utils aren't present.") |
1949 | + # Don't bounce as networkd handles hostname DDNS updates |
1950 | + return False |
1951 | LOG.debug("pubhname: publishing hostname [%s]", msg) |
1952 | shell = not isinstance(command, (list, tuple)) |
1953 | # capture=False, see comments in bug 1202758 and bug 1206164. |
1954 | @@ -613,6 +715,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname): |
1955 | get_uptime=True, func=util.subp, |
1956 | kwargs={'args': command, 'shell': shell, 'capture': False, |
1957 | 'env': env}) |
1958 | + return True |
1959 | |
1960 | |
1961 | def crtfile_to_pubkey(fname, data=None): |
1962 | @@ -829,9 +932,35 @@ def read_azure_ovf(contents): |
1963 | if 'ssh_pwauth' not in cfg and password: |
1964 | cfg['ssh_pwauth'] = True |
1965 | |
1966 | + cfg['PreprovisionedVm'] = _extract_preprovisioned_vm_setting(dom) |
1967 | + |
1968 | return (md, ud, cfg) |
1969 | |
1970 | |
1971 | +def _extract_preprovisioned_vm_setting(dom): |
1972 | + """Read the preprovision flag from the ovf. It should not |
1973 | + exist unless true.""" |
1974 | + platform_settings_section = find_child( |
1975 | + dom.documentElement, |
1976 | + lambda n: n.localName == "PlatformSettingsSection") |
1977 | + if not platform_settings_section or len(platform_settings_section) == 0: |
1978 | + LOG.debug("PlatformSettingsSection not found") |
1979 | + return False |
1980 | + platform_settings = find_child( |
1981 | + platform_settings_section[0], |
1982 | + lambda n: n.localName == "PlatformSettings") |
1983 | + if not platform_settings or len(platform_settings) == 0: |
1984 | + LOG.debug("PlatformSettings not found") |
1985 | + return False |
1986 | + preprovisionedVm = find_child( |
1987 | + platform_settings[0], |
1988 | + lambda n: n.localName == "PreprovisionedVm") |
1989 | + if not preprovisionedVm or len(preprovisionedVm) == 0: |
1990 | + LOG.debug("PreprovisionedVm not found") |
1991 | + return False |
1992 | + return util.translate_bool(preprovisionedVm[0].firstChild.nodeValue) |
1993 | + |
1994 | + |
1995 | def encrypt_pass(password, salt_id="$6$"): |
1996 | return crypt.crypt(password, salt_id + util.rand_str(strlen=16)) |
1997 | |
1998 | diff --git a/cloudinit/sources/DataSourceBigstep.py b/cloudinit/sources/DataSourceBigstep.py |
1999 | index d7fcd45..699a85b 100644 |
2000 | --- a/cloudinit/sources/DataSourceBigstep.py |
2001 | +++ b/cloudinit/sources/DataSourceBigstep.py |
2002 | @@ -16,13 +16,16 @@ LOG = logging.getLogger(__name__) |
2003 | |
2004 | |
2005 | class DataSourceBigstep(sources.DataSource): |
2006 | + |
2007 | + dsname = 'Bigstep' |
2008 | + |
2009 | def __init__(self, sys_cfg, distro, paths): |
2010 | sources.DataSource.__init__(self, sys_cfg, distro, paths) |
2011 | self.metadata = {} |
2012 | self.vendordata_raw = "" |
2013 | self.userdata_raw = "" |
2014 | |
2015 | - def get_data(self, apply_filter=False): |
2016 | + def _get_data(self, apply_filter=False): |
2017 | url = get_url_from_file() |
2018 | if url is None: |
2019 | return False |
2020 | diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py |
2021 | index 19df16b..4eaad47 100644 |
2022 | --- a/cloudinit/sources/DataSourceCloudSigma.py |
2023 | +++ b/cloudinit/sources/DataSourceCloudSigma.py |
2024 | @@ -23,6 +23,9 @@ class DataSourceCloudSigma(sources.DataSource): |
2025 | For more information about CloudSigma's Server Context: |
2026 | http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html |
2027 | """ |
2028 | + |
2029 | + dsname = 'CloudSigma' |
2030 | + |
2031 | def __init__(self, sys_cfg, distro, paths): |
2032 | self.cepko = Cepko() |
2033 | self.ssh_public_key = '' |
2034 | @@ -46,7 +49,7 @@ class DataSourceCloudSigma(sources.DataSource): |
2035 | LOG.warning("failed to query dmi data for system product name") |
2036 | return False |
2037 | |
2038 | - def get_data(self): |
2039 | + def _get_data(self): |
2040 | """ |
2041 | Metadata is the whole server context and /meta/cloud-config is used |
2042 | as userdata. |
2043 | diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py |
2044 | index 9dc473f..0df545f 100644 |
2045 | --- a/cloudinit/sources/DataSourceCloudStack.py |
2046 | +++ b/cloudinit/sources/DataSourceCloudStack.py |
2047 | @@ -65,6 +65,9 @@ class CloudStackPasswordServerClient(object): |
2048 | |
2049 | |
2050 | class DataSourceCloudStack(sources.DataSource): |
2051 | + |
2052 | + dsname = 'CloudStack' |
2053 | + |
2054 | def __init__(self, sys_cfg, distro, paths): |
2055 | sources.DataSource.__init__(self, sys_cfg, distro, paths) |
2056 | self.seed_dir = os.path.join(paths.seed_dir, 'cs') |
2057 | @@ -117,7 +120,7 @@ class DataSourceCloudStack(sources.DataSource): |
2058 | def get_config_obj(self): |
2059 | return self.cfg |
2060 | |
2061 | - def get_data(self): |
2062 | + def _get_data(self): |
2063 | seed_ret = {} |
2064 | if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")): |
2065 | self.userdata_raw = seed_ret['user-data'] |
2066 | diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py |
2067 | index ef374f3..b8db626 100644 |
2068 | --- a/cloudinit/sources/DataSourceConfigDrive.py |
2069 | +++ b/cloudinit/sources/DataSourceConfigDrive.py |
2070 | @@ -25,13 +25,16 @@ DEFAULT_METADATA = { |
2071 | "instance-id": DEFAULT_IID, |
2072 | } |
2073 | FS_TYPES = ('vfat', 'iso9660') |
2074 | -LABEL_TYPES = ('config-2',) |
2075 | +LABEL_TYPES = ('config-2', 'CONFIG-2') |
2076 | POSSIBLE_MOUNTS = ('sr', 'cd') |
2077 | OPTICAL_DEVICES = tuple(('/dev/%s%s' % (z, i) for z in POSSIBLE_MOUNTS |
2078 | for i in range(0, 2))) |
2079 | |
2080 | |
2081 | class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): |
2082 | + |
2083 | + dsname = 'ConfigDrive' |
2084 | + |
2085 | def __init__(self, sys_cfg, distro, paths): |
2086 | super(DataSourceConfigDrive, self).__init__(sys_cfg, distro, paths) |
2087 | self.source = None |
2088 | @@ -50,7 +53,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): |
2089 | mstr += "[source=%s]" % (self.source) |
2090 | return mstr |
2091 | |
2092 | - def get_data(self): |
2093 | + def _get_data(self): |
2094 | found = None |
2095 | md = {} |
2096 | results = {} |
2097 | @@ -221,7 +224,7 @@ def find_candidate_devs(probe_optical=True): |
2098 | config drive v2: |
2099 | Disk should be: |
2100 | * either vfat or iso9660 formated |
2101 | - * labeled with 'config-2' |
2102 | + * labeled with 'config-2' or 'CONFIG-2' |
2103 | """ |
2104 | # query optical drive to get it in blkid cache for 2.6 kernels |
2105 | if probe_optical: |
2106 | diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py |
2107 | index 5e7e66b..e0ef665 100644 |
2108 | --- a/cloudinit/sources/DataSourceDigitalOcean.py |
2109 | +++ b/cloudinit/sources/DataSourceDigitalOcean.py |
2110 | @@ -27,6 +27,9 @@ MD_USE_IPV4LL = True |
2111 | |
2112 | |
2113 | class DataSourceDigitalOcean(sources.DataSource): |
2114 | + |
2115 | + dsname = 'DigitalOcean' |
2116 | + |
2117 | def __init__(self, sys_cfg, distro, paths): |
2118 | sources.DataSource.__init__(self, sys_cfg, distro, paths) |
2119 | self.distro = distro |
2120 | @@ -44,7 +47,7 @@ class DataSourceDigitalOcean(sources.DataSource): |
2121 | def _get_sysinfo(self): |
2122 | return do_helper.read_sysinfo() |
2123 | |
2124 | - def get_data(self): |
2125 | + def _get_data(self): |
2126 | (is_do, droplet_id) = self._get_sysinfo() |
2127 | |
2128 | # only proceed if we know we are on DigitalOcean |
2129 | diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py |
2130 | index 7bbbfb6..e14553b 100644 |
2131 | --- a/cloudinit/sources/DataSourceEc2.py |
2132 | +++ b/cloudinit/sources/DataSourceEc2.py |
2133 | @@ -14,7 +14,7 @@ import time |
2134 | from cloudinit import ec2_utils as ec2 |
2135 | from cloudinit import log as logging |
2136 | from cloudinit import net |
2137 | -from cloudinit.net import dhcp |
2138 | +from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError |
2139 | from cloudinit import sources |
2140 | from cloudinit import url_helper as uhelp |
2141 | from cloudinit import util |
2142 | @@ -31,6 +31,7 @@ _unset = "_unset" |
2143 | |
2144 | |
2145 | class Platforms(object): |
2146 | + # TODO Rename and move to cloudinit.cloud.CloudNames |
2147 | ALIYUN = "AliYun" |
2148 | AWS = "AWS" |
2149 | BRIGHTBOX = "Brightbox" |
2150 | @@ -45,6 +46,7 @@ class Platforms(object): |
2151 | |
2152 | class DataSourceEc2(sources.DataSource): |
2153 | |
2154 | + dsname = 'Ec2' |
2155 | # Default metadata urls that will be used if none are provided |
2156 | # They will be checked for 'resolveability' and some of the |
2157 | # following may be discarded if they do not resolve |
2158 | @@ -68,11 +70,15 @@ class DataSourceEc2(sources.DataSource): |
2159 | _fallback_interface = None |
2160 | |
2161 | def __init__(self, sys_cfg, distro, paths): |
2162 | - sources.DataSource.__init__(self, sys_cfg, distro, paths) |
2163 | + super(DataSourceEc2, self).__init__(sys_cfg, distro, paths) |
2164 | self.metadata_address = None |
2165 | self.seed_dir = os.path.join(paths.seed_dir, "ec2") |
2166 | |
2167 | - def get_data(self): |
2168 | + def _get_cloud_name(self): |
2169 | + """Return the cloud name as identified during _get_data.""" |
2170 | + return self.cloud_platform |
2171 | + |
2172 | + def _get_data(self): |
2173 | seed_ret = {} |
2174 | if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")): |
2175 | self.userdata_raw = seed_ret['user-data'] |
2176 | @@ -96,22 +102,13 @@ class DataSourceEc2(sources.DataSource): |
2177 | if util.is_FreeBSD(): |
2178 | LOG.debug("FreeBSD doesn't support running dhclient with -sf") |
2179 | return False |
2180 | - dhcp_leases = dhcp.maybe_perform_dhcp_discovery( |
2181 | - self.fallback_interface) |
2182 | - if not dhcp_leases: |
2183 | - # DataSourceEc2Local failed in init-local stage. DataSourceEc2 |
2184 | - # will still run in init-network stage. |
2185 | + try: |
2186 | + with EphemeralDHCPv4(self.fallback_interface): |
2187 | + return util.log_time( |
2188 | + logfunc=LOG.debug, msg='Crawl of metadata service', |
2189 | + func=self._crawl_metadata) |
2190 | + except NoDHCPLeaseError: |
2191 | return False |
2192 | - dhcp_opts = dhcp_leases[-1] |
2193 | - net_params = {'interface': dhcp_opts.get('interface'), |
2194 | - 'ip': dhcp_opts.get('fixed-address'), |
2195 | - 'prefix_or_mask': dhcp_opts.get('subnet-mask'), |
2196 | - 'broadcast': dhcp_opts.get('broadcast-address'), |
2197 | - 'router': dhcp_opts.get('routers')} |
2198 | - with net.EphemeralIPv4Network(**net_params): |
2199 | - return util.log_time( |
2200 | - logfunc=LOG.debug, msg='Crawl of metadata service', |
2201 | - func=self._crawl_metadata) |
2202 | else: |
2203 | return self._crawl_metadata() |
2204 | |
2205 | @@ -148,7 +145,12 @@ class DataSourceEc2(sources.DataSource): |
2206 | return self.min_metadata_version |
2207 | |
2208 | def get_instance_id(self): |
2209 | - return self.metadata['instance-id'] |
2210 | + if self.cloud_platform == Platforms.AWS: |
2211 | + # Prefer the ID from the instance identity document, but fall back |
2212 | + return self.identity.get( |
2213 | + 'instanceId', self.metadata['instance-id']) |
2214 | + else: |
2215 | + return self.metadata['instance-id'] |
2216 | |
2217 | def _get_url_settings(self): |
2218 | mcfg = self.ds_cfg |
2219 | @@ -262,19 +264,31 @@ class DataSourceEc2(sources.DataSource): |
2220 | @property |
2221 | def availability_zone(self): |
2222 | try: |
2223 | - return self.metadata['placement']['availability-zone'] |
2224 | + if self.cloud_platform == Platforms.AWS: |
2225 | + return self.identity.get( |
2226 | + 'availabilityZone', |
2227 | + self.metadata['placement']['availability-zone']) |
2228 | + else: |
2229 | + return self.metadata['placement']['availability-zone'] |
2230 | except KeyError: |
2231 | return None |
2232 | |
2233 | @property |
2234 | def region(self): |
2235 | - az = self.availability_zone |
2236 | - if az is not None: |
2237 | - return az[:-1] |
2238 | + if self.cloud_platform == Platforms.AWS: |
2239 | + region = self.identity.get('region') |
2240 | + # Fallback to trimming the availability zone if region is missing |
2241 | + if self.availability_zone and not region: |
2242 | + region = self.availability_zone[:-1] |
2243 | + return region |
2244 | + else: |
2245 | + az = self.availability_zone |
2246 | + if az is not None: |
2247 | + return az[:-1] |
2248 | return None |
2249 | |
2250 | @property |
2251 | - def cloud_platform(self): |
2252 | + def cloud_platform(self): # TODO rename cloud_name |
2253 | if self._cloud_platform is None: |
2254 | self._cloud_platform = identify_platform() |
2255 | return self._cloud_platform |
2256 | @@ -351,6 +365,9 @@ class DataSourceEc2(sources.DataSource): |
2257 | api_version, self.metadata_address) |
2258 | self.metadata = ec2.get_instance_metadata( |
2259 | api_version, self.metadata_address) |
2260 | + if self.cloud_platform == Platforms.AWS: |
2261 | + self.identity = ec2.get_instance_identity( |
2262 | + api_version, self.metadata_address).get('document', {}) |
2263 | except Exception: |
2264 | util.logexc( |
2265 | LOG, "Failed reading from metadata address %s", |
2266 | diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py |
2267 | index ccae420..2da34a9 100644 |
2268 | --- a/cloudinit/sources/DataSourceGCE.py |
2269 | +++ b/cloudinit/sources/DataSourceGCE.py |
2270 | @@ -2,8 +2,12 @@ |
2271 | # |
2272 | # This file is part of cloud-init. See LICENSE file for license information. |
2273 | |
2274 | +import datetime |
2275 | +import json |
2276 | + |
2277 | from base64 import b64decode |
2278 | |
2279 | +from cloudinit.distros import ug_util |
2280 | from cloudinit import log as logging |
2281 | from cloudinit import sources |
2282 | from cloudinit import url_helper |
2283 | @@ -17,16 +21,18 @@ REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname') |
2284 | |
2285 | |
2286 | class GoogleMetadataFetcher(object): |
2287 | - headers = {'X-Google-Metadata-Request': 'True'} |
2288 | + headers = {'Metadata-Flavor': 'Google'} |
2289 | |
2290 | def __init__(self, metadata_address): |
2291 | self.metadata_address = metadata_address |
2292 | |
2293 | - def get_value(self, path, is_text): |
2294 | + def get_value(self, path, is_text, is_recursive=False): |
2295 | value = None |
2296 | try: |
2297 | - resp = url_helper.readurl(url=self.metadata_address + path, |
2298 | - headers=self.headers) |
2299 | + url = self.metadata_address + path |
2300 | + if is_recursive: |
2301 | + url += '/?recursive=True' |
2302 | + resp = url_helper.readurl(url=url, headers=self.headers) |
2303 | except url_helper.UrlError as exc: |
2304 | msg = "url %s raised exception %s" |
2305 | LOG.debug(msg, path, exc) |
2306 | @@ -35,22 +41,29 @@ class GoogleMetadataFetcher(object): |
2307 | if is_text: |
2308 | value = util.decode_binary(resp.contents) |
2309 | else: |
2310 | - value = resp.contents |
2311 | + value = resp.contents.decode('utf-8') |
2312 | else: |
2313 | LOG.debug("url %s returned code %s", path, resp.code) |
2314 | return value |
2315 | |
2316 | |
2317 | class DataSourceGCE(sources.DataSource): |
2318 | + |
2319 | + dsname = 'GCE' |
2320 | + |
2321 | def __init__(self, sys_cfg, distro, paths): |
2322 | sources.DataSource.__init__(self, sys_cfg, distro, paths) |
2323 | + self.default_user = None |
2324 | + if distro: |
2325 | + (users, _groups) = ug_util.normalize_users_groups(sys_cfg, distro) |
2326 | + (self.default_user, _user_config) = ug_util.extract_default(users) |
2327 | self.metadata = dict() |
2328 | self.ds_cfg = util.mergemanydict([ |
2329 | util.get_cfg_by_path(sys_cfg, ["datasource", "GCE"], {}), |
2330 | BUILTIN_DS_CONFIG]) |
2331 | self.metadata_address = self.ds_cfg['metadata_url'] |
2332 | |
2333 | - def get_data(self): |
2334 | + def _get_data(self): |
2335 | ret = util.log_time( |
2336 | LOG.debug, 'Crawl of GCE metadata service', |
2337 | read_md, kwargs={'address': self.metadata_address}) |
2338 | @@ -67,17 +80,18 @@ class DataSourceGCE(sources.DataSource): |
2339 | |
2340 | @property |
2341 | def launch_index(self): |
2342 | - # GCE does not provide lauch_index property |
2343 | + # GCE does not provide lauch_index property. |
2344 | return None |
2345 | |
2346 | def get_instance_id(self): |
2347 | return self.metadata['instance-id'] |
2348 | |
2349 | def get_public_ssh_keys(self): |
2350 | - return self.metadata['public-keys'] |
2351 | + public_keys_data = self.metadata['public-keys-data'] |
2352 | + return _parse_public_keys(public_keys_data, self.default_user) |
2353 | |
2354 | def get_hostname(self, fqdn=False, resolve_ip=False): |
2355 | - # GCE has long FDQN's and has asked for short hostnames |
2356 | + # GCE has long FDQN's and has asked for short hostnames. |
2357 | return self.metadata['local-hostname'].split('.')[0] |
2358 | |
2359 | @property |
2360 | @@ -89,15 +103,58 @@ class DataSourceGCE(sources.DataSource): |
2361 | return self.availability_zone.rsplit('-', 1)[0] |
2362 | |
2363 | |
2364 | -def _trim_key(public_key): |
2365 | - # GCE takes sshKeys attribute in the format of '<user>:<public_key>' |
2366 | - # so we have to trim each key to remove the username part |
2367 | +def _has_expired(public_key): |
2368 | + # Check whether an SSH key is expired. Public key input is a single SSH |
2369 | + # public key in the GCE specific key format documented here: |
2370 | + # https://cloud.google.com/compute/docs/instances/adding-removing-ssh-keys#sshkeyformat |
2371 | + try: |
2372 | + # Check for the Google-specific schema identifier. |
2373 | + schema, json_str = public_key.split(None, 3)[2:] |
2374 | + except (ValueError, AttributeError): |
2375 | + return False |
2376 | + |
2377 | + # Do not expire keys if they do not have the expected schema identifier. |
2378 | + if schema != 'google-ssh': |
2379 | + return False |
2380 | + |
2381 | + try: |
2382 | + json_obj = json.loads(json_str) |
2383 | + except ValueError: |
2384 | + return False |
2385 | + |
2386 | + # Do not expire keys if there is no expriation timestamp. |
2387 | + if 'expireOn' not in json_obj: |
2388 | + return False |
2389 | + |
2390 | + expire_str = json_obj['expireOn'] |
2391 | + format_str = '%Y-%m-%dT%H:%M:%S+0000' |
2392 | try: |
2393 | - index = public_key.index(':') |
2394 | - if index > 0: |
2395 | - return public_key[(index + 1):] |
2396 | - except Exception: |
2397 | - return public_key |
2398 | + expire_time = datetime.datetime.strptime(expire_str, format_str) |
2399 | + except ValueError: |
2400 | + return False |
2401 | + |
2402 | + # Expire the key if and only if we have exceeded the expiration timestamp. |
2403 | + return datetime.datetime.utcnow() > expire_time |
2404 | + |
2405 | + |
2406 | +def _parse_public_keys(public_keys_data, default_user=None): |
2407 | + # Parse the SSH key data for the default user account. Public keys input is |
2408 | + # a list containing SSH public keys in the GCE specific key format |
2409 | + # documented here: |
2410 | + # https://cloud.google.com/compute/docs/instances/adding-removing-ssh-keys#sshkeyformat |
2411 | + public_keys = [] |
2412 | + if not public_keys_data: |
2413 | + return public_keys |
2414 | + for public_key in public_keys_data: |
2415 | + if not public_key or not all(ord(c) < 128 for c in public_key): |
2416 | + continue |
2417 | + split_public_key = public_key.split(':', 1) |
2418 | + if len(split_public_key) != 2: |
2419 | + continue |
2420 | + user, key = split_public_key |
2421 | + if user in ('cloudinit', default_user) and not _has_expired(key): |
2422 | + public_keys.append(key) |
2423 | + return public_keys |
2424 | |
2425 | |
2426 | def read_md(address=None, platform_check=True): |
2427 | @@ -113,31 +170,28 @@ def read_md(address=None, platform_check=True): |
2428 | ret['reason'] = "Not running on GCE." |
2429 | return ret |
2430 | |
2431 | - # if we cannot resolve the metadata server, then no point in trying |
2432 | + # If we cannot resolve the metadata server, then no point in trying. |
2433 | if not util.is_resolvable_url(address): |
2434 | LOG.debug("%s is not resolvable", address) |
2435 | ret['reason'] = 'address "%s" is not resolvable' % address |
2436 | return ret |
2437 | |
2438 | - # url_map: (our-key, path, required, is_text) |
2439 | + # url_map: (our-key, path, required, is_text, is_recursive) |
2440 | url_map = [ |
2441 | - ('instance-id', ('instance/id',), True, True), |
2442 | - ('availability-zone', ('instance/zone',), True, True), |
2443 | - ('local-hostname', ('instance/hostname',), True, True), |
2444 | - ('public-keys', ('project/attributes/sshKeys', |
2445 | - 'instance/attributes/ssh-keys'), False, True), |
2446 | - ('user-data', ('instance/attributes/user-data',), False, False), |
2447 | - ('user-data-encoding', ('instance/attributes/user-data-encoding',), |
2448 | - False, True), |
2449 | + ('instance-id', ('instance/id',), True, True, False), |
2450 | + ('availability-zone', ('instance/zone',), True, True, False), |
2451 | + ('local-hostname', ('instance/hostname',), True, True, False), |
2452 | + ('instance-data', ('instance/attributes',), False, False, True), |
2453 | + ('project-data', ('project/attributes',), False, False, True), |
2454 | ] |
2455 | |
2456 | metadata_fetcher = GoogleMetadataFetcher(address) |
2457 | md = {} |
2458 | - # iterate over url_map keys to get metadata items |
2459 | - for (mkey, paths, required, is_text) in url_map: |
2460 | + # Iterate over url_map keys to get metadata items. |
2461 | + for (mkey, paths, required, is_text, is_recursive) in url_map: |
2462 | value = None |
2463 | for path in paths: |
2464 | - new_value = metadata_fetcher.get_value(path, is_text) |
2465 | + new_value = metadata_fetcher.get_value(path, is_text, is_recursive) |
2466 | if new_value is not None: |
2467 | value = new_value |
2468 | if required and value is None: |
2469 | @@ -146,17 +200,23 @@ def read_md(address=None, platform_check=True): |
2470 | return ret |
2471 | md[mkey] = value |
2472 | |
2473 | - if md['public-keys']: |
2474 | - lines = md['public-keys'].splitlines() |
2475 | - md['public-keys'] = [_trim_key(k) for k in lines] |
2476 | + instance_data = json.loads(md['instance-data'] or '{}') |
2477 | + project_data = json.loads(md['project-data'] or '{}') |
2478 | + valid_keys = [instance_data.get('sshKeys'), instance_data.get('ssh-keys')] |
2479 | + block_project = instance_data.get('block-project-ssh-keys', '').lower() |
2480 | + if block_project != 'true' and not instance_data.get('sshKeys'): |
2481 | + valid_keys.append(project_data.get('ssh-keys')) |
2482 | + valid_keys.append(project_data.get('sshKeys')) |
2483 | + public_keys_data = '\n'.join([key for key in valid_keys if key]) |
2484 | + md['public-keys-data'] = public_keys_data.splitlines() |
2485 | |
2486 | if md['availability-zone']: |
2487 | md['availability-zone'] = md['availability-zone'].split('/')[-1] |
2488 | |
2489 | - encoding = md.get('user-data-encoding') |
2490 | + encoding = instance_data.get('user-data-encoding') |
2491 | if encoding: |
2492 | if encoding == 'base64': |
2493 | - md['user-data'] = b64decode(md['user-data']) |
2494 | + md['user-data'] = b64decode(instance_data.get('user-data')) |
2495 | else: |
2496 | LOG.warning('unknown user-data-encoding: %s, ignoring', encoding) |
2497 | |
2498 | @@ -185,20 +245,19 @@ def platform_reports_gce(): |
2499 | return False |
2500 | |
2501 | |
2502 | -# Used to match classes to dependencies |
2503 | +# Used to match classes to dependencies. |
2504 | datasources = [ |
2505 | (DataSourceGCE, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), |
2506 | ] |
2507 | |
2508 | |
2509 | -# Return a list of data sources that match this set of dependencies |
2510 | +# Return a list of data sources that match this set of dependencies. |
2511 | def get_datasource_list(depends): |
2512 | return sources.list_from_depends(depends, datasources) |
2513 | |
2514 | |
2515 | if __name__ == "__main__": |
2516 | import argparse |
2517 | - import json |
2518 | import sys |
2519 | |
2520 | from base64 import b64encode |
2521 | @@ -214,7 +273,7 @@ if __name__ == "__main__": |
2522 | data = read_md(address=args.endpoint, platform_check=args.platform_check) |
2523 | if 'user-data' in data: |
2524 | # user-data is bytes not string like other things. Handle it specially. |
2525 | - # if it can be represented as utf-8 then do so. Otherwise print base64 |
2526 | + # If it can be represented as utf-8 then do so. Otherwise print base64 |
2527 | # encoded value in the key user-data-b64. |
2528 | try: |
2529 | data['user-data'] = data['user-data'].decode() |
2530 | @@ -222,7 +281,7 @@ if __name__ == "__main__": |
2531 | sys.stderr.write("User-data cannot be decoded. " |
2532 | "Writing as base64\n") |
2533 | del data['user-data'] |
2534 | - # b64encode returns a bytes value. decode to get the string. |
2535 | + # b64encode returns a bytes value. Decode to get the string. |
2536 | data['user-data-b64'] = b64encode(data['user-data']).decode() |
2537 | |
2538 | print(json.dumps(data, indent=1, sort_keys=True, separators=(',', ': '))) |
2539 | diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py |
2540 | index 77df5a5..6ac8863 100644 |
2541 | --- a/cloudinit/sources/DataSourceMAAS.py |
2542 | +++ b/cloudinit/sources/DataSourceMAAS.py |
2543 | @@ -8,6 +8,7 @@ |
2544 | |
2545 | from __future__ import print_function |
2546 | |
2547 | +import hashlib |
2548 | import os |
2549 | import time |
2550 | |
2551 | @@ -39,30 +40,28 @@ class DataSourceMAAS(sources.DataSource): |
2552 | hostname |
2553 | vendor-data |
2554 | """ |
2555 | + |
2556 | + dsname = "MAAS" |
2557 | + id_hash = None |
2558 | + _oauth_helper = None |
2559 | + |
2560 | def __init__(self, sys_cfg, distro, paths): |
2561 | sources.DataSource.__init__(self, sys_cfg, distro, paths) |
2562 | self.base_url = None |
2563 | self.seed_dir = os.path.join(paths.seed_dir, 'maas') |
2564 | - self.oauth_helper = self._get_helper() |
2565 | - |
2566 | - def _get_helper(self): |
2567 | - mcfg = self.ds_cfg |
2568 | - # If we are missing token_key, token_secret or consumer_key |
2569 | - # then just do non-authed requests |
2570 | - for required in ('token_key', 'token_secret', 'consumer_key'): |
2571 | - if required not in mcfg: |
2572 | - return url_helper.OauthUrlHelper() |
2573 | + self.id_hash = get_id_from_ds_cfg(self.ds_cfg) |
2574 | |
2575 | - return url_helper.OauthUrlHelper( |
2576 | - consumer_key=mcfg['consumer_key'], token_key=mcfg['token_key'], |
2577 | - token_secret=mcfg['token_secret'], |
2578 | - consumer_secret=mcfg.get('consumer_secret')) |
2579 | + @property |
2580 | + def oauth_helper(self): |
2581 | + if not self._oauth_helper: |
2582 | + self._oauth_helper = get_oauth_helper(self.ds_cfg) |
2583 | + return self._oauth_helper |
2584 | |
2585 | def __str__(self): |
2586 | root = sources.DataSource.__str__(self) |
2587 | return "%s [%s]" % (root, self.base_url) |
2588 | |
2589 | - def get_data(self): |
2590 | + def _get_data(self): |
2591 | mcfg = self.ds_cfg |
2592 | |
2593 | try: |
2594 | @@ -144,6 +143,36 @@ class DataSourceMAAS(sources.DataSource): |
2595 | |
2596 | return bool(url) |
2597 | |
2598 | + def check_instance_id(self, sys_cfg): |
2599 | + """locally check if the current system is the same instance. |
2600 | + |
2601 | + MAAS doesn't provide a real instance-id, and if it did, it is |
2602 | + still only available over the network. We need to check based |
2603 | + only on local resources. So compute a hash based on Oauth tokens.""" |
2604 | + if self.id_hash is None: |
2605 | + return False |
2606 | + ncfg = util.get_cfg_by_path(sys_cfg, ("datasource", self.dsname), {}) |
2607 | + return (self.id_hash == get_id_from_ds_cfg(ncfg)) |
2608 | + |
2609 | + |
2610 | +def get_oauth_helper(cfg): |
2611 | + """Return an oauth helper instance for values in cfg. |
2612 | + |
2613 | + @raises ValueError from OauthUrlHelper if some required fields have |
2614 | + true-ish values but others do not.""" |
2615 | + keys = ('consumer_key', 'consumer_secret', 'token_key', 'token_secret') |
2616 | + kwargs = dict([(r, cfg.get(r)) for r in keys]) |
2617 | + return url_helper.OauthUrlHelper(**kwargs) |
2618 | + |
2619 | + |
2620 | +def get_id_from_ds_cfg(ds_cfg): |
2621 | + """Given a config, generate a unique identifier for this node.""" |
2622 | + fields = ('consumer_key', 'token_key', 'token_secret') |
2623 | + idstr = '\0'.join([ds_cfg.get(f, "") for f in fields]) |
2624 | + # store the encoding version as part of the hash in the event |
2625 | + # that it ever changed we can compute older versions. |
2626 | + return 'v1:' + hashlib.sha256(idstr.encode('utf-8')).hexdigest() |
2627 | + |
2628 | |
2629 | def read_maas_seed_dir(seed_d): |
2630 | if seed_d.startswith("file://"): |
2631 | @@ -319,7 +348,7 @@ if __name__ == "__main__": |
2632 | sys.stderr.write("Must provide a url or a config with url.\n") |
2633 | sys.exit(1) |
2634 | |
2635 | - oauth_helper = url_helper.OauthUrlHelper(**creds) |
2636 | + oauth_helper = get_oauth_helper(creds) |
2637 | |
2638 | def geturl(url): |
2639 | # the retry is to ensure that oauth timestamp gets fixed |
2640 | diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py |
2641 | index e641244..5d3a8dd 100644 |
2642 | --- a/cloudinit/sources/DataSourceNoCloud.py |
2643 | +++ b/cloudinit/sources/DataSourceNoCloud.py |
2644 | @@ -20,6 +20,9 @@ LOG = logging.getLogger(__name__) |
2645 | |
2646 | |
2647 | class DataSourceNoCloud(sources.DataSource): |
2648 | + |
2649 | + dsname = "NoCloud" |
2650 | + |
2651 | def __init__(self, sys_cfg, distro, paths): |
2652 | sources.DataSource.__init__(self, sys_cfg, distro, paths) |
2653 | self.seed = None |
2654 | @@ -32,7 +35,7 @@ class DataSourceNoCloud(sources.DataSource): |
2655 | root = sources.DataSource.__str__(self) |
2656 | return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode) |
2657 | |
2658 | - def get_data(self): |
2659 | + def _get_data(self): |
2660 | defaults = { |
2661 | "instance-id": "nocloud", |
2662 | "dsmode": self.dsmode, |
2663 | diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py |
2664 | index 906bb27..e63a7e3 100644 |
2665 | --- a/cloudinit/sources/DataSourceNone.py |
2666 | +++ b/cloudinit/sources/DataSourceNone.py |
2667 | @@ -11,12 +11,15 @@ LOG = logging.getLogger(__name__) |
2668 | |
2669 | |
2670 | class DataSourceNone(sources.DataSource): |
2671 | + |
2672 | + dsname = "None" |
2673 | + |
2674 | def __init__(self, sys_cfg, distro, paths, ud_proc=None): |
2675 | sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc) |
2676 | self.metadata = {} |
2677 | self.userdata_raw = '' |
2678 | |
2679 | - def get_data(self): |
2680 | + def _get_data(self): |
2681 | # If the datasource config has any provided 'fallback' |
2682 | # userdata or metadata, use it... |
2683 | if 'userdata_raw' in self.ds_cfg: |
2684 | diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py |
2685 | index ccebf11..6e62f98 100644 |
2686 | --- a/cloudinit/sources/DataSourceOVF.py |
2687 | +++ b/cloudinit/sources/DataSourceOVF.py |
2688 | @@ -21,6 +21,8 @@ from cloudinit import util |
2689 | |
2690 | from cloudinit.sources.helpers.vmware.imc.config \ |
2691 | import Config |
2692 | +from cloudinit.sources.helpers.vmware.imc.config_custom_script \ |
2693 | + import PreCustomScript, PostCustomScript |
2694 | from cloudinit.sources.helpers.vmware.imc.config_file \ |
2695 | import ConfigFile |
2696 | from cloudinit.sources.helpers.vmware.imc.config_nic \ |
2697 | @@ -30,7 +32,7 @@ from cloudinit.sources.helpers.vmware.imc.config_passwd \ |
2698 | from cloudinit.sources.helpers.vmware.imc.guestcust_error \ |
2699 | import GuestCustErrorEnum |
2700 | from cloudinit.sources.helpers.vmware.imc.guestcust_event \ |
2701 | - import GuestCustEventEnum |
2702 | + import GuestCustEventEnum as GuestCustEvent |
2703 | from cloudinit.sources.helpers.vmware.imc.guestcust_state \ |
2704 | import GuestCustStateEnum |
2705 | from cloudinit.sources.helpers.vmware.imc.guestcust_util import ( |
2706 | @@ -43,6 +45,9 @@ LOG = logging.getLogger(__name__) |
2707 | |
2708 | |
2709 | class DataSourceOVF(sources.DataSource): |
2710 | + |
2711 | + dsname = "OVF" |
2712 | + |
2713 | def __init__(self, sys_cfg, distro, paths): |
2714 | sources.DataSource.__init__(self, sys_cfg, distro, paths) |
2715 | self.seed = None |
2716 | @@ -60,7 +65,7 @@ class DataSourceOVF(sources.DataSource): |
2717 | root = sources.DataSource.__str__(self) |
2718 | return "%s [seed=%s]" % (root, self.seed) |
2719 | |
2720 | - def get_data(self): |
2721 | + def _get_data(self): |
2722 | found = [] |
2723 | md = {} |
2724 | ud = "" |
2725 | @@ -124,17 +129,31 @@ class DataSourceOVF(sources.DataSource): |
2726 | self._vmware_cust_conf = Config(cf) |
2727 | (md, ud, cfg) = read_vmware_imc(self._vmware_cust_conf) |
2728 | self._vmware_nics_to_enable = get_nics_to_enable(nicspath) |
2729 | - markerid = self._vmware_cust_conf.marker_id |
2730 | - markerexists = check_marker_exists(markerid) |
2731 | + imcdirpath = os.path.dirname(vmwareImcConfigFilePath) |
2732 | + product_marker = self._vmware_cust_conf.marker_id |
2733 | + hasmarkerfile = check_marker_exists( |
2734 | + product_marker, os.path.join(self.paths.cloud_dir, 'data')) |
2735 | + special_customization = product_marker and not hasmarkerfile |
2736 | + customscript = self._vmware_cust_conf.custom_script_name |
2737 | except Exception as e: |
2738 | - LOG.debug("Error parsing the customization Config File") |
2739 | - LOG.exception(e) |
2740 | - set_customization_status( |
2741 | - GuestCustStateEnum.GUESTCUST_STATE_RUNNING, |
2742 | - GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED) |
2743 | - raise e |
2744 | - finally: |
2745 | - util.del_dir(os.path.dirname(vmwareImcConfigFilePath)) |
2746 | + _raise_error_status( |
2747 | + "Error parsing the customization Config File", |
2748 | + e, |
2749 | + GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, |
2750 | + vmwareImcConfigFilePath) |
2751 | + |
2752 | + if special_customization: |
2753 | + if customscript: |
2754 | + try: |
2755 | + precust = PreCustomScript(customscript, imcdirpath) |
2756 | + precust.execute() |
2757 | + except Exception as e: |
2758 | + _raise_error_status( |
2759 | + "Error executing pre-customization script", |
2760 | + e, |
2761 | + GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, |
2762 | + vmwareImcConfigFilePath) |
2763 | + |
2764 | try: |
2765 | LOG.debug("Preparing the Network configuration") |
2766 | self._network_config = get_network_config_from_conf( |
2767 | @@ -143,13 +162,13 @@ class DataSourceOVF(sources.DataSource): |
2768 | True, |
2769 | self.distro.osfamily) |
2770 | except Exception as e: |
2771 | - LOG.exception(e) |
2772 | - set_customization_status( |
2773 | - GuestCustStateEnum.GUESTCUST_STATE_RUNNING, |
2774 | - GuestCustEventEnum.GUESTCUST_EVENT_NETWORK_SETUP_FAILED) |
2775 | - raise e |
2776 | + _raise_error_status( |
2777 | + "Error preparing Network Configuration", |
2778 | + e, |
2779 | + GuestCustEvent.GUESTCUST_EVENT_NETWORK_SETUP_FAILED, |
2780 | + vmwareImcConfigFilePath) |
2781 | |
2782 | - if markerid and not markerexists: |
2783 | + if special_customization: |
2784 | LOG.debug("Applying password customization") |
2785 | pwdConfigurator = PasswordConfigurator() |
2786 | adminpwd = self._vmware_cust_conf.admin_password |
2787 | @@ -161,27 +180,41 @@ class DataSourceOVF(sources.DataSource): |
2788 | else: |
2789 | LOG.debug("Changing password is not needed") |
2790 | except Exception as e: |
2791 | - LOG.debug("Error applying Password Configuration: %s", e) |
2792 | - set_customization_status( |
2793 | - GuestCustStateEnum.GUESTCUST_STATE_RUNNING, |
2794 | - GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED) |
2795 | - return False |
2796 | - if markerid: |
2797 | - LOG.debug("Handle marker creation") |
2798 | + _raise_error_status( |
2799 | + "Error applying Password Configuration", |
2800 | + e, |
2801 | + GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, |
2802 | + vmwareImcConfigFilePath) |
2803 | + |
2804 | + if customscript: |
2805 | + try: |
2806 | + postcust = PostCustomScript(customscript, imcdirpath) |
2807 | + postcust.execute() |
2808 | + except Exception as e: |
2809 | + _raise_error_status( |
2810 | + "Error executing post-customization script", |
2811 | + e, |
2812 | + GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, |
2813 | + vmwareImcConfigFilePath) |
2814 | + |
2815 | + if product_marker: |
2816 | try: |
2817 | - setup_marker_files(markerid) |
2818 | + setup_marker_files( |
2819 | + product_marker, |
2820 | + os.path.join(self.paths.cloud_dir, 'data')) |
2821 | except Exception as e: |
2822 | - LOG.debug("Error creating marker files: %s", e) |
2823 | - set_customization_status( |
2824 | - GuestCustStateEnum.GUESTCUST_STATE_RUNNING, |
2825 | - GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED) |
2826 | - return False |
2827 | + _raise_error_status( |
2828 | + "Error creating marker files", |
2829 | + e, |
2830 | + GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, |
2831 | + vmwareImcConfigFilePath) |
2832 | |
2833 | self._vmware_cust_found = True |
2834 | found.append('vmware-tools') |
2835 | |
2836 | # TODO: Need to set the status to DONE only when the |
2837 | # customization is done successfully. |
2838 | + util.del_dir(os.path.dirname(vmwareImcConfigFilePath)) |
2839 | enable_nics(self._vmware_nics_to_enable) |
2840 | set_customization_status( |
2841 | GuestCustStateEnum.GUESTCUST_STATE_DONE, |
2842 | @@ -536,31 +569,52 @@ def get_datasource_list(depends): |
2843 | |
2844 | |
2845 | # To check if marker file exists |
2846 | -def check_marker_exists(markerid): |
2847 | +def check_marker_exists(markerid, marker_dir): |
2848 | """ |
2849 | Check the existence of a marker file. |
2850 | Presence of marker file determines whether a certain code path is to be |
2851 | executed. It is needed for partial guest customization in VMware. |
2852 | + @param markerid: a unique string representing a particular product |
2853 | + marker. |
2854 | + @param: marker_dir: The directory in which markers exist. |
2855 | """ |
2856 | if not markerid: |
2857 | return False |
2858 | - markerfile = "/.markerfile-" + markerid |
2859 | + markerfile = os.path.join(marker_dir, ".markerfile-" + markerid + ".txt") |
2860 | if os.path.exists(markerfile): |
2861 | return True |
2862 | return False |
2863 | |
2864 | |
2865 | # Create a marker file |
2866 | -def setup_marker_files(markerid): |
2867 | +def setup_marker_files(markerid, marker_dir): |
2868 | """ |
2869 | Create a new marker file. |
2870 | Marker files are unique to a full customization workflow in VMware |
2871 | environment. |
2872 | + @param markerid: a unique string representing a particular product |
2873 | + marker. |
2874 | + @param: marker_dir: The directory in which markers exist. |
2875 | + |
2876 | """ |
2877 | - if not markerid: |
2878 | - return |
2879 | - markerfile = "/.markerfile-" + markerid |
2880 | - util.del_file("/.markerfile-*.txt") |
2881 | + LOG.debug("Handle marker creation") |
2882 | + markerfile = os.path.join(marker_dir, ".markerfile-" + markerid + ".txt") |
2883 | + for fname in os.listdir(marker_dir): |
2884 | + if fname.startswith(".markerfile"): |
2885 | + util.del_file(os.path.join(marker_dir, fname)) |
2886 | open(markerfile, 'w').close() |
2887 | |
2888 | + |
2889 | +def _raise_error_status(prefix, error, event, config_file): |
2890 | + """ |
2891 | + Raise error and send customization status to the underlying VMware |
2892 | + Virtualization Platform. Also, cleanup the imc directory. |
2893 | + """ |
2894 | + LOG.debug('%s: %s', prefix, error) |
2895 | + set_customization_status( |
2896 | + GuestCustStateEnum.GUESTCUST_STATE_RUNNING, |
2897 | + event) |
2898 | + util.del_dir(os.path.dirname(config_file)) |
2899 | + raise error |
2900 | + |
2901 | # vi: ts=4 expandtab |
2902 | diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py |
2903 | index 5fdac19..ce47b6b 100644 |
2904 | --- a/cloudinit/sources/DataSourceOpenNebula.py |
2905 | +++ b/cloudinit/sources/DataSourceOpenNebula.py |
2906 | @@ -12,6 +12,7 @@ |
2907 | # |
2908 | # This file is part of cloud-init. See LICENSE file for license information. |
2909 | |
2910 | +import collections |
2911 | import os |
2912 | import pwd |
2913 | import re |
2914 | @@ -19,6 +20,7 @@ import string |
2915 | |
2916 | from cloudinit import log as logging |
2917 | from cloudinit import net |
2918 | +from cloudinit.net import eni |
2919 | from cloudinit import sources |
2920 | from cloudinit import util |
2921 | |
2922 | @@ -31,6 +33,9 @@ CONTEXT_DISK_FILES = ["context.sh"] |
2923 | |
2924 | |
2925 | class DataSourceOpenNebula(sources.DataSource): |
2926 | + |
2927 | + dsname = "OpenNebula" |
2928 | + |
2929 | def __init__(self, sys_cfg, distro, paths): |
2930 | sources.DataSource.__init__(self, sys_cfg, distro, paths) |
2931 | self.seed = None |
2932 | @@ -40,7 +45,7 @@ class DataSourceOpenNebula(sources.DataSource): |
2933 | root = sources.DataSource.__str__(self) |
2934 | return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode) |
2935 | |
2936 | - def get_data(self): |
2937 | + def _get_data(self): |
2938 | defaults = {"instance-id": DEFAULT_IID} |
2939 | results = None |
2940 | seed = None |
2941 | @@ -86,11 +91,18 @@ class DataSourceOpenNebula(sources.DataSource): |
2942 | return False |
2943 | |
2944 | self.seed = seed |
2945 | - self.network_eni = results.get("network_config") |
2946 | + self.network_eni = results.get('network-interfaces') |
2947 | self.metadata = md |
2948 | self.userdata_raw = results.get('userdata') |
2949 | return True |
2950 | |
2951 | + @property |
2952 | + def network_config(self): |
2953 | + if self.network_eni is not None: |
2954 | + return eni.convert_eni_data(self.network_eni) |
2955 | + else: |
2956 | + return None |
2957 | + |
2958 | def get_hostname(self, fqdn=False, resolve_ip=None): |
2959 | if resolve_ip is None: |
2960 | if self.dsmode == sources.DSMODE_NETWORK: |
2961 | @@ -113,58 +125,53 @@ class OpenNebulaNetwork(object): |
2962 | self.context = context |
2963 | if system_nics_by_mac is None: |
2964 | system_nics_by_mac = get_physical_nics_by_mac() |
2965 | - self.ifaces = system_nics_by_mac |
2966 | + self.ifaces = collections.OrderedDict( |
2967 | + [k for k in sorted(system_nics_by_mac.items(), |
2968 | + key=lambda k: net.natural_sort_key(k[1]))]) |
2969 | + |
2970 | + # OpenNebula 4.14+ provide macaddr for ETHX in variable ETH_MAC. |
2971 | + # context_devname provides {mac.lower():ETHX, mac2.lower():ETHX} |
2972 | + self.context_devname = {} |
2973 | + for k, v in context.items(): |
2974 | + m = re.match(r'^(.+)_MAC$', k) |
2975 | + if m: |
2976 | + self.context_devname[v.lower()] = m.group(1) |
2977 | |
2978 | def mac2ip(self, mac): |
2979 | - components = mac.split(':')[2:] |
2980 | - return [str(int(c, 16)) for c in components] |
2981 | + return '.'.join([str(int(c, 16)) for c in mac.split(':')[2:]]) |
2982 | |
2983 | - def get_ip(self, dev, components): |
2984 | - var_name = dev.upper() + '_IP' |
2985 | - if var_name in self.context: |
2986 | - return self.context[var_name] |
2987 | - else: |
2988 | - return '.'.join(components) |
2989 | + def mac2network(self, mac): |
2990 | + return self.mac2ip(mac).rpartition(".")[0] + ".0" |
2991 | |
2992 | - def get_mask(self, dev): |
2993 | - var_name = dev.upper() + '_MASK' |
2994 | - if var_name in self.context: |
2995 | - return self.context[var_name] |
2996 | - else: |
2997 | - return '255.255.255.0' |
2998 | + def get_dns(self, dev): |
2999 | + return self.get_field(dev, "dns", "").split() |
3000 | |
3001 | - def get_network(self, dev, components): |
3002 | - var_name = dev.upper() + '_NETWORK' |
3003 | - if var_name in self.context: |
3004 | - return self.context[var_name] |
3005 | - else: |
3006 | - return '.'.join(components[:-1]) + '.0' |
3007 | + def get_domain(self, dev): |
3008 | + return self.get_field(dev, "domain") |
3009 | + |
3010 | + def get_ip(self, dev, mac): |
3011 | + return self.get_field(dev, "ip", self.mac2ip(mac)) |
3012 | |
3013 | def get_gateway(self, dev): |
3014 | - var_name = dev.upper() + '_GATEWAY' |
3015 | - if var_name in self.context: |
3016 | - return self.context[var_name] |
3017 | - else: |
3018 | - return None |
3019 | + return self.get_field(dev, "gateway") |
3020 | |
3021 | - def get_dns(self, dev): |
3022 | - var_name = dev.upper() + '_DNS' |
3023 | - if var_name in self.context: |
3024 | - return self.context[var_name] |
3025 | - else: |
3026 | - return None |
3027 | + def get_mask(self, dev): |
3028 | + return self.get_field(dev, "mask", "255.255.255.0") |
3029 | |
3030 | - def get_domain(self, dev): |
3031 | - var_name = dev.upper() + '_DOMAIN' |
3032 | - if var_name in self.context: |
3033 | - return self.context[var_name] |
3034 | - else: |
3035 | - return None |
3036 | + def get_network(self, dev, mac): |
3037 | + return self.get_field(dev, "network", self.mac2network(mac)) |
3038 | + |
3039 | + def get_field(self, dev, name, default=None): |
3040 | + """return the field name in context for device dev. |
3041 | + |
3042 | + context stores <dev>_<NAME> (example: eth0_DOMAIN). |
3043 | + an empty string for value will return default.""" |
3044 | + val = self.context.get('_'.join((dev, name,)).upper()) |
3045 | + # allow empty string to return the default. |
3046 | + return default if val in (None, "") else val |
3047 | |
3048 | def gen_conf(self): |
3049 | - global_dns = [] |
3050 | - if 'DNS' in self.context: |
3051 | - global_dns.append(self.context['DNS']) |
3052 | + global_dns = self.context.get('DNS', "").split() |
3053 | |
3054 | conf = [] |
3055 | conf.append('auto lo') |
3056 | @@ -172,29 +179,31 @@ class OpenNebulaNetwork(object): |
3057 | conf.append('') |
3058 | |
3059 | for mac, dev in self.ifaces.items(): |
3060 | - ip_components = self.mac2ip(mac) |
3061 | + mac = mac.lower() |
3062 | + |
3063 | + # c_dev stores name in context 'ETHX' for this device. |
3064 | + # dev stores the current system name. |
3065 | + c_dev = self.context_devname.get(mac, dev) |
3066 | |
3067 | conf.append('auto ' + dev) |
3068 | conf.append('iface ' + dev + ' inet static') |
3069 | - conf.append(' address ' + self.get_ip(dev, ip_components)) |
3070 | - conf.append(' network ' + self.get_network(dev, ip_components)) |
3071 | - conf.append(' netmask ' + self.get_mask(dev)) |
3072 | + conf.append(' #hwaddress %s' % mac) |
3073 | + conf.append(' address ' + self.get_ip(c_dev, mac)) |
3074 | + conf.append(' network ' + self.get_network(c_dev, mac)) |
3075 | + conf.append(' netmask ' + self.get_mask(c_dev)) |
3076 | |
3077 | - gateway = self.get_gateway(dev) |
3078 | + gateway = self.get_gateway(c_dev) |
3079 | if gateway: |
3080 | conf.append(' gateway ' + gateway) |
3081 | |
3082 | - domain = self.get_domain(dev) |
3083 | + domain = self.get_domain(c_dev) |
3084 | if domain: |
3085 | conf.append(' dns-search ' + domain) |
3086 | |
3087 | # add global DNS servers to all interfaces |
3088 | - dns = self.get_dns(dev) |
3089 | + dns = self.get_dns(c_dev) |
3090 | if global_dns or dns: |
3091 | - all_dns = global_dns |
3092 | - if dns: |
3093 | - all_dns.append(dns) |
3094 | - conf.append(' dns-nameservers ' + ' '.join(all_dns)) |
3095 | + conf.append(' dns-nameservers ' + ' '.join(global_dns + dns)) |
3096 | |
3097 | conf.append('') |
3098 | |
3099 | @@ -329,8 +338,9 @@ def read_context_disk_dir(source_dir, asuser=None): |
3100 | try: |
3101 | pwd.getpwnam(asuser) |
3102 | except KeyError as e: |
3103 | - raise BrokenContextDiskDir("configured user '%s' " |
3104 | - "does not exist", asuser) |
3105 | + raise BrokenContextDiskDir( |
3106 | + "configured user '{user}' does not exist".format( |
3107 | + user=asuser)) |
3108 | try: |
3109 | path = os.path.join(source_dir, 'context.sh') |
3110 | content = util.load_file(path) |
3111 | diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py |
3112 | index b64a7f2..e55a763 100644 |
3113 | --- a/cloudinit/sources/DataSourceOpenStack.py |
3114 | +++ b/cloudinit/sources/DataSourceOpenStack.py |
3115 | @@ -24,6 +24,9 @@ DEFAULT_METADATA = { |
3116 | |
3117 | |
3118 | class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): |
3119 | + |
3120 | + dsname = "OpenStack" |
3121 | + |
3122 | def __init__(self, sys_cfg, distro, paths): |
3123 | super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths) |
3124 | self.metadata_address = None |
3125 | @@ -96,7 +99,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): |
3126 | self.metadata_address = url2base.get(avail_url) |
3127 | return bool(avail_url) |
3128 | |
3129 | - def get_data(self): |
3130 | + def _get_data(self): |
3131 | try: |
3132 | if not self.wait_for_metadata_service(): |
3133 | return False |
3134 | diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py |
3135 | index 3a8a8e8..b0b19c9 100644 |
3136 | --- a/cloudinit/sources/DataSourceScaleway.py |
3137 | +++ b/cloudinit/sources/DataSourceScaleway.py |
3138 | @@ -169,6 +169,8 @@ def query_data_api(api_type, api_address, retries, timeout): |
3139 | |
3140 | class DataSourceScaleway(sources.DataSource): |
3141 | |
3142 | + dsname = "Scaleway" |
3143 | + |
3144 | def __init__(self, sys_cfg, distro, paths): |
3145 | super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths) |
3146 | |
3147 | @@ -184,7 +186,7 @@ class DataSourceScaleway(sources.DataSource): |
3148 | self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES)) |
3149 | self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT)) |
3150 | |
3151 | - def get_data(self): |
3152 | + def _get_data(self): |
3153 | if not on_scaleway(): |
3154 | return False |
3155 | |
3156 | diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py |
3157 | index 6c6902f..86bfa5d 100644 |
3158 | --- a/cloudinit/sources/DataSourceSmartOS.py |
3159 | +++ b/cloudinit/sources/DataSourceSmartOS.py |
3160 | @@ -159,6 +159,9 @@ LEGACY_USER_D = "/var/db" |
3161 | |
3162 | |
3163 | class DataSourceSmartOS(sources.DataSource): |
3164 | + |
3165 | + dsname = "Joyent" |
3166 | + |
3167 | _unset = "_unset" |
3168 | smartos_type = _unset |
3169 | md_client = _unset |
3170 | @@ -211,7 +214,7 @@ class DataSourceSmartOS(sources.DataSource): |
3171 | os.rename('/'.join([svc_path, 'provisioning']), |
3172 | '/'.join([svc_path, 'provision_success'])) |
3173 | |
3174 | - def get_data(self): |
3175 | + def _get_data(self): |
3176 | self._init() |
3177 | |
3178 | md = {} |
3179 | diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py |
3180 | index 9a43fbe..a05ca2f 100644 |
3181 | --- a/cloudinit/sources/__init__.py |
3182 | +++ b/cloudinit/sources/__init__.py |
3183 | @@ -10,9 +10,11 @@ |
3184 | |
3185 | import abc |
3186 | import copy |
3187 | +import json |
3188 | import os |
3189 | import six |
3190 | |
3191 | +from cloudinit.atomic_helper import write_json |
3192 | from cloudinit import importer |
3193 | from cloudinit import log as logging |
3194 | from cloudinit import type_utils |
3195 | @@ -33,6 +35,12 @@ DEP_FILESYSTEM = "FILESYSTEM" |
3196 | DEP_NETWORK = "NETWORK" |
3197 | DS_PREFIX = 'DataSource' |
3198 | |
3199 | +# File in which instance meta-data, user-data and vendor-data is written |
3200 | +INSTANCE_JSON_FILE = 'instance-data.json' |
3201 | + |
3202 | +# Key which can provide a cloud's official product name to cloud-init |
3203 | +METADATA_CLOUD_NAME_KEY = 'cloud-name' |
3204 | + |
3205 | LOG = logging.getLogger(__name__) |
3206 | |
3207 | |
3208 | @@ -40,12 +48,39 @@ class DataSourceNotFoundException(Exception): |
3209 | pass |
3210 | |
3211 | |
3212 | +def process_base64_metadata(metadata, key_path=''): |
3213 | + """Strip ci-b64 prefix and return metadata with base64-encoded-keys set.""" |
3214 | + md_copy = copy.deepcopy(metadata) |
3215 | + md_copy['base64-encoded-keys'] = [] |
3216 | + for key, val in metadata.items(): |
3217 | + if key_path: |
3218 | + sub_key_path = key_path + '/' + key |
3219 | + else: |
3220 | + sub_key_path = key |
3221 | + if isinstance(val, str) and val.startswith('ci-b64:'): |
3222 | + md_copy['base64-encoded-keys'].append(sub_key_path) |
3223 | + md_copy[key] = val.replace('ci-b64:', '') |
3224 | + if isinstance(val, dict): |
3225 | + return_val = process_base64_metadata(val, sub_key_path) |
3226 | + md_copy['base64-encoded-keys'].extend( |
3227 | + return_val.pop('base64-encoded-keys')) |
3228 | + md_copy[key] = return_val |
3229 | + return md_copy |
3230 | + |
3231 | + |
3232 | @six.add_metaclass(abc.ABCMeta) |
3233 | class DataSource(object): |
3234 | |
3235 | dsmode = DSMODE_NETWORK |
3236 | default_locale = 'en_US.UTF-8' |
3237 | |
3238 | + # Datasource name needs to be set by subclasses to determine which |
3239 | + # cloud-config datasource key is loaded |
3240 | + dsname = '_undef' |
3241 | + |
3242 | + # Cached cloud_name as determined by _get_cloud_name |
3243 | + _cloud_name = None |
3244 | + |
3245 | def __init__(self, sys_cfg, distro, paths, ud_proc=None): |
3246 | self.sys_cfg = sys_cfg |
3247 | self.distro = distro |
3248 | @@ -56,17 +91,8 @@ class DataSource(object): |
3249 | self.vendordata = None |
3250 | self.vendordata_raw = None |
3251 | |
3252 | - # find the datasource config name. |
3253 | - # remove 'DataSource' from classname on front, and remove 'Net' on end. |
3254 | - # Both Foo and FooNet sources expect config in cfg['sources']['Foo'] |
3255 | - name = type_utils.obj_name(self) |
3256 | - if name.startswith(DS_PREFIX): |
3257 | - name = name[len(DS_PREFIX):] |
3258 | - if name.endswith('Net'): |
3259 | - name = name[0:-3] |
3260 | - |
3261 | - self.ds_cfg = util.get_cfg_by_path(self.sys_cfg, |
3262 | - ("datasource", name), {}) |
3263 | + self.ds_cfg = util.get_cfg_by_path( |
3264 | + self.sys_cfg, ("datasource", self.dsname), {}) |
3265 | if not self.ds_cfg: |
3266 | self.ds_cfg = {} |
3267 | |
3268 | @@ -78,6 +104,51 @@ class DataSource(object): |
3269 | def __str__(self): |
3270 | return type_utils.obj_name(self) |
3271 | |
3272 | + def _get_standardized_metadata(self): |
3273 | + """Return a dictionary of standardized metadata keys.""" |
3274 | + return {'v1': { |
3275 | + 'local-hostname': self.get_hostname(), |
3276 | + 'instance-id': self.get_instance_id(), |
3277 | + 'cloud-name': self.cloud_name, |
3278 | + 'region': self.region, |
3279 | + 'availability-zone': self.availability_zone}} |
3280 | + |
3281 | + def get_data(self): |
3282 | + """Datasources implement _get_data to setup metadata and userdata_raw. |
3283 | + |
3284 | + Minimally, the datasource should return a boolean True on success. |
3285 | + """ |
3286 | + return_value = self._get_data() |
3287 | + json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE) |
3288 | + if not return_value: |
3289 | + return return_value |
3290 | + |
3291 | + instance_data = { |
3292 | + 'ds': { |
3293 | + 'meta-data': self.metadata, |
3294 | + 'user-data': self.get_userdata_raw(), |
3295 | + 'vendor-data': self.get_vendordata_raw()}} |
3296 | + instance_data.update( |
3297 | + self._get_standardized_metadata()) |
3298 | + try: |
3299 | + # Process content base64encoding unserializable values |
3300 | + content = util.json_dumps(instance_data) |
3301 | + # Strip base64: prefix and return base64-encoded-keys |
3302 | + processed_data = process_base64_metadata(json.loads(content)) |
3303 | + except TypeError as e: |
3304 | + LOG.warning('Error persisting instance-data.json: %s', str(e)) |
3305 | + return return_value |
3306 | + except UnicodeDecodeError as e: |
3307 | + LOG.warning('Error persisting instance-data.json: %s', str(e)) |
3308 | + return return_value |
3309 | + write_json(json_file, processed_data, mode=0o600) |
3310 | + return return_value |
3311 | + |
3312 | + def _get_data(self): |
3313 | + raise NotImplementedError( |
3314 | + 'Subclasses of DataSource must implement _get_data which' |
3315 | + ' sets self.metadata, vendordata_raw and userdata_raw.') |
3316 | + |
3317 | def get_userdata(self, apply_filter=False): |
3318 | if self.userdata is None: |
3319 | self.userdata = self.ud_proc.process(self.get_userdata_raw()) |
3320 | @@ -91,6 +162,34 @@ class DataSource(object): |
3321 | return self.vendordata |
3322 | |
3323 | @property |
3324 | + def cloud_name(self): |
3325 | + """Return lowercase cloud name as determined by the datasource. |
3326 | + |
3327 | + Datasource can determine or define its own cloud product name in |
3328 | + metadata. |
3329 | + """ |
3330 | + if self._cloud_name: |
3331 | + return self._cloud_name |
3332 | + if self.metadata and self.metadata.get(METADATA_CLOUD_NAME_KEY): |
3333 | + cloud_name = self.metadata.get(METADATA_CLOUD_NAME_KEY) |
3334 | + if isinstance(cloud_name, six.string_types): |
3335 | + self._cloud_name = cloud_name.lower() |
3336 | + LOG.debug( |
3337 | + 'Ignoring metadata provided key %s: non-string type %s', |
3338 | + METADATA_CLOUD_NAME_KEY, type(cloud_name)) |
3339 | + else: |
3340 | + self._cloud_name = self._get_cloud_name().lower() |
3341 | + return self._cloud_name |
3342 | + |
3343 | + def _get_cloud_name(self): |
3344 | + """Return the datasource name as it frequently matches cloud name. |
3345 | + |
3346 | + Should be overridden in subclasses which can run on multiple |
3347 | + cloud names, such as DatasourceEc2. |
3348 | + """ |
3349 | + return self.dsname |
3350 | + |
3351 | + @property |
3352 | def launch_index(self): |
3353 | if not self.metadata: |
3354 | return None |
3355 | @@ -161,8 +260,11 @@ class DataSource(object): |
3356 | |
3357 | @property |
3358 | def availability_zone(self): |
3359 | - return self.metadata.get('availability-zone', |
3360 | - self.metadata.get('availability_zone')) |
3361 | + top_level_az = self.metadata.get( |
3362 | + 'availability-zone', self.metadata.get('availability_zone')) |
3363 | + if top_level_az: |
3364 | + return top_level_az |
3365 | + return self.metadata.get('placement', {}).get('availability-zone') |
3366 | |
3367 | @property |
3368 | def region(self): |
3369 | @@ -346,7 +448,7 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter): |
3370 | # Return an ordered list of classes that match (if any) |
3371 | def list_sources(cfg_list, depends, pkg_list): |
3372 | src_list = [] |
3373 | - LOG.debug(("Looking for for data source in: %s," |
3374 | + LOG.debug(("Looking for data source in: %s," |
3375 | " via packages %s that matches dependencies %s"), |
3376 | cfg_list, pkg_list, depends) |
3377 | for ds_name in cfg_list: |
3378 | @@ -417,4 +519,5 @@ def list_from_depends(depends, ds_list): |
3379 | ret_list.append(cls) |
3380 | return ret_list |
3381 | |
3382 | + |
3383 | # vi: ts=4 expandtab |
3384 | diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py |
3385 | index 959b1bd..90c12df 100644 |
3386 | --- a/cloudinit/sources/helpers/azure.py |
3387 | +++ b/cloudinit/sources/helpers/azure.py |
3388 | @@ -199,10 +199,10 @@ class WALinuxAgentShim(object): |
3389 | ' </Container>', |
3390 | '</Health>']) |
3391 | |
3392 | - def __init__(self, fallback_lease_file=None): |
3393 | + def __init__(self, fallback_lease_file=None, dhcp_options=None): |
3394 | LOG.debug('WALinuxAgentShim instantiated, fallback_lease_file=%s', |
3395 | fallback_lease_file) |
3396 | - self.dhcpoptions = None |
3397 | + self.dhcpoptions = dhcp_options |
3398 | self._endpoint = None |
3399 | self.openssl_manager = None |
3400 | self.values = {} |
3401 | @@ -220,7 +220,8 @@ class WALinuxAgentShim(object): |
3402 | @property |
3403 | def endpoint(self): |
3404 | if self._endpoint is None: |
3405 | - self._endpoint = self.find_endpoint(self.lease_file) |
3406 | + self._endpoint = self.find_endpoint(self.lease_file, |
3407 | + self.dhcpoptions) |
3408 | return self._endpoint |
3409 | |
3410 | @staticmethod |
3411 | @@ -274,7 +275,8 @@ class WALinuxAgentShim(object): |
3412 | name = os.path.basename(hook_file).replace('.json', '') |
3413 | dhcp_options[name] = json.loads(util.load_file((hook_file))) |
3414 | except ValueError: |
3415 | - raise ValueError("%s is not valid JSON data", hook_file) |
3416 | + raise ValueError( |
3417 | + '{_file} is not valid JSON data'.format(_file=hook_file)) |
3418 | return dhcp_options |
3419 | |
3420 | @staticmethod |
3421 | @@ -291,10 +293,14 @@ class WALinuxAgentShim(object): |
3422 | return _value |
3423 | |
3424 | @staticmethod |
3425 | - def find_endpoint(fallback_lease_file=None): |
3426 | + def find_endpoint(fallback_lease_file=None, dhcp245=None): |
3427 | value = None |
3428 | - LOG.debug('Finding Azure endpoint from networkd...') |
3429 | - value = WALinuxAgentShim._networkd_get_value_from_leases() |
3430 | + if dhcp245 is not None: |
3431 | + value = dhcp245 |
3432 | + LOG.debug("Using Azure Endpoint from dhcp options") |
3433 | + if value is None: |
3434 | + LOG.debug('Finding Azure endpoint from networkd...') |
3435 | + value = WALinuxAgentShim._networkd_get_value_from_leases() |
3436 | if value is None: |
3437 | # Option-245 stored in /run/cloud-init/dhclient.hooks/<ifc>.json |
3438 | # a dhclient exit hook that calls cloud-init-dhclient-hook |
3439 | @@ -366,8 +372,9 @@ class WALinuxAgentShim(object): |
3440 | LOG.info('Reported ready to Azure fabric.') |
3441 | |
3442 | |
3443 | -def get_metadata_from_fabric(fallback_lease_file=None): |
3444 | - shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file) |
3445 | +def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None): |
3446 | + shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file, |
3447 | + dhcp_options=dhcp_opts) |
3448 | try: |
3449 | return shim.register_with_azure_and_fetch_data() |
3450 | finally: |
3451 | diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py |
3452 | index 49d441d..2eaeff3 100644 |
3453 | --- a/cloudinit/sources/helpers/vmware/imc/config.py |
3454 | +++ b/cloudinit/sources/helpers/vmware/imc/config.py |
3455 | @@ -100,4 +100,8 @@ class Config(object): |
3456 | """Returns marker id.""" |
3457 | return self._configFile.get(Config.MARKERID, None) |
3458 | |
3459 | + @property |
3460 | + def custom_script_name(self): |
3461 | + """Return the name of custom (pre/post) script.""" |
3462 | + return self._configFile.get(Config.CUSTOM_SCRIPT, None) |
3463 | # vi: ts=4 expandtab |
3464 | diff --git a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py |
3465 | new file mode 100644 |
3466 | index 0000000..a7d4ad9 |
3467 | --- /dev/null |
3468 | +++ b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py |
3469 | @@ -0,0 +1,153 @@ |
3470 | +# Copyright (C) 2017 Canonical Ltd. |
3471 | +# Copyright (C) 2017 VMware Inc. |
3472 | +# |
3473 | +# Author: Maitreyee Saikia <msaikia@vmware.com> |
3474 | +# |
3475 | +# This file is part of cloud-init. See LICENSE file for license information. |
3476 | + |
3477 | +import logging |
3478 | +import os |
3479 | +import stat |
3480 | +from textwrap import dedent |
3481 | + |
3482 | +from cloudinit import util |
3483 | + |
3484 | +LOG = logging.getLogger(__name__) |
3485 | + |
3486 | + |
3487 | +class CustomScriptNotFound(Exception): |
3488 | + pass |
3489 | + |
3490 | + |
3491 | +class CustomScriptConstant(object): |
3492 | + RC_LOCAL = "/etc/rc.local" |
3493 | + POST_CUST_TMP_DIR = "/root/.customization" |
3494 | + POST_CUST_RUN_SCRIPT_NAME = "post-customize-guest.sh" |
3495 | + POST_CUST_RUN_SCRIPT = os.path.join(POST_CUST_TMP_DIR, |
3496 | + POST_CUST_RUN_SCRIPT_NAME) |
3497 | + POST_REBOOT_PENDING_MARKER = "/.guest-customization-post-reboot-pending" |
3498 | + |
3499 | + |
3500 | +class RunCustomScript(object): |
3501 | + def __init__(self, scriptname, directory): |
3502 | + self.scriptname = scriptname |
3503 | + self.directory = directory |
3504 | + self.scriptpath = os.path.join(directory, scriptname) |
3505 | + |
3506 | + def prepare_script(self): |
3507 | + if not os.path.exists(self.scriptpath): |
3508 | + raise CustomScriptNotFound("Script %s not found!! " |
3509 | + "Cannot execute custom script!" |
3510 | + % self.scriptpath) |
3511 | + # Strip any CR characters from the decoded script |
3512 | + util.load_file(self.scriptpath).replace("\r", "") |
3513 | + st = os.stat(self.scriptpath) |
3514 | + os.chmod(self.scriptpath, st.st_mode | stat.S_IEXEC) |
3515 | + |
3516 | + |
3517 | +class PreCustomScript(RunCustomScript): |
3518 | + def execute(self): |
3519 | + """Executing custom script with precustomization argument.""" |
3520 | + LOG.debug("Executing pre-customization script") |
3521 | + self.prepare_script() |
3522 | + util.subp(["/bin/sh", self.scriptpath, "precustomization"]) |
3523 | + |
3524 | + |
3525 | +class PostCustomScript(RunCustomScript): |
3526 | + def __init__(self, scriptname, directory): |
3527 | + super(PostCustomScript, self).__init__(scriptname, directory) |
3528 | + # Determine when to run custom script. When postreboot is True, |
3529 | + # the user uploaded script will run as part of rc.local after |
3530 | + # the machine reboots. This is determined by presence of rclocal. |
3531 | + # When postreboot is False, script will run as part of cloud-init. |
3532 | + self.postreboot = False |
3533 | + |
3534 | + def _install_post_reboot_agent(self, rclocal): |
3535 | + """ |
3536 | + Install post-reboot agent for running custom script after reboot. |
3537 | + As part of this process, we are editing the rclocal file to run a |
3538 | + VMware script, which in turn is responsible for handling the user |
3539 | + script. |
3540 | + @param: path to rc local. |
3541 | + """ |
3542 | + LOG.debug("Installing post-reboot customization from %s to %s", |
3543 | + self.directory, rclocal) |
3544 | + if not self.has_previous_agent(rclocal): |
3545 | + LOG.info("Adding post-reboot customization agent to rc.local") |
3546 | + new_content = dedent(""" |
3547 | + # Run post-reboot guest customization |
3548 | + /bin/sh %s |
3549 | + exit 0 |
3550 | + """) % CustomScriptConstant.POST_CUST_RUN_SCRIPT |
3551 | + existing_rclocal = util.load_file(rclocal).replace('exit 0\n', '') |
3552 | + st = os.stat(rclocal) |
3553 | + # "x" flag should be set |
3554 | + mode = st.st_mode | stat.S_IEXEC |
3555 | + util.write_file(rclocal, existing_rclocal + new_content, mode) |
3556 | + |
3557 | + else: |
3558 | + # We don't need to update rclocal file every time a customization |
3559 | + # is requested. It just needs to be done for the first time. |
3560 | + LOG.info("Post-reboot guest customization agent is already " |
3561 | + "registered in rc.local") |
3562 | + LOG.debug("Installing post-reboot customization agent finished: %s", |
3563 | + self.postreboot) |
3564 | + |
3565 | + def has_previous_agent(self, rclocal): |
3566 | + searchstring = "# Run post-reboot guest customization" |
3567 | + if searchstring in open(rclocal).read(): |
3568 | + return True |
3569 | + return False |
3570 | + |
3571 | + def find_rc_local(self): |
3572 | + """ |
3573 | + Determine if rc local is present. |
3574 | + """ |
3575 | + rclocal = "" |
3576 | + if os.path.exists(CustomScriptConstant.RC_LOCAL): |
3577 | + LOG.debug("rc.local detected.") |
3578 | + # resolving in case of symlink |
3579 | + rclocal = os.path.realpath(CustomScriptConstant.RC_LOCAL) |
3580 | + LOG.debug("rc.local resolved to %s", rclocal) |
3581 | + else: |
3582 | + LOG.warning("Can't find rc.local, post-customization " |
3583 | + "will be run before reboot") |
3584 | + return rclocal |
3585 | + |
3586 | + def install_agent(self): |
3587 | + rclocal = self.find_rc_local() |
3588 | + if rclocal: |
3589 | + self._install_post_reboot_agent(rclocal) |
3590 | + self.postreboot = True |
3591 | + |
3592 | + def execute(self): |
3593 | + """ |
3594 | + This method executes post-customization script before or after reboot |
3595 | + based on the presence of rc local. |
3596 | + """ |
3597 | + self.prepare_script() |
3598 | + self.install_agent() |
3599 | + if not self.postreboot: |
3600 | + LOG.warning("Executing post-customization script inline") |
3601 | + util.subp(["/bin/sh", self.scriptpath, "postcustomization"]) |
3602 | + else: |
3603 | + LOG.debug("Scheduling custom script to run post reboot") |
3604 | + if not os.path.isdir(CustomScriptConstant.POST_CUST_TMP_DIR): |
3605 | + os.mkdir(CustomScriptConstant.POST_CUST_TMP_DIR) |
3606 | + # Script "post-customize-guest.sh" and user uploaded script are |
3607 | + # present in the same directory and need to be copied to a temp |
3608 | + # directory to be executed post reboot. User uploaded script is |
3609 | + # saved as customize.sh in the temp directory. |
3610 | + # post-customize-guest.sh executes customize.sh after reboot. |
3611 | + LOG.debug("Copying post-customization script") |
3612 | + util.copy(self.scriptpath, |
3613 | + CustomScriptConstant.POST_CUST_TMP_DIR + "/customize.sh") |
3614 | + LOG.debug("Copying script to run post-customization script") |
3615 | + util.copy( |
3616 | + os.path.join(self.directory, |
3617 | + CustomScriptConstant.POST_CUST_RUN_SCRIPT_NAME), |
3618 | + CustomScriptConstant.POST_CUST_RUN_SCRIPT) |
3619 | + LOG.info("Creating post-reboot pending marker") |
3620 | + util.ensure_file(CustomScriptConstant.POST_REBOOT_PENDING_MARKER) |
3621 | + |
3622 | +# vi: ts=4 expandtab |
3623 | diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py |
3624 | index 2fb07c5..2d8900e 100644 |
3625 | --- a/cloudinit/sources/helpers/vmware/imc/config_nic.py |
3626 | +++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py |
3627 | @@ -161,7 +161,7 @@ class NicConfigurator(object): |
3628 | if nic.primary and v4.gateways: |
3629 | self.ipv4PrimaryGateway = v4.gateways[0] |
3630 | subnet.update({'gateway': self.ipv4PrimaryGateway}) |
3631 | - return [subnet] |
3632 | + return ([subnet], route_list) |
3633 | |
3634 | # Add routes if there is no primary nic |
3635 | if not self._primaryNic: |
3636 | diff --git a/cloudinit/sources/tests/__init__.py b/cloudinit/sources/tests/__init__.py |
3637 | new file mode 100644 |
3638 | index 0000000..e69de29 |
3639 | --- /dev/null |
3640 | +++ b/cloudinit/sources/tests/__init__.py |
3641 | diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py |
3642 | new file mode 100644 |
3643 | index 0000000..af15115 |
3644 | --- /dev/null |
3645 | +++ b/cloudinit/sources/tests/test_init.py |
3646 | @@ -0,0 +1,202 @@ |
3647 | +# This file is part of cloud-init. See LICENSE file for license information. |
3648 | + |
3649 | +import os |
3650 | +import six |
3651 | +import stat |
3652 | + |
3653 | +from cloudinit.helpers import Paths |
3654 | +from cloudinit.sources import ( |
3655 | + INSTANCE_JSON_FILE, DataSource) |
3656 | +from cloudinit.tests.helpers import CiTestCase, skipIf |
3657 | +from cloudinit.user_data import UserDataProcessor |
3658 | +from cloudinit import util |
3659 | + |
3660 | + |
3661 | +class DataSourceTestSubclassNet(DataSource): |
3662 | + |
3663 | + dsname = 'MyTestSubclass' |
3664 | + |
3665 | + def __init__(self, sys_cfg, distro, paths, custom_userdata=None): |
3666 | + super(DataSourceTestSubclassNet, self).__init__( |
3667 | + sys_cfg, distro, paths) |
3668 | + self._custom_userdata = custom_userdata |
3669 | + |
3670 | + def _get_cloud_name(self): |
3671 | + return 'SubclassCloudName' |
3672 | + |
3673 | + def _get_data(self): |
3674 | + self.metadata = {'availability_zone': 'myaz', |
3675 | + 'local-hostname': 'test-subclass-hostname', |
3676 | + 'region': 'myregion'} |
3677 | + if self._custom_userdata: |
3678 | + self.userdata_raw = self._custom_userdata |
3679 | + else: |
3680 | + self.userdata_raw = 'userdata_raw' |
3681 | + self.vendordata_raw = 'vendordata_raw' |
3682 | + return True |
3683 | + |
3684 | + |
3685 | +class InvalidDataSourceTestSubclassNet(DataSource): |
3686 | + pass |
3687 | + |
3688 | + |
3689 | +class TestDataSource(CiTestCase): |
3690 | + |
3691 | + with_logs = True |
3692 | + |
3693 | + def setUp(self): |
3694 | + super(TestDataSource, self).setUp() |
3695 | + self.sys_cfg = {'datasource': {'_undef': {'key1': False}}} |
3696 | + self.distro = 'distrotest' # generally should be a Distro object |
3697 | + self.paths = Paths({}) |
3698 | + self.datasource = DataSource(self.sys_cfg, self.distro, self.paths) |
3699 | + |
3700 | + def test_datasource_init(self): |
3701 | + """DataSource initializes metadata attributes, ds_cfg and ud_proc.""" |
3702 | + self.assertEqual(self.paths, self.datasource.paths) |
3703 | + self.assertEqual(self.sys_cfg, self.datasource.sys_cfg) |
3704 | + self.assertEqual(self.distro, self.datasource.distro) |
3705 | + self.assertIsNone(self.datasource.userdata) |
3706 | + self.assertEqual({}, self.datasource.metadata) |
3707 | + self.assertIsNone(self.datasource.userdata_raw) |
3708 | + self.assertIsNone(self.datasource.vendordata) |
3709 | + self.assertIsNone(self.datasource.vendordata_raw) |
3710 | + self.assertEqual({'key1': False}, self.datasource.ds_cfg) |
3711 | + self.assertIsInstance(self.datasource.ud_proc, UserDataProcessor) |
3712 | + |
3713 | + def test_datasource_init_gets_ds_cfg_using_dsname(self): |
3714 | + """Init uses DataSource.dsname for sourcing ds_cfg.""" |
3715 | + sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}} |
3716 | + distro = 'distrotest' # generally should be a Distro object |
3717 | + paths = Paths({}) |
3718 | + datasource = DataSourceTestSubclassNet(sys_cfg, distro, paths) |
3719 | + self.assertEqual({'key2': False}, datasource.ds_cfg) |
3720 | + |
3721 | + def test_str_is_classname(self): |
3722 | + """The string representation of the datasource is the classname.""" |
3723 | + self.assertEqual('DataSource', str(self.datasource)) |
3724 | + self.assertEqual( |
3725 | + 'DataSourceTestSubclassNet', |
3726 | + str(DataSourceTestSubclassNet('', '', self.paths))) |
3727 | + |
3728 | + def test__get_data_unimplemented(self): |
3729 | + """Raise an error when _get_data is not implemented.""" |
3730 | + with self.assertRaises(NotImplementedError) as context_manager: |
3731 | + self.datasource.get_data() |
3732 | + self.assertIn( |
3733 | + 'Subclasses of DataSource must implement _get_data', |
3734 | + str(context_manager.exception)) |
3735 | + datasource2 = InvalidDataSourceTestSubclassNet( |
3736 | + self.sys_cfg, self.distro, self.paths) |
3737 | + with self.assertRaises(NotImplementedError) as context_manager: |
3738 | + datasource2.get_data() |
3739 | + self.assertIn( |
3740 | + 'Subclasses of DataSource must implement _get_data', |
3741 | + str(context_manager.exception)) |
3742 | + |
3743 | + def test_get_data_calls_subclass__get_data(self): |
3744 | + """Datasource.get_data uses the subclass' version of _get_data.""" |
3745 | + tmp = self.tmp_dir() |
3746 | + datasource = DataSourceTestSubclassNet( |
3747 | + self.sys_cfg, self.distro, Paths({'run_dir': tmp})) |
3748 | + self.assertTrue(datasource.get_data()) |
3749 | + self.assertEqual( |
3750 | + {'availability_zone': 'myaz', |
3751 | + 'local-hostname': 'test-subclass-hostname', |
3752 | + 'region': 'myregion'}, |
3753 | + datasource.metadata) |
3754 | + self.assertEqual('userdata_raw', datasource.userdata_raw) |
3755 | + self.assertEqual('vendordata_raw', datasource.vendordata_raw) |
3756 | + |
3757 | + def test_get_data_write_json_instance_data(self): |
3758 | + """get_data writes INSTANCE_JSON_FILE to run_dir as readonly root.""" |
3759 | + tmp = self.tmp_dir() |
3760 | + datasource = DataSourceTestSubclassNet( |
3761 | + self.sys_cfg, self.distro, Paths({'run_dir': tmp})) |
3762 | + datasource.get_data() |
3763 | + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) |
3764 | + content = util.load_file(json_file) |
3765 | + expected = { |
3766 | + 'base64-encoded-keys': [], |
3767 | + 'v1': { |
3768 | + 'availability-zone': 'myaz', |
3769 | + 'cloud-name': 'subclasscloudname', |
3770 | + 'instance-id': 'iid-datasource', |
3771 | + 'local-hostname': 'test-subclass-hostname', |
3772 | + 'region': 'myregion'}, |
3773 | + 'ds': { |
3774 | + 'meta-data': {'availability_zone': 'myaz', |
3775 | + 'local-hostname': 'test-subclass-hostname', |
3776 | + 'region': 'myregion'}, |
3777 | + 'user-data': 'userdata_raw', |
3778 | + 'vendor-data': 'vendordata_raw'}} |
3779 | + self.assertEqual(expected, util.load_json(content)) |
3780 | + file_stat = os.stat(json_file) |
3781 | + self.assertEqual(0o600, stat.S_IMODE(file_stat.st_mode)) |
3782 | + |
3783 | + def test_get_data_handles_redacted_unserializable_content(self): |
3784 | + """get_data warns unserializable content in INSTANCE_JSON_FILE.""" |
3785 | + tmp = self.tmp_dir() |
3786 | + datasource = DataSourceTestSubclassNet( |
3787 | + self.sys_cfg, self.distro, Paths({'run_dir': tmp}), |
3788 | + custom_userdata={'key1': 'val1', 'key2': {'key2.1': self.paths}}) |
3789 | + self.assertTrue(datasource.get_data()) |
3790 | + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) |
3791 | + content = util.load_file(json_file) |
3792 | + expected_userdata = { |
3793 | + 'key1': 'val1', |
3794 | + 'key2': { |
3795 | + 'key2.1': "Warning: redacted unserializable type <class" |
3796 | + " 'cloudinit.helpers.Paths'>"}} |
3797 | + instance_json = util.load_json(content) |
3798 | + self.assertEqual( |
3799 | + expected_userdata, instance_json['ds']['user-data']) |
3800 | + |
3801 | + @skipIf(not six.PY3, "json serialization on <= py2.7 handles bytes") |
3802 | + def test_get_data_base64encodes_unserializable_bytes(self): |
3803 | + """On py3, get_data base64encodes any unserializable content.""" |
3804 | + tmp = self.tmp_dir() |
3805 | + datasource = DataSourceTestSubclassNet( |
3806 | + self.sys_cfg, self.distro, Paths({'run_dir': tmp}), |
3807 | + custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}}) |
3808 | + self.assertTrue(datasource.get_data()) |
3809 | + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) |
3810 | + content = util.load_file(json_file) |
3811 | + instance_json = util.load_json(content) |
3812 | + self.assertEqual( |
3813 | + ['ds/user-data/key2/key2.1'], |
3814 | + instance_json['base64-encoded-keys']) |
3815 | + self.assertEqual( |
3816 | + {'key1': 'val1', 'key2': {'key2.1': 'EjM='}}, |
3817 | + instance_json['ds']['user-data']) |
3818 | + |
3819 | + @skipIf(not six.PY2, "json serialization on <= py2.7 handles bytes") |
3820 | + def test_get_data_handles_bytes_values(self): |
3821 | + """On py2 get_data handles bytes values without having to b64encode.""" |
3822 | + tmp = self.tmp_dir() |
3823 | + datasource = DataSourceTestSubclassNet( |
3824 | + self.sys_cfg, self.distro, Paths({'run_dir': tmp}), |
3825 | + custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}}) |
3826 | + self.assertTrue(datasource.get_data()) |
3827 | + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) |
3828 | + content = util.load_file(json_file) |
3829 | + instance_json = util.load_json(content) |
3830 | + self.assertEqual([], instance_json['base64-encoded-keys']) |
3831 | + self.assertEqual( |
3832 | + {'key1': 'val1', 'key2': {'key2.1': '\x123'}}, |
3833 | + instance_json['ds']['user-data']) |
3834 | + |
3835 | + @skipIf(not six.PY2, "Only python2 hits UnicodeDecodeErrors on non-utf8") |
3836 | + def test_non_utf8_encoding_logs_warning(self): |
3837 | + """When non-utf-8 values exist in py2 instance-data is not written.""" |
3838 | + tmp = self.tmp_dir() |
3839 | + datasource = DataSourceTestSubclassNet( |
3840 | + self.sys_cfg, self.distro, Paths({'run_dir': tmp}), |
3841 | + custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'ab\xaadef'}}) |
3842 | + self.assertTrue(datasource.get_data()) |
3843 | + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) |
3844 | + self.assertFalse(os.path.exists(json_file)) |
3845 | + self.assertIn( |
3846 | + "WARNING: Error persisting instance-data.json: 'utf8' codec can't" |
3847 | + " decode byte 0xaa in position 2: invalid start byte", |
3848 | + self.logs.getvalue()) |
3849 | diff --git a/cloudinit/temp_utils.py b/cloudinit/temp_utils.py |
3850 | index 5d7adf7..c98a1b5 100644 |
3851 | --- a/cloudinit/temp_utils.py |
3852 | +++ b/cloudinit/temp_utils.py |
3853 | @@ -28,13 +28,18 @@ def _tempfile_dir_arg(odir=None, needs_exe=False): |
3854 | if odir is not None: |
3855 | return odir |
3856 | |
3857 | + if needs_exe: |
3858 | + tdir = _EXE_ROOT_TMPDIR |
3859 | + if not os.path.isdir(tdir): |
3860 | + os.makedirs(tdir) |
3861 | + os.chmod(tdir, 0o1777) |
3862 | + return tdir |
3863 | + |
3864 | global _TMPDIR |
3865 | if _TMPDIR: |
3866 | return _TMPDIR |
3867 | |
3868 | - if needs_exe: |
3869 | - tdir = _EXE_ROOT_TMPDIR |
3870 | - elif os.getuid() == 0: |
3871 | + if os.getuid() == 0: |
3872 | tdir = _ROOT_TMPDIR |
3873 | else: |
3874 | tdir = os.environ.get('TMPDIR', '/tmp') |
3875 | diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py |
3876 | index 6f88a5b..0080c72 100644 |
3877 | --- a/cloudinit/tests/helpers.py |
3878 | +++ b/cloudinit/tests/helpers.py |
3879 | @@ -3,7 +3,6 @@ |
3880 | from __future__ import print_function |
3881 | |
3882 | import functools |
3883 | -import json |
3884 | import logging |
3885 | import os |
3886 | import shutil |
3887 | @@ -20,6 +19,11 @@ try: |
3888 | except ImportError: |
3889 | from contextlib2 import ExitStack |
3890 | |
3891 | +try: |
3892 | + from configparser import ConfigParser |
3893 | +except ImportError: |
3894 | + from ConfigParser import ConfigParser |
3895 | + |
3896 | from cloudinit import helpers as ch |
3897 | from cloudinit import util |
3898 | |
3899 | @@ -114,6 +118,16 @@ class TestCase(unittest2.TestCase): |
3900 | self.addCleanup(m.stop) |
3901 | setattr(self, attr, p) |
3902 | |
3903 | + # prefer python3 read_file over readfp but allow fallback |
3904 | + def parse_and_read(self, contents): |
3905 | + parser = ConfigParser() |
3906 | + if hasattr(parser, 'read_file'): |
3907 | + parser.read_file(contents) |
3908 | + elif hasattr(parser, 'readfp'): |
3909 | + # pylint: disable=W1505 |
3910 | + parser.readfp(contents) |
3911 | + return parser |
3912 | + |
3913 | |
3914 | class CiTestCase(TestCase): |
3915 | """This is the preferred test case base class unless user |
3916 | @@ -159,6 +173,18 @@ class CiTestCase(TestCase): |
3917 | dir = self.tmp_dir() |
3918 | return os.path.normpath(os.path.abspath(os.path.join(dir, path))) |
3919 | |
3920 | + def assertRaisesCodeEqual(self, expected, found): |
3921 | + """Handle centos6 having different context manager for assertRaises. |
3922 | + with assertRaises(Exception) as e: |
3923 | + raise Exception("BOO") |
3924 | + |
3925 | + centos6 will have e.exception as an integer. |
3926 | + anything newer will have it as something with a '.code'""" |
3927 | + if isinstance(found, int): |
3928 | + self.assertEqual(expected, found) |
3929 | + else: |
3930 | + self.assertEqual(expected, found.code) |
3931 | + |
3932 | |
3933 | class ResourceUsingTestCase(CiTestCase): |
3934 | |
3935 | @@ -337,12 +363,6 @@ def dir2dict(startdir, prefix=None): |
3936 | return flist |
3937 | |
3938 | |
3939 | -def json_dumps(data): |
3940 | - # print data in nicely formatted json. |
3941 | - return json.dumps(data, indent=1, sort_keys=True, |
3942 | - separators=(',', ': ')) |
3943 | - |
3944 | - |
3945 | def wrap_and_call(prefix, mocks, func, *args, **kwargs): |
3946 | """ |
3947 | call func(args, **kwargs) with mocks applied, then unapplies mocks |
3948 | @@ -402,4 +422,12 @@ if not hasattr(mock.Mock, 'assert_not_called'): |
3949 | mock.Mock.assert_not_called = __mock_assert_not_called |
3950 | |
3951 | |
3952 | +# older unittest2.TestCase (centos6) does not have assertRaisesRegex |
3953 | +# And setting assertRaisesRegex to assertRaisesRegexp causes |
3954 | +# https://github.com/PyCQA/pylint/issues/1653 . So the workaround. |
3955 | +if not hasattr(unittest2.TestCase, 'assertRaisesRegex'): |
3956 | + def _tricky(*args, **kwargs): |
3957 | + return unittest2.TestCase.assertRaisesRegexp |
3958 | + unittest2.TestCase.assertRaisesRegex = _tricky |
3959 | + |
3960 | # vi: ts=4 expandtab |
3961 | diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py |
3962 | new file mode 100644 |
3963 | index 0000000..ba6bf69 |
3964 | --- /dev/null |
3965 | +++ b/cloudinit/tests/test_util.py |
3966 | @@ -0,0 +1,46 @@ |
3967 | +# This file is part of cloud-init. See LICENSE file for license information. |
3968 | + |
3969 | +"""Tests for cloudinit.util""" |
3970 | + |
3971 | +import logging |
3972 | + |
3973 | +import cloudinit.util as util |
3974 | + |
3975 | +from cloudinit.tests.helpers import CiTestCase, mock |
3976 | + |
3977 | +LOG = logging.getLogger(__name__) |
3978 | + |
3979 | +MOUNT_INFO = [ |
3980 | + '68 0 8:3 / / ro,relatime shared:1 - btrfs /dev/sda1 ro,attr2,inode64', |
3981 | + '153 68 254:0 / /home rw,relatime shared:101 - xfs /dev/sda2 rw,attr2' |
3982 | +] |
3983 | + |
3984 | + |
3985 | +class TestUtil(CiTestCase): |
3986 | + |
3987 | + def test_parse_mount_info_no_opts_no_arg(self): |
3988 | + result = util.parse_mount_info('/home', MOUNT_INFO, LOG) |
3989 | + self.assertEqual(('/dev/sda2', 'xfs', '/home'), result) |
3990 | + |
3991 | + def test_parse_mount_info_no_opts_arg(self): |
3992 | + result = util.parse_mount_info('/home', MOUNT_INFO, LOG, False) |
3993 | + self.assertEqual(('/dev/sda2', 'xfs', '/home'), result) |
3994 | + |
3995 | + def test_parse_mount_info_with_opts(self): |
3996 | + result = util.parse_mount_info('/', MOUNT_INFO, LOG, True) |
3997 | + self.assertEqual( |
3998 | + ('/dev/sda1', 'btrfs', '/', 'ro,relatime'), |
3999 | + result |
4000 | + ) |
4001 | + |
4002 | + @mock.patch('cloudinit.util.get_mount_info') |
4003 | + def test_mount_is_rw(self, m_mount_info): |
4004 | + m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'rw,relatime') |
4005 | + is_rw = util.mount_is_read_write('/') |
4006 | + self.assertEqual(is_rw, True) |
4007 | + |
4008 | + @mock.patch('cloudinit.util.get_mount_info') |
4009 | + def test_mount_is_ro(self, m_mount_info): |
4010 | + m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'ro,relatime') |
4011 | + is_rw = util.mount_is_read_write('/') |
4012 | + self.assertEqual(is_rw, False) |
4013 | diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py |
4014 | index 0e0f5b4..0a5be0b 100644 |
4015 | --- a/cloudinit/url_helper.py |
4016 | +++ b/cloudinit/url_helper.py |
4017 | @@ -273,7 +273,7 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, |
4018 | |
4019 | def wait_for_url(urls, max_wait=None, timeout=None, |
4020 | status_cb=None, headers_cb=None, sleep_time=1, |
4021 | - exception_cb=None): |
4022 | + exception_cb=None, sleep_time_cb=None): |
4023 | """ |
4024 | urls: a list of urls to try |
4025 | max_wait: roughly the maximum time to wait before giving up |
4026 | @@ -286,6 +286,8 @@ def wait_for_url(urls, max_wait=None, timeout=None, |
4027 | for request. |
4028 | exception_cb: call method with 2 arguments 'msg' (per status_cb) and |
4029 | 'exception', the exception that occurred. |
4030 | + sleep_time_cb: call method with 2 arguments (response, loop_n) that |
4031 | + generates the next sleep time. |
4032 | |
4033 | the idea of this routine is to wait for the EC2 metadata service to |
4034 | come up. On both Eucalyptus and EC2 we have seen the case where |
4035 | @@ -301,6 +303,8 @@ def wait_for_url(urls, max_wait=None, timeout=None, |
4036 | service but is not going to find one. It is possible that the instance |
4037 | data host (169.254.169.254) may be firewalled off entirely for a system, |
4038 | meaning that the connection will block forever unless a timeout is set. |
4039 | + |
4040 | + A value of None for max_wait will retry indefinitely. |
4041 | """ |
4042 | start_time = time.time() |
4043 | |
4044 | @@ -311,18 +315,24 @@ def wait_for_url(urls, max_wait=None, timeout=None, |
4045 | status_cb = log_status_cb |
4046 | |
4047 | def timeup(max_wait, start_time): |
4048 | - return ((max_wait <= 0 or max_wait is None) or |
4049 | - (time.time() - start_time > max_wait)) |
4050 | + if (max_wait is None): |
4051 | + return False |
4052 | + return ((max_wait <= 0) or (time.time() - start_time > max_wait)) |
4053 | |
4054 | loop_n = 0 |
4055 | + response = None |
4056 | while True: |
4057 | - sleep_time = int(loop_n / 5) + 1 |
4058 | + if sleep_time_cb is not None: |
4059 | + sleep_time = sleep_time_cb(response, loop_n) |
4060 | + else: |
4061 | + sleep_time = int(loop_n / 5) + 1 |
4062 | for url in urls: |
4063 | now = time.time() |
4064 | if loop_n != 0: |
4065 | if timeup(max_wait, start_time): |
4066 | break |
4067 | - if timeout and (now + timeout > (start_time + max_wait)): |
4068 | + if (max_wait is not None and |
4069 | + timeout and (now + timeout > (start_time + max_wait))): |
4070 | # shorten timeout to not run way over max_time |
4071 | timeout = int((start_time + max_wait) - now) |
4072 | |
4073 | @@ -354,10 +364,11 @@ def wait_for_url(urls, max_wait=None, timeout=None, |
4074 | url_exc = e |
4075 | |
4076 | time_taken = int(time.time() - start_time) |
4077 | - status_msg = "Calling '%s' failed [%s/%ss]: %s" % (url, |
4078 | - time_taken, |
4079 | - max_wait, |
4080 | - reason) |
4081 | + max_wait_str = "%ss" % max_wait if max_wait else "unlimited" |
4082 | + status_msg = "Calling '%s' failed [%s/%s]: %s" % (url, |
4083 | + time_taken, |
4084 | + max_wait_str, |
4085 | + reason) |
4086 | status_cb(status_msg) |
4087 | if exception_cb: |
4088 | # This can be used to alter the headers that will be sent |
4089 | diff --git a/cloudinit/util.py b/cloudinit/util.py |
4090 | index 6c014ba..338fb97 100644 |
4091 | --- a/cloudinit/util.py |
4092 | +++ b/cloudinit/util.py |
4093 | @@ -253,12 +253,18 @@ class ProcessExecutionError(IOError): |
4094 | self.exit_code = exit_code |
4095 | |
4096 | if not stderr: |
4097 | - self.stderr = self.empty_attr |
4098 | + if stderr is None: |
4099 | + self.stderr = self.empty_attr |
4100 | + else: |
4101 | + self.stderr = stderr |
4102 | else: |
4103 | self.stderr = self._indent_text(stderr) |
4104 | |
4105 | if not stdout: |
4106 | - self.stdout = self.empty_attr |
4107 | + if stdout is None: |
4108 | + self.stdout = self.empty_attr |
4109 | + else: |
4110 | + self.stdout = stdout |
4111 | else: |
4112 | self.stdout = self._indent_text(stdout) |
4113 | |
4114 | @@ -533,15 +539,6 @@ def multi_log(text, console=True, stderr=True, |
4115 | log.log(log_level, text) |
4116 | |
4117 | |
4118 | -def load_json(text, root_types=(dict,)): |
4119 | - decoded = json.loads(decode_binary(text)) |
4120 | - if not isinstance(decoded, tuple(root_types)): |
4121 | - expected_types = ", ".join([str(t) for t in root_types]) |
4122 | - raise TypeError("(%s) root types expected, got %s instead" |
4123 | - % (expected_types, type(decoded))) |
4124 | - return decoded |
4125 | - |
4126 | - |
4127 | def is_ipv4(instr): |
4128 | """determine if input string is a ipv4 address. return boolean.""" |
4129 | toks = instr.split('.') |
4130 | @@ -900,17 +897,17 @@ def load_yaml(blob, default=None, allowed=(dict,)): |
4131 | "of length %s with allowed root types %s", |
4132 | len(blob), allowed) |
4133 | converted = safeyaml.load(blob) |
4134 | - if not isinstance(converted, allowed): |
4135 | + if converted is None: |
4136 | + LOG.debug("loaded blob returned None, returning default.") |
4137 | + converted = default |
4138 | + elif not isinstance(converted, allowed): |
4139 | # Yes this will just be caught, but that's ok for now... |
4140 | raise TypeError(("Yaml load allows %s root types," |
4141 | " but got %s instead") % |
4142 | (allowed, type_utils.obj_name(converted))) |
4143 | loaded = converted |
4144 | except (yaml.YAMLError, TypeError, ValueError): |
4145 | - if len(blob) == 0: |
4146 | - LOG.debug("load_yaml given empty string, returning default") |
4147 | - else: |
4148 | - logexc(LOG, "Failed loading yaml blob") |
4149 | + logexc(LOG, "Failed loading yaml blob") |
4150 | return loaded |
4151 | |
4152 | |
4153 | @@ -1398,6 +1395,32 @@ def get_output_cfg(cfg, mode): |
4154 | return ret |
4155 | |
4156 | |
4157 | +def get_config_logfiles(cfg): |
4158 | + """Return a list of log file paths from the configuration dictionary. |
4159 | + |
4160 | + @param cfg: The cloud-init merged configuration dictionary. |
4161 | + """ |
4162 | + logs = [] |
4163 | + if not cfg or not isinstance(cfg, dict): |
4164 | + return logs |
4165 | + default_log = cfg.get('def_log_file') |
4166 | + if default_log: |
4167 | + logs.append(default_log) |
4168 | + for fmt in get_output_cfg(cfg, None): |
4169 | + if not fmt: |
4170 | + continue |
4171 | + match = re.match('(?P<type>\||>+)\s*(?P<target>.*)', fmt) |
4172 | + if not match: |
4173 | + continue |
4174 | + target = match.group('target') |
4175 | + parts = target.split() |
4176 | + if len(parts) == 1: |
4177 | + logs.append(target) |
4178 | + elif ['tee', '-a'] == parts[:2]: |
4179 | + logs.append(parts[2]) |
4180 | + return list(set(logs)) |
4181 | + |
4182 | + |
4183 | def logexc(log, msg, *args): |
4184 | # Setting this here allows this to change |
4185 | # levels easily (not always error level) |
4186 | @@ -1454,7 +1477,31 @@ def ensure_dirs(dirlist, mode=0o755): |
4187 | ensure_dir(d, mode) |
4188 | |
4189 | |
4190 | +def load_json(text, root_types=(dict,)): |
4191 | + decoded = json.loads(decode_binary(text)) |
4192 | + if not isinstance(decoded, tuple(root_types)): |
4193 | + expected_types = ", ".join([str(t) for t in root_types]) |
4194 | + raise TypeError("(%s) root types expected, got %s instead" |
4195 | + % (expected_types, type(decoded))) |
4196 | + return decoded |
4197 | + |
4198 | + |
4199 | +def json_serialize_default(_obj): |
4200 | + """Handler for types which aren't json serializable.""" |
4201 | + try: |
4202 | + return 'ci-b64:{0}'.format(b64e(_obj)) |
4203 | + except AttributeError: |
4204 | + return 'Warning: redacted unserializable type {0}'.format(type(_obj)) |
4205 | + |
4206 | + |
4207 | +def json_dumps(data): |
4208 | + """Return data in nicely formatted json.""" |
4209 | + return json.dumps(data, indent=1, sort_keys=True, |
4210 | + separators=(',', ': '), default=json_serialize_default) |
4211 | + |
4212 | + |
4213 | def yaml_dumps(obj, explicit_start=True, explicit_end=True): |
4214 | + """Return data in nicely formatted yaml.""" |
4215 | return yaml.safe_dump(obj, |
4216 | line_break="\n", |
4217 | indent=4, |
4218 | @@ -1540,6 +1587,10 @@ def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True): |
4219 | mtypes = list(mtype) |
4220 | elif mtype is None: |
4221 | mtypes = None |
4222 | + else: |
4223 | + raise TypeError( |
4224 | + 'Unsupported type provided for mtype parameter: {_type}'.format( |
4225 | + _type=type(mtype))) |
4226 | |
4227 | # clean up 'mtype' input a bit based on platform. |
4228 | platsys = platform.system().lower() |
4229 | @@ -1788,58 +1839,60 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, |
4230 | env = env.copy() |
4231 | env.update(update_env) |
4232 | |
4233 | - try: |
4234 | - if target_path(target) != "/": |
4235 | - args = ['chroot', target] + list(args) |
4236 | + if target_path(target) != "/": |
4237 | + args = ['chroot', target] + list(args) |
4238 | |
4239 | - if not logstring: |
4240 | - LOG.debug(("Running command %s with allowed return codes %s" |
4241 | - " (shell=%s, capture=%s)"), args, rcs, shell, capture) |
4242 | - else: |
4243 | - LOG.debug(("Running hidden command to protect sensitive " |
4244 | - "input/output logstring: %s"), logstring) |
4245 | - |
4246 | - stdin = None |
4247 | - stdout = None |
4248 | - stderr = None |
4249 | - if capture: |
4250 | - stdout = subprocess.PIPE |
4251 | - stderr = subprocess.PIPE |
4252 | - if data is None: |
4253 | - # using devnull assures any reads get null, rather |
4254 | - # than possibly waiting on input. |
4255 | - devnull_fp = open(os.devnull) |
4256 | - stdin = devnull_fp |
4257 | - else: |
4258 | - stdin = subprocess.PIPE |
4259 | - if not isinstance(data, bytes): |
4260 | - data = data.encode() |
4261 | + if not logstring: |
4262 | + LOG.debug(("Running command %s with allowed return codes %s" |
4263 | + " (shell=%s, capture=%s)"), args, rcs, shell, capture) |
4264 | + else: |
4265 | + LOG.debug(("Running hidden command to protect sensitive " |
4266 | + "input/output logstring: %s"), logstring) |
4267 | + |
4268 | + stdin = None |
4269 | + stdout = None |
4270 | + stderr = None |
4271 | + if capture: |
4272 | + stdout = subprocess.PIPE |
4273 | + stderr = subprocess.PIPE |
4274 | + if data is None: |
4275 | + # using devnull assures any reads get null, rather |
4276 | + # than possibly waiting on input. |
4277 | + devnull_fp = open(os.devnull) |
4278 | + stdin = devnull_fp |
4279 | + else: |
4280 | + stdin = subprocess.PIPE |
4281 | + if not isinstance(data, bytes): |
4282 | + data = data.encode() |
4283 | |
4284 | + try: |
4285 | sp = subprocess.Popen(args, stdout=stdout, |
4286 | stderr=stderr, stdin=stdin, |
4287 | env=env, shell=shell) |
4288 | (out, err) = sp.communicate(data) |
4289 | - |
4290 | - # Just ensure blank instead of none. |
4291 | - if not out and capture: |
4292 | - out = b'' |
4293 | - if not err and capture: |
4294 | - err = b'' |
4295 | - if decode: |
4296 | - def ldecode(data, m='utf-8'): |
4297 | - if not isinstance(data, bytes): |
4298 | - return data |
4299 | - return data.decode(m, decode) |
4300 | - |
4301 | - out = ldecode(out) |
4302 | - err = ldecode(err) |
4303 | except OSError as e: |
4304 | - raise ProcessExecutionError(cmd=args, reason=e, |
4305 | - errno=e.errno) |
4306 | + raise ProcessExecutionError( |
4307 | + cmd=args, reason=e, errno=e.errno, |
4308 | + stdout="-" if decode else b"-", |
4309 | + stderr="-" if decode else b"-") |
4310 | finally: |
4311 | if devnull_fp: |
4312 | devnull_fp.close() |
4313 | |
4314 | + # Just ensure blank instead of none. |
4315 | + if not out and capture: |
4316 | + out = b'' |
4317 | + if not err and capture: |
4318 | + err = b'' |
4319 | + if decode: |
4320 | + def ldecode(data, m='utf-8'): |
4321 | + if not isinstance(data, bytes): |
4322 | + return data |
4323 | + return data.decode(m, decode) |
4324 | + |
4325 | + out = ldecode(out) |
4326 | + err = ldecode(err) |
4327 | + |
4328 | rc = sp.returncode |
4329 | if rc not in rcs: |
4330 | raise ProcessExecutionError(stdout=out, stderr=err, |
4331 | @@ -2010,7 +2063,7 @@ def expand_package_list(version_fmt, pkgs): |
4332 | return pkglist |
4333 | |
4334 | |
4335 | -def parse_mount_info(path, mountinfo_lines, log=LOG): |
4336 | +def parse_mount_info(path, mountinfo_lines, log=LOG, get_mnt_opts=False): |
4337 | """Return the mount information for PATH given the lines from |
4338 | /proc/$$/mountinfo.""" |
4339 | |
4340 | @@ -2072,11 +2125,16 @@ def parse_mount_info(path, mountinfo_lines, log=LOG): |
4341 | |
4342 | match_mount_point = mount_point |
4343 | match_mount_point_elements = mount_point_elements |
4344 | + mount_options = parts[5] |
4345 | |
4346 | - if devpth and fs_type and match_mount_point: |
4347 | - return (devpth, fs_type, match_mount_point) |
4348 | + if get_mnt_opts: |
4349 | + if devpth and fs_type and match_mount_point and mount_options: |
4350 | + return (devpth, fs_type, match_mount_point, mount_options) |
4351 | else: |
4352 | - return None |
4353 | + if devpth and fs_type and match_mount_point: |
4354 | + return (devpth, fs_type, match_mount_point) |
4355 | + |
4356 | + return None |
4357 | |
4358 | |
4359 | def parse_mtab(path): |
4360 | @@ -2146,7 +2204,7 @@ def parse_mount(path): |
4361 | return None |
4362 | |
4363 | |
4364 | -def get_mount_info(path, log=LOG): |
4365 | +def get_mount_info(path, log=LOG, get_mnt_opts=False): |
4366 | # Use /proc/$$/mountinfo to find the device where path is mounted. |
4367 | # This is done because with a btrfs filesystem using os.stat(path) |
4368 | # does not return the ID of the device. |
4369 | @@ -2178,7 +2236,7 @@ def get_mount_info(path, log=LOG): |
4370 | mountinfo_path = '/proc/%s/mountinfo' % os.getpid() |
4371 | if os.path.exists(mountinfo_path): |
4372 | lines = load_file(mountinfo_path).splitlines() |
4373 | - return parse_mount_info(path, lines, log) |
4374 | + return parse_mount_info(path, lines, log, get_mnt_opts) |
4375 | elif os.path.exists("/etc/mtab"): |
4376 | return parse_mtab(path) |
4377 | else: |
4378 | @@ -2286,7 +2344,8 @@ def pathprefix2dict(base, required=None, optional=None, delim=os.path.sep): |
4379 | missing.append(f) |
4380 | |
4381 | if len(missing): |
4382 | - raise ValueError("Missing required files: %s", ','.join(missing)) |
4383 | + raise ValueError( |
4384 | + 'Missing required files: {files}'.format(files=','.join(missing))) |
4385 | |
4386 | return ret |
4387 | |
4388 | @@ -2563,4 +2622,10 @@ def wait_for_files(flist, maxwait, naplen=.5, log_pre=""): |
4389 | return need |
4390 | |
4391 | |
4392 | +def mount_is_read_write(mount_point): |
4393 | + """Check whether the given mount point is mounted rw""" |
4394 | + result = get_mount_info(mount_point, get_mnt_opts=True) |
4395 | + mount_opts = result[-1].split(',') |
4396 | + return mount_opts[0] == 'rw' |
4397 | + |
4398 | # vi: ts=4 expandtab |
4399 | diff --git a/cloudinit/version.py b/cloudinit/version.py |
4400 | index 3255f39..be6262d 100644 |
4401 | --- a/cloudinit/version.py |
4402 | +++ b/cloudinit/version.py |
4403 | @@ -4,7 +4,7 @@ |
4404 | # |
4405 | # This file is part of cloud-init. See LICENSE file for license information. |
4406 | |
4407 | -__VERSION__ = "17.1" |
4408 | +__VERSION__ = "17.2" |
4409 | |
4410 | FEATURES = [ |
4411 | # supports network config version 1 |
4412 | diff --git a/debian/changelog b/debian/changelog |
4413 | index 03308d7..474c9ed 100644 |
4414 | --- a/debian/changelog |
4415 | +++ b/debian/changelog |
4416 | @@ -1,10 +1,62 @@ |
4417 | -cloud-init (17.1-46-g7acc9e68-0ubuntu1~16.04.2) UNRELEASED; urgency=medium |
4418 | +cloud-init (17.2-30-gf7deaf15-0ubuntu1~16.04.1) xenial-proposed; urgency=medium |
4419 | |
4420 | * debian/patches/ds-identify-behavior-xenial.patch: refresh patch. |
4421 | * debian/grub-legacy-ec2.install: install post(inst|rm) files correctly. |
4422 | [Simon Deziel] (LP: #1581416) |
4423 | - |
4424 | - -- Scott Moser <smoser@ubuntu.com> Tue, 12 Dec 2017 14:29:46 -0500 |
4425 | + * New upstream snapshot (LP: #1747059) |
4426 | + - docs: Update RTD content for cloud-init subcommands. |
4427 | + - OVF: Extend well-known labels to include OVFENV. |
4428 | + - Fix potential cases of uninitialized variables. |
4429 | + - tests: Collect script output as binary, collect systemd journal, fix lxd. |
4430 | + - HACKING.rst: mention setting user name and email via git config. |
4431 | + - Azure VM Preprovisioning support. [Douglas Jordan] |
4432 | + - tools/read-version: Fix read-version when in a git worktree. |
4433 | + - docs: Fix typos in docs and one debug message. [Florian Grignon] |
4434 | + - btrfs: support resizing if root is mounted ro. |
4435 | + [Robert Schweikert] |
4436 | + - OpenNebula: Improve network configuration support. |
4437 | + [Akihiko Ota] |
4438 | + - tests: Fix EC2 Platform to return console output as bytes. |
4439 | + - tests: Fix attempted use of /run in a test case. |
4440 | + - GCE: Improvements and changes to ssh key behavior for default user. |
4441 | + [Max Illfelder] |
4442 | + - subp: make ProcessExecutionError have expected types in stderr, stdout. |
4443 | + - tests: when querying ntp server, do not do dns resolution. |
4444 | + - Recognize uppercase vfat disk labels [James Penick] |
4445 | + - tests: remove zesty as supported OS to test |
4446 | + - Do not log warning on config files that represent None. |
4447 | + - tests: Use git hash pip dependency format for pylxd. |
4448 | + - tests: add integration requirements text file |
4449 | + - MAAS: add check_instance_id based off oauth tokens. |
4450 | + - tests: update apt sources list test |
4451 | + - tests: clean up image properties |
4452 | + - tests: rename test ssh keys to avoid appearance of leaking private keys. |
4453 | + - tests: Enable AWS EC2 Integration Testing |
4454 | + - cli: cloud-init clean handles symlinks |
4455 | + - SUSE: Add a basic test of network config rendering. [Robert Schweikert] |
4456 | + - Azure: Only bounce network when necessary. |
4457 | + - lint: Fix lints seen by pylint version 1.8.1. |
4458 | + - cli: Fix error in cloud-init modules --mode=init. |
4459 | + - release 17.2 |
4460 | + - ds-identify: failure in NoCloud due to unset variable usage. |
4461 | + - tests: fix collect_console when not implemented |
4462 | + - ec2: Use instance-identity doc for region and instance-id |
4463 | + [Andrew Jorgensen] |
4464 | + - tests: remove leaked tmp files in config drive tests. |
4465 | + - setup.py: Do not include rendered files in SOURCES.txt |
4466 | + - SUSE: remove delta in systemd local template for SUSE [Robert Schweikert] |
4467 | + - tests: move to using tox 1.7.5 |
4468 | + - OVF: improve ds-identify to support finding OVF iso transport. |
4469 | + - VMware: Support for user provided pre and post-customization scripts |
4470 | + [Maitreyee Saikia] |
4471 | + - citest: In NoCloudKVM provide keys via metadata not userdata. |
4472 | + - pylint: Update pylint to 1.7.1, run on tests/ and tools and fix |
4473 | + complaints. |
4474 | + - Datasources: Formalize DataSource get_data and related properties. |
4475 | + - cli: Add clean and status subcommands |
4476 | + - tests: consolidate platforms into specific dirs |
4477 | + |
4478 | + -- Chad Smith <chad.smith@canonical.com> Fri, 02 Feb 2018 12:37:30 -0700 |
4479 | |
4480 | cloud-init (17.1-46-g7acc9e68-0ubuntu1~16.04.1) xenial-proposed; urgency=medium |
4481 | |
4482 | diff --git a/doc/rtd/topics/boot.rst b/doc/rtd/topics/boot.rst |
4483 | index 859409a..f2976fd 100644 |
4484 | --- a/doc/rtd/topics/boot.rst |
4485 | +++ b/doc/rtd/topics/boot.rst |
4486 | @@ -1,3 +1,5 @@ |
4487 | +.. _boot_stages: |
4488 | + |
4489 | *********** |
4490 | Boot Stages |
4491 | *********** |
4492 | @@ -74,7 +76,7 @@ Network |
4493 | * **systemd service**: ``cloud-init.service`` |
4494 | * **runs**: After local stage and configured networking is up. |
4495 | * **blocks**: As much of remaining boot as possible. |
4496 | - * **modules**: ``init_modules`` |
4497 | + * **modules**: ``cloud_init_modules`` in **/etc/cloud/cloud.cfg** |
4498 | |
4499 | This stage requires all configured networking to be online, as it will fully |
4500 | process any user-data that is found. Here, processing means: |
4501 | @@ -104,7 +106,7 @@ Config |
4502 | * **systemd service**: ``cloud-config.service`` |
4503 | * **runs**: After network stage. |
4504 | * **blocks**: None. |
4505 | - * **modules**: ``config_modules`` |
4506 | + * **modules**: ``cloud_config_modules`` in **/etc/cloud/cloud.cfg** |
4507 | |
4508 | This stage runs config modules only. Modules that do not really have an |
4509 | effect on other stages of boot are run here. |
4510 | @@ -115,7 +117,7 @@ Final |
4511 | * **systemd service**: ``cloud-final.service`` |
4512 | * **runs**: As final part of boot (traditional "rc.local") |
4513 | * **blocks**: None. |
4514 | - * **modules**: ``final_modules`` |
4515 | + * **modules**: ``cloud_final_modules`` in **/etc/cloud/cloud.cfg** |
4516 | |
4517 | This stage runs as late in boot as possible. Any scripts that a user is |
4518 | accustomed to running after logging into a system should run correctly here. |
4519 | @@ -125,4 +127,9 @@ Things that run here include |
4520 | * configuration management plugins (puppet, chef, salt-minion) |
4521 | * user-scripts (including ``runcmd``). |
4522 | |
4523 | +For scripts external to cloud-init looking to wait until cloud-init has |
4524 | +finished, the ``cloud-init status`` subcommand can help block external |
4525 | +scripts until cloud-init is done without having to write your own systemd |
4526 | +unit dependency chains. See :ref:`cli_status` for more info. |
4527 | + |
4528 | .. vi: textwidth=78 |
4529 | diff --git a/doc/rtd/topics/capabilities.rst b/doc/rtd/topics/capabilities.rst |
4530 | index 31eaba5..ae3a0c7 100644 |
4531 | --- a/doc/rtd/topics/capabilities.rst |
4532 | +++ b/doc/rtd/topics/capabilities.rst |
4533 | @@ -1,3 +1,5 @@ |
4534 | +.. _capabilities: |
4535 | + |
4536 | ************ |
4537 | Capabilities |
4538 | ************ |
4539 | @@ -39,17 +41,19 @@ Currently defined feature names include: |
4540 | see :ref:`network_config_v2` documentation for examples. |
4541 | |
4542 | |
4543 | -CLI Interface : |
4544 | +CLI Interface |
4545 | +============= |
4546 | |
4547 | -``cloud-init features`` will print out each feature supported. If cloud-init |
4548 | -does not have the features subcommand, it also does not support any features |
4549 | -described in this document. |
4550 | +The command line documentation is accessible on any cloud-init |
4551 | +installed system: |
4552 | |
4553 | .. code-block:: bash |
4554 | |
4555 | % cloud-init --help |
4556 | - usage: cloud-init [-h] [--version] [--file FILES] [--debug] [--force] |
4557 | - {init,modules,query,single,dhclient-hook,features} ... |
4558 | + usage: cloud-init [-h] [--version] [--file FILES] |
4559 | + [--debug] [--force] |
4560 | + {init,modules,single,dhclient-hook,features,analyze,devel,collect-logs,clean,status} |
4561 | + ... |
4562 | |
4563 | optional arguments: |
4564 | -h, --help show this help message and exit |
4565 | @@ -61,7 +65,7 @@ described in this document. |
4566 | your own risk) |
4567 | |
4568 | Subcommands: |
4569 | - {init,modules,single,dhclient-hook,features,analyze,devel} |
4570 | + {init,modules,single,dhclient-hook,features,analyze,devel,collect-logs,clean,status} |
4571 | init initializes cloud-init and performs initial modules |
4572 | modules activates modules using a given configuration key |
4573 | single run a single module |
4574 | @@ -69,11 +73,153 @@ described in this document. |
4575 | features list defined features |
4576 | analyze Devel tool: Analyze cloud-init logs and data |
4577 | devel Run development tools |
4578 | + collect-logs Collect and tar all cloud-init debug info |
4579 | + clean Remove logs and artifacts so cloud-init can re-run. |
4580 | + status Report cloud-init status or wait on completion. |
4581 | + |
4582 | +CLI Subcommand details |
4583 | +====================== |
4584 | + |
4585 | +.. _cli_features: |
4586 | + |
4587 | +cloud-init features |
4588 | +------------------- |
4589 | +Print out each feature supported. If cloud-init does not have the |
4590 | +features subcommand, it also does not support any features described in |
4591 | +this document. |
4592 | + |
4593 | +.. code-block:: bash |
4594 | |
4595 | % cloud-init features |
4596 | NETWORK_CONFIG_V1 |
4597 | NETWORK_CONFIG_V2 |
4598 | |
4599 | +.. _cli_status: |
4600 | + |
4601 | +cloud-init status |
4602 | +----------------- |
4603 | +Report whether cloud-init is running, done, disabled or errored. Exits |
4604 | +non-zero if an error is detected in cloud-init. |
4605 | + * **--long**: Detailed status information. |
4606 | + * **--wait**: Block until cloud-init completes. |
4607 | + |
4608 | +.. code-block:: bash |
4609 | + |
4610 | + % cloud-init status --long |
4611 | + status: done |
4612 | + time: Wed, 17 Jan 2018 20:41:59 +0000 |
4613 | + detail: |
4614 | + DataSourceNoCloud [seed=/var/lib/cloud/seed/nocloud-net][dsmode=net] |
4615 | + |
4616 | + # Cloud-init running still short versus long options |
4617 | + % cloud-init status |
4618 | + status: running |
4619 | + % cloud-init status --long |
4620 | + status: running |
4621 | + time: Fri, 26 Jan 2018 21:39:43 +0000 |
4622 | + detail: |
4623 | + Running in stage: init-local |
4624 | + |
4625 | +.. _cli_collect_logs: |
4626 | + |
4627 | +cloud-init collect-logs |
4628 | +----------------------- |
4629 | +Collect and tar cloud-init generated logs, data files and system |
4630 | +information for triage. This subcommand is integrated with apport. |
4631 | + |
4632 | +**Note**: Ubuntu users can file bugs with `ubuntu-bug cloud-init` to |
4633 | +automatically attach these logs to a bug report. |
4634 | + |
4635 | +Logs collected are: |
4636 | + |
4637 | + * /var/log/cloud-init*log |
4638 | + * /run/cloud-init |
4639 | + * cloud-init package version |
4640 | + * dmesg output |
4641 | + * journalctl output |
4642 | + * /var/lib/cloud/instance/user-data.txt |
4643 | + |
4644 | +.. _cli_analyze: |
4645 | + |
4646 | +cloud-init analyze |
4647 | +------------------ |
4648 | +Get detailed reports of where cloud-init spends most of its time. See |
4649 | +:ref:`boot_time_analysis` for more info. |
4650 | + |
4651 | + * **blame** Report ordered by most costly operations. |
4652 | + * **dump** Machine-readable JSON dump of all cloud-init tracked events. |
4653 | + * **show** show time-ordered report of the cost of operations during each |
4654 | + boot stage. |
4655 | + |
4656 | +.. _cli_devel: |
4657 | + |
4658 | +cloud-init devel |
4659 | +---------------- |
4660 | +Collection of development tools under active development. These tools will |
4661 | +likely be promoted to top-level subcommands when stable. |
4662 | + |
4663 | + * ``cloud-init devel schema``: A **#cloud-config** format and schema |
4664 | + validator. It accepts a cloud-config yaml file and annotates potential |
4665 | + schema errors locally without the need for deployment. Schema |
4666 | + validation is work in progress and supports a subset of cloud-config |
4667 | + modules. |
4668 | + |
4669 | +.. _cli_clean: |
4670 | + |
4671 | +cloud-init clean |
4672 | +---------------- |
4673 | +Remove cloud-init artifacts from /var/lib/cloud and optionally reboot the |
4674 | +machine so that cloud-init re-runs all stages as it did on first boot. |
4675 | + |
4676 | + * **--logs**: Optionally remove /var/log/cloud-init*log files. |
4677 | + * **--reboot**: Reboot the system after removing artifacts. |
4678 | + |
4679 | +.. _cli_init: |
4680 | + |
4681 | +cloud-init init |
4682 | +--------------- |
4683 | +Generally run by OS init systems to execute cloud-init's stages |
4684 | +*init* and *init-local*. See :ref:`boot_stages` for more info. |
4685 | +Can be run on the commandline, but is generally gated to run only once |
4686 | +due to semaphores in **/var/lib/cloud/instance/sem/** and |
4687 | +**/var/lib/cloud/sem**. |
4688 | + |
4689 | + * **--local**: Run *init-local* stage instead of *init*. |
4690 | + |
4691 | +.. _cli_modules: |
4692 | + |
4693 | +cloud-init modules |
4694 | +------------------ |
4695 | +Generally run by OS init systems to execute *modules:config* and |
4696 | +*modules:final* boot stages. This executes cloud config :ref:`modules` |
4697 | +configured to run in the init, config and final stages. The modules are |
4698 | +declared to run in various boot stages in the file |
4699 | +**/etc/cloud/cloud.cfg** under keys **cloud_init_modules**, |
4700 | +**cloud_config_modules** and **cloud_final_modules**. Can be run on the |
4701 | +commandline, but each module is gated to run only once due to semaphores |
4702 | +in ``/var/lib/cloud/``. |
4703 | + |
4704 | + * **--mode (init|config|final)**: Run *modules:init*, *modules:config* or |
4705 | + *modules:final* cloud-init stages. See :ref:`boot_stages` for more info. |
4706 | + |
4707 | +.. _cli_single: |
4708 | + |
4709 | +cloud-init single |
4710 | +----------------- |
4711 | +Attempt to run a single named cloud config module. The following example |
4712 | +re-runs the cc_set_hostname module ignoring the module default frequency |
4713 | +of once-per-instance: |
4714 | + |
4715 | + * **--name**: The cloud-config module name to run |
4716 | + * **--frequency**: Optionally override the declared module frequency |
4717 | + with one of (always|once-per-instance|once) |
4718 | + |
4719 | +.. code-block:: bash |
4720 | + |
4721 | + % cloud-init single --name set_hostname --frequency always |
4722 | + |
4723 | +**Note**: Mileage may vary trying to re-run each cloud-config module, as |
4724 | +some are not idempotent. |
4725 | |
4726 | .. _Cloud-init: https://launchpad.net/cloud-init |
4727 | .. vi: textwidth=78 |
4728 | diff --git a/doc/rtd/topics/debugging.rst b/doc/rtd/topics/debugging.rst |
4729 | index 4e43dd5..c2b47ed 100644 |
4730 | --- a/doc/rtd/topics/debugging.rst |
4731 | +++ b/doc/rtd/topics/debugging.rst |
4732 | @@ -7,6 +7,7 @@ Overview |
4733 | This topic will discuss general approaches for test and debug of cloud-init on |
4734 | deployed instances. |
4735 | |
4736 | +.. _boot_time_analysis: |
4737 | |
4738 | Boot Time Analysis - cloud-init analyze |
4739 | ====================================== |
4740 | diff --git a/doc/rtd/topics/modules.rst b/doc/rtd/topics/modules.rst |
4741 | index cdb0f41..7b14675 100644 |
4742 | --- a/doc/rtd/topics/modules.rst |
4743 | +++ b/doc/rtd/topics/modules.rst |
4744 | @@ -1,3 +1,5 @@ |
4745 | +.. _modules: |
4746 | + |
4747 | ******* |
4748 | Modules |
4749 | ******* |
4750 | diff --git a/doc/rtd/topics/network-config-format-v1.rst b/doc/rtd/topics/network-config-format-v1.rst |
4751 | index ce3a1bd..2f8ab54 100644 |
4752 | --- a/doc/rtd/topics/network-config-format-v1.rst |
4753 | +++ b/doc/rtd/topics/network-config-format-v1.rst |
4754 | @@ -349,7 +349,7 @@ For any network device (one of the Config Types) users can define a list of |
4755 | entries will create interface alias allowing a single interface to use |
4756 | different ip configurations. |
4757 | |
4758 | -Valid keys for for ``subnets`` include the following: |
4759 | +Valid keys for ``subnets`` include the following: |
4760 | |
4761 | - ``type``: Specify the subnet type. |
4762 | - ``control``: Specify manual, auto or hotplug. Indicates how the interface |
4763 | diff --git a/doc/rtd/topics/tests.rst b/doc/rtd/topics/tests.rst |
4764 | index d668e3f..bf04bb3 100644 |
4765 | --- a/doc/rtd/topics/tests.rst |
4766 | +++ b/doc/rtd/topics/tests.rst |
4767 | @@ -118,19 +118,19 @@ TreeRun and TreeCollect |
4768 | |
4769 | If working on a cloud-init feature or resolving a bug, it may be useful to |
4770 | run the current copy of cloud-init in the integration testing environment. |
4771 | -The integration testing suite can automatically build a deb based on the |
4772 | +The integration testing suite can automatically build a deb based on the |
4773 | current working tree of cloud-init and run the test suite using this deb. |
4774 | |
4775 | The ``tree_run`` and ``tree_collect`` commands take the same arguments as |
4776 | -the ``run`` and ``collect`` commands. These commands will build a deb and |
4777 | -write it into a temporary file, then start the test suite and pass that deb |
4778 | +the ``run`` and ``collect`` commands. These commands will build a deb and |
4779 | +write it into a temporary file, then start the test suite and pass that deb |
4780 | in. To build a deb only, and not run the test suite, the ``bddeb`` command |
4781 | can be used. |
4782 | |
4783 | Note that code in the cloud-init working tree that has not been committed |
4784 | when the cloud-init deb is built will still be included. To build a |
4785 | cloud-init deb from or use the ``tree_run`` command using a copy of |
4786 | -cloud-init located in a different directory, use the option ``--cloud-init |
4787 | +cloud-init located in a different directory, use the option ``--cloud-init |
4788 | /path/to/cloud-init``. |
4789 | |
4790 | .. code-block:: bash |
4791 | @@ -383,7 +383,7 @@ Development Checklist |
4792 | * Valid unit tests validating output collected |
4793 | * Passes pylint & pep8 checks |
4794 | * Placed in the appropriate sub-folder in the test cases directory |
4795 | -* Tested by running the test: |
4796 | +* Tested by running the test: |
4797 | |
4798 | .. code-block:: bash |
4799 | |
4800 | @@ -392,6 +392,32 @@ Development Checklist |
4801 | --test modules/your_test.yaml \ |
4802 | [--deb <build of cloud-init>] |
4803 | |
4804 | + |
4805 | +Platforms |
4806 | +========= |
4807 | + |
4808 | +EC2 |
4809 | +--- |
4810 | +To run on the EC2 platform it is required that the user has an AWS credentials |
4811 | +configuration file specifying his or her access keys and a default region. |
4812 | +These configuration files are the standard that the AWS cli and other AWS |
4813 | +tools utilize for interacting directly with AWS itself and are normally |
4814 | +generated when running ``aws configure``: |
4815 | + |
4816 | +.. code-block:: bash |
4817 | + |
4818 | + $ cat $HOME/.aws/credentials |
4819 | + [default] |
4820 | + aws_access_key_id = <KEY HERE> |
4821 | + aws_secret_access_key = <KEY HERE> |
4822 | + |
4823 | +.. code-block:: bash |
4824 | + |
4825 | + $ cat $HOME/.aws/config |
4826 | + [default] |
4827 | + region = us-west-2 |
4828 | + |
4829 | + |
4830 | Architecture |
4831 | ============ |
4832 | |
4833 | @@ -455,7 +481,7 @@ replace the default. If the data is a dictionary then the value will be the |
4834 | result of merging that dictionary from the default config and that |
4835 | dictionary from the overrides. |
4836 | |
4837 | -Merging is done using the function |
4838 | +Merging is done using the function |
4839 | ``tests.cloud_tests.config.merge_config``, which can be examined for more |
4840 | detail on config merging behavior. |
4841 | |
4842 | diff --git a/integration-requirements.txt b/integration-requirements.txt |
4843 | new file mode 100644 |
4844 | index 0000000..45baac6 |
4845 | --- /dev/null |
4846 | +++ b/integration-requirements.txt |
4847 | @@ -0,0 +1,20 @@ |
4848 | +# PyPI requirements for cloud-init integration testing |
4849 | +# https://cloudinit.readthedocs.io/en/latest/topics/tests.html |
4850 | +# |
4851 | +# Note: Changes to this requirements may require updates to |
4852 | +# the packages/pkg-deps.json file as well. |
4853 | +# |
4854 | + |
4855 | +# ec2 backend |
4856 | +boto3==1.5.9 |
4857 | + |
4858 | +# ssh communication |
4859 | +paramiko==2.4.0 |
4860 | + |
4861 | +# lxd backend |
4862 | +# 01/10/2018: enables use of lxd as snap support |
4863 | +git+https://github.com/lxc/pylxd.git@0722955260a6557e6d2ffde1896bfe0707bbca27 |
4864 | + |
4865 | + |
4866 | +# finds latest image information |
4867 | +bzr+lp:simplestreams |
4868 | diff --git a/setup.py b/setup.py |
4869 | index bf697d7..bc3f52a 100755 |
4870 | --- a/setup.py |
4871 | +++ b/setup.py |
4872 | @@ -18,11 +18,14 @@ import tempfile |
4873 | |
4874 | import setuptools |
4875 | from setuptools.command.install import install |
4876 | +from setuptools.command.egg_info import egg_info |
4877 | |
4878 | from distutils.errors import DistutilsArgError |
4879 | |
4880 | import subprocess |
4881 | |
4882 | +RENDERED_TMPD_PREFIX = "RENDERED_TEMPD" |
4883 | + |
4884 | |
4885 | def is_f(p): |
4886 | return os.path.isfile(p) |
4887 | @@ -107,7 +110,7 @@ def render_tmpl(template): |
4888 | return template |
4889 | |
4890 | topdir = os.path.dirname(sys.argv[0]) |
4891 | - tmpd = tempfile.mkdtemp(dir=topdir) |
4892 | + tmpd = tempfile.mkdtemp(dir=topdir, prefix=RENDERED_TMPD_PREFIX) |
4893 | atexit.register(shutil.rmtree, tmpd) |
4894 | bname = os.path.basename(template).rstrip(tmpl_ext) |
4895 | fpath = os.path.join(tmpd, bname) |
4896 | @@ -156,6 +159,25 @@ elif os.path.isfile('/etc/redhat-release'): |
4897 | USR_LIB_EXEC = "usr/libexec" |
4898 | |
4899 | |
4900 | +class MyEggInfo(egg_info): |
4901 | + """This makes sure to not include the rendered files in SOURCES.txt.""" |
4902 | + |
4903 | + def find_sources(self): |
4904 | + ret = egg_info.find_sources(self) |
4905 | + # update the self.filelist. |
4906 | + self.filelist.exclude_pattern(RENDERED_TMPD_PREFIX + ".*", |
4907 | + is_regex=True) |
4908 | + # but since mfname is already written we have to update it also. |
4909 | + mfname = os.path.join(self.egg_info, "SOURCES.txt") |
4910 | + if os.path.exists(mfname): |
4911 | + with open(mfname) as fp: |
4912 | + files = [f for f in fp |
4913 | + if not f.startswith(RENDERED_TMPD_PREFIX)] |
4914 | + with open(mfname, "w") as fp: |
4915 | + fp.write(''.join(files)) |
4916 | + return ret |
4917 | + |
4918 | + |
4919 | # TODO: Is there a better way to do this?? |
4920 | class InitsysInstallData(install): |
4921 | init_system = None |
4922 | @@ -229,6 +251,7 @@ if os.uname()[0] != 'FreeBSD': |
4923 | # adding on the right init system configuration files |
4924 | cmdclass = { |
4925 | 'install': InitsysInstallData, |
4926 | + 'egg_info': MyEggInfo, |
4927 | } |
4928 | |
4929 | requirements = read_requires() |
4930 | diff --git a/systemd/cloud-init-local.service.tmpl b/systemd/cloud-init-local.service.tmpl |
4931 | index bf6b296..ff9c644 100644 |
4932 | --- a/systemd/cloud-init-local.service.tmpl |
4933 | +++ b/systemd/cloud-init-local.service.tmpl |
4934 | @@ -13,12 +13,6 @@ Before=shutdown.target |
4935 | Before=sysinit.target |
4936 | Conflicts=shutdown.target |
4937 | {% endif %} |
4938 | -{% if variant in ["suse"] %} |
4939 | -# Other distros use Before=sysinit.target. There is not a clearly identified |
4940 | -# reason for usage of basic.target instead. |
4941 | -Before=basic.target |
4942 | -Conflicts=shutdown.target |
4943 | -{% endif %} |
4944 | RequiresMountsFor=/var/lib/cloud |
4945 | |
4946 | [Service] |
4947 | diff --git a/tests/cloud_tests/__init__.py b/tests/cloud_tests/__init__.py |
4948 | index 98c1d6c..dd43698 100644 |
4949 | --- a/tests/cloud_tests/__init__.py |
4950 | +++ b/tests/cloud_tests/__init__.py |
4951 | @@ -10,6 +10,12 @@ TESTCASES_DIR = os.path.join(BASE_DIR, 'testcases') |
4952 | TEST_CONF_DIR = os.path.join(BASE_DIR, 'testcases') |
4953 | TREE_BASE = os.sep.join(BASE_DIR.split(os.sep)[:-2]) |
4954 | |
4955 | +# This domain contains reverse lookups for hostnames that are used. |
4956 | +# The primary reason is so sudo will return quickly when it attempts |
4957 | +# to look up the hostname. i9n is just short for 'integration'. |
4958 | +# see also bug 1730744 for why we had to do this. |
4959 | +CI_DOMAIN = "i9n.cloud-init.io" |
4960 | + |
4961 | |
4962 | def _initialize_logging(): |
4963 | """Configure logging for cloud_tests.""" |
4964 | diff --git a/tests/cloud_tests/bddeb.py b/tests/cloud_tests/bddeb.py |
4965 | index fba8a0c..a6d5069 100644 |
4966 | --- a/tests/cloud_tests/bddeb.py |
4967 | +++ b/tests/cloud_tests/bddeb.py |
4968 | @@ -8,7 +8,7 @@ import tempfile |
4969 | |
4970 | from cloudinit import util as c_util |
4971 | from tests.cloud_tests import (config, LOG) |
4972 | -from tests.cloud_tests import (platforms, images, snapshots, instances) |
4973 | +from tests.cloud_tests import platforms |
4974 | from tests.cloud_tests.stage import (PlatformComponent, run_stage, run_single) |
4975 | |
4976 | pre_reqs = ['devscripts', 'equivs', 'git', 'tar'] |
4977 | @@ -84,18 +84,18 @@ def setup_build(args): |
4978 | # set up image |
4979 | LOG.info('acquiring image for os: %s', args.build_os) |
4980 | img_conf = config.load_os_config(platform.platform_name, args.build_os) |
4981 | - image_call = partial(images.get_image, platform, img_conf) |
4982 | + image_call = partial(platforms.get_image, platform, img_conf) |
4983 | with PlatformComponent(image_call) as image: |
4984 | |
4985 | # set up snapshot |
4986 | - snapshot_call = partial(snapshots.get_snapshot, image) |
4987 | + snapshot_call = partial(platforms.get_snapshot, image) |
4988 | with PlatformComponent(snapshot_call) as snapshot: |
4989 | |
4990 | # create instance with cloud-config to set it up |
4991 | LOG.info('creating instance to build deb in') |
4992 | empty_cloud_config = "#cloud-config\n{}" |
4993 | instance_call = partial( |
4994 | - instances.get_instance, snapshot, empty_cloud_config, |
4995 | + platforms.get_instance, snapshot, empty_cloud_config, |
4996 | use_desc='build cloud-init deb') |
4997 | with PlatformComponent(instance_call) as instance: |
4998 | |
4999 | diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py |
5000 | index 71ee764..5ea88e5 100644 |
The diff has been truncated for viewing.
FAILED: Continuous integration, rev:04b240a3e24 e9813314a2159d0 c4999a876f0d18 /jenkins. ubuntu. com/server/ job/cloud- init-ci/ 757/
https:/
Executed test runs:
SUCCESS: Checkout
SUCCESS: Unit & Style Tests
FAILED: Ubuntu LTS: Build
Click here to trigger a rebuild: /jenkins. ubuntu. com/server/ job/cloud- init-ci/ 757/rebuild
https:/