Merge ~chad.smith/cloud-init:ubuntu/bionic into cloud-init:ubuntu/bionic

Proposed by Chad Smith
Status: Merged
Merged at revision: 112f87dcad40b980d2ee47b19d00e0cd29206988
Proposed branch: ~chad.smith/cloud-init:ubuntu/bionic
Merge into: cloud-init:ubuntu/bionic
Diff against target: 12966 lines (+7085/-1665)
134 files modified
.pylintrc (+2/-1)
ChangeLog (+83/-0)
bash_completion/cloud-init (+10/-3)
cloudinit/analyze/tests/test_dump.py (+33/-53)
cloudinit/apport.py (+1/-0)
cloudinit/cloud.py (+2/-2)
cloudinit/cmd/devel/__init__.py (+25/-0)
cloudinit/cmd/devel/net_convert.py (+67/-19)
cloudinit/cmd/devel/parser.py (+16/-7)
cloudinit/cmd/devel/render.py (+85/-0)
cloudinit/cmd/devel/tests/test_render.py (+101/-0)
cloudinit/cmd/main.py (+30/-4)
cloudinit/cmd/query.py (+155/-0)
cloudinit/cmd/tests/test_main.py (+3/-1)
cloudinit/cmd/tests/test_query.py (+193/-0)
cloudinit/cmd/tests/test_status.py (+4/-2)
cloudinit/config/cc_lxd.py (+12/-9)
cloudinit/config/cc_rh_subscription.py (+22/-21)
cloudinit/config/cc_ssh.py (+2/-5)
cloudinit/config/cc_users_groups.py (+39/-2)
cloudinit/config/tests/test_snap.py (+5/-2)
cloudinit/config/tests/test_ssh.py (+151/-0)
cloudinit/config/tests/test_ubuntu_advantage.py (+5/-2)
cloudinit/config/tests/test_users_groups.py (+144/-0)
cloudinit/distros/__init__.py (+28/-8)
cloudinit/distros/debian.py (+0/-5)
cloudinit/distros/net_util.py (+19/-0)
cloudinit/distros/opensuse.py (+13/-47)
cloudinit/distros/rhel.py (+10/-49)
cloudinit/handlers/__init__.py (+8/-3)
cloudinit/handlers/boot_hook.py (+5/-7)
cloudinit/handlers/cloud_config.py (+5/-10)
cloudinit/handlers/jinja_template.py (+137/-0)
cloudinit/handlers/shell_script.py (+3/-6)
cloudinit/handlers/upstart_job.py (+3/-6)
cloudinit/helpers.py (+8/-0)
cloudinit/log.py (+10/-2)
cloudinit/net/__init__.py (+45/-0)
cloudinit/net/eni.py (+10/-3)
cloudinit/net/netplan.py (+5/-1)
cloudinit/net/network_state.py (+4/-0)
cloudinit/net/renderer.py (+6/-3)
cloudinit/net/sysconfig.py (+65/-27)
cloudinit/net/tests/test_init.py (+10/-3)
cloudinit/reporting/__init__.py (+7/-1)
cloudinit/reporting/handlers.py (+246/-0)
cloudinit/settings.py (+2/-1)
cloudinit/sources/DataSourceAltCloud.py (+10/-14)
cloudinit/sources/DataSourceAzure.py (+227/-29)
cloudinit/sources/DataSourceConfigDrive.py (+1/-1)
cloudinit/sources/DataSourceIBMCloud.py (+5/-8)
cloudinit/sources/DataSourceOpenNebula.py (+1/-1)
cloudinit/sources/DataSourceOpenStack.py (+7/-2)
cloudinit/sources/DataSourceOracle.py (+233/-0)
cloudinit/sources/DataSourceScaleway.py (+47/-7)
cloudinit/sources/DataSourceSmartOS.py (+18/-13)
cloudinit/sources/__init__.py (+104/-30)
cloudinit/sources/helpers/openstack.py (+33/-5)
cloudinit/sources/helpers/vmware/imc/config_nic.py (+1/-1)
cloudinit/sources/tests/test_init.py (+165/-27)
cloudinit/sources/tests/test_oracle.py (+331/-0)
cloudinit/ssh_util.py (+6/-0)
cloudinit/stages.py (+16/-10)
cloudinit/templater.py (+25/-3)
cloudinit/tests/helpers.py (+92/-18)
cloudinit/tests/test_util.py (+76/-2)
cloudinit/util.py (+33/-1)
cloudinit/version.py (+1/-1)
cloudinit/warnings.py (+1/-1)
config/cloud.cfg.tmpl (+0/-2)
debian/changelog (+70/-0)
debian/patches/openstack-no-network-config.patch (+1/-1)
debian/patches/series (+0/-1)
dev/null (+0/-66)
doc/examples/cloud-config-user-groups.txt (+9/-0)
doc/examples/cloud-config.txt (+16/-3)
doc/rtd/index.rst (+1/-0)
doc/rtd/topics/capabilities.rst (+95/-23)
doc/rtd/topics/datasources.rst (+7/-95)
doc/rtd/topics/datasources/oracle.rst (+26/-0)
doc/rtd/topics/debugging.rst (+1/-1)
doc/rtd/topics/format.rst (+17/-4)
doc/rtd/topics/instancedata.rst (+297/-0)
integration-requirements.txt (+3/-2)
tests/cloud_tests/collect.py (+12/-0)
tests/cloud_tests/platforms/instances.py (+2/-1)
tests/cloud_tests/platforms/lxd/instance.py (+38/-4)
tests/cloud_tests/setup_image.py (+9/-1)
tests/cloud_tests/testcases.yaml (+4/-0)
tests/cloud_tests/testcases/__init__.py (+35/-23)
tests/cloud_tests/testcases/base.py (+31/-28)
tests/cloud_tests/testcases/modules/lxd_bridge.py (+12/-2)
tests/cloud_tests/testcases/modules/lxd_dir.py (+12/-2)
tests/cloud_tests/testcases/modules/ntp_chrony.py (+2/-2)
tests/cloud_tests/testcases/modules/snap.yaml (+3/-0)
tests/cloud_tests/testcases/modules/snappy.yaml (+3/-0)
tests/cloud_tests/testcases/modules/write_files.py (+5/-2)
tests/cloud_tests/testcases/modules/write_files.yaml (+11/-4)
tests/cloud_tests/verify.py (+2/-2)
tests/unittests/test_builtin_handlers.py (+302/-22)
tests/unittests/test_cli.py (+1/-2)
tests/unittests/test_datasource/test_altcloud.py (+18/-26)
tests/unittests/test_datasource/test_azure.py (+371/-28)
tests/unittests/test_datasource/test_cloudsigma.py (+3/-0)
tests/unittests/test_datasource/test_common.py (+3/-1)
tests/unittests/test_datasource/test_configdrive.py (+11/-4)
tests/unittests/test_datasource/test_nocloud.py (+2/-0)
tests/unittests/test_datasource/test_opennebula.py (+407/-2)
tests/unittests/test_datasource/test_openstack.py (+116/-5)
tests/unittests/test_datasource/test_ovf.py (+6/-2)
tests/unittests/test_datasource/test_scaleway.py (+76/-3)
tests/unittests/test_datasource/test_smartos.py (+64/-30)
tests/unittests/test_distros/test_create_users.py (+89/-2)
tests/unittests/test_distros/test_netconfig.py (+360/-573)
tests/unittests/test_ds_identify.py (+20/-0)
tests/unittests/test_handler/test_handler_apt_source_v3.py (+11/-3)
tests/unittests/test_handler/test_handler_bootcmd.py (+6/-4)
tests/unittests/test_handler/test_handler_chef.py (+13/-5)
tests/unittests/test_handler/test_handler_etc_hosts.py (+1/-0)
tests/unittests/test_handler/test_handler_lxd.py (+6/-6)
tests/unittests/test_handler/test_handler_ntp.py (+1/-0)
tests/unittests/test_handler/test_handler_resizefs.py (+6/-2)
tests/unittests/test_handler/test_schema.py (+9/-3)
tests/unittests/test_net.py (+604/-43)
tests/unittests/test_reporting_hyperv.py (+134/-0)
tests/unittests/test_rh_subscription.py (+92/-93)
tests/unittests/test_templating.py (+23/-0)
tests/unittests/test_util.py (+15/-12)
tests/unittests/test_vmware_config_file.py (+115/-0)
tools/Z99-cloud-locale-test.sh (+8/-5)
tools/Z99-cloudinit-warnings.sh (+5/-3)
tools/ds-identify (+19/-5)
tools/read-version (+6/-0)
tools/tox-venv (+166/-23)
Reviewer            Review Type             Date Requested  Status
Scott Moser                                                 Approve
Server Team CI bot  continuous-integration                  Approve
Review via email: mp+356093@code.launchpad.net

Commit message

New upstream snapshot for SRU into bionic per the SRU process.

LP: #1795953

Revision history for this message
Server Team CI bot (server-team-bot) wrote:

PASSED: Continuous integration, rev:112f87dcad40b980d2ee47b19d00e0cd29206988
https://jenkins.ubuntu.com/server/job/cloud-init-ci/365/
Executed test runs:
    SUCCESS: Checkout
    SUCCESS: Unit & Style Tests
    SUCCESS: Ubuntu LTS: Build
    SUCCESS: Ubuntu LTS: Integration
    IN_PROGRESS: Declarative: Post Actions

Click here to trigger a rebuild:
https://jenkins.ubuntu.com/server/job/cloud-init-ci/365/rebuild

review: Approve (continuous-integration)
Revision history for this message
Scott Moser (smoser) wrote:

lgtm.

review: Approve

Preview Diff

1diff --git a/.pylintrc b/.pylintrc
2index 3bfa0c8..e376b48 100644
3--- a/.pylintrc
4+++ b/.pylintrc
5@@ -61,7 +61,8 @@ ignored-modules=
6 # List of class names for which member attributes should not be checked (useful
7 # for classes with dynamically set attributes). This supports the use of
8 # qualified names.
9-ignored-classes=optparse.Values,thread._local
10+# argparse.Namespace from https://github.com/PyCQA/pylint/issues/2413
11+ignored-classes=argparse.Namespace,optparse.Values,thread._local
12
13 # List of members which are set dynamically and missed by pylint inference
14 # system, and so shouldn't trigger E1101 when accessed. Python regular
15diff --git a/ChangeLog b/ChangeLog
16index 72c5287..9c043b0 100644
17--- a/ChangeLog
18+++ b/ChangeLog
19@@ -1,3 +1,86 @@
20+18.4:
21+ - add rtd example docs about new standardized keys
22+ - use ds._crawled_metadata instance attribute if set when writing
23+ instance-data.json
24+ - ec2: update crawled metadata. add standardized keys
25+ - tests: allow skipping an entire cloud_test without running.
26+ - tests: disable lxd tests on cosmic
27+ - cii-tests: use unittest2.SkipTest in ntp_chrony due to new deps
28+ - lxd: adjust to snap installed lxd.
29+ - docs: surface experimental doc in instance-data.json
30+ - tests: fix ec2 integration tests. process meta_data instead of meta-data
31+ - Add support for Infiniband network interfaces (IPoIB). [Mark Goddard]
32+ - cli: add cloud-init query subcommand to query instance metadata
33+ - tools/tox-venv: update for new features.
34+ - pylint: ignore warning assignment-from-no-return for _write_network
35+ - stages: Fix bug causing datasource to have incorrect sys_cfg.
36+ (LP: #1787459)
37+ - Remove dead-code _write_network distro implementations.
38+ - net_util: ensure static configs have netmask in translate_network result
39+ [Thomas Berger] (LP: #1792454)
40+ - Fall back to root:root on syslog permissions if other options fail.
41+ [Robert Schweikert]
42+ - tests: Add mock for util.get_hostname. [Robert Schweikert] (LP: #1792799)
43+ - ds-identify: doc string cleanup.
44+ - OpenStack: Support setting mac address on bond.
45+ [Fabian Wiesel] (LP: #1682064)
46+ - bash_completion/cloud-init: fix shell syntax error.
47+ - EphemeralIPv4Network: Be more explicit when adding default route.
48+ (LP: #1792415)
49+ - OpenStack: support reading of newer versions of metdata.
50+ - OpenStack: fix bug causing 'latest' version to be used from network.
51+ (LP: #1792157)
52+ - user-data: jinja template to render instance-data.json in cloud-config
53+ (LP: #1791781)
54+ - config: disable ssh access to a configured user account
55+ - tests: print failed testname instead of docstring upon failure
56+ - tests: Disallow use of util.subp except for where needed.
57+ - sysconfig: refactor sysconfig to accept distro specific templates paths
58+ - Add unit tests for config/cc_ssh.py [Francis Ginther]
59+ - Fix the built-in cloudinit/tests/helpers:skipIf
60+ - read-version: enhance error message [Joshua Powers]
61+ - hyperv_reporting_handler: simplify threaded publisher
62+ - VMWare: Fix a network config bug in vm with static IPv4 and no gateway.
63+ [Pengpeng Sun] (LP: #1766538)
64+ - logging: Add logging config type hyperv for reporting via Azure KVP
65+ [Andy Liu]
66+ - tests: disable other snap test as well [Joshua Powers]
67+ - tests: disable snap, fix write_files binary [Joshua Powers]
68+ - Add datasource Oracle Compute Infrastructure (OCI).
69+ - azure: allow azure to generate network configuration from IMDS per boot.
70+ - Scaleway: Add network configuration to the DataSource [Louis Bouchard]
71+ - docs: Fix example cloud-init analyze command to match output.
72+ [Wesley Gao]
73+ - netplan: Correctly render macaddress on a bonds and bridges when
74+ provided. (LP: #1784699)
75+ - tools: Add 'net-convert' subcommand command to 'cloud-init devel'.
76+ - redhat: remove ssh keys on new instance. (LP: #1781094)
77+ - Use typeset or local in profile.d scripts. (LP: #1784713)
78+ - OpenNebula: Fix null gateway6 [Akihiko Ota] (LP: #1768547)
79+ - oracle: fix detect_openstack to report True on OracleCloud.com DMI data
80+ (LP: #1784685)
81+ - tests: improve LXDInstance trying to workaround or catch bug.
82+ - update_metadata re-config on every boot comments and tests not quite
83+ right [Mike Gerdts]
84+ - tests: Collect build_info from system if available.
85+ - pylint: Fix pylint warnings reported in pylint 2.0.0.
86+ - get_linux_distro: add support for rhel via redhat-release.
87+ - get_linux_distro: add support for centos6 and rawhide flavors of redhat
88+ (LP: #1781229)
89+ - tools: add '--debug' to tools/net-convert.py
90+ - tests: bump the version of paramiko to 2.4.1.
91+ - docs: note in rtd about avoiding /tmp when writing files (LP: #1727876)
92+ - ubuntu,centos,debian: get_linux_distro to align with platform.dist
93+ (LP: #1780481)
94+ - Fix boothook docs on environment variable name (INSTANCE_I ->
95+ INSTANCE_ID) [Marc Tamsky]
96+ - update_metadata: a datasource can support network re-config every boot
97+ - tests: drop salt-minion integration test (LP: #1778737)
98+ - Retry on failed import of gpg receive keys.
99+ - tools: Fix run-container when neither source or binary package requested.
100+ - docs: Fix a small spelling error. [Oz N Tiram]
101+ - tox: use simplestreams from git repository rather than bzr.
102+
103 18.3:
104 - docs: represent sudo:false in docs for user_groups config module
105 - Explicitly prevent `sudo` access for user module
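
Of the 18.4 entries above, the jinja user-data templating (LP: #1791781) is the most visible new behavior. As a minimal sketch, assuming the standardized v1 keys documented in doc/rtd/topics/instancedata.rst, a user-data file that starts with the '## template: jinja' header is rendered against /run/cloud-init/instance-data.json before being handled as cloud-config (the runcmd payload below is an example only):

    # Illustrative jinja-templated user-data; v1.region and v1.cloud_name
    # are standardized instance-data aliases:
    cat > user-data.txt <<'EOF'
    ## template: jinja
    #cloud-config
    runcmd:
      - echo 'booted in {{ v1.region }} on {{ v1.cloud_name }}' > /root/boot-info
    EOF

Supplying that file as user-data at instance launch exercises the same rendering path that the new 'cloud-init devel render' and 'cloud-init query' subcommands (see their diffs below) let you debug locally.
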
106diff --git a/bash_completion/cloud-init b/bash_completion/cloud-init
107index 581432c..8c25032 100644
108--- a/bash_completion/cloud-init
109+++ b/bash_completion/cloud-init
110@@ -10,7 +10,7 @@ _cloudinit_complete()
111 cur_word="${COMP_WORDS[COMP_CWORD]}"
112 prev_word="${COMP_WORDS[COMP_CWORD-1]}"
113
114- subcmds="analyze clean collect-logs devel dhclient-hook features init modules single status"
115+ subcmds="analyze clean collect-logs devel dhclient-hook features init modules query single status"
116 base_params="--help --file --version --debug --force"
117 case ${COMP_CWORD} in
118 1)
119@@ -28,7 +28,7 @@ _cloudinit_complete()
120 COMPREPLY=($(compgen -W "--help --tarfile --include-userdata" -- $cur_word))
121 ;;
122 devel)
123- COMPREPLY=($(compgen -W "--help schema" -- $cur_word))
124+ COMPREPLY=($(compgen -W "--help schema net-convert" -- $cur_word))
125 ;;
126 dhclient-hook|features)
127 COMPREPLY=($(compgen -W "--help" -- $cur_word))
128@@ -40,6 +40,8 @@ _cloudinit_complete()
129 COMPREPLY=($(compgen -W "--help --mode" -- $cur_word))
130 ;;
131
132+ query)
133+ COMPREPLY=($(compgen -W "--all --help --instance-data --list-keys --user-data --vendor-data --debug" -- $cur_word));;
134 single)
135 COMPREPLY=($(compgen -W "--help --name --frequency --report" -- $cur_word))
136 ;;
137@@ -59,6 +61,11 @@ _cloudinit_complete()
138 --frequency)
139 COMPREPLY=($(compgen -W "--help instance always once" -- $cur_word))
140 ;;
141+ net-convert)
142+ COMPREPLY=($(compgen -W "--help --network-data --kind --directory --output-kind" -- $cur_word))
143+ ;;
144+ render)
145+ COMPREPLY=($(compgen -W "--help --instance-data --debug" -- $cur_word));;
146 schema)
147 COMPREPLY=($(compgen -W "--help --config-file --doc --annotate" -- $cur_word))
148 ;;
149@@ -74,4 +81,4 @@ _cloudinit_complete()
150 }
151 complete -F _cloudinit_complete cloud-init
152
153-# vi: syntax=bash expandtab
154+# vi: syntax=sh expandtab
155diff --git a/cloudinit/analyze/tests/test_dump.py b/cloudinit/analyze/tests/test_dump.py
156index f4c4284..db2a667 100644
157--- a/cloudinit/analyze/tests/test_dump.py
158+++ b/cloudinit/analyze/tests/test_dump.py
159@@ -5,8 +5,8 @@ from textwrap import dedent
160
161 from cloudinit.analyze.dump import (
162 dump_events, parse_ci_logline, parse_timestamp)
163-from cloudinit.util import subp, write_file
164-from cloudinit.tests.helpers import CiTestCase
165+from cloudinit.util import which, write_file
166+from cloudinit.tests.helpers import CiTestCase, mock, skipIf
167
168
169 class TestParseTimestamp(CiTestCase):
170@@ -15,21 +15,9 @@ class TestParseTimestamp(CiTestCase):
171 """Logs with cloud-init detailed formats will be properly parsed."""
172 trusty_fmt = '%Y-%m-%d %H:%M:%S,%f'
173 trusty_stamp = '2016-09-12 14:39:20,839'
174-
175- parsed = parse_timestamp(trusty_stamp)
176-
177- # convert ourselves
178 dt = datetime.strptime(trusty_stamp, trusty_fmt)
179- expected = float(dt.strftime('%s.%f'))
180-
181- # use date(1)
182- out, _err = subp(['date', '+%s.%3N', '-d', trusty_stamp])
183- timestamp = out.strip()
184- date_ts = float(timestamp)
185-
186- self.assertEqual(expected, parsed)
187- self.assertEqual(expected, date_ts)
188- self.assertEqual(date_ts, parsed)
189+ self.assertEqual(
190+ float(dt.strftime('%s.%f')), parse_timestamp(trusty_stamp))
191
192 def test_parse_timestamp_handles_syslog_adding_year(self):
193 """Syslog timestamps lack a year. Add year and properly parse."""
194@@ -39,17 +27,9 @@ class TestParseTimestamp(CiTestCase):
195 # convert stamp ourselves by adding the missing year value
196 year = datetime.now().year
197 dt = datetime.strptime(syslog_stamp + " " + str(year), syslog_fmt)
198- expected = float(dt.strftime('%s.%f'))
199- parsed = parse_timestamp(syslog_stamp)
200-
201- # use date(1)
202- out, _ = subp(['date', '+%s.%3N', '-d', syslog_stamp])
203- timestamp = out.strip()
204- date_ts = float(timestamp)
205-
206- self.assertEqual(expected, parsed)
207- self.assertEqual(expected, date_ts)
208- self.assertEqual(date_ts, parsed)
209+ self.assertEqual(
210+ float(dt.strftime('%s.%f')),
211+ parse_timestamp(syslog_stamp))
212
213 def test_parse_timestamp_handles_journalctl_format_adding_year(self):
214 """Journalctl precise timestamps lack a year. Add year and parse."""
215@@ -59,37 +39,22 @@ class TestParseTimestamp(CiTestCase):
216 # convert stamp ourselves by adding the missing year value
217 year = datetime.now().year
218 dt = datetime.strptime(journal_stamp + " " + str(year), journal_fmt)
219- expected = float(dt.strftime('%s.%f'))
220- parsed = parse_timestamp(journal_stamp)
221-
222- # use date(1)
223- out, _ = subp(['date', '+%s.%6N', '-d', journal_stamp])
224- timestamp = out.strip()
225- date_ts = float(timestamp)
226-
227- self.assertEqual(expected, parsed)
228- self.assertEqual(expected, date_ts)
229- self.assertEqual(date_ts, parsed)
230+ self.assertEqual(
231+ float(dt.strftime('%s.%f')), parse_timestamp(journal_stamp))
232
233+ @skipIf(not which("date"), "'date' command not available.")
234 def test_parse_unexpected_timestamp_format_with_date_command(self):
235- """Dump sends unexpected timestamp formats to data for processing."""
236+ """Dump sends unexpected timestamp formats to date for processing."""
237 new_fmt = '%H:%M %m/%d %Y'
238 new_stamp = '17:15 08/08'
239-
240 # convert stamp ourselves by adding the missing year value
241 year = datetime.now().year
242 dt = datetime.strptime(new_stamp + " " + str(year), new_fmt)
243- expected = float(dt.strftime('%s.%f'))
244- parsed = parse_timestamp(new_stamp)
245
246 # use date(1)
247- out, _ = subp(['date', '+%s.%6N', '-d', new_stamp])
248- timestamp = out.strip()
249- date_ts = float(timestamp)
250-
251- self.assertEqual(expected, parsed)
252- self.assertEqual(expected, date_ts)
253- self.assertEqual(date_ts, parsed)
254+ with self.allow_subp(["date"]):
255+ self.assertEqual(
256+ float(dt.strftime('%s.%f')), parse_timestamp(new_stamp))
257
258
259 class TestParseCILogLine(CiTestCase):
260@@ -135,7 +100,9 @@ class TestParseCILogLine(CiTestCase):
261 'timestamp': timestamp}
262 self.assertEqual(expected, parse_ci_logline(line))
263
264- def test_parse_logline_returns_event_for_finish_events(self):
265+ @mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date")
266+ def test_parse_logline_returns_event_for_finish_events(self,
267+ m_parse_from_date):
268 """parse_ci_logline returns a finish event for a parsed log line."""
269 line = ('2016-08-30 21:53:25.972325+00:00 y1 [CLOUDINIT]'
270 ' handlers.py[DEBUG]: finish: modules-final: SUCCESS: running'
271@@ -147,7 +114,10 @@ class TestParseCILogLine(CiTestCase):
272 'origin': 'cloudinit',
273 'result': 'SUCCESS',
274 'timestamp': 1472594005.972}
275+ m_parse_from_date.return_value = "1472594005.972"
276 self.assertEqual(expected, parse_ci_logline(line))
277+ m_parse_from_date.assert_has_calls(
278+ [mock.call("2016-08-30 21:53:25.972325+00:00")])
279
280
281 SAMPLE_LOGS = dedent("""\
282@@ -162,10 +132,16 @@ Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT] util.py[DEBUG]:\
283 class TestDumpEvents(CiTestCase):
284 maxDiff = None
285
286- def test_dump_events_with_rawdata(self):
287+ @mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date")
288+ def test_dump_events_with_rawdata(self, m_parse_from_date):
289 """Rawdata is split and parsed into a tuple of events and data"""
290+ m_parse_from_date.return_value = "1472594005.972"
291 events, data = dump_events(rawdata=SAMPLE_LOGS)
292 expected_data = SAMPLE_LOGS.splitlines()
293+ self.assertEqual(
294+ [mock.call("2016-08-30 21:53:25.972325+00:00")],
295+ m_parse_from_date.call_args_list)
296+ self.assertEqual(expected_data, data)
297 year = datetime.now().year
298 dt1 = datetime.strptime(
299 'Nov 03 06:51:06.074410 %d' % year, '%b %d %H:%M:%S.%f %Y')
300@@ -183,12 +159,14 @@ class TestDumpEvents(CiTestCase):
301 'result': 'SUCCESS',
302 'timestamp': 1472594005.972}]
303 self.assertEqual(expected_events, events)
304- self.assertEqual(expected_data, data)
305
306- def test_dump_events_with_cisource(self):
307+ @mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date")
308+ def test_dump_events_with_cisource(self, m_parse_from_date):
309 """Cisource file is read and parsed into a tuple of events and data."""
310 tmpfile = self.tmp_path('logfile')
311 write_file(tmpfile, SAMPLE_LOGS)
312+ m_parse_from_date.return_value = 1472594005.972
313+
314 events, data = dump_events(cisource=open(tmpfile))
315 year = datetime.now().year
316 dt1 = datetime.strptime(
317@@ -208,3 +186,5 @@ class TestDumpEvents(CiTestCase):
318 'timestamp': 1472594005.972}]
319 self.assertEqual(expected_events, events)
320 self.assertEqual(SAMPLE_LOGS.splitlines(), [d.strip() for d in data])
321+ m_parse_from_date.assert_has_calls(
322+ [mock.call("2016-08-30 21:53:25.972325+00:00")])
323diff --git a/cloudinit/apport.py b/cloudinit/apport.py
324index 130ff26..22cb7fd 100644
325--- a/cloudinit/apport.py
326+++ b/cloudinit/apport.py
327@@ -30,6 +30,7 @@ KNOWN_CLOUD_NAMES = [
328 'NoCloud',
329 'OpenNebula',
330 'OpenStack',
331+ 'Oracle',
332 'OVF',
333 'OpenTelekomCloud',
334 'Scaleway',
335diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py
336index 6d12c43..7ae98e1 100644
337--- a/cloudinit/cloud.py
338+++ b/cloudinit/cloud.py
339@@ -47,7 +47,7 @@ class Cloud(object):
340
341 @property
342 def cfg(self):
343- # Ensure that not indirectly modified
344+ # Ensure that cfg is not indirectly modified
345 return copy.deepcopy(self._cfg)
346
347 def run(self, name, functor, args, freq=None, clear_on_fail=False):
348@@ -61,7 +61,7 @@ class Cloud(object):
349 return None
350 return fn
351
352- # The rest of thes are just useful proxies
353+ # The rest of these are just useful proxies
354 def get_userdata(self, apply_filter=True):
355 return self.datasource.get_userdata(apply_filter)
356
357diff --git a/cloudinit/cmd/devel/__init__.py b/cloudinit/cmd/devel/__init__.py
358index e69de29..3ae28b6 100644
359--- a/cloudinit/cmd/devel/__init__.py
360+++ b/cloudinit/cmd/devel/__init__.py
361@@ -0,0 +1,25 @@
362+# This file is part of cloud-init. See LICENSE file for license information.
363+
364+"""Common cloud-init devel commandline utility functions."""
365+
366+
367+import logging
368+
369+from cloudinit import log
370+from cloudinit.stages import Init
371+
372+
373+def addLogHandlerCLI(logger, log_level):
374+ """Add a commandline logging handler to emit messages to stderr."""
375+ formatter = logging.Formatter('%(levelname)s: %(message)s')
376+ log.setupBasicLogging(log_level, formatter=formatter)
377+ return logger
378+
379+
380+def read_cfg_paths():
381+ """Return a Paths object based on the system configuration on disk."""
382+ init = Init(ds_deps=[])
383+ init.read_cfg()
384+ return init.paths
385+
386+# vi: ts=4 expandtab
387diff --git a/tools/net-convert.py b/cloudinit/cmd/devel/net_convert.py
388index 68559cb..a0f58a0 100755
389--- a/tools/net-convert.py
390+++ b/cloudinit/cmd/devel/net_convert.py
391@@ -1,42 +1,70 @@
392-#!/usr/bin/python3
393 # This file is part of cloud-init. See LICENSE file for license information.
394
395+"""Debug network config format conversions."""
396 import argparse
397 import json
398 import os
399+import sys
400 import yaml
401
402 from cloudinit.sources.helpers import openstack
403+from cloudinit.sources import DataSourceAzure as azure
404
405-from cloudinit.net import eni
406-from cloudinit.net import netplan
407-from cloudinit.net import network_state
408-from cloudinit.net import sysconfig
409+from cloudinit import distros
410+from cloudinit.net import eni, netplan, network_state, sysconfig
411+from cloudinit import log
412
413+NAME = 'net-convert'
414
415-def main():
416- parser = argparse.ArgumentParser()
417- parser.add_argument("--network-data", "-p", type=open,
418+
419+def get_parser(parser=None):
420+ """Build or extend and arg parser for net-convert utility.
421+
422+ @param parser: Optional existing ArgumentParser instance representing the
423+ subcommand which will be extended to support the args of this utility.
424+
425+ @returns: ArgumentParser with proper argument configuration.
426+ """
427+ if not parser:
428+ parser = argparse.ArgumentParser(prog=NAME, description=__doc__)
429+ parser.add_argument("-p", "--network-data", type=open,
430 metavar="PATH", required=True)
431- parser.add_argument("--kind", "-k",
432- choices=['eni', 'network_data.json', 'yaml'],
433+ parser.add_argument("-k", "--kind",
434+ choices=['eni', 'network_data.json', 'yaml',
435+ 'azure-imds'],
436 required=True)
437 parser.add_argument("-d", "--directory",
438 metavar="PATH",
439 help="directory to place output in",
440 required=True)
441+ parser.add_argument("-D", "--distro",
442+ choices=[item for sublist in
443+ distros.OSFAMILIES.values()
444+ for item in sublist],
445+ required=True)
446 parser.add_argument("-m", "--mac",
447 metavar="name,mac",
448 action='append',
449 help="interface name to mac mapping")
450- parser.add_argument("--output-kind", "-ok",
451+ parser.add_argument("--debug", action='store_true',
452+ help='enable debug logging to stderr.')
453+ parser.add_argument("-O", "--output-kind",
454 choices=['eni', 'netplan', 'sysconfig'],
455 required=True)
456- args = parser.parse_args()
457+ return parser
458+
459+
460+def handle_args(name, args):
461+ if not args.directory.endswith("/"):
462+ args.directory += "/"
463
464 if not os.path.isdir(args.directory):
465 os.makedirs(args.directory)
466
467+ if args.debug:
468+ log.setupBasicLogging(level=log.DEBUG)
469+ else:
470+ log.setupBasicLogging(level=log.WARN)
471 if args.mac:
472 known_macs = {}
473 for item in args.mac:
474@@ -53,32 +81,52 @@ def main():
475 pre_ns = yaml.load(net_data)
476 if 'network' in pre_ns:
477 pre_ns = pre_ns.get('network')
478- print("Input YAML")
479- print(yaml.dump(pre_ns, default_flow_style=False, indent=4))
480+ if args.debug:
481+ sys.stderr.write('\n'.join(
482+ ["Input YAML",
483+ yaml.dump(pre_ns, default_flow_style=False, indent=4), ""]))
484 ns = network_state.parse_net_config_data(pre_ns)
485- else:
486+ elif args.kind == 'network_data.json':
487 pre_ns = openstack.convert_net_json(
488 json.loads(net_data), known_macs=known_macs)
489 ns = network_state.parse_net_config_data(pre_ns)
490+ elif args.kind == 'azure-imds':
491+ pre_ns = azure.parse_network_config(json.loads(net_data))
492+ ns = network_state.parse_net_config_data(pre_ns)
493
494 if not ns:
495 raise RuntimeError("No valid network_state object created from"
496 "input data")
497
498- print("\nInternal State")
499- print(yaml.dump(ns, default_flow_style=False, indent=4))
500+ if args.debug:
501+ sys.stderr.write('\n'.join([
502+ "", "Internal State",
503+ yaml.dump(ns, default_flow_style=False, indent=4), ""]))
504+ distro_cls = distros.fetch(args.distro)
505+ distro = distro_cls(args.distro, {}, None)
506+ config = {}
507 if args.output_kind == "eni":
508 r_cls = eni.Renderer
509+ config = distro.renderer_configs.get('eni')
510 elif args.output_kind == "netplan":
511 r_cls = netplan.Renderer
512+ config = distro.renderer_configs.get('netplan')
513 else:
514 r_cls = sysconfig.Renderer
515+ config = distro.renderer_configs.get('sysconfig')
516
517- r = r_cls()
518+ r = r_cls(config=config)
519+ sys.stderr.write(''.join([
520+ "Read input format '%s' from '%s'.\n" % (
521+ args.kind, args.network_data.name),
522+ "Wrote output format '%s' to '%s'\n" % (
523+ args.output_kind, args.directory)]) + "\n")
524 r.render_network_state(network_state=ns, target=args.directory)
525
526
527 if __name__ == '__main__':
528- main()
529+ args = get_parser().parse_args()
530+ handle_args(NAME, args)
531+
532
533 # vi: ts=4 expandtab
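
With net-convert relocated under 'cloud-init devel', a hedged example invocation follows; the flag names come from get_parser() above, while the input path and distro value are illustrative:

    # Convert an eni-format config into netplan output for an Ubuntu target:
    cloud-init devel net-convert \
        --network-data ./interfaces --kind eni \
        --distro ubuntu --directory ./out \
        --output-kind netplan --debug
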
534diff --git a/cloudinit/cmd/devel/parser.py b/cloudinit/cmd/devel/parser.py
535index acacc4e..99a234c 100644
536--- a/cloudinit/cmd/devel/parser.py
537+++ b/cloudinit/cmd/devel/parser.py
538@@ -5,8 +5,10 @@
539 """Define 'devel' subcommand argument parsers to include in cloud-init cmd."""
540
541 import argparse
542-from cloudinit.config.schema import (
543- get_parser as schema_parser, handle_schema_args)
544+from cloudinit.config import schema
545+
546+from . import net_convert
547+from . import render
548
549
550 def get_parser(parser=None):
551@@ -17,10 +19,17 @@ def get_parser(parser=None):
552 subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand')
553 subparsers.required = True
554
555- parser_schema = subparsers.add_parser(
556- 'schema', help='Validate cloud-config files or document schema')
557- # Construct schema subcommand parser
558- schema_parser(parser_schema)
559- parser_schema.set_defaults(action=('schema', handle_schema_args))
560+ subcmds = [
561+ ('schema', 'Validate cloud-config files for document schema',
562+ schema.get_parser, schema.handle_schema_args),
563+ (net_convert.NAME, net_convert.__doc__,
564+ net_convert.get_parser, net_convert.handle_args),
565+ (render.NAME, render.__doc__,
566+ render.get_parser, render.handle_args)
567+ ]
568+ for (subcmd, helpmsg, get_parser, handler) in subcmds:
569+ parser = subparsers.add_parser(subcmd, help=helpmsg)
570+ get_parser(parser)
571+ parser.set_defaults(action=(subcmd, handler))
572
573 return parser
574diff --git a/cloudinit/cmd/devel/render.py b/cloudinit/cmd/devel/render.py
575new file mode 100755
576index 0000000..2ba6b68
577--- /dev/null
578+++ b/cloudinit/cmd/devel/render.py
579@@ -0,0 +1,85 @@
580+# This file is part of cloud-init. See LICENSE file for license information.
581+
582+"""Debug jinja template rendering of user-data."""
583+
584+import argparse
585+import os
586+import sys
587+
588+from cloudinit.handlers.jinja_template import render_jinja_payload_from_file
589+from cloudinit import log
590+from cloudinit.sources import INSTANCE_JSON_FILE
591+from . import addLogHandlerCLI, read_cfg_paths
592+
593+NAME = 'render'
594+DEFAULT_INSTANCE_DATA = '/run/cloud-init/instance-data.json'
595+
596+LOG = log.getLogger(NAME)
597+
598+
599+def get_parser(parser=None):
600+ """Build or extend and arg parser for jinja render utility.
601+
602+ @param parser: Optional existing ArgumentParser instance representing the
603+ subcommand which will be extended to support the args of this utility.
604+
605+ @returns: ArgumentParser with proper argument configuration.
606+ """
607+ if not parser:
608+ parser = argparse.ArgumentParser(prog=NAME, description=__doc__)
609+ parser.add_argument(
610+ 'user_data', type=str, help='Path to the user-data file to render')
611+ parser.add_argument(
612+ '-i', '--instance-data', type=str,
613+ help=('Optional path to instance-data.json file. Defaults to'
614+ ' /run/cloud-init/instance-data.json'))
615+ parser.add_argument('-d', '--debug', action='store_true', default=False,
616+ help='Add verbose messages during template render')
617+ return parser
618+
619+
620+def handle_args(name, args):
621+ """Render the provided user-data template file using instance-data values.
622+
623+ Also setup CLI log handlers to report to stderr since this is a development
624+ utility which should be run by a human on the CLI.
625+
626+ @return 0 on success, 1 on failure.
627+ """
628+ addLogHandlerCLI(LOG, log.DEBUG if args.debug else log.WARNING)
629+ if not args.instance_data:
630+ paths = read_cfg_paths()
631+ instance_data_fn = os.path.join(
632+ paths.run_dir, INSTANCE_JSON_FILE)
633+ else:
634+ instance_data_fn = args.instance_data
635+ if not os.path.exists(instance_data_fn):
636+ LOG.error('Missing instance-data.json file: %s', instance_data_fn)
637+ return 1
638+ try:
639+ with open(args.user_data) as stream:
640+ user_data = stream.read()
641+ except IOError:
642+ LOG.error('Missing user-data file: %s', args.user_data)
643+ return 1
644+ rendered_payload = render_jinja_payload_from_file(
645+ payload=user_data, payload_fn=args.user_data,
646+ instance_data_file=instance_data_fn,
647+ debug=True if args.debug else False)
648+ if not rendered_payload:
649+ LOG.error('Unable to render user-data file: %s', args.user_data)
650+ return 1
651+ sys.stdout.write(rendered_payload)
652+ return 0
653+
654+
655+def main():
656+ args = get_parser().parse_args()
657+ return(handle_args(NAME, args))
658+
659+
660+if __name__ == '__main__':
661+ sys.exit(main())
662+
663+
664+# vi: ts=4 expandtab
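
A short usage sketch for the new subcommand, assuming a jinja-templated user-data file like the ones shown in the tests that follow (the filename is illustrative):

    # Render a jinja user-data template against the default
    # /run/cloud-init/instance-data.json:
    cloud-init devel render my-user-data.txt
    # Or debug against a saved copy of instance-data, with verbose output:
    cloud-init devel render my-user-data.txt \
        --instance-data ./instance-data.json --debug
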
665diff --git a/cloudinit/cmd/devel/tests/test_render.py b/cloudinit/cmd/devel/tests/test_render.py
666new file mode 100644
667index 0000000..fc5d2c0
668--- /dev/null
669+++ b/cloudinit/cmd/devel/tests/test_render.py
670@@ -0,0 +1,101 @@
671+# This file is part of cloud-init. See LICENSE file for license information.
672+
673+from six import StringIO
674+import os
675+
676+from collections import namedtuple
677+from cloudinit.cmd.devel import render
678+from cloudinit.helpers import Paths
679+from cloudinit.sources import INSTANCE_JSON_FILE
680+from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJinja
681+from cloudinit.util import ensure_dir, write_file
682+
683+
684+class TestRender(CiTestCase):
685+
686+ with_logs = True
687+
688+ args = namedtuple('renderargs', 'user_data instance_data debug')
689+
690+ def setUp(self):
691+ super(TestRender, self).setUp()
692+ self.tmp = self.tmp_dir()
693+
694+ def test_handle_args_error_on_missing_user_data(self):
695+ """When user_data file path does not exist, log an error."""
696+ absent_file = self.tmp_path('user-data', dir=self.tmp)
697+ instance_data = self.tmp_path('instance-data', dir=self.tmp)
698+ write_file(instance_data, '{}')
699+ args = self.args(
700+ user_data=absent_file, instance_data=instance_data, debug=False)
701+ with mock.patch('sys.stderr', new_callable=StringIO):
702+ self.assertEqual(1, render.handle_args('anyname', args))
703+ self.assertIn(
704+ 'Missing user-data file: %s' % absent_file,
705+ self.logs.getvalue())
706+
707+ def test_handle_args_error_on_missing_instance_data(self):
708+ """When instance_data file path does not exist, log an error."""
709+ user_data = self.tmp_path('user-data', dir=self.tmp)
710+ absent_file = self.tmp_path('instance-data', dir=self.tmp)
711+ args = self.args(
712+ user_data=user_data, instance_data=absent_file, debug=False)
713+ with mock.patch('sys.stderr', new_callable=StringIO):
714+ self.assertEqual(1, render.handle_args('anyname', args))
715+ self.assertIn(
716+ 'Missing instance-data.json file: %s' % absent_file,
717+ self.logs.getvalue())
718+
719+ def test_handle_args_defaults_instance_data(self):
720+ """When no instance_data argument, default to configured run_dir."""
721+ user_data = self.tmp_path('user-data', dir=self.tmp)
722+ run_dir = self.tmp_path('run_dir', dir=self.tmp)
723+ ensure_dir(run_dir)
724+ paths = Paths({'run_dir': run_dir})
725+ self.add_patch('cloudinit.cmd.devel.render.read_cfg_paths', 'm_paths')
726+ self.m_paths.return_value = paths
727+ args = self.args(
728+ user_data=user_data, instance_data=None, debug=False)
729+ with mock.patch('sys.stderr', new_callable=StringIO):
730+ self.assertEqual(1, render.handle_args('anyname', args))
731+ json_file = os.path.join(run_dir, INSTANCE_JSON_FILE)
732+ self.assertIn(
733+ 'Missing instance-data.json file: %s' % json_file,
734+ self.logs.getvalue())
735+
736+ @skipUnlessJinja()
737+ def test_handle_args_renders_instance_data_vars_in_template(self):
738+ """If user_data file is a jinja template render instance-data vars."""
739+ user_data = self.tmp_path('user-data', dir=self.tmp)
740+ write_file(user_data, '##template: jinja\nrendering: {{ my_var }}')
741+ instance_data = self.tmp_path('instance-data', dir=self.tmp)
742+ write_file(instance_data, '{"my-var": "jinja worked"}')
743+ args = self.args(
744+ user_data=user_data, instance_data=instance_data, debug=True)
745+ with mock.patch('sys.stderr', new_callable=StringIO) as m_console_err:
746+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
747+ self.assertEqual(0, render.handle_args('anyname', args))
748+ self.assertIn(
749+ 'DEBUG: Converted jinja variables\n{', self.logs.getvalue())
750+ self.assertIn(
751+ 'DEBUG: Converted jinja variables\n{', m_console_err.getvalue())
752+ self.assertEqual('rendering: jinja worked', m_stdout.getvalue())
753+
754+ @skipUnlessJinja()
755+ def test_handle_args_warns_and_gives_up_on_invalid_jinja_operation(self):
756+ """If user_data file has invalid jinja operations log warnings."""
757+ user_data = self.tmp_path('user-data', dir=self.tmp)
758+ write_file(user_data, '##template: jinja\nrendering: {{ my-var }}')
759+ instance_data = self.tmp_path('instance-data', dir=self.tmp)
760+ write_file(instance_data, '{"my-var": "jinja worked"}')
761+ args = self.args(
762+ user_data=user_data, instance_data=instance_data, debug=True)
763+ with mock.patch('sys.stderr', new_callable=StringIO):
764+ self.assertEqual(1, render.handle_args('anyname', args))
765+ self.assertIn(
766+ 'WARNING: Ignoring jinja template for %s: Undefined jinja'
767+ ' variable: "my-var". Jinja tried subtraction. Perhaps you meant'
768+ ' "my_var"?' % user_data,
769+ self.logs.getvalue())
770+
771+# vi: ts=4 expandtab
772diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
773index d6ba90f..5a43702 100644
774--- a/cloudinit/cmd/main.py
775+++ b/cloudinit/cmd/main.py
776@@ -315,7 +315,7 @@ def main_init(name, args):
777 existing = "trust"
778
779 init.purge_cache()
780- # Delete the non-net file as well
781+ # Delete the no-net file as well
782 util.del_file(os.path.join(path_helper.get_cpath("data"), "no-net"))
783
784 # Stage 5
785@@ -339,7 +339,7 @@ def main_init(name, args):
786 " Likely bad things to come!"))
787 if not args.force:
788 init.apply_network_config(bring_up=not args.local)
789- LOG.debug("[%s] Exiting without datasource in local mode", mode)
790+ LOG.debug("[%s] Exiting without datasource", mode)
791 if mode == sources.DSMODE_LOCAL:
792 return (None, [])
793 else:
794@@ -348,6 +348,7 @@ def main_init(name, args):
795 LOG.debug("[%s] barreling on in force mode without datasource",
796 mode)
797
798+ _maybe_persist_instance_data(init)
799 # Stage 6
800 iid = init.instancify()
801 LOG.debug("[%s] %s will now be targeting instance id: %s. new=%s",
802@@ -490,6 +491,7 @@ def main_modules(action_name, args):
803 print_exc(msg)
804 if not args.force:
805 return [(msg)]
806+ _maybe_persist_instance_data(init)
807 # Stage 3
808 mods = stages.Modules(init, extract_fns(args), reporter=args.reporter)
809 # Stage 4
810@@ -541,6 +543,7 @@ def main_single(name, args):
811 " likely bad things to come!"))
812 if not args.force:
813 return 1
814+ _maybe_persist_instance_data(init)
815 # Stage 3
816 mods = stages.Modules(init, extract_fns(args), reporter=args.reporter)
817 mod_args = args.module_args
818@@ -688,6 +691,15 @@ def status_wrapper(name, args, data_d=None, link_d=None):
819 return len(v1[mode]['errors'])
820
821
822+def _maybe_persist_instance_data(init):
823+ """Write instance-data.json file if absent and datasource is restored."""
824+ if init.ds_restored:
825+ instance_data_file = os.path.join(
826+ init.paths.run_dir, sources.INSTANCE_JSON_FILE)
827+ if not os.path.exists(instance_data_file):
828+ init.datasource.persist_instance_data()
829+
830+
831 def _maybe_set_hostname(init, stage, retry_stage):
832 """Call set-hostname if metadata, vendordata or userdata provides it.
833
834@@ -779,6 +791,10 @@ def main(sysv_args=None):
835 ' pass to this module'))
836 parser_single.set_defaults(action=('single', main_single))
837
838+ parser_query = subparsers.add_parser(
839+ 'query',
840+ help='Query standardized instance metadata from the command line.')
841+
842 parser_dhclient = subparsers.add_parser('dhclient-hook',
843 help=('run the dhclient hook'
844 'to record network info'))
845@@ -830,6 +846,12 @@ def main(sysv_args=None):
846 clean_parser(parser_clean)
847 parser_clean.set_defaults(
848 action=('clean', handle_clean_args))
849+ elif sysv_args[0] == 'query':
850+ from cloudinit.cmd.query import (
851+ get_parser as query_parser, handle_args as handle_query_args)
852+ query_parser(parser_query)
853+ parser_query.set_defaults(
854+ action=('render', handle_query_args))
855 elif sysv_args[0] == 'status':
856 from cloudinit.cmd.status import (
857 get_parser as status_parser, handle_status_args)
858@@ -877,14 +899,18 @@ def main(sysv_args=None):
859 rname, rdesc, reporting_enabled=report_on)
860
861 with args.reporter:
862- return util.log_time(
863+ retval = util.log_time(
864 logfunc=LOG.debug, msg="cloud-init mode '%s'" % name,
865 get_uptime=True, func=functor, args=(name, args))
866+ reporting.flush_events()
867+ return retval
868
869
870 if __name__ == '__main__':
871 if 'TZ' not in os.environ:
872 os.environ['TZ'] = ":/etc/localtime"
873- main(sys.argv)
874+ return_value = main(sys.argv)
875+ if return_value:
876+ sys.exit(return_value)
877
878 # vi: ts=4 expandtab
879diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py
880new file mode 100644
881index 0000000..7d2d4fe
882--- /dev/null
883+++ b/cloudinit/cmd/query.py
884@@ -0,0 +1,155 @@
885+# This file is part of cloud-init. See LICENSE file for license information.
886+
887+"""Query standardized instance metadata from the command line."""
888+
889+import argparse
890+import os
891+import six
892+import sys
893+
894+from cloudinit.handlers.jinja_template import (
895+ convert_jinja_instance_data, render_jinja_payload)
896+from cloudinit.cmd.devel import addLogHandlerCLI, read_cfg_paths
897+from cloudinit import log
898+from cloudinit.sources import (
899+ INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE, REDACT_SENSITIVE_VALUE)
900+from cloudinit import util
901+
902+NAME = 'query'
903+LOG = log.getLogger(NAME)
904+
905+
906+def get_parser(parser=None):
907+ """Build or extend an arg parser for query utility.
908+
909+ @param parser: Optional existing ArgumentParser instance representing the
910+ query subcommand which will be extended to support the args of
911+ this utility.
912+
913+ @returns: ArgumentParser with proper argument configuration.
914+ """
915+ if not parser:
916+ parser = argparse.ArgumentParser(
917+ prog=NAME, description='Query cloud-init instance data')
918+ parser.add_argument(
919+ '-d', '--debug', action='store_true', default=False,
920+ help='Add verbose messages during template render')
921+ parser.add_argument(
922+ '-i', '--instance-data', type=str,
923+ help=('Path to instance-data.json file. Default is /run/cloud-init/%s'
924+ % INSTANCE_JSON_FILE))
925+ parser.add_argument(
926+ '-l', '--list-keys', action='store_true', default=False,
927+ help=('List query keys available at the provided instance-data'
928+ ' <varname>.'))
929+ parser.add_argument(
930+ '-u', '--user-data', type=str,
931+ help=('Path to user-data file. Default is'
932+ ' /var/lib/cloud/instance/user-data.txt'))
933+ parser.add_argument(
934+ '-v', '--vendor-data', type=str,
935+ help=('Path to vendor-data file. Default is'
936+ ' /var/lib/cloud/instance/vendor-data.txt'))
937+ parser.add_argument(
938+ 'varname', type=str, nargs='?',
939+ help=('A dot-delimited instance data variable to query from'
940+ ' instance-data query. For example: v2.local_hostname'))
941+ parser.add_argument(
942+ '-a', '--all', action='store_true', default=False, dest='dump_all',
943+ help='Dump all available instance-data')
944+ parser.add_argument(
945+ '-f', '--format', type=str, dest='format',
946+ help=('Optionally specify a custom output format string. Any'
947+ ' instance-data variable can be specified between double-curly'
948+ ' braces. For example -f "{{ v2.cloud_name }}"'))
949+ return parser
950+
951+
952+def handle_args(name, args):
953+ """Handle calls to 'cloud-init query' as a subcommand."""
954+ paths = None
955+ addLogHandlerCLI(LOG, log.DEBUG if args.debug else log.WARNING)
956+ if not any([args.list_keys, args.varname, args.format, args.dump_all]):
957+ LOG.error(
958+ 'Expected one of the options: --all, --format,'
959+ ' --list-keys or varname')
960+ get_parser().print_help()
961+ return 1
962+
963+ uid = os.getuid()
964+ if not all([args.instance_data, args.user_data, args.vendor_data]):
965+ paths = read_cfg_paths()
966+ if not args.instance_data:
967+ if uid == 0:
968+ default_json_fn = INSTANCE_JSON_SENSITIVE_FILE
969+ else:
970+ default_json_fn = INSTANCE_JSON_FILE # World readable
971+ instance_data_fn = os.path.join(paths.run_dir, default_json_fn)
972+ else:
973+ instance_data_fn = args.instance_data
974+ if not args.user_data:
975+ user_data_fn = os.path.join(paths.instance_link, 'user-data.txt')
976+ else:
977+ user_data_fn = args.user_data
978+ if not args.vendor_data:
979+ vendor_data_fn = os.path.join(paths.instance_link, 'vendor-data.txt')
980+ else:
981+ vendor_data_fn = args.vendor_data
982+
983+ try:
984+ instance_json = util.load_file(instance_data_fn)
985+ except IOError:
986+ LOG.error('Missing instance-data.json file: %s', instance_data_fn)
987+ return 1
988+
989+ instance_data = util.load_json(instance_json)
990+ if uid != 0:
991+ instance_data['userdata'] = (
992+ '<%s> file:%s' % (REDACT_SENSITIVE_VALUE, user_data_fn))
993+ instance_data['vendordata'] = (
994+ '<%s> file:%s' % (REDACT_SENSITIVE_VALUE, vendor_data_fn))
995+ else:
996+ instance_data['userdata'] = util.load_file(user_data_fn)
997+ instance_data['vendordata'] = util.load_file(vendor_data_fn)
998+ if args.format:
999+ payload = '## template: jinja\n{fmt}'.format(fmt=args.format)
1000+ rendered_payload = render_jinja_payload(
1001+ payload=payload, payload_fn='query commandline',
1002+ instance_data=instance_data,
1003+ debug=True if args.debug else False)
1004+ if rendered_payload:
1005+ print(rendered_payload)
1006+ return 0
1007+ return 1
1008+
1009+ response = convert_jinja_instance_data(instance_data)
1010+ if args.varname:
1011+ try:
1012+ for var in args.varname.split('.'):
1013+ response = response[var]
1014+ except KeyError:
1015+ LOG.error('Undefined instance-data key %s', args.varname)
1016+ return 1
1017+ if args.list_keys:
1018+ if not isinstance(response, dict):
1019+ LOG.error("--list-keys provided but '%s' is not a dict", var)
1020+ return 1
1021+ response = '\n'.join(sorted(response.keys()))
1022+ elif args.list_keys:
1023+ response = '\n'.join(sorted(response.keys()))
1024+ if not isinstance(response, six.string_types):
1025+ response = util.json_dumps(response)
1026+ print(response)
1027+ return 0
1028+
1029+
1030+def main():
1031+ """Tool to query specific instance-data values."""
1032+ parser = get_parser()
1033+ sys.exit(handle_args(NAME, parser.parse_args()))
1034+
1035+
1036+if __name__ == '__main__':
1037+ main()
1038+
1039+# vi: ts=4 expandtab
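
A few hedged invocations of the new query subcommand; option names come from get_parser() above, the dotted key names follow the standardized v1 schema, and actual output varies by cloud:

    # Dump all instance-data; run as root this reads the sensitive variant,
    # while non-root users see redacted userdata/vendordata values:
    cloud-init query --all
    # Fetch a single dot-delimited value:
    cloud-init query v1.local_hostname
    # List the keys nested under an object:
    cloud-init query --list-keys v1
    # Render an arbitrary jinja format string:
    cloud-init query --format '{{ v1.cloud_name }} in {{ v1.region }}'
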
1040diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py
1041index e2c54ae..a1e534f 100644
1042--- a/cloudinit/cmd/tests/test_main.py
1043+++ b/cloudinit/cmd/tests/test_main.py
1044@@ -125,7 +125,9 @@ class TestMain(FilesystemMockingTestCase):
1045 updated_cfg.update(
1046 {'def_log_file': '/var/log/cloud-init.log',
1047 'log_cfgs': [],
1048- 'syslog_fix_perms': ['syslog:adm', 'root:adm', 'root:wheel'],
1049+ 'syslog_fix_perms': [
1050+ 'syslog:adm', 'root:adm', 'root:wheel', 'root:root'
1051+ ],
1052 'vendor_data': {'enabled': True, 'prefix': []}})
1053 updated_cfg.pop('system_info')
1054
1055diff --git a/cloudinit/cmd/tests/test_query.py b/cloudinit/cmd/tests/test_query.py
1056new file mode 100644
1057index 0000000..fb87c6a
1058--- /dev/null
1059+++ b/cloudinit/cmd/tests/test_query.py
1060@@ -0,0 +1,193 @@
1061+# This file is part of cloud-init. See LICENSE file for license information.
1062+
1063+from six import StringIO
1064+from textwrap import dedent
1065+import os
1066+
1067+from collections import namedtuple
1068+from cloudinit.cmd import query
1069+from cloudinit.helpers import Paths
1070+from cloudinit.sources import REDACT_SENSITIVE_VALUE, INSTANCE_JSON_FILE
1071+from cloudinit.tests.helpers import CiTestCase, mock
1072+from cloudinit.util import ensure_dir, write_file
1073+
1074+
1075+class TestQuery(CiTestCase):
1076+
1077+ with_logs = True
1078+
1079+ args = namedtuple(
1080+ 'queryargs',
1081+ ('debug dump_all format instance_data list_keys user_data vendor_data'
1082+ ' varname'))
1083+
1084+ def setUp(self):
1085+ super(TestQuery, self).setUp()
1086+ self.tmp = self.tmp_dir()
1087+ self.instance_data = self.tmp_path('instance-data', dir=self.tmp)
1088+
1089+ def test_handle_args_error_on_missing_param(self):
1090+ """Error when missing required parameters and print usage."""
1091+ args = self.args(
1092+ debug=False, dump_all=False, format=None, instance_data=None,
1093+ list_keys=False, user_data=None, vendor_data=None, varname=None)
1094+ with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
1095+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
1096+ self.assertEqual(1, query.handle_args('anyname', args))
1097+ expected_error = (
1098+ 'ERROR: Expected one of the options: --all, --format, --list-keys'
1099+ ' or varname\n')
1100+ self.assertIn(expected_error, self.logs.getvalue())
1101+ self.assertIn('usage: query', m_stdout.getvalue())
1102+ self.assertIn(expected_error, m_stderr.getvalue())
1103+
1104+ def test_handle_args_error_on_missing_instance_data(self):
1105+ """When instance_data file path does not exist, log an error."""
1106+ absent_fn = self.tmp_path('absent', dir=self.tmp)
1107+ args = self.args(
1108+ debug=False, dump_all=True, format=None, instance_data=absent_fn,
1109+ list_keys=False, user_data='ud', vendor_data='vd', varname=None)
1110+ with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
1111+ self.assertEqual(1, query.handle_args('anyname', args))
1112+ self.assertIn(
1113+ 'ERROR: Missing instance-data.json file: %s' % absent_fn,
1114+ self.logs.getvalue())
1115+ self.assertIn(
1116+ 'ERROR: Missing instance-data.json file: %s' % absent_fn,
1117+ m_stderr.getvalue())
1118+
1119+ def test_handle_args_defaults_instance_data(self):
1120+ """When no instance_data argument, default to configured run_dir."""
1121+ args = self.args(
1122+ debug=False, dump_all=True, format=None, instance_data=None,
1123+ list_keys=False, user_data=None, vendor_data=None, varname=None)
1124+ run_dir = self.tmp_path('run_dir', dir=self.tmp)
1125+ ensure_dir(run_dir)
1126+ paths = Paths({'run_dir': run_dir})
1127+ self.add_patch('cloudinit.cmd.query.read_cfg_paths', 'm_paths')
1128+ self.m_paths.return_value = paths
1129+ with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
1130+ self.assertEqual(1, query.handle_args('anyname', args))
1131+ json_file = os.path.join(run_dir, INSTANCE_JSON_FILE)
1132+ self.assertIn(
1133+ 'ERROR: Missing instance-data.json file: %s' % json_file,
1134+ self.logs.getvalue())
1135+ self.assertIn(
1136+ 'ERROR: Missing instance-data.json file: %s' % json_file,
1137+ m_stderr.getvalue())
1138+
1139+ def test_handle_args_dumps_all_instance_data(self):
1140+ """When --all is specified query will dump all instance data vars."""
1141+ write_file(self.instance_data, '{"my-var": "it worked"}')
1142+ args = self.args(
1143+ debug=False, dump_all=True, format=None,
1144+ instance_data=self.instance_data, list_keys=False,
1145+ user_data='ud', vendor_data='vd', varname=None)
1146+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
1147+ self.assertEqual(0, query.handle_args('anyname', args))
1148+ self.assertEqual(
1149+ '{\n "my_var": "it worked",\n "userdata": "<%s> file:ud",\n'
1150+ ' "vendordata": "<%s> file:vd"\n}\n' % (
1151+ REDACT_SENSITIVE_VALUE, REDACT_SENSITIVE_VALUE),
1152+ m_stdout.getvalue())
1153+
1154+ def test_handle_args_returns_top_level_varname(self):
1155+ """When the argument varname is passed, report its value."""
1156+ write_file(self.instance_data, '{"my-var": "it worked"}')
1157+ args = self.args(
1158+ debug=False, dump_all=True, format=None,
1159+ instance_data=self.instance_data, list_keys=False,
1160+ user_data='ud', vendor_data='vd', varname='my_var')
1161+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
1162+ self.assertEqual(0, query.handle_args('anyname', args))
1163+ self.assertEqual('it worked\n', m_stdout.getvalue())
1164+
1165+ def test_handle_args_returns_nested_varname(self):
1166+ """If user_data file is a jinja template render instance-data vars."""
1167+ write_file(self.instance_data,
1168+ '{"v1": {"key-2": "value-2"}, "my-var": "it worked"}')
1169+ args = self.args(
1170+ debug=False, dump_all=False, format=None,
1171+ instance_data=self.instance_data, user_data='ud', vendor_data='vd',
1172+ list_keys=False, varname='v1.key_2')
1173+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
1174+ self.assertEqual(0, query.handle_args('anyname', args))
1175+ self.assertEqual('value-2\n', m_stdout.getvalue())
1176+
1177+ def test_handle_args_returns_standardized_vars_to_top_level_aliases(self):
1178+ """Any standardized vars under v# are promoted as top-level aliases."""
1179+ write_file(
1180+ self.instance_data,
1181+ '{"v1": {"v1_1": "val1.1"}, "v2": {"v2_2": "val2.2"},'
1182+ ' "top": "gun"}')
1183+ expected = dedent("""\
1184+ {
1185+ "top": "gun",
1186+ "userdata": "<redacted for non-root user> file:ud",
1187+ "v1": {
1188+ "v1_1": "val1.1"
1189+ },
1190+ "v1_1": "val1.1",
1191+ "v2": {
1192+ "v2_2": "val2.2"
1193+ },
1194+ "v2_2": "val2.2",
1195+ "vendordata": "<redacted for non-root user> file:vd"
1196+ }
1197+ """)
1198+ args = self.args(
1199+ debug=False, dump_all=True, format=None,
1200+ instance_data=self.instance_data, user_data='ud', vendor_data='vd',
1201+ list_keys=False, varname=None)
1202+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
1203+ self.assertEqual(0, query.handle_args('anyname', args))
1204+ self.assertEqual(expected, m_stdout.getvalue())
1205+
1206+ def test_handle_args_list_keys_sorts_top_level_keys_when_no_varname(self):
1207+ """Sort all top-level keys when only --list-keys provided."""
1208+ write_file(
1209+ self.instance_data,
1210+ '{"v1": {"v1_1": "val1.1"}, "v2": {"v2_2": "val2.2"},'
1211+ ' "top": "gun"}')
1212+ expected = 'top\nuserdata\nv1\nv1_1\nv2\nv2_2\nvendordata\n'
1213+ args = self.args(
1214+ debug=False, dump_all=False, format=None,
1215+ instance_data=self.instance_data, list_keys=True, user_data='ud',
1216+ vendor_data='vd', varname=None)
1217+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
1218+ self.assertEqual(0, query.handle_args('anyname', args))
1219+ self.assertEqual(expected, m_stdout.getvalue())
1220+
1221+ def test_handle_args_list_keys_sorts_nested_keys_when_varname(self):
1222+ """Sort all nested keys of varname object when --list-keys provided."""
1223+ write_file(
1224+ self.instance_data,
1225+ '{"v1": {"v1_1": "val1.1", "v1_2": "val1.2"}, "v2":' +
1226+ ' {"v2_2": "val2.2"}, "top": "gun"}')
1227+ expected = 'v1_1\nv1_2\n'
1228+ args = self.args(
1229+ debug=False, dump_all=False, format=None,
1230+ instance_data=self.instance_data, list_keys=True,
1231+ user_data='ud', vendor_data='vd', varname='v1')
1232+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
1233+ self.assertEqual(0, query.handle_args('anyname', args))
1234+ self.assertEqual(expected, m_stdout.getvalue())
1235+
1236+ def test_handle_args_list_keys_errors_when_varname_is_not_a_dict(self):
1237+ """Raise an error when --list-keys and varname specify a non-list."""
1238+ write_file(
1239+ self.instance_data,
1240+ '{"v1": {"v1_1": "val1.1", "v1_2": "val1.2"}, "v2": ' +
1241+ '{"v2_2": "val2.2"}, "top": "gun"}')
1242+ expected_error = "ERROR: --list-keys provided but 'top' is not a dict"
1243+ args = self.args(
1244+ debug=False, dump_all=False, format=None,
1245+ instance_data=self.instance_data, list_keys=True, user_data='ud',
1246+ vendor_data='vd', varname='top')
1247+ with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
1248+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
1249+ self.assertEqual(1, query.handle_args('anyname', args))
1250+ self.assertEqual('', m_stdout.getvalue())
1251+ self.assertIn(expected_error, m_stderr.getvalue())
1252+
1253+# vi: ts=4 expandtab
1254diff --git a/cloudinit/cmd/tests/test_status.py b/cloudinit/cmd/tests/test_status.py
1255index 37a8993..aded858 100644
1256--- a/cloudinit/cmd/tests/test_status.py
1257+++ b/cloudinit/cmd/tests/test_status.py
1258@@ -39,7 +39,8 @@ class TestStatus(CiTestCase):
1259 ensure_file(self.disable_file) # Create the ignored disable file
1260 (is_disabled, reason) = wrap_and_call(
1261 'cloudinit.cmd.status',
1262- {'uses_systemd': False},
1263+ {'uses_systemd': False,
1264+ 'get_cmdline': "root=/dev/my-root not-important"},
1265 status._is_cloudinit_disabled, self.disable_file, self.paths)
1266 self.assertFalse(
1267 is_disabled, 'expected enabled cloud-init on sysvinit')
1268@@ -50,7 +51,8 @@ class TestStatus(CiTestCase):
1269 ensure_file(self.disable_file) # Create observed disable file
1270 (is_disabled, reason) = wrap_and_call(
1271 'cloudinit.cmd.status',
1272- {'uses_systemd': True},
1273+ {'uses_systemd': True,
1274+ 'get_cmdline': "root=/dev/my-root not-important"},
1275 status._is_cloudinit_disabled, self.disable_file, self.paths)
1276 self.assertTrue(is_disabled, 'expected disabled cloud-init')
1277 self.assertEqual(
1278diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
1279index ac72ac4..24a8ebe 100644
1280--- a/cloudinit/config/cc_lxd.py
1281+++ b/cloudinit/config/cc_lxd.py
1282@@ -104,6 +104,7 @@ def handle(name, cfg, cloud, log, args):
1283 'network_address', 'network_port', 'storage_backend',
1284 'storage_create_device', 'storage_create_loop',
1285 'storage_pool', 'trust_password')
1286+ util.subp(['lxd', 'waitready', '--timeout=300'])
1287 cmd = ['lxd', 'init', '--auto']
1288 for k in init_keys:
1289 if init_cfg.get(k):
1290@@ -260,7 +261,9 @@ def bridge_to_cmd(bridge_cfg):
1291
1292
1293 def _lxc(cmd):
1294- env = {'LC_ALL': 'C'}
1295+ env = {'LC_ALL': 'C',
1296+ 'HOME': os.environ.get('HOME', '/root'),
1297+ 'USER': os.environ.get('USER', 'root')}
1298 util.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env)
1299
1300
1301@@ -276,27 +279,27 @@ def maybe_cleanup_default(net_name, did_init, create, attach,
1302 if net_name != _DEFAULT_NETWORK_NAME or not did_init:
1303 return
1304
1305- fail_assume_enoent = " failed. Assuming it did not exist."
1306- succeeded = " succeeded."
1307+ fail_assume_enoent = "failed. Assuming it did not exist."
1308+ succeeded = "succeeded."
1309 if create:
1310- msg = "Deletion of lxd network '%s'" % net_name
1311+ msg = "Deletion of lxd network '%s' %s"
1312 try:
1313 _lxc(["network", "delete", net_name])
1314- LOG.debug(msg + succeeded)
1315+ LOG.debug(msg, net_name, succeeded)
1316 except util.ProcessExecutionError as e:
1317 if e.exit_code != 1:
1318 raise e
1319- LOG.debug(msg + fail_assume_enoent)
1320+ LOG.debug(msg, net_name, fail_assume_enoent)
1321
1322 if attach:
1323- msg = "Removal of device '%s' from profile '%s'" % (nic_name, profile)
1324+ msg = "Removal of device '%s' from profile '%s' %s"
1325 try:
1326 _lxc(["profile", "device", "remove", profile, nic_name])
1327- LOG.debug(msg + succeeded)
1328+ LOG.debug(msg, nic_name, profile, succeeded)
1329 except util.ProcessExecutionError as e:
1330 if e.exit_code != 1:
1331 raise e
1332- LOG.debug(msg + fail_assume_enoent)
1333+ LOG.debug(msg, nic_name, profile, fail_assume_enoent)
1334
1335
1336 # vi: ts=4 expandtab
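Note: the cc_lxd logging changes above switch from pre-formatted strings to
logging's lazy %-style arguments, so messages are only interpolated when a
DEBUG record is actually emitted. A minimal standalone sketch of the pattern:

    import logging

    LOG = logging.getLogger(__name__)
    net_name = 'lxdbr0'
    # Eager (old): the message string is built even if DEBUG is off.
    #   LOG.debug("Deletion of lxd network '%s'" % net_name + " succeeded.")
    # Lazy (new): interpolation happens only when the record is emitted.
    LOG.debug("Deletion of lxd network '%s' %s", net_name, 'succeeded.')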
1337diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
1338index 1c67943..edee01e 100644
1339--- a/cloudinit/config/cc_rh_subscription.py
1340+++ b/cloudinit/config/cc_rh_subscription.py
1341@@ -126,7 +126,6 @@ class SubscriptionManager(object):
1342 self.enable_repo = self.rhel_cfg.get('enable-repo')
1343 self.disable_repo = self.rhel_cfg.get('disable-repo')
1344 self.servicelevel = self.rhel_cfg.get('service-level')
1345- self.subman = ['subscription-manager']
1346
1347 def log_success(self, msg):
1348 '''Simple wrapper for logging info messages. Useful for unittests'''
1349@@ -173,21 +172,12 @@ class SubscriptionManager(object):
1350 cmd = ['identity']
1351
1352 try:
1353- self._sub_man_cli(cmd)
1354+ _sub_man_cli(cmd)
1355 except util.ProcessExecutionError:
1356 return False
1357
1358 return True
1359
1360- def _sub_man_cli(self, cmd, logstring_val=False):
1361- '''
1362- Uses the prefered cloud-init subprocess def of util.subp
1363- and runs subscription-manager. Breaking this to a
1364- separate function for later use in mocking and unittests
1365- '''
1366- cmd = self.subman + cmd
1367- return util.subp(cmd, logstring=logstring_val)
1368-
1369 def rhn_register(self):
1370 '''
1371 Registers the system by userid and password or activation key
1372@@ -209,7 +199,7 @@ class SubscriptionManager(object):
1373 cmd.append("--serverurl={0}".format(self.server_hostname))
1374
1375 try:
1376- return_out = self._sub_man_cli(cmd, logstring_val=True)[0]
1377+ return_out = _sub_man_cli(cmd, logstring_val=True)[0]
1378 except util.ProcessExecutionError as e:
1379 if e.stdout == "":
1380 self.log_warn("Registration failed due "
1381@@ -232,7 +222,7 @@ class SubscriptionManager(object):
1382
1383 # Attempting to register the system only
1384 try:
1385- return_out = self._sub_man_cli(cmd, logstring_val=True)[0]
1386+ return_out = _sub_man_cli(cmd, logstring_val=True)[0]
1387 except util.ProcessExecutionError as e:
1388 if e.stdout == "":
1389 self.log_warn("Registration failed due "
1390@@ -255,7 +245,7 @@ class SubscriptionManager(object):
1391 .format(self.servicelevel)]
1392
1393 try:
1394- return_out = self._sub_man_cli(cmd)[0]
1395+ return_out = _sub_man_cli(cmd)[0]
1396 except util.ProcessExecutionError as e:
1397 if e.stdout.rstrip() != '':
1398 for line in e.stdout.split("\n"):
1399@@ -273,7 +263,7 @@ class SubscriptionManager(object):
1400 def _set_auto_attach(self):
1401 cmd = ['attach', '--auto']
1402 try:
1403- return_out = self._sub_man_cli(cmd)[0]
1404+ return_out = _sub_man_cli(cmd)[0]
1405 except util.ProcessExecutionError as e:
1406 self.log_warn("Auto-attach failed with: {0}".format(e))
1407 return False
1408@@ -292,12 +282,12 @@ class SubscriptionManager(object):
1409
1410 # Get all available pools
1411 cmd = ['list', '--available', '--pool-only']
1412- results = self._sub_man_cli(cmd)[0]
1413+ results = _sub_man_cli(cmd)[0]
1414 available = (results.rstrip()).split("\n")
1415
1416 # Get all consumed pools
1417 cmd = ['list', '--consumed', '--pool-only']
1418- results = self._sub_man_cli(cmd)[0]
1419+ results = _sub_man_cli(cmd)[0]
1420 consumed = (results.rstrip()).split("\n")
1421
1422 return available, consumed
1423@@ -309,14 +299,14 @@ class SubscriptionManager(object):
1424 '''
1425
1426 cmd = ['repos', '--list-enabled']
1427- return_out = self._sub_man_cli(cmd)[0]
1428+ return_out = _sub_man_cli(cmd)[0]
1429 active_repos = []
1430 for repo in return_out.split("\n"):
1431 if "Repo ID:" in repo:
1432 active_repos.append((repo.split(':')[1]).strip())
1433
1434 cmd = ['repos', '--list-disabled']
1435- return_out = self._sub_man_cli(cmd)[0]
1436+ return_out = _sub_man_cli(cmd)[0]
1437
1438 inactive_repos = []
1439 for repo in return_out.split("\n"):
1440@@ -346,7 +336,7 @@ class SubscriptionManager(object):
1441 if len(pool_list) > 0:
1442 cmd.extend(pool_list)
1443 try:
1444- self._sub_man_cli(cmd)
1445+ _sub_man_cli(cmd)
1446 self.log.debug("Attached the following pools to your "
1447 "system: %s", (", ".join(pool_list))
1448 .replace('--pool=', ''))
1449@@ -423,7 +413,7 @@ class SubscriptionManager(object):
1450 cmd.extend(enable_list)
1451
1452 try:
1453- self._sub_man_cli(cmd)
1454+ _sub_man_cli(cmd)
1455 except util.ProcessExecutionError as e:
1456 self.log_warn("Unable to alter repos due to {0}".format(e))
1457 return False
1458@@ -439,4 +429,15 @@ class SubscriptionManager(object):
1459 def is_configured(self):
1460 return bool((self.userid and self.password) or self.activation_key)
1461
1462+
1463+def _sub_man_cli(cmd, logstring_val=False):
1464+ '''
1465+ Uses the preferred cloud-init subprocess helper util.subp
1466+ to run subscription-manager. Broken out into a separate
1467+ function for ease of mocking in unittests
1468+ '''
1469+ return util.subp(['subscription-manager'] + cmd,
1470+ logstring=logstring_val)
1471+
1472+
1473 # vi: ts=4 expandtab
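Note: moving _sub_man_cli from a method to a module-level function lets tests
patch it once by dotted path instead of stubbing an attribute on every
SubscriptionManager instance. A sketch of the test-side effect (the canned
output is illustrative only):

    from unittest import mock

    # Every caller inside the module now sees the fake.
    with mock.patch(
            'cloudinit.config.cc_rh_subscription._sub_man_cli') as m_cli:
        m_cli.return_value = ('pool-1\npool-2\n', '')
        # exercise SubscriptionManager code here without ever running
        # subscription-manager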
1474diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
1475index 45204a0..f8f7cb3 100755
1476--- a/cloudinit/config/cc_ssh.py
1477+++ b/cloudinit/config/cc_ssh.py
1478@@ -101,10 +101,6 @@ from cloudinit.distros import ug_util
1479 from cloudinit import ssh_util
1480 from cloudinit import util
1481
1482-DISABLE_ROOT_OPTS = (
1483- "no-port-forwarding,no-agent-forwarding,"
1484- "no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\""
1485- " rather than the user \\\"root\\\".\';echo;sleep 10\"")
1486
1487 GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519']
1488 KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key'
1489@@ -185,7 +181,7 @@ def handle(_name, cfg, cloud, log, _args):
1490 (user, _user_config) = ug_util.extract_default(users)
1491 disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
1492 disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
1493- DISABLE_ROOT_OPTS)
1494+ ssh_util.DISABLE_USER_OPTS)
1495
1496 keys = cloud.get_public_ssh_keys() or []
1497 if "ssh_authorized_keys" in cfg:
1498@@ -207,6 +203,7 @@ def apply_credentials(keys, user, disable_root, disable_root_opts):
1499 if not user:
1500 user = "NONE"
1501 key_prefix = disable_root_opts.replace('$USER', user)
1502+ key_prefix = key_prefix.replace('$DISABLE_USER', 'root')
1503 else:
1504 key_prefix = ''
1505
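Note: cc_ssh now consumes ssh_util.DISABLE_USER_OPTS, which templates both the
allowed and the disabled account names. A sketch of the substitution performed
in apply_credentials (the option string is abbreviated and unescaped here; the
full value lives in cloudinit.ssh_util):

    opts = ('no-port-forwarding,command="echo Please login as the user'
            ' $USER rather than the user $DISABLE_USER.;sleep 10"')
    opts = opts.replace('$USER', 'ubuntu')        # login target
    opts = opts.replace('$DISABLE_USER', 'root')  # disabled account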
1506diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py
1507index c95bdaa..c32a743 100644
1508--- a/cloudinit/config/cc_users_groups.py
1509+++ b/cloudinit/config/cc_users_groups.py
1510@@ -52,8 +52,17 @@ config keys for an entry in ``users`` are as follows:
1511 associated with the address, username and SSH keys will be requested from
1512 there. Default: none
1513 - ``ssh_authorized_keys``: Optional. List of ssh keys to add to user's
1514- authkeys file. Default: none
1515- - ``ssh_import_id``: Optional. SSH id to import for user. Default: none
1516+ authkeys file. Default: none. This key cannot be combined with
1517+ ``ssh_redirect_user``.
1518+ - ``ssh_import_id``: Optional. SSH id to import for user. Default: none.
1519+ This key cannot be combined with ``ssh_redirect_user``.
1520+ - ``ssh_redirect_user``: Optional. Boolean set to true to disable SSH
1521+ logins for this user. When specified, all cloud meta-data public ssh
1522+ keys will be set up in a disabled state for this username. Any ssh login
1523+ as this username will time out and prompt with a message to log in
1524+ instead as the configured <default_username> for this instance.
1525+ Default: false. This key cannot be combined with ``ssh_import_id`` or
1526+ ``ssh_authorized_keys``.
1527 - ``sudo``: Optional. Sudo rule to use, list of sudo rules to use or False.
1528 Default: none. An absence of sudo key, or a value of none or false
1529 will result in no sudo rules being written for the user.
1530@@ -101,6 +110,7 @@ config keys for an entry in ``users`` are as follows:
1531 selinux_user: <selinux username>
1532 shell: <shell path>
1533 snapuser: <email>
1534+ ssh_redirect_user: <true/false>
1535 ssh_authorized_keys:
1536 - <key>
1537 - <key>
1538@@ -114,17 +124,44 @@ config keys for an entry in ``users`` are as follows:
1539 # since the module attribute 'distros'
1540 # is a list of distros that are supported, not a sub-module
1541 from cloudinit.distros import ug_util
1542+from cloudinit import log as logging
1543
1544 from cloudinit.settings import PER_INSTANCE
1545
1546+LOG = logging.getLogger(__name__)
1547+
1548 frequency = PER_INSTANCE
1549
1550
1551 def handle(name, cfg, cloud, _log, _args):
1552 (users, groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
1553+ (default_user, _user_config) = ug_util.extract_default(users)
1554+ cloud_keys = cloud.get_public_ssh_keys() or []
1555 for (name, members) in groups.items():
1556 cloud.distro.create_group(name, members)
1557 for (user, config) in users.items():
1558+ ssh_redirect_user = config.pop("ssh_redirect_user", False)
1559+ if ssh_redirect_user:
1560+ if 'ssh_authorized_keys' in config or 'ssh_import_id' in config:
1561+ raise ValueError(
1562+ 'Not creating user %s. ssh_redirect_user cannot be'
1563+ ' provided with ssh_import_id or ssh_authorized_keys' %
1564+ user)
1565+ if ssh_redirect_user not in (True, 'default'):
1566+ raise ValueError(
1567+ 'Not creating user %s. Invalid value of'
1568+ ' ssh_redirect_user: %s. Expected values: true, default'
1569+ ' or false.' % (user, ssh_redirect_user))
1570+ if default_user is None:
1571+ LOG.warning(
1572+ 'Ignoring ssh_redirect_user: %s for %s.'
1573+ ' No default_user defined.'
1574+ ' Perhaps missing cloud configuration users: '
1575+ ' [default, ..].',
1576+ ssh_redirect_user, user)
1577+ else:
1578+ config['ssh_redirect_user'] = default_user
1579+ config['cloud_public_ssh_keys'] = cloud_keys
1580 cloud.distro.create_user(user, **config)
1581
1582 # vi: ts=4 expandtab
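Note: a sketch of what the new handle() logic does for a redirected user,
assuming 'ubuntu' is the distro default user in this example:

    # Merged cloud-config as handle() receives it (illustrative only):
    cfg = {'users': ['default', {'name': 'bob', 'ssh_redirect_user': True}]}
    # handle() pops ssh_redirect_user, validates it is True or 'default',
    # then rewrites bob's config before create_user('bob', ...):
    #   config['ssh_redirect_user'] = 'ubuntu'        # default user
    #   config['cloud_public_ssh_keys'] = cloud_keys  # metadata keys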
1583diff --git a/cloudinit/config/tests/test_snap.py b/cloudinit/config/tests/test_snap.py
1584index 34c80f1..3c47289 100644
1585--- a/cloudinit/config/tests/test_snap.py
1586+++ b/cloudinit/config/tests/test_snap.py
1587@@ -162,6 +162,7 @@ class TestAddAssertions(CiTestCase):
1588 class TestRunCommands(CiTestCase):
1589
1590 with_logs = True
1591+ allowed_subp = [CiTestCase.SUBP_SHELL_TRUE]
1592
1593 def setUp(self):
1594 super(TestRunCommands, self).setUp()
1595@@ -424,8 +425,10 @@ class TestHandle(CiTestCase):
1596 'snap': {'commands': ['echo "HI" >> %s' % outfile,
1597 'echo "MOM" >> %s' % outfile]}}
1598 mock_path = 'cloudinit.config.cc_snap.sys.stderr'
1599- with mock.patch(mock_path, new_callable=StringIO):
1600- handle('snap', cfg=cfg, cloud=None, log=self.logger, args=None)
1601+ with self.allow_subp([CiTestCase.SUBP_SHELL_TRUE]):
1602+ with mock.patch(mock_path, new_callable=StringIO):
1603+ handle('snap', cfg=cfg, cloud=None, log=self.logger, args=None)
1604+
1605 self.assertEqual('HI\nMOM\n', util.load_file(outfile))
1606
1607 @mock.patch('cloudinit.config.cc_snap.util.subp')
1608diff --git a/cloudinit/config/tests/test_ssh.py b/cloudinit/config/tests/test_ssh.py
1609new file mode 100644
1610index 0000000..c8a4271
1611--- /dev/null
1612+++ b/cloudinit/config/tests/test_ssh.py
1613@@ -0,0 +1,151 @@
1614+# This file is part of cloud-init. See LICENSE file for license information.
1615+
1616+
1617+from cloudinit.config import cc_ssh
1618+from cloudinit import ssh_util
1619+from cloudinit.tests.helpers import CiTestCase, mock
1620+
1621+MODPATH = "cloudinit.config.cc_ssh."
1622+
1623+
1624+@mock.patch(MODPATH + "ssh_util.setup_user_keys")
1625+class TestHandleSsh(CiTestCase):
1626+ """Test cc_ssh handling of ssh config."""
1627+
1628+ def test_apply_credentials_with_user(self, m_setup_keys):
1629+ """Apply keys for the given user and root."""
1630+ keys = ["key1"]
1631+ user = "clouduser"
1632+ cc_ssh.apply_credentials(keys, user, False, ssh_util.DISABLE_USER_OPTS)
1633+ self.assertEqual([mock.call(set(keys), user),
1634+ mock.call(set(keys), "root", options="")],
1635+ m_setup_keys.call_args_list)
1636+
1637+ def test_apply_credentials_with_no_user(self, m_setup_keys):
1638+ """Apply keys for root only."""
1639+ keys = ["key1"]
1640+ user = None
1641+ cc_ssh.apply_credentials(keys, user, False, ssh_util.DISABLE_USER_OPTS)
1642+ self.assertEqual([mock.call(set(keys), "root", options="")],
1643+ m_setup_keys.call_args_list)
1644+
1645+ def test_apply_credentials_with_user_disable_root(self, m_setup_keys):
1646+ """Apply keys for the given user and disable root ssh."""
1647+ keys = ["key1"]
1648+ user = "clouduser"
1649+ options = ssh_util.DISABLE_USER_OPTS
1650+ cc_ssh.apply_credentials(keys, user, True, options)
1651+ options = options.replace("$USER", user)
1652+ options = options.replace("$DISABLE_USER", "root")
1653+ self.assertEqual([mock.call(set(keys), user),
1654+ mock.call(set(keys), "root", options=options)],
1655+ m_setup_keys.call_args_list)
1656+
1657+ def test_apply_credentials_with_no_user_disable_root(self, m_setup_keys):
1658+ """Apply keys no user and disable root ssh."""
1659+ keys = ["key1"]
1660+ user = None
1661+ options = ssh_util.DISABLE_USER_OPTS
1662+ cc_ssh.apply_credentials(keys, user, True, options)
1663+ options = options.replace("$USER", "NONE")
1664+ options = options.replace("$DISABLE_USER", "root")
1665+ self.assertEqual([mock.call(set(keys), "root", options=options)],
1666+ m_setup_keys.call_args_list)
1667+
1668+ @mock.patch(MODPATH + "glob.glob")
1669+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
1670+ @mock.patch(MODPATH + "os.path.exists")
1671+ def test_handle_no_cfg(self, m_path_exists, m_nug,
1672+ m_glob, m_setup_keys):
1673+ """Test that handle with no config does not regenerate existing keyfiles."""
1674+ cfg = {}
1675+ keys = ["key1"]
1676+ m_glob.return_value = [] # Return no matching keys to prevent removal
1677+ # Mock os.path.exists to True to short-circuit the key writing logic
1678+ m_path_exists.return_value = True
1679+ m_nug.return_value = ([], {})
1680+ cloud = self.tmp_cloud(
1681+ distro='ubuntu', metadata={'public-keys': keys})
1682+ cc_ssh.handle("name", cfg, cloud, None, None)
1683+ options = ssh_util.DISABLE_USER_OPTS.replace("$USER", "NONE")
1684+ options = options.replace("$DISABLE_USER", "root")
1685+ m_glob.assert_called_once_with('/etc/ssh/ssh_host_*key*')
1686+ self.assertIn(
1687+ [mock.call('/etc/ssh/ssh_host_rsa_key'),
1688+ mock.call('/etc/ssh/ssh_host_dsa_key'),
1689+ mock.call('/etc/ssh/ssh_host_ecdsa_key'),
1690+ mock.call('/etc/ssh/ssh_host_ed25519_key')],
1691+ m_path_exists.call_args_list)
1692+ self.assertEqual([mock.call(set(keys), "root", options=options)],
1693+ m_setup_keys.call_args_list)
1694+
1695+ @mock.patch(MODPATH + "glob.glob")
1696+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
1697+ @mock.patch(MODPATH + "os.path.exists")
1698+ def test_handle_no_cfg_and_default_root(self, m_path_exists, m_nug,
1699+ m_glob, m_setup_keys):
1700+ """Test handle with no config and a default distro user."""
1701+ cfg = {}
1702+ keys = ["key1"]
1703+ user = "clouduser"
1704+ m_glob.return_value = [] # Return no matching keys to prevent removal
1705+ # Mock os.path.exists to True to short-circuit the key writing logic
1706+ m_path_exists.return_value = True
1707+ m_nug.return_value = ({user: {"default": user}}, {})
1708+ cloud = self.tmp_cloud(
1709+ distro='ubuntu', metadata={'public-keys': keys})
1710+ cc_ssh.handle("name", cfg, cloud, None, None)
1711+
1712+ options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user)
1713+ options = options.replace("$DISABLE_USER", "root")
1714+ self.assertEqual([mock.call(set(keys), user),
1715+ mock.call(set(keys), "root", options=options)],
1716+ m_setup_keys.call_args_list)
1717+
1718+ @mock.patch(MODPATH + "glob.glob")
1719+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
1720+ @mock.patch(MODPATH + "os.path.exists")
1721+ def test_handle_cfg_with_explicit_disable_root(self, m_path_exists, m_nug,
1722+ m_glob, m_setup_keys):
1723+ """Test handle with explicit disable_root and a default distro user."""
1724+ # This test is identical to test_handle_no_cfg_and_default_root,
1725+ # except this uses an explicit cfg value
1726+ cfg = {"disable_root": True}
1727+ keys = ["key1"]
1728+ user = "clouduser"
1729+ m_glob.return_value = [] # Return no matching keys to prevent removal
1730+ # Mock os.path.exists to True to short-circuit the key writing logic
1731+ m_path_exists.return_value = True
1732+ m_nug.return_value = ({user: {"default": user}}, {})
1733+ cloud = self.tmp_cloud(
1734+ distro='ubuntu', metadata={'public-keys': keys})
1735+ cc_ssh.handle("name", cfg, cloud, None, None)
1736+
1737+ options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user)
1738+ options = options.replace("$DISABLE_USER", "root")
1739+ self.assertEqual([mock.call(set(keys), user),
1740+ mock.call(set(keys), "root", options=options)],
1741+ m_setup_keys.call_args_list)
1742+
1743+ @mock.patch(MODPATH + "glob.glob")
1744+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
1745+ @mock.patch(MODPATH + "os.path.exists")
1746+ def test_handle_cfg_without_disable_root(self, m_path_exists, m_nug,
1747+ m_glob, m_setup_keys):
1748+ """Test handle with disable_root == False."""
1749+ # When disable_root == False, the ssh redirect for root is skipped
1750+ cfg = {"disable_root": False}
1751+ keys = ["key1"]
1752+ user = "clouduser"
1753+ m_glob.return_value = [] # Return no matching keys to prevent removal
1754+ # Mock os.path.exists to True to short-circuit the key writing logic
1755+ m_path_exists.return_value = True
1756+ m_nug.return_value = ({user: {"default": user}}, {})
1757+ cloud = self.tmp_cloud(
1758+ distro='ubuntu', metadata={'public-keys': keys})
1759+ cloud.get_public_ssh_keys = mock.Mock(return_value=keys)
1760+ cc_ssh.handle("name", cfg, cloud, None, None)
1761+
1762+ self.assertEqual([mock.call(set(keys), user),
1763+ mock.call(set(keys), "root", options="")],
1764+ m_setup_keys.call_args_list)
1765diff --git a/cloudinit/config/tests/test_ubuntu_advantage.py b/cloudinit/config/tests/test_ubuntu_advantage.py
1766index f1beeff..b7cf9be 100644
1767--- a/cloudinit/config/tests/test_ubuntu_advantage.py
1768+++ b/cloudinit/config/tests/test_ubuntu_advantage.py
1769@@ -23,6 +23,7 @@ class FakeCloud(object):
1770 class TestRunCommands(CiTestCase):
1771
1772 with_logs = True
1773+ allowed_subp = [CiTestCase.SUBP_SHELL_TRUE]
1774
1775 def setUp(self):
1776 super(TestRunCommands, self).setUp()
1777@@ -234,8 +235,10 @@ class TestHandle(CiTestCase):
1778 'ubuntu-advantage': {'commands': ['echo "HI" >> %s' % outfile,
1779 'echo "MOM" >> %s' % outfile]}}
1780 mock_path = '%s.sys.stderr' % MPATH
1781- with mock.patch(mock_path, new_callable=StringIO):
1782- handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None)
1783+ with self.allow_subp([CiTestCase.SUBP_SHELL_TRUE]):
1784+ with mock.patch(mock_path, new_callable=StringIO):
1785+ handle('nomatter', cfg=cfg, cloud=None, log=self.logger,
1786+ args=None)
1787 self.assertEqual('HI\nMOM\n', util.load_file(outfile))
1788
1789
1790diff --git a/cloudinit/config/tests/test_users_groups.py b/cloudinit/config/tests/test_users_groups.py
1791new file mode 100644
1792index 0000000..ba0afae
1793--- /dev/null
1794+++ b/cloudinit/config/tests/test_users_groups.py
1795@@ -0,0 +1,144 @@
1796+# This file is part of cloud-init. See LICENSE file for license information.
1797+
1798+
1799+from cloudinit.config import cc_users_groups
1800+from cloudinit.tests.helpers import CiTestCase, mock
1801+
1802+MODPATH = "cloudinit.config.cc_users_groups"
1803+
1804+
1805+@mock.patch('cloudinit.distros.ubuntu.Distro.create_group')
1806+@mock.patch('cloudinit.distros.ubuntu.Distro.create_user')
1807+class TestHandleUsersGroups(CiTestCase):
1808+ """Test cc_users_groups handling of config."""
1809+
1810+ with_logs = True
1811+
1812+ def test_handle_no_cfg_creates_no_users_or_groups(self, m_user, m_group):
1813+ """Test handle with no config will not create users or groups."""
1814+ cfg = {} # merged cloud-config
1815+ # System config defines a default user for the distro.
1816+ sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True,
1817+ 'groups': ['lxd', 'sudo'],
1818+ 'shell': '/bin/bash'}}
1819+ metadata = {}
1820+ cloud = self.tmp_cloud(
1821+ distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
1822+ cc_users_groups.handle('modulename', cfg, cloud, None, None)
1823+ m_user.assert_not_called()
1824+ m_group.assert_not_called()
1825+
1826+ def test_handle_users_in_cfg_calls_create_users(self, m_user, m_group):
1827+ """When users in config, create users with distro.create_user."""
1828+ cfg = {'users': ['default', {'name': 'me2'}]} # merged cloud-config
1829+ # System config defines a default user for the distro.
1830+ sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True,
1831+ 'groups': ['lxd', 'sudo'],
1832+ 'shell': '/bin/bash'}}
1833+ metadata = {}
1834+ cloud = self.tmp_cloud(
1835+ distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
1836+ cc_users_groups.handle('modulename', cfg, cloud, None, None)
1837+ self.assertItemsEqual(
1838+ m_user.call_args_list,
1839+ [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
1840+ shell='/bin/bash'),
1841+ mock.call('me2', default=False)])
1842+ m_group.assert_not_called()
1843+
1844+ def test_users_with_ssh_redirect_user_passes_keys(self, m_user, m_group):
1845+ """When ssh_redirect_user is True pass default user and cloud keys."""
1846+ cfg = {
1847+ 'users': ['default', {'name': 'me2', 'ssh_redirect_user': True}]}
1848+ # System config defines a default user for the distro.
1849+ sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True,
1850+ 'groups': ['lxd', 'sudo'],
1851+ 'shell': '/bin/bash'}}
1852+ metadata = {'public-keys': ['key1']}
1853+ cloud = self.tmp_cloud(
1854+ distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
1855+ cc_users_groups.handle('modulename', cfg, cloud, None, None)
1856+ self.assertItemsEqual(
1857+ m_user.call_args_list,
1858+ [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
1859+ shell='/bin/bash'),
1860+ mock.call('me2', cloud_public_ssh_keys=['key1'], default=False,
1861+ ssh_redirect_user='ubuntu')])
1862+ m_group.assert_not_called()
1863+
1864+ def test_users_with_ssh_redirect_user_default_str(self, m_user, m_group):
1865+ """When ssh_redirect_user is 'default' pass default username."""
1866+ cfg = {
1867+ 'users': ['default', {'name': 'me2',
1868+ 'ssh_redirect_user': 'default'}]}
1869+ # System config defines a default user for the distro.
1870+ sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True,
1871+ 'groups': ['lxd', 'sudo'],
1872+ 'shell': '/bin/bash'}}
1873+ metadata = {'public-keys': ['key1']}
1874+ cloud = self.tmp_cloud(
1875+ distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
1876+ cc_users_groups.handle('modulename', cfg, cloud, None, None)
1877+ self.assertItemsEqual(
1878+ m_user.call_args_list,
1879+ [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
1880+ shell='/bin/bash'),
1881+ mock.call('me2', cloud_public_ssh_keys=['key1'], default=False,
1882+ ssh_redirect_user='ubuntu')])
1883+ m_group.assert_not_called()
1884+
1885+ def test_users_with_ssh_redirect_user_non_default(self, m_user, m_group):
1886+ """Warn when ssh_redirect_user is not 'default'."""
1887+ cfg = {
1888+ 'users': ['default', {'name': 'me2',
1889+ 'ssh_redirect_user': 'snowflake'}]}
1890+ # System config defines a default user for the distro.
1891+ sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True,
1892+ 'groups': ['lxd', 'sudo'],
1893+ 'shell': '/bin/bash'}}
1894+ metadata = {'public-keys': ['key1']}
1895+ cloud = self.tmp_cloud(
1896+ distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
1897+ with self.assertRaises(ValueError) as context_manager:
1898+ cc_users_groups.handle('modulename', cfg, cloud, None, None)
1899+ m_group.assert_not_called()
1900+ self.assertEqual(
1901+ 'Not creating user me2. Invalid value of ssh_redirect_user:'
1902+ ' snowflake. Expected values: true, default or false.',
1903+ str(context_manager.exception))
1904+
1905+ def test_users_with_ssh_redirect_user_default_false(self, m_user, m_group):
1906+ """When unspecified, ssh_redirect_user defaults to false and is not set up."""
1907+ cfg = {'users': ['default', {'name': 'me2'}]}
1908+ # System config defines a default user for the distro.
1909+ sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True,
1910+ 'groups': ['lxd', 'sudo'],
1911+ 'shell': '/bin/bash'}}
1912+ metadata = {'public-keys': ['key1']}
1913+ cloud = self.tmp_cloud(
1914+ distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
1915+ cc_users_groups.handle('modulename', cfg, cloud, None, None)
1916+ self.assertItemsEqual(
1917+ m_user.call_args_list,
1918+ [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
1919+ shell='/bin/bash'),
1920+ mock.call('me2', default=False)])
1921+ m_group.assert_not_called()
1922+
1923+ def test_users_ssh_redirect_user_and_no_default(self, m_user, m_group):
1924+ """Warn when ssh_redirect_user is True and no default user present."""
1925+ cfg = {
1926+ 'users': ['default', {'name': 'me2', 'ssh_redirect_user': True}]}
1927+ # System config defines *no* default user for the distro.
1928+ sys_cfg = {}
1929+ metadata = {} # no public-keys defined
1930+ cloud = self.tmp_cloud(
1931+ distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
1932+ cc_users_groups.handle('modulename', cfg, cloud, None, None)
1933+ m_user.assert_called_once_with('me2', default=False)
1934+ m_group.assert_not_called()
1935+ self.assertEqual(
1936+ 'WARNING: Ignoring ssh_redirect_user: True for me2. No'
1937+ ' default_user defined. Perhaps missing'
1938+ ' cloud configuration users: [default, ..].\n',
1939+ self.logs.getvalue())
1940diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
1941old mode 100755
1942new mode 100644
1943index ab0b077..ef618c2
1944--- a/cloudinit/distros/__init__.py
1945+++ b/cloudinit/distros/__init__.py
1946@@ -74,11 +74,10 @@ class Distro(object):
1947 def install_packages(self, pkglist):
1948 raise NotImplementedError()
1949
1950- @abc.abstractmethod
1951 def _write_network(self, settings):
1952- # In the future use the http://fedorahosted.org/netcf/
1953- # to write this blob out in a distro format
1954- raise NotImplementedError()
1955+ raise RuntimeError(
1956+ "Legacy function '_write_network' was called in distro '%s'.\n"
1957+ "_write_network_config needs implementation.\n" % self.name)
1958
1959 def _write_network_config(self, settings):
1960 raise NotImplementedError()
1961@@ -91,7 +90,7 @@ class Distro(object):
1962 LOG.debug("Selected renderer '%s' from priority list: %s",
1963 name, priority)
1964 renderer = render_cls(config=self.renderer_configs.get(name))
1965- renderer.render_network_config(network_config=network_config)
1966+ renderer.render_network_config(network_config)
1967 return []
1968
1969 def _find_tz_file(self, tz):
1970@@ -144,7 +143,11 @@ class Distro(object):
1971 # this applies network where 'settings' is interfaces(5) style
1972 # it is obsolete compared to apply_network_config
1973 # Write it out
1974+
1975+ # pylint: disable=assignment-from-no-return
1976+ # We have implementations in arch, freebsd and gentoo still
1977 dev_names = self._write_network(settings)
1978+ # pylint: enable=assignment-from-no-return
1979 # Now try to bring them up
1980 if bring_up:
1981 return self._bring_up_interfaces(dev_names)
1982@@ -157,7 +160,7 @@ class Distro(object):
1983 distro)
1984 header = '\n'.join([
1985 "# Converted from network_config for distro %s" % distro,
1986- "# Implmentation of _write_network_config is needed."
1987+ "# Implementation of _write_network_config is needed."
1988 ])
1989 ns = network_state.parse_net_config_data(netconfig)
1990 contents = eni.network_state_to_eni(
1991@@ -381,6 +384,9 @@ class Distro(object):
1992 """
1993 Add a user to the system using standard GNU tools
1994 """
1995+ # XXX need to make add_user idempotent somehow as we
1996+ # still want to add groups or modify ssh keys on pre-existing
1997+ # users in the image.
1998 if util.is_user(name):
1999 LOG.info("User %s already exists, skipping.", name)
2000 return
2001@@ -547,10 +553,24 @@ class Distro(object):
2002 LOG.warning("Invalid type '%s' detected for"
2003 " 'ssh_authorized_keys', expected list,"
2004 " string, dict, or set.", type(keys))
2005+ keys = []
2006 else:
2007 keys = set(keys) or []
2008- ssh_util.setup_user_keys(keys, name, options=None)
2009-
2010+ ssh_util.setup_user_keys(set(keys), name)
2011+ if 'ssh_redirect_user' in kwargs:
2012+ cloud_keys = kwargs.get('cloud_public_ssh_keys', [])
2013+ if not cloud_keys:
2014+ LOG.warning(
2015+ 'Unable to disable ssh logins for %s given'
2016+ ' ssh_redirect_user: %s. No cloud public-keys present.',
2017+ name, kwargs['ssh_redirect_user'])
2018+ else:
2019+ redirect_user = kwargs['ssh_redirect_user']
2020+ disable_option = ssh_util.DISABLE_USER_OPTS
2021+ disable_option = disable_option.replace('$USER', redirect_user)
2022+ disable_option = disable_option.replace('$DISABLE_USER', name)
2023+ ssh_util.setup_user_keys(
2024+ set(cloud_keys), name, options=disable_option)
2025 return True
2026
2027 def lock_passwd(self, name):
2028diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
2029index 33cc0bf..d517fb8 100644
2030--- a/cloudinit/distros/debian.py
2031+++ b/cloudinit/distros/debian.py
2032@@ -109,11 +109,6 @@ class Distro(distros.Distro):
2033 self.update_package_sources()
2034 self.package_command('install', pkgs=pkglist)
2035
2036- def _write_network(self, settings):
2037- # this is a legacy method, it will always write eni
2038- util.write_file(self.network_conf_fn["eni"], settings)
2039- return ['all']
2040-
2041 def _write_network_config(self, netconfig):
2042 _maybe_remove_legacy_eth0()
2043 return self._supported_write_network_config(netconfig)
2044diff --git a/cloudinit/distros/net_util.py b/cloudinit/distros/net_util.py
2045index 1ce1aa7..edfcd99 100644
2046--- a/cloudinit/distros/net_util.py
2047+++ b/cloudinit/distros/net_util.py
2048@@ -67,6 +67,10 @@
2049 # }
2050 # }
2051
2052+from cloudinit.net.network_state import (
2053+ net_prefix_to_ipv4_mask, mask_and_ipv4_to_bcast_addr)
2054+
2055+
2056 def translate_network(settings):
2057 # Get the standard cmd, args from the ubuntu format
2058 entries = []
2059@@ -134,6 +138,21 @@ def translate_network(settings):
2060 val = info[k].strip().lower()
2061 if val:
2062 iface_info[k] = val
2063+ # handle static ip configurations using
2064+ # ipaddress/prefix-length format
2065+ if 'address' in iface_info:
2066+ if 'netmask' not in iface_info:
2067+ # check if the address has a network prefix
2068+ addr, _, prefix = iface_info['address'].partition('/')
2069+ if prefix:
2070+ iface_info['netmask'] = (
2071+ net_prefix_to_ipv4_mask(prefix))
2072+ iface_info['address'] = addr
2073+ # if we set the netmask, we also can set the broadcast
2074+ iface_info['broadcast'] = (
2075+ mask_and_ipv4_to_bcast_addr(
2076+ iface_info['netmask'], addr))
2077+
2078 # Name server info provided??
2079 if 'dns-nameservers' in info:
2080 iface_info['dns-nameservers'] = info['dns-nameservers'].split()
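Note: a worked example of the new address/prefix branch, using the two helpers
the diff imports from cloudinit.net.network_state:

    addr, _, prefix = '192.168.1.10/24'.partition('/')
    # net_prefix_to_ipv4_mask('24')          -> '255.255.255.0'
    # mask_and_ipv4_to_bcast_addr(
    #     '255.255.255.0', '192.168.1.10')   -> '192.168.1.255'
    # iface_info then carries address, netmask and broadcast separately.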
2081diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py
2082index 9f90e95..1bfe047 100644
2083--- a/cloudinit/distros/opensuse.py
2084+++ b/cloudinit/distros/opensuse.py
2085@@ -16,7 +16,6 @@ from cloudinit import helpers
2086 from cloudinit import log as logging
2087 from cloudinit import util
2088
2089-from cloudinit.distros import net_util
2090 from cloudinit.distros import rhel_util as rhutil
2091 from cloudinit.settings import PER_INSTANCE
2092
2093@@ -28,13 +27,23 @@ class Distro(distros.Distro):
2094 hostname_conf_fn = '/etc/HOSTNAME'
2095 init_cmd = ['service']
2096 locale_conf_fn = '/etc/sysconfig/language'
2097- network_conf_fn = '/etc/sysconfig/network'
2098+ network_conf_fn = '/etc/sysconfig/network/config'
2099 network_script_tpl = '/etc/sysconfig/network/ifcfg-%s'
2100 resolve_conf_fn = '/etc/resolv.conf'
2101 route_conf_tpl = '/etc/sysconfig/network/ifroute-%s'
2102 systemd_hostname_conf_fn = '/etc/hostname'
2103 systemd_locale_conf_fn = '/etc/locale.conf'
2104 tz_local_fn = '/etc/localtime'
2105+ renderer_configs = {
2106+ 'sysconfig': {
2107+ 'control': 'etc/sysconfig/network/config',
2108+ 'iface_templates': '%(base)s/network/ifcfg-%(name)s',
2109+ 'route_templates': {
2110+ 'ipv4': '%(base)s/network/ifroute-%(name)s',
2111+ 'ipv6': '%(base)s/network/ifroute-%(name)s',
2112+ }
2113+ }
2114+ }
2115
2116 def __init__(self, name, cfg, paths):
2117 distros.Distro.__init__(self, name, cfg, paths)
2118@@ -162,51 +171,8 @@ class Distro(distros.Distro):
2119 conf.set_hostname(hostname)
2120 util.write_file(out_fn, str(conf), 0o644)
2121
2122- def _write_network(self, settings):
2123- # Convert debian settings to ifcfg format
2124- entries = net_util.translate_network(settings)
2125- LOG.debug("Translated ubuntu style network settings %s into %s",
2126- settings, entries)
2127- # Make the intermediate format as the suse format...
2128- nameservers = []
2129- searchservers = []
2130- dev_names = entries.keys()
2131- for (dev, info) in entries.items():
2132- net_fn = self.network_script_tpl % (dev)
2133- route_fn = self.route_conf_tpl % (dev)
2134- mode = None
2135- if info.get('auto', None):
2136- mode = 'auto'
2137- else:
2138- mode = 'manual'
2139- bootproto = info.get('bootproto', None)
2140- gateway = info.get('gateway', None)
2141- net_cfg = {
2142- 'BOOTPROTO': bootproto,
2143- 'BROADCAST': info.get('broadcast'),
2144- 'GATEWAY': gateway,
2145- 'IPADDR': info.get('address'),
2146- 'LLADDR': info.get('hwaddress'),
2147- 'NETMASK': info.get('netmask'),
2148- 'STARTMODE': mode,
2149- 'USERCONTROL': 'no'
2150- }
2151- if dev != 'lo':
2152- net_cfg['ETHTOOL_OPTIONS'] = ''
2153- else:
2154- net_cfg['FIREWALL'] = 'no'
2155- rhutil.update_sysconfig_file(net_fn, net_cfg, True)
2156- if gateway and bootproto == 'static':
2157- default_route = 'default %s' % gateway
2158- util.write_file(route_fn, default_route, 0o644)
2159- if 'dns-nameservers' in info:
2160- nameservers.extend(info['dns-nameservers'])
2161- if 'dns-search' in info:
2162- searchservers.extend(info['dns-search'])
2163- if nameservers or searchservers:
2164- rhutil.update_resolve_conf_file(self.resolve_conf_fn,
2165- nameservers, searchservers)
2166- return dev_names
2167+ def _write_network_config(self, netconfig):
2168+ return self._supported_write_network_config(netconfig)
2169
2170 @property
2171 def preferred_ntp_clients(self):
2172diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
2173index 1fecb61..f55d96f 100644
2174--- a/cloudinit/distros/rhel.py
2175+++ b/cloudinit/distros/rhel.py
2176@@ -13,7 +13,6 @@ from cloudinit import helpers
2177 from cloudinit import log as logging
2178 from cloudinit import util
2179
2180-from cloudinit.distros import net_util
2181 from cloudinit.distros import rhel_util
2182 from cloudinit.settings import PER_INSTANCE
2183
2184@@ -39,6 +38,16 @@ class Distro(distros.Distro):
2185 resolve_conf_fn = "/etc/resolv.conf"
2186 tz_local_fn = "/etc/localtime"
2187 usr_lib_exec = "/usr/libexec"
2188+ renderer_configs = {
2189+ 'sysconfig': {
2190+ 'control': 'etc/sysconfig/network',
2191+ 'iface_templates': '%(base)s/network-scripts/ifcfg-%(name)s',
2192+ 'route_templates': {
2193+ 'ipv4': '%(base)s/network-scripts/route-%(name)s',
2194+ 'ipv6': '%(base)s/network-scripts/route6-%(name)s'
2195+ }
2196+ }
2197+ }
2198
2199 def __init__(self, name, cfg, paths):
2200 distros.Distro.__init__(self, name, cfg, paths)
2201@@ -55,54 +64,6 @@ class Distro(distros.Distro):
2202 def _write_network_config(self, netconfig):
2203 return self._supported_write_network_config(netconfig)
2204
2205- def _write_network(self, settings):
2206- # TODO(harlowja) fix this... since this is the ubuntu format
2207- entries = net_util.translate_network(settings)
2208- LOG.debug("Translated ubuntu style network settings %s into %s",
2209- settings, entries)
2210- # Make the intermediate format as the rhel format...
2211- nameservers = []
2212- searchservers = []
2213- dev_names = entries.keys()
2214- use_ipv6 = False
2215- for (dev, info) in entries.items():
2216- net_fn = self.network_script_tpl % (dev)
2217- net_cfg = {
2218- 'DEVICE': dev,
2219- 'NETMASK': info.get('netmask'),
2220- 'IPADDR': info.get('address'),
2221- 'BOOTPROTO': info.get('bootproto'),
2222- 'GATEWAY': info.get('gateway'),
2223- 'BROADCAST': info.get('broadcast'),
2224- 'MACADDR': info.get('hwaddress'),
2225- 'ONBOOT': _make_sysconfig_bool(info.get('auto')),
2226- }
2227- if info.get('inet6'):
2228- use_ipv6 = True
2229- net_cfg.update({
2230- 'IPV6INIT': _make_sysconfig_bool(True),
2231- 'IPV6ADDR': info.get('ipv6').get('address'),
2232- 'IPV6_DEFAULTGW': info.get('ipv6').get('gateway'),
2233- })
2234- rhel_util.update_sysconfig_file(net_fn, net_cfg)
2235- if 'dns-nameservers' in info:
2236- nameservers.extend(info['dns-nameservers'])
2237- if 'dns-search' in info:
2238- searchservers.extend(info['dns-search'])
2239- if nameservers or searchservers:
2240- rhel_util.update_resolve_conf_file(self.resolve_conf_fn,
2241- nameservers, searchservers)
2242- if dev_names:
2243- net_cfg = {
2244- 'NETWORKING': _make_sysconfig_bool(True),
2245- }
2246- # If IPv6 interface present, enable ipv6 networking
2247- if use_ipv6:
2248- net_cfg['NETWORKING_IPV6'] = _make_sysconfig_bool(True)
2249- net_cfg['IPV6_AUTOCONF'] = _make_sysconfig_bool(False)
2250- rhel_util.update_sysconfig_file(self.network_conf_fn, net_cfg)
2251- return dev_names
2252-
2253 def apply_locale(self, locale, out_fn=None):
2254 if self.uses_systemd():
2255 if not out_fn:
2256diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
2257index c3576c0..0db75af 100644
2258--- a/cloudinit/handlers/__init__.py
2259+++ b/cloudinit/handlers/__init__.py
2260@@ -41,7 +41,7 @@ PART_HANDLER_FN_TMPL = 'part-handler-%03d'
2261 # For parts without filenames
2262 PART_FN_TPL = 'part-%03d'
2263
2264-# Different file beginnings to there content type
2265+# Different file beginnings to their content type
2266 INCLUSION_TYPES_MAP = {
2267 '#include': 'text/x-include-url',
2268 '#include-once': 'text/x-include-once-url',
2269@@ -52,6 +52,7 @@ INCLUSION_TYPES_MAP = {
2270 '#cloud-boothook': 'text/cloud-boothook',
2271 '#cloud-config-archive': 'text/cloud-config-archive',
2272 '#cloud-config-jsonp': 'text/cloud-config-jsonp',
2273+ '## template: jinja': 'text/jinja2',
2274 }
2275
2276 # Sorted longest first
2277@@ -69,9 +70,13 @@ class Handler(object):
2278 def __repr__(self):
2279 return "%s: [%s]" % (type_utils.obj_name(self), self.list_types())
2280
2281- @abc.abstractmethod
2282 def list_types(self):
2283- raise NotImplementedError()
2284+ # Each subclass must define the supported content prefixes it handles.
2285+ if not hasattr(self, 'prefixes'):
2286+ raise NotImplementedError('Missing prefixes subclass attribute')
2287+ else:
2288+ return [INCLUSION_TYPES_MAP[prefix]
2289+ for prefix in getattr(self, 'prefixes')]
2290
2291 @abc.abstractmethod
2292 def handle_part(self, *args, **kwargs):
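Note: with list_types() now driven by a class attribute, a handler only
declares its prefixes; each prefix must be a key of INCLUSION_TYPES_MAP. A
minimal sketch of the new pattern:

    from cloudinit import handlers
    from cloudinit.settings import PER_ALWAYS

    class ExamplePartHandler(handlers.Handler):
        prefixes = ['#!']  # resolves to 'text/x-shellscript'

        def __init__(self, paths, **_kwargs):
            handlers.Handler.__init__(self, PER_ALWAYS)

        def handle_part(self, data, ctype, filename, payload, frequency):
            pass  # a real handler acts on the payload here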
2293diff --git a/cloudinit/handlers/boot_hook.py b/cloudinit/handlers/boot_hook.py
2294index 057b4db..dca50a4 100644
2295--- a/cloudinit/handlers/boot_hook.py
2296+++ b/cloudinit/handlers/boot_hook.py
2297@@ -17,10 +17,13 @@ from cloudinit import util
2298 from cloudinit.settings import (PER_ALWAYS)
2299
2300 LOG = logging.getLogger(__name__)
2301-BOOTHOOK_PREFIX = "#cloud-boothook"
2302
2303
2304 class BootHookPartHandler(handlers.Handler):
2305+
2306+ # The content prefixes this handler understands.
2307+ prefixes = ['#cloud-boothook']
2308+
2309 def __init__(self, paths, datasource, **_kwargs):
2310 handlers.Handler.__init__(self, PER_ALWAYS)
2311 self.boothook_dir = paths.get_ipath("boothooks")
2312@@ -28,16 +31,11 @@ class BootHookPartHandler(handlers.Handler):
2313 if datasource:
2314 self.instance_id = datasource.get_instance_id()
2315
2316- def list_types(self):
2317- return [
2318- handlers.type_from_starts_with(BOOTHOOK_PREFIX),
2319- ]
2320-
2321 def _write_part(self, payload, filename):
2322 filename = util.clean_filename(filename)
2323 filepath = os.path.join(self.boothook_dir, filename)
2324 contents = util.strip_prefix_suffix(util.dos2unix(payload),
2325- prefix=BOOTHOOK_PREFIX)
2326+ prefix=self.prefixes[0])
2327 util.write_file(filepath, contents.lstrip(), 0o700)
2328 return filepath
2329
2330diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py
2331index 178a5b9..99bf0e6 100644
2332--- a/cloudinit/handlers/cloud_config.py
2333+++ b/cloudinit/handlers/cloud_config.py
2334@@ -42,14 +42,12 @@ DEF_MERGERS = mergers.string_extract_mergers('dict(replace)+list()+str()')
2335 CLOUD_PREFIX = "#cloud-config"
2336 JSONP_PREFIX = "#cloud-config-jsonp"
2337
2338-# The file header -> content types this module will handle.
2339-CC_TYPES = {
2340- JSONP_PREFIX: handlers.type_from_starts_with(JSONP_PREFIX),
2341- CLOUD_PREFIX: handlers.type_from_starts_with(CLOUD_PREFIX),
2342-}
2343-
2344
2345 class CloudConfigPartHandler(handlers.Handler):
2346+
2347+ # The content prefixes this handler understands.
2348+ prefixes = [CLOUD_PREFIX, JSONP_PREFIX]
2349+
2350 def __init__(self, paths, **_kwargs):
2351 handlers.Handler.__init__(self, PER_ALWAYS, version=3)
2352 self.cloud_buf = None
2353@@ -58,9 +56,6 @@ class CloudConfigPartHandler(handlers.Handler):
2354 self.cloud_fn = paths.get_ipath(_kwargs["cloud_config_path"])
2355 self.file_names = []
2356
2357- def list_types(self):
2358- return list(CC_TYPES.values())
2359-
2360 def _write_cloud_config(self):
2361 if not self.cloud_fn:
2362 return
2363@@ -138,7 +133,7 @@ class CloudConfigPartHandler(handlers.Handler):
2364 # First time through, merge with an empty dict...
2365 if self.cloud_buf is None or not self.file_names:
2366 self.cloud_buf = {}
2367- if ctype == CC_TYPES[JSONP_PREFIX]:
2368+ if ctype == handlers.INCLUSION_TYPES_MAP[JSONP_PREFIX]:
2369 self._merge_patch(payload)
2370 else:
2371 self._merge_part(payload, headers)
2372diff --git a/cloudinit/handlers/jinja_template.py b/cloudinit/handlers/jinja_template.py
2373new file mode 100644
2374index 0000000..3fa4097
2375--- /dev/null
2376+++ b/cloudinit/handlers/jinja_template.py
2377@@ -0,0 +1,137 @@
2378+# This file is part of cloud-init. See LICENSE file for license information.
2379+
2380+import os
2381+import re
2382+
2383+try:
2384+ from jinja2.exceptions import UndefinedError as JUndefinedError
2385+except ImportError:
2386+ # No jinja2 dependency
2387+ JUndefinedError = Exception
2388+
2389+from cloudinit import handlers
2390+from cloudinit import log as logging
2391+from cloudinit.sources import INSTANCE_JSON_FILE
2392+from cloudinit.templater import render_string, MISSING_JINJA_PREFIX
2393+from cloudinit.util import b64d, load_file, load_json, json_dumps
2394+
2395+from cloudinit.settings import PER_ALWAYS
2396+
2397+LOG = logging.getLogger(__name__)
2398+
2399+
2400+class JinjaTemplatePartHandler(handlers.Handler):
2401+
2402+ prefixes = ['## template: jinja']
2403+
2404+ def __init__(self, paths, **_kwargs):
2405+ handlers.Handler.__init__(self, PER_ALWAYS, version=3)
2406+ self.paths = paths
2407+ self.sub_handlers = {}
2408+ for handler in _kwargs.get('sub_handlers', []):
2409+ for ctype in handler.list_types():
2410+ self.sub_handlers[ctype] = handler
2411+
2412+ def handle_part(self, data, ctype, filename, payload, frequency, headers):
2413+ if ctype in handlers.CONTENT_SIGNALS:
2414+ return
2415+ jinja_json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE)
2416+ rendered_payload = render_jinja_payload_from_file(
2417+ payload, filename, jinja_json_file)
2418+ if not rendered_payload:
2419+ return
2420+ subtype = handlers.type_from_starts_with(rendered_payload)
2421+ sub_handler = self.sub_handlers.get(subtype)
2422+ if not sub_handler:
2423+ LOG.warning(
2424+ 'Ignoring jinja template for %s. Could not find supported'
2425+ ' sub-handler for type %s', filename, subtype)
2426+ return
2427+ if sub_handler.handler_version == 3:
2428+ sub_handler.handle_part(
2429+ data, ctype, filename, rendered_payload, frequency, headers)
2430+ elif sub_handler.handler_version == 2:
2431+ sub_handler.handle_part(
2432+ data, ctype, filename, rendered_payload, frequency)
2433+
2434+
2435+def render_jinja_payload_from_file(
2436+ payload, payload_fn, instance_data_file, debug=False):
2437+ """Render a jinja template payload sourcing variables from instance_data_file.
2438+
2439+ @param payload: String of jinja template content. Should begin with
2440+ ## template: jinja\n.
2441+ @param payload_fn: String representing the filename from which the payload
2442+ was read; used in error reporting. Generally in part-handling this is
2443+ 'part-##'.
2444+ @param instance_data_file: A path to a json file containing variables that
2445+ will be used as jinja template variables.
2446+
2447+ @return: A string of jinja-rendered content with the jinja header removed.
2448+ Returns None on error.
2449+ """
2450+ instance_data = {}
2451+ rendered_payload = None
2452+ if not os.path.exists(instance_data_file):
2453+ raise RuntimeError(
2454+ 'Cannot render jinja template vars. Instance data not yet'
2455+ ' present at %s' % instance_data_file)
2456+ instance_data = load_json(load_file(instance_data_file))
2457+ rendered_payload = render_jinja_payload(
2458+ payload, payload_fn, instance_data, debug)
2459+ if not rendered_payload:
2460+ return None
2461+ return rendered_payload
2462+
2463+
2464+def render_jinja_payload(payload, payload_fn, instance_data, debug=False):
2465+ instance_jinja_vars = convert_jinja_instance_data(
2466+ instance_data,
2467+ decode_paths=instance_data.get('base64-encoded-keys', []))
2468+ if debug:
2469+ LOG.debug('Converted jinja variables\n%s',
2470+ json_dumps(instance_jinja_vars))
2471+ try:
2472+ rendered_payload = render_string(payload, instance_jinja_vars)
2473+ except (TypeError, JUndefinedError) as e:
2474+ LOG.warning(
2475+ 'Ignoring jinja template for %s: %s', payload_fn, str(e))
2476+ return None
2477+ warnings = [
2478+ "'%s'" % var.replace(MISSING_JINJA_PREFIX, '')
2479+ for var in re.findall(
2480+ r'%s[^\s]+' % MISSING_JINJA_PREFIX, rendered_payload)]
2481+ if warnings:
2482+ LOG.warning(
2483+ "Could not render jinja template variables in file '%s': %s",
2484+ payload_fn, ', '.join(warnings))
2485+ return rendered_payload
2486+
2487+
2488+def convert_jinja_instance_data(data, prefix='', sep='/', decode_paths=()):
2489+ """Process instance-data.json dict for use in jinja templates.
2490+
2491+ Replace hyphens with underscores for jinja templates and decode any
2492+ base64_encoded_keys.
2493+ """
2494+ result = {}
2495+ decode_paths = [path.replace('-', '_') for path in decode_paths]
2496+ for key, value in sorted(data.items()):
2497+ if '-' in key:
2498+ # Standardize keys for use in #cloud-config/shell templates
2499+ key = key.replace('-', '_')
2500+ key_path = '{0}{1}{2}'.format(prefix, sep, key) if prefix else key
2501+ if key_path in decode_paths:
2502+ value = b64d(value)
2503+ if isinstance(value, dict):
2504+ result[key] = convert_jinja_instance_data(
2505+ value, key_path, sep=sep, decode_paths=decode_paths)
2506+ if re.match(r'v\d+', key):
2507+ # Copy values to top-level aliases
2508+ for subkey, subvalue in result[key].items():
2509+ result[subkey] = subvalue
2510+ else:
2511+ result[key] = value
2512+ return result
2513+
2514+# vi: ts=4 expandtab
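Note: a worked example of convert_jinja_instance_data on a toy dict, showing
the hyphen-to-underscore rewrite and the v# alias promotion:

    data = {'v1': {'local-hostname': 'me'}, 'top': 'gun'}
    # convert_jinja_instance_data(data) ->
    # {'top': 'gun',
    #  'v1': {'local_hostname': 'me'},
    #  'local_hostname': 'me'}   # promoted top-level alias from 'v1'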
2515diff --git a/cloudinit/handlers/shell_script.py b/cloudinit/handlers/shell_script.py
2516index e4945a2..214714b 100644
2517--- a/cloudinit/handlers/shell_script.py
2518+++ b/cloudinit/handlers/shell_script.py
2519@@ -17,21 +17,18 @@ from cloudinit import util
2520 from cloudinit.settings import (PER_ALWAYS)
2521
2522 LOG = logging.getLogger(__name__)
2523-SHELL_PREFIX = "#!"
2524
2525
2526 class ShellScriptPartHandler(handlers.Handler):
2527+
2528+ prefixes = ['#!']
2529+
2530 def __init__(self, paths, **_kwargs):
2531 handlers.Handler.__init__(self, PER_ALWAYS)
2532 self.script_dir = paths.get_ipath_cur('scripts')
2533 if 'script_path' in _kwargs:
2534 self.script_dir = paths.get_ipath_cur(_kwargs['script_path'])
2535
2536- def list_types(self):
2537- return [
2538- handlers.type_from_starts_with(SHELL_PREFIX),
2539- ]
2540-
2541 def handle_part(self, data, ctype, filename, payload, frequency):
2542 if ctype in handlers.CONTENT_SIGNALS:
2543 # TODO(harlowja): maybe delete existing things here
2544diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py
2545index dc33876..83fb072 100644
2546--- a/cloudinit/handlers/upstart_job.py
2547+++ b/cloudinit/handlers/upstart_job.py
2548@@ -18,19 +18,16 @@ from cloudinit import util
2549 from cloudinit.settings import (PER_INSTANCE)
2550
2551 LOG = logging.getLogger(__name__)
2552-UPSTART_PREFIX = "#upstart-job"
2553
2554
2555 class UpstartJobPartHandler(handlers.Handler):
2556+
2557+ prefixes = ['#upstart-job']
2558+
2559 def __init__(self, paths, **_kwargs):
2560 handlers.Handler.__init__(self, PER_INSTANCE)
2561 self.upstart_dir = paths.upstart_conf_d
2562
2563- def list_types(self):
2564- return [
2565- handlers.type_from_starts_with(UPSTART_PREFIX),
2566- ]
2567-
2568 def handle_part(self, data, ctype, filename, payload, frequency):
2569 if ctype in handlers.CONTENT_SIGNALS:
2570 return
2571diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
2572index 1979cd9..dcd2645 100644
2573--- a/cloudinit/helpers.py
2574+++ b/cloudinit/helpers.py
2575@@ -239,6 +239,10 @@ class ConfigMerger(object):
2576 if cc_fn and os.path.isfile(cc_fn):
2577 try:
2578 i_cfgs.append(util.read_conf(cc_fn))
2579+ except PermissionError:
2580+ LOG.debug(
2581+ 'Skipped loading cloud-config from %s due to'
2582+ ' non-root.', cc_fn)
2583 except Exception:
2584 util.logexc(LOG, 'Failed loading of cloud-config from %s',
2585 cc_fn)
2586@@ -449,4 +453,8 @@ class DefaultingConfigParser(RawConfigParser):
2587 contents = '\n'.join([header, contents, ''])
2588 return contents
2589
2590+
2591+def identity(object):
2592+ return object
2593+
2594 # vi: ts=4 expandtab
2595diff --git a/cloudinit/log.py b/cloudinit/log.py
2596index 1d75c9f..5ae312b 100644
2597--- a/cloudinit/log.py
2598+++ b/cloudinit/log.py
2599@@ -38,10 +38,18 @@ DEF_CON_FORMAT = '%(asctime)s - %(filename)s[%(levelname)s]: %(message)s'
2600 logging.Formatter.converter = time.gmtime
2601
2602
2603-def setupBasicLogging(level=DEBUG):
2604+def setupBasicLogging(level=DEBUG, formatter=None):
2605+ if not formatter:
2606+ formatter = logging.Formatter(DEF_CON_FORMAT)
2607 root = logging.getLogger()
2608+ for handler in root.handlers:
2609+ if hasattr(handler, 'stream') and hasattr(handler.stream, 'name'):
2610+ if handler.stream.name == '<stderr>':
2611+ handler.setLevel(level)
2612+ return
2613+ # Didn't have an existing stderr handler; create a new handler
2614 console = logging.StreamHandler(sys.stderr)
2615- console.setFormatter(logging.Formatter(DEF_CON_FORMAT))
2616+ console.setFormatter(formatter)
2617 console.setLevel(level)
2618 root.addHandler(console)
2619 root.setLevel(level)
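Note: setupBasicLogging is now idempotent for stderr; a second call adjusts
the level of the existing handler instead of stacking a duplicate. An
illustrative check, assuming cloudinit.log is importable:

    import logging
    from cloudinit import log as ci_log

    ci_log.setupBasicLogging(ci_log.DEBUG)
    ci_log.setupBasicLogging(logging.WARNING)  # reuses, does not add
    stderr_handlers = [
        h for h in logging.getLogger().handlers
        if getattr(getattr(h, 'stream', None), 'name', '') == '<stderr>']
    assert len(stderr_handlers) == 1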
2620diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
2621index 3ffde52..f83d368 100644
2622--- a/cloudinit/net/__init__.py
2623+++ b/cloudinit/net/__init__.py
2624@@ -569,6 +569,20 @@ def get_interface_mac(ifname):
2625 return read_sys_net_safe(ifname, path)
2626
2627
2628+def get_ib_interface_hwaddr(ifname, ethernet_format):
2629+ """Returns the string value of an Infiniband interface's hardware
2630+ address. If ethernet_format is True, an Ethernet MAC-style 6-byte
2631+ representation of the address will be returned.
2632+ """
2633+ # Type 32 is Infiniband.
2634+ if read_sys_net_safe(ifname, 'type') == '32':
2635+ mac = get_interface_mac(ifname)
2636+ if mac and ethernet_format:
2637+ # Use bytes 13-15 and 18-20 of the hardware address.
2638+ mac = mac[36:-14] + mac[51:]
2639+ return mac
2640+
2641+
2642 def get_interfaces_by_mac():
2643 """Build a dictionary of tuples {mac: name}.
2644
2645@@ -580,6 +594,15 @@ def get_interfaces_by_mac():
2646 "duplicate mac found! both '%s' and '%s' have mac '%s'" %
2647 (name, ret[mac], mac))
2648 ret[mac] = name
2649+ # Try to get an Infiniband hardware address (in 6-byte Ethernet format)
2650+ # for the interface.
2651+ ib_mac = get_ib_interface_hwaddr(name, True)
2652+ if ib_mac:
2653+ if ib_mac in ret:
2654+ raise RuntimeError(
2655+ "duplicate mac found! both '%s' and '%s' have mac '%s'" %
2656+ (name, ret[ib_mac], ib_mac))
2657+ ret[ib_mac] = name
2658 return ret
2659
2660
2661@@ -607,6 +630,21 @@ def get_interfaces():
2662 return ret
2663
2664
2665+def get_ib_hwaddrs_by_interface():
2666+ """Build a dictionary mapping Infiniband interface names to their hardware
2667+ address."""
2668+ ret = {}
2669+ for name, _, _, _ in get_interfaces():
2670+ ib_mac = get_ib_interface_hwaddr(name, False)
2671+ if ib_mac:
2672+ if ib_mac in ret:
2673+ raise RuntimeError(
2674+ "duplicate mac found! both '%s' and '%s' have mac '%s'" %
2675+ (name, ret[ib_mac], ib_mac))
2676+ ret[name] = ib_mac
2677+ return ret
2678+
2679+
2680 class EphemeralIPv4Network(object):
2681 """Context manager which sets up temporary static network configuration.
2682
2683@@ -698,6 +736,13 @@ class EphemeralIPv4Network(object):
2684 self.interface, out.strip())
2685 return
2686 util.subp(
2687+ ['ip', '-4', 'route', 'add', self.router, 'dev', self.interface,
2688+ 'src', self.ip], capture=True)
2689+ self.cleanup_cmds.insert(
2690+ 0,
2691+ ['ip', '-4', 'route', 'del', self.router, 'dev', self.interface,
2692+ 'src', self.ip])
2693+ util.subp(
2694 ['ip', '-4', 'route', 'add', 'default', 'via', self.router,
2695 'dev', self.interface], capture=True)
2696 self.cleanup_cmds.insert(
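Note: a worked example of the slicing in get_ib_interface_hwaddr above. A
20-octet Infiniband hardware address is 59 colon-separated characters; the
slice keeps bytes 13-15 and 18-20 to form a 6-byte Ethernet-style MAC (the
address below is made up):

    mac = 'a0:00:02:20:fe:80:00:00:00:00:00:00:00:11:22:33:44:55:66:77'
    eth = mac[36:-14] + mac[51:]
    assert eth == '00:11:22:55:66:77'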
2697diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
2698index bd20a36..c6f631a 100644
2699--- a/cloudinit/net/eni.py
2700+++ b/cloudinit/net/eni.py
2701@@ -247,8 +247,15 @@ def _parse_deb_config_data(ifaces, contents, src_dir, src_path):
2702 ifaces[currif]['bridge']['ports'] = []
2703 for iface in split[1:]:
2704 ifaces[currif]['bridge']['ports'].append(iface)
2705- elif option == "bridge_hw" and split[1].lower() == "mac":
2706- ifaces[currif]['bridge']['mac'] = split[2]
2707+ elif option == "bridge_hw":
2708+ # The docs are confusing; some may write the literal 'MAC':
2709+ # bridge_hw MAC <address>
2710+ # but the correct form is:
2711+ # bridge_hw <address>
2712+ if split[1].lower() == "mac":
2713+ ifaces[currif]['bridge']['mac'] = split[2]
2714+ else:
2715+ ifaces[currif]['bridge']['mac'] = split[1]
2716 elif option == "bridge_pathcost":
2717 if 'pathcost' not in ifaces[currif]['bridge']:
2718 ifaces[currif]['bridge']['pathcost'] = {}
2719@@ -473,7 +480,7 @@ class Renderer(renderer.Renderer):
2720
2721 return '\n\n'.join(['\n'.join(s) for s in sections]) + "\n"
2722
2723- def render_network_state(self, network_state, target=None):
2724+ def render_network_state(self, network_state, templates=None, target=None):
2725 fpeni = util.target_path(target, self.eni_path)
2726 util.ensure_dir(os.path.dirname(fpeni))
2727 header = self.eni_header if self.eni_header else ""
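
The bridge_hw branch above accepts both spellings; a standalone sketch of the
same logic (parse_bridge_hw is a hypothetical helper, not part of the patch):

    def parse_bridge_hw(line):
        split = line.split()
        # 'bridge_hw MAC <address>' (seen in the wild) or
        # 'bridge_hw <address>' (the documented form)
        return split[2] if split[1].lower() == 'mac' else split[1]

    assert parse_bridge_hw('bridge_hw MAC 52:54:00:12:34:56') == '52:54:00:12:34:56'
    assert parse_bridge_hw('bridge_hw 52:54:00:12:34:56') == '52:54:00:12:34:56'
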
2728diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py
2729index 4014363..bc1087f 100644
2730--- a/cloudinit/net/netplan.py
2731+++ b/cloudinit/net/netplan.py
2732@@ -189,7 +189,7 @@ class Renderer(renderer.Renderer):
2733 self._postcmds = config.get('postcmds', False)
2734 self.clean_default = config.get('clean_default', True)
2735
2736- def render_network_state(self, network_state, target):
2737+ def render_network_state(self, network_state, templates=None, target=None):
2738 # check network state for version
2739 # if v2, then extract network_state.config
2740 # else render_v2_from_state
2741@@ -291,6 +291,8 @@ class Renderer(renderer.Renderer):
2742
2743 if len(bond_config) > 0:
2744 bond.update({'parameters': bond_config})
2745+ if ifcfg.get('mac_address'):
2746+ bond['macaddress'] = ifcfg.get('mac_address').lower()
2747 slave_interfaces = ifcfg.get('bond-slaves')
2748 if slave_interfaces == 'none':
2749 _extract_bond_slaves_by_name(interfaces, bond, ifname)
2750@@ -327,6 +329,8 @@ class Renderer(renderer.Renderer):
2751
2752 if len(br_config) > 0:
2753 bridge.update({'parameters': br_config})
2754+ if ifcfg.get('mac_address'):
2755+ bridge['macaddress'] = ifcfg.get('mac_address').lower()
2756 _extract_addresses(ifcfg, bridge, ifname)
2757 bridges.update({ifname: bridge})
2758
2759diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
2760index 72c803e..f76e508 100644
2761--- a/cloudinit/net/network_state.py
2762+++ b/cloudinit/net/network_state.py
2763@@ -483,6 +483,10 @@ class NetworkStateInterpreter(object):
2764
2765 interfaces.update({iface['name']: iface})
2766
2767+ @ensure_command_keys(['name'])
2768+ def handle_infiniband(self, command):
2769+ self.handle_physical(command)
2770+
2771 @ensure_command_keys(['address'])
2772 def handle_nameserver(self, command):
2773 dns = self._network_state.get('dns')
2774diff --git a/cloudinit/net/renderer.py b/cloudinit/net/renderer.py
2775index 57652e2..5f32e90 100644
2776--- a/cloudinit/net/renderer.py
2777+++ b/cloudinit/net/renderer.py
2778@@ -45,11 +45,14 @@ class Renderer(object):
2779 return content.getvalue()
2780
2781 @abc.abstractmethod
2782- def render_network_state(self, network_state, target=None):
2783+ def render_network_state(self, network_state, templates=None,
2784+ target=None):
2785 """Render network state."""
2786
2787- def render_network_config(self, network_config, target=None):
2788+ def render_network_config(self, network_config, templates=None,
2789+ target=None):
2790 return self.render_network_state(
2791- network_state=parse_net_config_data(network_config), target=target)
2792+ network_state=parse_net_config_data(network_config),
2793+ templates=templates, target=target)
2794
2795 # vi: ts=4 expandtab
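
With the widened signature above, callers can thread distro template paths
through render_network_config(); a hedged sketch using the eni backend, which
accepts and ignores templates (only the sysconfig backend consumes it):

    from cloudinit.net import eni

    network_config = {
        'version': 1,
        'config': [{'type': 'physical', 'name': 'eth0',
                    'subnets': [{'type': 'dhcp'}]}],
    }
    renderer = eni.Renderer()
    renderer.render_network_config(network_config, templates=None,
                                   target='/tmp/root')
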
2796diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
2797index 3d71923..9c16d3a 100644
2798--- a/cloudinit/net/sysconfig.py
2799+++ b/cloudinit/net/sysconfig.py
2800@@ -91,19 +91,20 @@ class ConfigMap(object):
2801 class Route(ConfigMap):
2802 """Represents a route configuration."""
2803
2804- route_fn_tpl_ipv4 = '%(base)s/network-scripts/route-%(name)s'
2805- route_fn_tpl_ipv6 = '%(base)s/network-scripts/route6-%(name)s'
2806-
2807- def __init__(self, route_name, base_sysconf_dir):
2808+ def __init__(self, route_name, base_sysconf_dir,
2809+ ipv4_tpl, ipv6_tpl):
2810 super(Route, self).__init__()
2811 self.last_idx = 1
2812 self.has_set_default_ipv4 = False
2813 self.has_set_default_ipv6 = False
2814 self._route_name = route_name
2815 self._base_sysconf_dir = base_sysconf_dir
2816+ self.route_fn_tpl_ipv4 = ipv4_tpl
2817+ self.route_fn_tpl_ipv6 = ipv6_tpl
2818
2819 def copy(self):
2820- r = Route(self._route_name, self._base_sysconf_dir)
2821+ r = Route(self._route_name, self._base_sysconf_dir,
2822+ self.route_fn_tpl_ipv4, self.route_fn_tpl_ipv6)
2823 r._conf = self._conf.copy()
2824 r.last_idx = self.last_idx
2825 r.has_set_default_ipv4 = self.has_set_default_ipv4
2826@@ -169,18 +170,23 @@ class Route(ConfigMap):
2827 class NetInterface(ConfigMap):
2828 """Represents a sysconfig/networking-script (and its config + children)."""
2829
2830- iface_fn_tpl = '%(base)s/network-scripts/ifcfg-%(name)s'
2831-
2832 iface_types = {
2833 'ethernet': 'Ethernet',
2834 'bond': 'Bond',
2835 'bridge': 'Bridge',
2836+ 'infiniband': 'InfiniBand',
2837 }
2838
2839- def __init__(self, iface_name, base_sysconf_dir, kind='ethernet'):
2840+ def __init__(self, iface_name, base_sysconf_dir, templates,
2841+ kind='ethernet'):
2842 super(NetInterface, self).__init__()
2843 self.children = []
2844- self.routes = Route(iface_name, base_sysconf_dir)
2845+ self.templates = templates
2846+ route_tpl = self.templates.get('route_templates')
2847+ self.routes = Route(iface_name, base_sysconf_dir,
2848+ ipv4_tpl=route_tpl.get('ipv4'),
2849+ ipv6_tpl=route_tpl.get('ipv6'))
2850+ self.iface_fn_tpl = self.templates.get('iface_templates')
2851 self.kind = kind
2852
2853 self._iface_name = iface_name
2854@@ -213,7 +219,8 @@ class NetInterface(ConfigMap):
2855 'name': self.name})
2856
2857 def copy(self, copy_children=False, copy_routes=False):
2858- c = NetInterface(self.name, self._base_sysconf_dir, kind=self._kind)
2859+ c = NetInterface(self.name, self._base_sysconf_dir,
2860+ self.templates, kind=self._kind)
2861 c._conf = self._conf.copy()
2862 if copy_children:
2863 c.children = list(self.children)
2864@@ -251,6 +258,8 @@ class Renderer(renderer.Renderer):
2865 ('bridge_bridgeprio', 'PRIO'),
2866 ])
2867
2868+ templates = {}
2869+
2870 def __init__(self, config=None):
2871 if not config:
2872 config = {}
2873@@ -261,6 +270,11 @@ class Renderer(renderer.Renderer):
2874 nm_conf_path = 'etc/NetworkManager/conf.d/99-cloud-init.conf'
2875 self.networkmanager_conf_path = config.get('networkmanager_conf_path',
2876 nm_conf_path)
2877+ self.templates = {
2878+ 'control': config.get('control'),
2879+ 'iface_templates': config.get('iface_templates'),
2880+ 'route_templates': config.get('route_templates'),
2881+ }
2882
2883 @classmethod
2884 def _render_iface_shared(cls, iface, iface_cfg):
2885@@ -512,7 +526,7 @@ class Renderer(renderer.Renderer):
2886 return content_str
2887
2888 @staticmethod
2889- def _render_networkmanager_conf(network_state):
2890+ def _render_networkmanager_conf(network_state, templates=None):
2891 content = networkmanager_conf.NetworkManagerConf("")
2892
2893 # If DNS server information is provided, configure
2894@@ -556,20 +570,36 @@ class Renderer(renderer.Renderer):
2895 cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
2896
2897 @classmethod
2898- def _render_sysconfig(cls, base_sysconf_dir, network_state):
2899+ def _render_ib_interfaces(cls, network_state, iface_contents):
2900+ ib_filter = renderer.filter_by_type('infiniband')
2901+ for iface in network_state.iter_interfaces(ib_filter):
2902+ iface_name = iface['name']
2903+ iface_cfg = iface_contents[iface_name]
2904+ iface_cfg.kind = 'infiniband'
2905+ iface_subnets = iface.get("subnets", [])
2906+ route_cfg = iface_cfg.routes
2907+ cls._render_subnets(iface_cfg, iface_subnets)
2908+ cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
2909+
2910+ @classmethod
2911+ def _render_sysconfig(cls, base_sysconf_dir, network_state,
2912+ templates=None):
2913 '''Given state, return /etc/sysconfig files + contents'''
2914+ if not templates:
2915+ templates = cls.templates
2916 iface_contents = {}
2917 for iface in network_state.iter_interfaces():
2918 if iface['type'] == "loopback":
2919 continue
2920 iface_name = iface['name']
2921- iface_cfg = NetInterface(iface_name, base_sysconf_dir)
2922+ iface_cfg = NetInterface(iface_name, base_sysconf_dir, templates)
2923 cls._render_iface_shared(iface, iface_cfg)
2924 iface_contents[iface_name] = iface_cfg
2925 cls._render_physical_interfaces(network_state, iface_contents)
2926 cls._render_bond_interfaces(network_state, iface_contents)
2927 cls._render_vlan_interfaces(network_state, iface_contents)
2928 cls._render_bridge_interfaces(network_state, iface_contents)
2929+ cls._render_ib_interfaces(network_state, iface_contents)
2930 contents = {}
2931 for iface_name, iface_cfg in iface_contents.items():
2932 if iface_cfg or iface_cfg.children:
2933@@ -578,17 +608,21 @@ class Renderer(renderer.Renderer):
2934 if iface_cfg:
2935 contents[iface_cfg.path] = iface_cfg.to_string()
2936 if iface_cfg.routes:
2937- contents[iface_cfg.routes.path_ipv4] = \
2938- iface_cfg.routes.to_string("ipv4")
2939- contents[iface_cfg.routes.path_ipv6] = \
2940- iface_cfg.routes.to_string("ipv6")
2941+ for cpath, proto in zip([iface_cfg.routes.path_ipv4,
2942+ iface_cfg.routes.path_ipv6],
2943+ ["ipv4", "ipv6"]):
2944+ if cpath not in contents:
2945+ contents[cpath] = iface_cfg.routes.to_string(proto)
2946 return contents
2947
2948- def render_network_state(self, network_state, target=None):
2949+ def render_network_state(self, network_state, templates=None, target=None):
2950+ if not templates:
2951+ templates = self.templates
2952 file_mode = 0o644
2953 base_sysconf_dir = util.target_path(target, self.sysconf_dir)
2954 for path, data in self._render_sysconfig(base_sysconf_dir,
2955- network_state).items():
2956+ network_state,
2957+ templates=templates).items():
2958 util.write_file(path, data, file_mode)
2959 if self.dns_path:
2960 dns_path = util.target_path(target, self.dns_path)
2961@@ -598,7 +632,8 @@ class Renderer(renderer.Renderer):
2962 if self.networkmanager_conf_path:
2963 nm_conf_path = util.target_path(target,
2964 self.networkmanager_conf_path)
2965- nm_conf_content = self._render_networkmanager_conf(network_state)
2966+ nm_conf_content = self._render_networkmanager_conf(network_state,
2967+ templates)
2968 if nm_conf_content:
2969 util.write_file(nm_conf_path, nm_conf_content, file_mode)
2970 if self.netrules_path:
2971@@ -606,13 +641,16 @@ class Renderer(renderer.Renderer):
2972 netrules_path = util.target_path(target, self.netrules_path)
2973 util.write_file(netrules_path, netrules_content, file_mode)
2974
2975- # always write /etc/sysconfig/network configuration
2976- sysconfig_path = util.target_path(target, "etc/sysconfig/network")
2977- netcfg = [_make_header(), 'NETWORKING=yes']
2978- if network_state.use_ipv6:
2979- netcfg.append('NETWORKING_IPV6=yes')
2980- netcfg.append('IPV6_AUTOCONF=no')
2981- util.write_file(sysconfig_path, "\n".join(netcfg) + "\n", file_mode)
2982+ sysconfig_path = util.target_path(target, templates.get('control'))
2983+        # Distros configuring /etc/sysconfig/network as a file e.g. CentOS
2984+ if sysconfig_path.endswith('network'):
2985+ util.ensure_dir(os.path.dirname(sysconfig_path))
2986+ netcfg = [_make_header(), 'NETWORKING=yes']
2987+ if network_state.use_ipv6:
2988+ netcfg.append('NETWORKING_IPV6=yes')
2989+ netcfg.append('IPV6_AUTOCONF=no')
2990+ util.write_file(sysconfig_path,
2991+ "\n".join(netcfg) + "\n", file_mode)
2992
2993
2994 def available(target=None):
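
For reference, the template paths a Red Hat-style distro could now pass to this
Renderer; the keys match the diff and the values mirror the defaults the class
previously hard-coded:

    templates = {
        'control': 'etc/sysconfig/network',
        'iface_templates': '%(base)s/network-scripts/ifcfg-%(name)s',
        'route_templates': {
            'ipv4': '%(base)s/network-scripts/route-%(name)s',
            'ipv6': '%(base)s/network-scripts/route6-%(name)s',
        },
    }
    renderer = Renderer(config=templates)
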
2995diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py
2996index 5c017d1..58e0a59 100644
2997--- a/cloudinit/net/tests/test_init.py
2998+++ b/cloudinit/net/tests/test_init.py
2999@@ -199,6 +199,8 @@ class TestGenerateFallbackConfig(CiTestCase):
3000 self.sysdir = self.tmp_dir() + '/'
3001 self.m_sys_path.return_value = self.sysdir
3002 self.addCleanup(sys_mock.stop)
3003+ self.add_patch('cloudinit.net.util.is_container', 'm_is_container',
3004+ return_value=False)
3005 self.add_patch('cloudinit.net.util.udevadm_settle', 'm_settle')
3006
3007 def test_generate_fallback_finds_connected_eth_with_mac(self):
3008@@ -513,12 +515,17 @@ class TestEphemeralIPV4Network(CiTestCase):
3009 capture=True),
3010 mock.call(
3011 ['ip', 'route', 'show', '0.0.0.0/0'], capture=True),
3012+ mock.call(['ip', '-4', 'route', 'add', '192.168.2.1',
3013+ 'dev', 'eth0', 'src', '192.168.2.2'], capture=True),
3014 mock.call(
3015 ['ip', '-4', 'route', 'add', 'default', 'via',
3016 '192.168.2.1', 'dev', 'eth0'], capture=True)]
3017- expected_teardown_calls = [mock.call(
3018- ['ip', '-4', 'route', 'del', 'default', 'dev', 'eth0'],
3019- capture=True)]
3020+ expected_teardown_calls = [
3021+ mock.call(['ip', '-4', 'route', 'del', 'default', 'dev', 'eth0'],
3022+ capture=True),
3023+ mock.call(['ip', '-4', 'route', 'del', '192.168.2.1',
3024+ 'dev', 'eth0', 'src', '192.168.2.2'], capture=True),
3025+ ]
3026
3027 with net.EphemeralIPv4Network(**params):
3028 self.assertEqual(expected_setup_calls, m_subp.call_args_list)
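
The test above pins down the new ordering: an explicit host route to the router
is added before the default route and removed after it on teardown. Typical
context-manager usage, with illustrative values:

    from cloudinit import net

    with net.EphemeralIPv4Network(
            interface='eth0', ip='192.168.2.2',
            prefix_or_mask='255.255.255.0', broadcast='192.168.2.255',
            router='192.168.2.1'):
        pass  # temporary address and routes exist here; removed on exit
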
3029diff --git a/cloudinit/reporting/__init__.py b/cloudinit/reporting/__init__.py
3030index 1ed2b48..ed5c703 100644
3031--- a/cloudinit/reporting/__init__.py
3032+++ b/cloudinit/reporting/__init__.py
3033@@ -18,7 +18,7 @@ DEFAULT_CONFIG = {
3034
3035
3036 def update_configuration(config):
3037- """Update the instanciated_handler_registry.
3038+ """Update the instantiated_handler_registry.
3039
3040 :param config:
3041 The dictionary containing changes to apply. If a key is given
3042@@ -37,6 +37,12 @@ def update_configuration(config):
3043 instantiated_handler_registry.register_item(handler_name, instance)
3044
3045
3046+def flush_events():
3047+ for _, handler in instantiated_handler_registry.registered_items.items():
3048+ if hasattr(handler, 'flush'):
3049+ handler.flush()
3050+
3051+
3052 instantiated_handler_registry = DictRegistry()
3053 update_configuration(DEFAULT_CONFIG)
3054
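
A hedged sketch of how the new flush_events() helper would be called at the end
of a boot stage so queue-backed handlers (such as the Hyper-V KVP handler
below) drain before the process exits; the handler name 'kvp' is illustrative:

    from cloudinit import reporting

    reporting.update_configuration({'kvp': {'type': 'hyperv'}})
    # ... events are published while the stage runs ...
    reporting.flush_events()  # blocks until handlers with flush() finish
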
3055diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
3056index 4066076..6d23558 100644
3057--- a/cloudinit/reporting/handlers.py
3058+++ b/cloudinit/reporting/handlers.py
3059@@ -1,17 +1,32 @@
3060 # This file is part of cloud-init. See LICENSE file for license information.
3061
3062 import abc
3063+import fcntl
3064 import json
3065 import six
3066+import os
3067+import re
3068+import struct
3069+import threading
3070+import time
3071
3072 from cloudinit import log as logging
3073 from cloudinit.registry import DictRegistry
3074 from cloudinit import (url_helper, util)
3075+from datetime import datetime
3076
3077+if six.PY2:
3078+ from multiprocessing.queues import JoinableQueue as JQueue
3079+else:
3080+ from queue import Queue as JQueue
3081
3082 LOG = logging.getLogger(__name__)
3083
3084
3085+class ReportException(Exception):
3086+ pass
3087+
3088+
3089 @six.add_metaclass(abc.ABCMeta)
3090 class ReportingHandler(object):
3091 """Base class for report handlers.
3092@@ -24,6 +39,10 @@ class ReportingHandler(object):
3093 def publish_event(self, event):
3094 """Publish an event."""
3095
3096+ def flush(self):
3097+ """Ensure ReportingHandler has published all events"""
3098+ pass
3099+
3100
3101 class LogHandler(ReportingHandler):
3102 """Publishes events to the cloud-init log at the ``DEBUG`` log level."""
3103@@ -85,9 +104,236 @@ class WebHookHandler(ReportingHandler):
3104 LOG.warning("failed posting event: %s", event.as_string())
3105
3106
3107+class HyperVKvpReportingHandler(ReportingHandler):
3108+ """
3109+    Reports events to a Hyper-V host using the Key-Value-Pair exchange protocol;
3110+    it can be used to obtain high-level diagnostic information from the host.
3111+
3112+ To use this facility, the KVP user-space daemon (hv_kvp_daemon) has to be
3113+ running. It reads the kvp_file when the host requests the guest to
3114+    enumerate the KVPs.
3115+
3116+ This reporter collates all events for a module (origin|name) in a single
3117+ json string in the dictionary.
3118+
3119+ For more information, see
3120+ https://technet.microsoft.com/en-us/library/dn798287.aspx#Linux%20guests
3121+ """
3122+ HV_KVP_EXCHANGE_MAX_VALUE_SIZE = 2048
3123+ HV_KVP_EXCHANGE_MAX_KEY_SIZE = 512
3124+ HV_KVP_RECORD_SIZE = (HV_KVP_EXCHANGE_MAX_KEY_SIZE +
3125+ HV_KVP_EXCHANGE_MAX_VALUE_SIZE)
3126+ EVENT_PREFIX = 'CLOUD_INIT'
3127+ MSG_KEY = 'msg'
3128+ RESULT_KEY = 'result'
3129+ DESC_IDX_KEY = 'msg_i'
3130+ JSON_SEPARATORS = (',', ':')
3131+ KVP_POOL_FILE_GUEST = '/var/lib/hyperv/.kvp_pool_1'
3132+
3133+ def __init__(self,
3134+ kvp_file_path=KVP_POOL_FILE_GUEST,
3135+ event_types=None):
3136+ super(HyperVKvpReportingHandler, self).__init__()
3137+ self._kvp_file_path = kvp_file_path
3138+ self._event_types = event_types
3139+ self.q = JQueue()
3140+ self.kvp_file = None
3141+ self.incarnation_no = self._get_incarnation_no()
3142+ self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX,
3143+ self.incarnation_no)
3144+ self._current_offset = 0
3145+ self.publish_thread = threading.Thread(
3146+ target=self._publish_event_routine)
3147+ self.publish_thread.daemon = True
3148+ self.publish_thread.start()
3149+
3150+ def _get_incarnation_no(self):
3151+ """
3152+ use the time passed as the incarnation number.
3153+ the incarnation number is the number which are used to
3154+ distinguish the old data stored in kvp and the new data.
3155+ """
3156+ uptime_str = util.uptime()
3157+ try:
3158+ return int(time.time() - float(uptime_str))
3159+ except ValueError:
3160+ LOG.warning("uptime '%s' not in correct format.", uptime_str)
3161+ return 0
3162+
3163+ def _iterate_kvps(self, offset):
3164+ """iterate the kvp file from the current offset."""
3165+ try:
3166+ with open(self._kvp_file_path, 'rb+') as f:
3167+ self.kvp_file = f
3168+ fcntl.flock(f, fcntl.LOCK_EX)
3169+ f.seek(offset)
3170+ record_data = f.read(self.HV_KVP_RECORD_SIZE)
3171+ while len(record_data) == self.HV_KVP_RECORD_SIZE:
3172+ self._current_offset += self.HV_KVP_RECORD_SIZE
3173+ kvp_item = self._decode_kvp_item(record_data)
3174+ yield kvp_item
3175+ record_data = f.read(self.HV_KVP_RECORD_SIZE)
3176+ fcntl.flock(f, fcntl.LOCK_UN)
3177+ finally:
3178+ self.kvp_file = None
3179+
3180+ def _event_key(self, event):
3181+ """
3182+ the event key format is:
3183+ CLOUD_INIT|<incarnation number>|<event_type>|<event_name>
3184+ """
3185+ return u"{0}|{1}|{2}".format(self.event_key_prefix,
3186+ event.event_type, event.name)
3187+
3188+ def _encode_kvp_item(self, key, value):
3189+ data = (struct.pack("%ds%ds" % (
3190+ self.HV_KVP_EXCHANGE_MAX_KEY_SIZE,
3191+ self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE),
3192+ key.encode('utf-8'), value.encode('utf-8')))
3193+ return data
3194+
3195+ def _decode_kvp_item(self, record_data):
3196+ record_data_len = len(record_data)
3197+ if record_data_len != self.HV_KVP_RECORD_SIZE:
3198+ raise ReportException(
3199+ "record_data len not correct {0} {1}."
3200+ .format(record_data_len, self.HV_KVP_RECORD_SIZE))
3201+ k = (record_data[0:self.HV_KVP_EXCHANGE_MAX_KEY_SIZE].decode('utf-8')
3202+ .strip('\x00'))
3203+ v = (
3204+ record_data[
3205+ self.HV_KVP_EXCHANGE_MAX_KEY_SIZE:self.HV_KVP_RECORD_SIZE
3206+ ].decode('utf-8').strip('\x00'))
3207+
3208+ return {'key': k, 'value': v}
3209+
3210+ def _update_kvp_item(self, record_data):
3211+ if self.kvp_file is None:
3212+ raise ReportException(
3213+ "kvp file '{0}' not opened."
3214+ .format(self._kvp_file_path))
3215+ self.kvp_file.seek(-self.HV_KVP_RECORD_SIZE, 1)
3216+ self.kvp_file.write(record_data)
3217+
3218+ def _append_kvp_item(self, record_data):
3219+ with open(self._kvp_file_path, 'rb+') as f:
3220+ fcntl.flock(f, fcntl.LOCK_EX)
3221+ # seek to end of the file
3222+ f.seek(0, 2)
3223+ f.write(record_data)
3224+ f.flush()
3225+ fcntl.flock(f, fcntl.LOCK_UN)
3226+ self._current_offset = f.tell()
3227+
3228+ def _break_down(self, key, meta_data, description):
3229+ del meta_data[self.MSG_KEY]
3230+ des_in_json = json.dumps(description)
3231+ des_in_json = des_in_json[1:(len(des_in_json) - 1)]
3232+ i = 0
3233+ result_array = []
3234+ message_place_holder = "\"" + self.MSG_KEY + "\":\"\""
3235+ while True:
3236+ meta_data[self.DESC_IDX_KEY] = i
3237+ meta_data[self.MSG_KEY] = ''
3238+ data_without_desc = json.dumps(meta_data,
3239+ separators=self.JSON_SEPARATORS)
3240+ room_for_desc = (
3241+ self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE -
3242+ len(data_without_desc) - 8)
3243+ value = data_without_desc.replace(
3244+ message_place_holder,
3245+ '"{key}":"{desc}"'.format(
3246+ key=self.MSG_KEY, desc=des_in_json[:room_for_desc]))
3247+ result_array.append(self._encode_kvp_item(key, value))
3248+ i += 1
3249+ des_in_json = des_in_json[room_for_desc:]
3250+ if len(des_in_json) == 0:
3251+ break
3252+ return result_array
3253+
3254+ def _encode_event(self, event):
3255+ """
3256+ encode the event into kvp data bytes.
3257+ if the event content reaches the maximum length of kvp value.
3258+ then it would be cut to multiple slices.
3259+ """
3260+ key = self._event_key(event)
3261+ meta_data = {
3262+ "name": event.name,
3263+ "type": event.event_type,
3264+ "ts": (datetime.utcfromtimestamp(event.timestamp)
3265+ .isoformat() + 'Z'),
3266+ }
3267+ if hasattr(event, self.RESULT_KEY):
3268+ meta_data[self.RESULT_KEY] = event.result
3269+ meta_data[self.MSG_KEY] = event.description
3270+ value = json.dumps(meta_data, separators=self.JSON_SEPARATORS)
3271+        # if the value exceeds the maximum kvp value length,
3272+        # break it down into slices.
3273+        # this should be a rare corner case.
3274+ if len(value) > self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE:
3275+ return self._break_down(key, meta_data, event.description)
3276+ else:
3277+ data = self._encode_kvp_item(key, value)
3278+ return [data]
3279+
3280+ def _publish_event_routine(self):
3281+ while True:
3282+ try:
3283+ event = self.q.get(block=True)
3284+ need_append = True
3285+ try:
3286+ if not os.path.exists(self._kvp_file_path):
3287+ LOG.warning(
3288+ "skip writing events %s to %s. file not present.",
3289+ event.as_string(),
3290+ self._kvp_file_path)
3291+ encoded_event = self._encode_event(event)
3292+                    # write each encoded slice
3293+ for encoded_data in (encoded_event):
3294+ for kvp in self._iterate_kvps(self._current_offset):
3295+ match = (
3296+ re.match(
3297+ r"^{0}\|(\d+)\|.+"
3298+ .format(self.EVENT_PREFIX),
3299+ kvp['key']
3300+ ))
3301+ if match:
3302+ match_groups = match.groups(0)
3303+ if int(match_groups[0]) < self.incarnation_no:
3304+ need_append = False
3305+ self._update_kvp_item(encoded_data)
3306+ continue
3307+ if need_append:
3308+ self._append_kvp_item(encoded_data)
3309+ except IOError as e:
3310+ LOG.warning(
3311+ "failed posting event to kvp: %s e:%s",
3312+ event.as_string(), e)
3313+ finally:
3314+ self.q.task_done()
3315+
3316+            # when the main process exits, q.get() will throw EOFError,
3317+            # indicating we should exit this thread.
3318+ except EOFError:
3319+ return
3320+
3321+    # saving to the kvp pool can be time consuming when the pool
3322+    # already contains a chunk of data, so defer the write to
3323+    # another thread.
3324+ def publish_event(self, event):
3325+ if (not self._event_types or event.event_type in self._event_types):
3326+ self.q.put(event)
3327+
3328+ def flush(self):
3329+ LOG.debug('HyperVReportingHandler flushing remaining events')
3330+ self.q.join()
3331+
3332+
3333 available_handlers = DictRegistry()
3334 available_handlers.register_item('log', LogHandler)
3335 available_handlers.register_item('print', PrintHandler)
3336 available_handlers.register_item('webhook', WebHookHandler)
3337+available_handlers.register_item('hyperv', HyperVKvpReportingHandler)
3338
3339 # vi: ts=4 expandtab
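
A standalone sketch of the fixed-size record layout this handler reads and
writes: a 512-byte NUL-padded key followed by a 2048-byte NUL-padded value,
sizes taken from the class constants above:

    import struct

    KEY_SIZE, VALUE_SIZE = 512, 2048

    def encode(key, value):
        # struct pads the 's' fields with NUL bytes up to the given size
        return struct.pack('%ds%ds' % (KEY_SIZE, VALUE_SIZE),
                           key.encode('utf-8'), value.encode('utf-8'))

    def decode(record):
        return (record[:KEY_SIZE].decode('utf-8').strip('\x00'),
                record[KEY_SIZE:].decode('utf-8').strip('\x00'))

    rec = encode('CLOUD_INIT|0|boot|ds', '{"msg":"start"}')
    assert len(rec) == KEY_SIZE + VALUE_SIZE
    assert decode(rec) == ('CLOUD_INIT|0|boot|ds', '{"msg":"start"}')
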
3340diff --git a/cloudinit/settings.py b/cloudinit/settings.py
3341index dde5749..b1ebaad 100644
3342--- a/cloudinit/settings.py
3343+++ b/cloudinit/settings.py
3344@@ -38,12 +38,13 @@ CFG_BUILTIN = {
3345 'Scaleway',
3346 'Hetzner',
3347 'IBMCloud',
3348+ 'Oracle',
3349 # At the end to act as a 'catch' when none of the above work...
3350 'None',
3351 ],
3352 'def_log_file': '/var/log/cloud-init.log',
3353 'log_cfgs': [],
3354- 'syslog_fix_perms': ['syslog:adm', 'root:adm', 'root:wheel'],
3355+ 'syslog_fix_perms': ['syslog:adm', 'root:adm', 'root:wheel', 'root:root'],
3356 'system_info': {
3357 'paths': {
3358 'cloud_dir': '/var/lib/cloud',
3359diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
3360index 24fd65f..8cd312d 100644
3361--- a/cloudinit/sources/DataSourceAltCloud.py
3362+++ b/cloudinit/sources/DataSourceAltCloud.py
3363@@ -181,27 +181,18 @@ class DataSourceAltCloud(sources.DataSource):
3364
3365 # modprobe floppy
3366 try:
3367- cmd = CMD_PROBE_FLOPPY
3368- (cmd_out, _err) = util.subp(cmd)
3369- LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out)
3370+ modprobe_floppy()
3371 except ProcessExecutionError as e:
3372- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
3373- return False
3374- except OSError as e:
3375- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
3376+ util.logexc(LOG, 'Failed modprobe: %s', e)
3377 return False
3378
3379 floppy_dev = '/dev/fd0'
3380
3381 # udevadm settle for floppy device
3382 try:
3383- (cmd_out, _err) = util.udevadm_settle(exists=floppy_dev, timeout=5)
3384- LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out)
3385- except ProcessExecutionError as e:
3386- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
3387- return False
3388- except OSError as e:
3389- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
3390+ util.udevadm_settle(exists=floppy_dev, timeout=5)
3391+ except (ProcessExecutionError, OSError) as e:
3392+        util.logexc(LOG, 'Failed udevadm_settle: %s', e)
3393 return False
3394
3395 try:
3396@@ -258,6 +249,11 @@ class DataSourceAltCloud(sources.DataSource):
3397 return False
3398
3399
3400+def modprobe_floppy():
3401+ out, _err = util.subp(CMD_PROBE_FLOPPY)
3402+    LOG.debug('Command: %s\nOutput: %s', ' '.join(CMD_PROBE_FLOPPY), out)
3403+
3404+
3405 # Used to match classes to dependencies
3406 # Source DataSourceAltCloud does not really depend on networking.
3407 # In the future 'dsmode' like behavior can be added to offer user
3408diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
3409index 7007d9e..783445e 100644
3410--- a/cloudinit/sources/DataSourceAzure.py
3411+++ b/cloudinit/sources/DataSourceAzure.py
3412@@ -8,6 +8,7 @@ import base64
3413 import contextlib
3414 import crypt
3415 from functools import partial
3416+import json
3417 import os
3418 import os.path
3419 import re
3420@@ -17,6 +18,7 @@ import xml.etree.ElementTree as ET
3421
3422 from cloudinit import log as logging
3423 from cloudinit import net
3424+from cloudinit.event import EventType
3425 from cloudinit.net.dhcp import EphemeralDHCPv4
3426 from cloudinit import sources
3427 from cloudinit.sources.helpers.azure import get_metadata_from_fabric
3428@@ -49,7 +51,17 @@ DEFAULT_FS = 'ext4'
3429 AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77'
3430 REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds"
3431 REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready"
3432-IMDS_URL = "http://169.254.169.254/metadata/reprovisiondata"
3433+AGENT_SEED_DIR = '/var/lib/waagent'
3434+IMDS_URL = "http://169.254.169.254/metadata/"
3435+
3436+# List of static scripts and network config artifacts created by
3437+# stock ubuntu suported images.
3438+UBUNTU_EXTENDED_NETWORK_SCRIPTS = [
3439+ '/etc/netplan/90-azure-hotplug.yaml',
3440+ '/usr/local/sbin/ephemeral_eth.sh',
3441+ '/etc/udev/rules.d/10-net-device-added.rules',
3442+ '/run/network/interfaces.ephemeral.d',
3443+]
3444
3445
3446 def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid):
3447@@ -185,7 +197,7 @@ if util.is_FreeBSD():
3448
3449 BUILTIN_DS_CONFIG = {
3450 'agent_command': AGENT_START_BUILTIN,
3451- 'data_dir': "/var/lib/waagent",
3452+ 'data_dir': AGENT_SEED_DIR,
3453 'set_hostname': True,
3454 'hostname_bounce': {
3455 'interface': DEFAULT_PRIMARY_NIC,
3456@@ -252,6 +264,7 @@ class DataSourceAzure(sources.DataSource):
3457
3458 dsname = 'Azure'
3459 _negotiated = False
3460+ _metadata_imds = sources.UNSET
3461
3462 def __init__(self, sys_cfg, distro, paths):
3463 sources.DataSource.__init__(self, sys_cfg, distro, paths)
3464@@ -263,6 +276,8 @@ class DataSourceAzure(sources.DataSource):
3465 BUILTIN_DS_CONFIG])
3466 self.dhclient_lease_file = self.ds_cfg.get('dhclient_lease_file')
3467 self._network_config = None
3468+        # Regenerate network config on new-instance boot and on every boot
3469+ self.update_events['network'].add(EventType.BOOT)
3470
3471 def __str__(self):
3472 root = sources.DataSource.__str__(self)
3473@@ -336,15 +351,17 @@ class DataSourceAzure(sources.DataSource):
3474 metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)
3475 return metadata
3476
3477- def _get_data(self):
3478+ def crawl_metadata(self):
3479+ """Walk all instance metadata sources returning a dict on success.
3480+
3481+ @return: A dictionary of any metadata content for this instance.
3482+ @raise: InvalidMetaDataException when the expected metadata service is
3483+ unavailable, broken or disabled.
3484+ """
3485+ crawled_data = {}
3486 # azure removes/ejects the cdrom containing the ovf-env.xml
3487 # file on reboot. So, in order to successfully reboot we
3488 # need to look in the datadir and consider that valid
3489- asset_tag = util.read_dmi_data('chassis-asset-tag')
3490- if asset_tag != AZURE_CHASSIS_ASSET_TAG:
3491- LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag)
3492- return False
3493-
3494 ddir = self.ds_cfg['data_dir']
3495
3496 candidates = [self.seed_dir]
3497@@ -373,41 +390,84 @@ class DataSourceAzure(sources.DataSource):
3498 except NonAzureDataSource:
3499 continue
3500 except BrokenAzureDataSource as exc:
3501- raise exc
3502+ msg = 'BrokenAzureDataSource: %s' % exc
3503+ raise sources.InvalidMetaDataException(msg)
3504 except util.MountFailedError:
3505 LOG.warning("%s was not mountable", cdev)
3506 continue
3507
3508 if reprovision or self._should_reprovision(ret):
3509 ret = self._reprovision()
3510- (md, self.userdata_raw, cfg, files) = ret
3511+ imds_md = get_metadata_from_imds(
3512+ self.fallback_interface, retries=3)
3513+ (md, userdata_raw, cfg, files) = ret
3514 self.seed = cdev
3515- self.metadata = util.mergemanydict([md, DEFAULT_METADATA])
3516- self.cfg = util.mergemanydict([cfg, BUILTIN_CLOUD_CONFIG])
3517+ crawled_data.update({
3518+ 'cfg': cfg,
3519+ 'files': files,
3520+ 'metadata': util.mergemanydict(
3521+ [md, {'imds': imds_md}]),
3522+ 'userdata_raw': userdata_raw})
3523 found = cdev
3524
3525 LOG.debug("found datasource in %s", cdev)
3526 break
3527
3528 if not found:
3529- return False
3530+ raise sources.InvalidMetaDataException('No Azure metadata found')
3531
3532 if found == ddir:
3533 LOG.debug("using files cached in %s", ddir)
3534
3535 seed = _get_random_seed()
3536 if seed:
3537- self.metadata['random_seed'] = seed
3538+ crawled_data['metadata']['random_seed'] = seed
3539+ crawled_data['metadata']['instance-id'] = util.read_dmi_data(
3540+ 'system-uuid')
3541+ return crawled_data
3542+
3543+ def _is_platform_viable(self):
3544+ """Check platform environment to report if this datasource may run."""
3545+ return _is_platform_viable(self.seed_dir)
3546+
3547+ def clear_cached_attrs(self, attr_defaults=()):
3548+ """Reset any cached class attributes to defaults."""
3549+ super(DataSourceAzure, self).clear_cached_attrs(attr_defaults)
3550+ self._metadata_imds = sources.UNSET
3551+
3552+ def _get_data(self):
3553+ """Crawl and process datasource metadata caching metadata as attrs.
3554+
3555+ @return: True on success, False on error, invalid or disabled
3556+ datasource.
3557+ """
3558+ if not self._is_platform_viable():
3559+ return False
3560+ try:
3561+ crawled_data = util.log_time(
3562+ logfunc=LOG.debug, msg='Crawl of metadata service',
3563+ func=self.crawl_metadata)
3564+ except sources.InvalidMetaDataException as e:
3565+ LOG.warning('Could not crawl Azure metadata: %s', e)
3566+ return False
3567+ if self.distro and self.distro.name == 'ubuntu':
3568+ maybe_remove_ubuntu_network_config_scripts()
3569+
3570+ # Process crawled data and augment with various config defaults
3571+ self.cfg = util.mergemanydict(
3572+ [crawled_data['cfg'], BUILTIN_CLOUD_CONFIG])
3573+ self._metadata_imds = crawled_data['metadata']['imds']
3574+ self.metadata = util.mergemanydict(
3575+ [crawled_data['metadata'], DEFAULT_METADATA])
3576+ self.userdata_raw = crawled_data['userdata_raw']
3577
3578 user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
3579 self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
3580
3581 # walinux agent writes files world readable, but expects
3582 # the directory to be protected.
3583- write_files(ddir, files, dirmode=0o700)
3584-
3585- self.metadata['instance-id'] = util.read_dmi_data('system-uuid')
3586-
3587+ write_files(
3588+ self.ds_cfg['data_dir'], crawled_data['files'], dirmode=0o700)
3589 return True
3590
3591 def device_name_to_device(self, name):
3592@@ -436,7 +496,7 @@ class DataSourceAzure(sources.DataSource):
3593 def _poll_imds(self):
3594 """Poll IMDS for the new provisioning data until we get a valid
3595 response. Then return the returned JSON object."""
3596- url = IMDS_URL + "?api-version=2017-04-02"
3597+ url = IMDS_URL + "reprovisiondata?api-version=2017-04-02"
3598 headers = {"Metadata": "true"}
3599 report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE))
3600 LOG.debug("Start polling IMDS")
3601@@ -487,7 +547,7 @@ class DataSourceAzure(sources.DataSource):
3602 jump back into the polling loop in order to retrieve the ovf_env."""
3603 if not ret:
3604 return False
3605- (_md, self.userdata_raw, cfg, _files) = ret
3606+ (_md, _userdata_raw, cfg, _files) = ret
3607 path = REPROVISION_MARKER_FILE
3608 if (cfg.get('PreprovisionedVm') is True or
3609 os.path.isfile(path)):
3610@@ -543,22 +603,15 @@ class DataSourceAzure(sources.DataSource):
3611 @property
3612 def network_config(self):
3613 """Generate a network config like net.generate_fallback_network() with
3614- the following execptions.
3615+ the following exceptions.
3616
3617 1. Probe the drivers of the net-devices present and inject them in
3618 the network configuration under params: driver: <driver> value
3619 2. Generate a fallback network config that does not include any of
3620 the blacklisted devices.
3621 """
3622- blacklist = ['mlx4_core']
3623 if not self._network_config:
3624- LOG.debug('Azure: generating fallback configuration')
3625- # generate a network config, blacklist picking any mlx4_core devs
3626- netconfig = net.generate_fallback_config(
3627- blacklist_drivers=blacklist, config_driver=True)
3628-
3629- self._network_config = netconfig
3630-
3631+ self._network_config = parse_network_config(self._metadata_imds)
3632 return self._network_config
3633
3634
3635@@ -1025,6 +1078,151 @@ def load_azure_ds_dir(source_dir):
3636 return (md, ud, cfg, {'ovf-env.xml': contents})
3637
3638
3639+def parse_network_config(imds_metadata):
3640+ """Convert imds_metadata dictionary to network v2 configuration.
3641+
3642+ Parses network configuration from imds metadata if present or generate
3643+ fallback network config excluding mlx4_core devices.
3644+
3645+ @param: imds_metadata: Dict of content read from IMDS network service.
3646+ @return: Dictionary containing network version 2 standard configuration.
3647+ """
3648+ if imds_metadata != sources.UNSET and imds_metadata:
3649+ netconfig = {'version': 2, 'ethernets': {}}
3650+ LOG.debug('Azure: generating network configuration from IMDS')
3651+ network_metadata = imds_metadata['network']
3652+ for idx, intf in enumerate(network_metadata['interface']):
3653+ nicname = 'eth{idx}'.format(idx=idx)
3654+ dev_config = {}
3655+ for addr4 in intf['ipv4']['ipAddress']:
3656+ privateIpv4 = addr4['privateIpAddress']
3657+ if privateIpv4:
3658+ if dev_config.get('dhcp4', False):
3659+ # Append static address config for nic > 1
3660+ netPrefix = intf['ipv4']['subnet'][0].get(
3661+ 'prefix', '24')
3662+ if not dev_config.get('addresses'):
3663+ dev_config['addresses'] = []
3664+ dev_config['addresses'].append(
3665+ '{ip}/{prefix}'.format(
3666+ ip=privateIpv4, prefix=netPrefix))
3667+ else:
3668+ dev_config['dhcp4'] = True
3669+ for addr6 in intf['ipv6']['ipAddress']:
3670+ privateIpv6 = addr6['privateIpAddress']
3671+ if privateIpv6:
3672+ dev_config['dhcp6'] = True
3673+ break
3674+ if dev_config:
3675+ mac = ':'.join(re.findall(r'..', intf['macAddress']))
3676+ dev_config.update(
3677+ {'match': {'macaddress': mac.lower()},
3678+ 'set-name': nicname})
3679+ netconfig['ethernets'][nicname] = dev_config
3680+ else:
3681+ blacklist = ['mlx4_core']
3682+ LOG.debug('Azure: generating fallback configuration')
3683+ # generate a network config, blacklist picking mlx4_core devs
3684+ netconfig = net.generate_fallback_config(
3685+ blacklist_drivers=blacklist, config_driver=True)
3686+ return netconfig
3687+
3688+
3689+def get_metadata_from_imds(fallback_nic, retries):
3690+ """Query Azure's network metadata service, returning a dictionary.
3691+
3692+ If network is not up, setup ephemeral dhcp on fallback_nic to talk to the
3693+ IMDS. For more info on IMDS:
3694+ https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service
3695+
3696+ @param fallback_nic: String. The name of the nic which requires active
3697+ network in order to query IMDS.
3698+ @param retries: The number of retries of the IMDS_URL.
3699+
3700+ @return: A dict of instance metadata containing compute and network
3701+ info.
3702+ """
3703+ kwargs = {'logfunc': LOG.debug,
3704+ 'msg': 'Crawl of Azure Instance Metadata Service (IMDS)',
3705+ 'func': _get_metadata_from_imds, 'args': (retries,)}
3706+ if net.is_up(fallback_nic):
3707+ return util.log_time(**kwargs)
3708+ else:
3709+ with EphemeralDHCPv4(fallback_nic):
3710+ return util.log_time(**kwargs)
3711+
3712+
3713+def _get_metadata_from_imds(retries):
3714+
3715+ def retry_on_url_error(msg, exception):
3716+ if isinstance(exception, UrlError) and exception.code == 404:
3717+ return True # Continue retries
3718+ return False # Stop retries on all other exceptions
3719+
3720+ url = IMDS_URL + "instance?api-version=2017-12-01"
3721+ headers = {"Metadata": "true"}
3722+ try:
3723+ response = readurl(
3724+ url, timeout=1, headers=headers, retries=retries,
3725+ exception_cb=retry_on_url_error)
3726+ except Exception as e:
3727+ LOG.debug('Ignoring IMDS instance metadata: %s', e)
3728+ return {}
3729+ try:
3730+ return util.load_json(str(response))
3731+ except json.decoder.JSONDecodeError:
3732+ LOG.warning(
3733+ 'Ignoring non-json IMDS instance metadata: %s', str(response))
3734+ return {}
3735+
3736+
3737+def maybe_remove_ubuntu_network_config_scripts(paths=None):
3738+ """Remove Azure-specific ubuntu network config for non-primary nics.
3739+
3740+ @param paths: List of networking scripts or directories to remove when
3741+ present.
3742+
3743+ In certain supported ubuntu images, static udev rules or netplan yaml
3744+ config is delivered in the base ubuntu image to support dhcp on any
3745+ additional interfaces which get attached by a customer at some point
3746+ after initial boot. Since the Azure datasource can now regenerate
3747+ network configuration as metadata reports these new devices, we no longer
3748+ want the udev rules or netplan's 90-azure-hotplug.yaml to configure
3749+ networking on eth1 or greater as it might collide with cloud-init's
3750+ configuration.
3751+
3752+    Remove any existing extended network scripts if the datasource is
3753+    enabled to write network configuration on every boot.
3754+ """
3755+ if not paths:
3756+ paths = UBUNTU_EXTENDED_NETWORK_SCRIPTS
3757+ logged = False
3758+ for path in paths:
3759+ if os.path.exists(path):
3760+ if not logged:
3761+ LOG.info(
3762+ 'Removing Ubuntu extended network scripts because'
3763+ ' cloud-init updates Azure network configuration on the'
3764+ ' following event: %s.',
3765+ EventType.BOOT)
3766+ logged = True
3767+ if os.path.isdir(path):
3768+ util.del_dir(path)
3769+ else:
3770+ util.del_file(path)
3771+
3772+
3773+def _is_platform_viable(seed_dir):
3774+ """Check platform environment to report if this datasource may run."""
3775+ asset_tag = util.read_dmi_data('chassis-asset-tag')
3776+ if asset_tag == AZURE_CHASSIS_ASSET_TAG:
3777+ return True
3778+ LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag)
3779+ if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')):
3780+ return True
3781+ return False
3782+
3783+
3784 class BrokenAzureDataSource(Exception):
3785 pass
3786
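
An illustrative input/output pair for parse_network_config() above; the payload
shape follows the IMDS instance API used in the diff, all values are made up:

    imds = {'network': {'interface': [{
        'macAddress': '000D3A047598',
        'ipv4': {'subnet': [{'prefix': '24', 'address': '10.0.0.0'}],
                 'ipAddress': [{'privateIpAddress': '10.0.0.4',
                                'publicIpAddress': ''}]},
        'ipv6': {'ipAddress': []},
    }]}}
    # parse_network_config(imds) would then yield roughly:
    # {'version': 2, 'ethernets': {'eth0': {
    #     'dhcp4': True,
    #     'match': {'macaddress': '00:0d:3a:04:75:98'},
    #     'set-name': 'eth0'}}}
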
3787diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
3788index 4cb2897..664dc4b 100644
3789--- a/cloudinit/sources/DataSourceConfigDrive.py
3790+++ b/cloudinit/sources/DataSourceConfigDrive.py
3791@@ -196,7 +196,7 @@ def on_first_boot(data, distro=None, network=True):
3792 net_conf = data.get("network_config", '')
3793 if net_conf and distro:
3794 LOG.warning("Updating network interfaces from config drive")
3795- distro.apply_network(net_conf)
3796+ distro.apply_network_config(eni.convert_eni_data(net_conf))
3797 write_injected_files(data.get('files'))
3798
3799
3800diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py
3801index 01106ec..a535814 100644
3802--- a/cloudinit/sources/DataSourceIBMCloud.py
3803+++ b/cloudinit/sources/DataSourceIBMCloud.py
3804@@ -295,7 +295,7 @@ def read_md():
3805 results = metadata_from_dir(path)
3806 else:
3807 results = util.mount_cb(path, metadata_from_dir)
3808- except BrokenMetadata as e:
3809+ except sources.BrokenMetadata as e:
3810 raise RuntimeError(
3811 "Failed reading IBM config disk (platform=%s path=%s): %s" %
3812 (platform, path, e))
3813@@ -304,10 +304,6 @@ def read_md():
3814 return ret
3815
3816
3817-class BrokenMetadata(IOError):
3818- pass
3819-
3820-
3821 def metadata_from_dir(source_dir):
3822 """Walk source_dir extracting standardized metadata.
3823
3824@@ -352,12 +348,13 @@ def metadata_from_dir(source_dir):
3825 try:
3826 data = transl(raw)
3827 except Exception as e:
3828- raise BrokenMetadata("Failed decoding %s: %s" % (path, e))
3829+ raise sources.BrokenMetadata(
3830+ "Failed decoding %s: %s" % (path, e))
3831
3832 results[name] = data
3833
3834 if results.get('metadata_raw') is None:
3835- raise BrokenMetadata(
3836+ raise sources.BrokenMetadata(
3837 "%s missing required file 'meta_data.json'" % source_dir)
3838
3839 results['metadata'] = {}
3840@@ -368,7 +365,7 @@ def metadata_from_dir(source_dir):
3841 try:
3842 md['random_seed'] = base64.b64decode(md_raw['random_seed'])
3843 except (ValueError, TypeError) as e:
3844- raise BrokenMetadata(
3845+ raise sources.BrokenMetadata(
3846 "Badly formatted metadata random_seed entry: %s" % e)
3847
3848 renames = (
3849diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
3850index 16c1078..77ccd12 100644
3851--- a/cloudinit/sources/DataSourceOpenNebula.py
3852+++ b/cloudinit/sources/DataSourceOpenNebula.py
3853@@ -232,7 +232,7 @@ class OpenNebulaNetwork(object):
3854
3855 # Set IPv6 default gateway
3856 gateway6 = self.get_gateway6(c_dev)
3857- if gateway:
3858+ if gateway6:
3859 devconf['gateway6'] = gateway6
3860
3861 # Set DNS servers and search domains
3862diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
3863index 365af96..4a01524 100644
3864--- a/cloudinit/sources/DataSourceOpenStack.py
3865+++ b/cloudinit/sources/DataSourceOpenStack.py
3866@@ -13,6 +13,7 @@ from cloudinit import url_helper
3867 from cloudinit import util
3868
3869 from cloudinit.sources.helpers import openstack
3870+from cloudinit.sources import DataSourceOracle as oracle
3871
3872 LOG = logging.getLogger(__name__)
3873
3874@@ -121,8 +122,10 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
3875 False when unable to contact metadata service or when metadata
3876 format is invalid or disabled.
3877 """
3878- if not detect_openstack():
3879+ oracle_considered = 'Oracle' in self.sys_cfg.get('datasource_list')
3880+ if not detect_openstack(accept_oracle=not oracle_considered):
3881 return False
3882+
3883 if self.perform_dhcp_setup: # Setup networking in init-local stage.
3884 try:
3885 with EphemeralDHCPv4(self.fallback_interface):
3886@@ -214,7 +217,7 @@ def read_metadata_service(base_url, ssl_details=None,
3887 return reader.read_v2()
3888
3889
3890-def detect_openstack():
3891+def detect_openstack(accept_oracle=False):
3892 """Return True when a potential OpenStack platform is detected."""
3893 if not util.is_x86():
3894 return True # Non-Intel cpus don't properly report dmi product names
3895@@ -223,6 +226,8 @@ def detect_openstack():
3896 return True
3897 elif util.read_dmi_data('chassis-asset-tag') in VALID_DMI_ASSET_TAGS:
3898 return True
3899+ elif accept_oracle and oracle._is_platform_viable():
3900+ return True
3901 elif util.get_proc_env(1).get('product_name') == DMI_PRODUCT_NOVA:
3902 return True
3903 return False
3904diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py
3905new file mode 100644
3906index 0000000..fab39af
3907--- /dev/null
3908+++ b/cloudinit/sources/DataSourceOracle.py
3909@@ -0,0 +1,233 @@
3910+# This file is part of cloud-init. See LICENSE file for license information.
3911+"""Datasource for Oracle (OCI/Oracle Cloud Infrastructure)
3912+
3913+OCI provides an OpenStack-like metadata service which provides only the
3914+'2013-10-17' and 'latest' versions.
3915+
3916+Notes:
3917+  * This datasource does not support OCI-Classic. OCI-Classic
3918+    provides an EC2 lookalike metadata service.
3919+ * The uuid provided in DMI data is not the same as the meta-data provided
3920+ instance-id, but has an equivalent lifespan.
3921+ * We do need to support upgrade from an instance that cloud-init
3922+ identified as OpenStack.
3923+  * Both bare-metal and VMs use iSCSI root.
3924+  * Both bare-metal and VMs provide a chassis-asset-tag of OracleCloud.com.
3925+"""
3926+
3927+from cloudinit.url_helper import combine_url, readurl, UrlError
3928+from cloudinit.net import dhcp
3929+from cloudinit import net
3930+from cloudinit import sources
3931+from cloudinit import util
3932+from cloudinit.net import cmdline
3933+from cloudinit import log as logging
3934+
3935+import json
3936+import re
3937+
3938+LOG = logging.getLogger(__name__)
3939+
3940+CHASSIS_ASSET_TAG = "OracleCloud.com"
3941+METADATA_ENDPOINT = "http://169.254.169.254/openstack/"
3942+
3943+
3944+class DataSourceOracle(sources.DataSource):
3945+
3946+ dsname = 'Oracle'
3947+ system_uuid = None
3948+ vendordata_pure = None
3949+ _network_config = sources.UNSET
3950+
3951+ def _is_platform_viable(self):
3952+ """Check platform environment to report if this datasource may run."""
3953+ return _is_platform_viable()
3954+
3955+ def _get_data(self):
3956+ if not self._is_platform_viable():
3957+ return False
3958+
3959+        # the network may already be configured on an iscsi root. If that is
3960+        # the case, read_kernel_cmdline_config will return non-None.
3961+ if _is_iscsi_root():
3962+ data = self.crawl_metadata()
3963+ else:
3964+ with dhcp.EphemeralDHCPv4(net.find_fallback_nic()):
3965+ data = self.crawl_metadata()
3966+
3967+ self._crawled_metadata = data
3968+ vdata = data['2013-10-17']
3969+
3970+ self.userdata_raw = vdata.get('user_data')
3971+ self.system_uuid = vdata['system_uuid']
3972+
3973+ vd = vdata.get('vendor_data')
3974+ if vd:
3975+ self.vendordata_pure = vd
3976+ try:
3977+ self.vendordata_raw = sources.convert_vendordata(vd)
3978+ except ValueError as e:
3979+ LOG.warning("Invalid content in vendor-data: %s", e)
3980+ self.vendordata_raw = None
3981+
3982+ mdcopies = ('public_keys',)
3983+ md = dict([(k, vdata['meta_data'].get(k))
3984+ for k in mdcopies if k in vdata['meta_data']])
3985+
3986+ mdtrans = (
3987+ # oracle meta_data.json name, cloudinit.datasource.metadata name
3988+ ('availability_zone', 'availability-zone'),
3989+ ('hostname', 'local-hostname'),
3990+ ('launch_index', 'launch-index'),
3991+ ('uuid', 'instance-id'),
3992+ )
3993+ for dsname, ciname in mdtrans:
3994+ if dsname in vdata['meta_data']:
3995+ md[ciname] = vdata['meta_data'][dsname]
3996+
3997+ self.metadata = md
3998+ return True
3999+
4000+ def crawl_metadata(self):
4001+ return read_metadata()
4002+
4003+ def check_instance_id(self, sys_cfg):
4004+ """quickly check (local only) if self.instance_id is still valid
4005+
4006+ On Oracle, the dmi-provided system uuid differs from the instance-id
4007+ but has the same life-span."""
4008+ return sources.instance_id_matches_system_uuid(self.system_uuid)
4009+
4010+ def get_public_ssh_keys(self):
4011+ return sources.normalize_pubkey_data(self.metadata.get('public_keys'))
4012+
4013+ @property
4014+ def network_config(self):
4015+ """Network config is read from initramfs provided files
4016+ If none is present, then we fall back to fallback configuration.
4017+
4018+ One thing to note here is that this method is not currently
4019+ considered at all if there is is kernel/initramfs provided
4020+ data. In that case, stages considers that the cmdline data
4021+ overrides datasource provided data and does not consult here.
4022+
4023+ We nonetheless return cmdline provided config if present
4024+ and fallback to generate fallback."""
4025+ if self._network_config == sources.UNSET:
4026+ cmdline_cfg = cmdline.read_kernel_cmdline_config()
4027+ if cmdline_cfg:
4028+ self._network_config = cmdline_cfg
4029+ else:
4030+ self._network_config = self.distro.generate_fallback_config()
4031+ return self._network_config
4032+
4033+
4034+def _read_system_uuid():
4035+ sys_uuid = util.read_dmi_data('system-uuid')
4036+ return None if sys_uuid is None else sys_uuid.lower()
4037+
4038+
4039+def _is_platform_viable():
4040+ asset_tag = util.read_dmi_data('chassis-asset-tag')
4041+ return asset_tag == CHASSIS_ASSET_TAG
4042+
4043+
4044+def _is_iscsi_root():
4045+ return bool(cmdline.read_kernel_cmdline_config())
4046+
4047+
4048+def _load_index(content):
4049+ """Return a list entries parsed from content.
4050+
4051+ OpenStack's metadata service returns a newline delimited list
4052+    of items. Oracle's implementation has an html formatted list of links.
4053+ The parser here just grabs targets from <a href="target">
4054+ and throws away "../".
4055+
4056+    Oracle has acknowledged this to be buggy and may fix it in the future
4057+    by instead returning a '\n' delimited plain text list. This function
4058+ will continue to work if that change is made."""
4059+ if not content.lower().startswith("<html>"):
4060+ return content.splitlines()
4061+ items = re.findall(
4062+ r'href="(?P<target>[^"]*)"', content, re.MULTILINE | re.IGNORECASE)
4063+ return [i for i in items if not i.startswith(".")]
4064+
4065+
4066+def read_metadata(endpoint_base=METADATA_ENDPOINT, sys_uuid=None,
4067+ version='2013-10-17'):
4068+ """Read metadata, return a dictionary.
4069+
4070+ Each path listed in the index will be represented in the dictionary.
4071+ If the path ends in .json, then the content will be decoded and
4072+ populated into the dictionary.
4073+
4074+ The system uuid (/sys/class/dmi/id/product_uuid) is also populated.
4075+ Example: given paths = ('user_data', 'meta_data.json')
4076+ This would return:
4077+ {version: {'user_data': b'blob', 'meta_data': json.loads(blob.decode())
4078+ 'system_uuid': '3b54f2e0-3ab2-458d-b770-af9926eee3b2'}}
4079+ """
4080+ endpoint = combine_url(endpoint_base, version) + "/"
4081+ if sys_uuid is None:
4082+ sys_uuid = _read_system_uuid()
4083+ if not sys_uuid:
4084+ raise sources.BrokenMetadata("Failed to read system uuid.")
4085+
4086+ try:
4087+ resp = readurl(endpoint)
4088+ if not resp.ok():
4089+ raise sources.BrokenMetadata(
4090+ "Bad response from %s: %s" % (endpoint, resp.code))
4091+ except UrlError as e:
4092+ raise sources.BrokenMetadata(
4093+ "Failed to read index at %s: %s" % (endpoint, e))
4094+
4095+ entries = _load_index(resp.contents.decode('utf-8'))
4096+ LOG.debug("index url %s contained: %s", endpoint, entries)
4097+
4098+ # meta_data.json is required.
4099+ mdj = 'meta_data.json'
4100+ if mdj not in entries:
4101+ raise sources.BrokenMetadata(
4102+ "Required field '%s' missing in index at %s" % (mdj, endpoint))
4103+
4104+ ret = {'system_uuid': sys_uuid}
4105+ for path in entries:
4106+ response = readurl(combine_url(endpoint, path))
4107+ if path.endswith(".json"):
4108+ ret[path.rpartition(".")[0]] = (
4109+ json.loads(response.contents.decode('utf-8')))
4110+ else:
4111+ ret[path] = response.contents
4112+
4113+ return {version: ret}
4114+
4115+
4116+# Used to match classes to dependencies
4117+datasources = [
4118+ (DataSourceOracle, (sources.DEP_FILESYSTEM,)),
4119+]
4120+
4121+
4122+# Return a list of data sources that match this set of dependencies
4123+def get_datasource_list(depends):
4124+ return sources.list_from_depends(depends, datasources)
4125+
4126+
4127+if __name__ == "__main__":
4128+ import argparse
4129+ import os
4130+
4131+ parser = argparse.ArgumentParser(description='Query Oracle Cloud Metadata')
4132+ parser.add_argument("--endpoint", metavar="URL",
4133+ help="The url of the metadata service.",
4134+ default=METADATA_ENDPOINT)
4135+ args = parser.parse_args()
4136+ sys_uuid = "uuid-not-available-not-root" if os.geteuid() != 0 else None
4137+
4138+ data = read_metadata(endpoint_base=args.endpoint, sys_uuid=sys_uuid)
4139+ data['is_platform_viable'] = _is_platform_viable()
4140+ print(util.json_dumps(data))
4141+
4142+# vi: ts=4 expandtab
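
A quick check of the two index formats _load_index() handles; the HTML body is
a shortened, hypothetical rendering of Oracle's listing:

    html = ('<html><body>'
            '<a href="../">../</a>'
            '<a href="meta_data.json">meta_data.json</a>'
            '<a href="user_data">user_data</a>'
            '</body></html>')
    assert _load_index(html) == ['meta_data.json', 'user_data']
    assert _load_index('meta_data.json\nuser_data') == ['meta_data.json',
                                                        'user_data']
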
4143diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py
4144index e2502b0..9dc4ab2 100644
4145--- a/cloudinit/sources/DataSourceScaleway.py
4146+++ b/cloudinit/sources/DataSourceScaleway.py
4147@@ -29,7 +29,9 @@ from cloudinit import log as logging
4148 from cloudinit import sources
4149 from cloudinit import url_helper
4150 from cloudinit import util
4151-
4152+from cloudinit import net
4153+from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
4154+from cloudinit.event import EventType
4155
4156 LOG = logging.getLogger(__name__)
4157
4158@@ -168,8 +170,8 @@ def query_data_api(api_type, api_address, retries, timeout):
4159
4160
4161 class DataSourceScaleway(sources.DataSource):
4162-
4163 dsname = "Scaleway"
4164+ update_events = {'network': [EventType.BOOT_NEW_INSTANCE, EventType.BOOT]}
4165
4166 def __init__(self, sys_cfg, distro, paths):
4167 super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths)
4168@@ -185,11 +187,10 @@ class DataSourceScaleway(sources.DataSource):
4169
4170 self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES))
4171 self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT))
4172+ self._fallback_interface = None
4173+ self._network_config = None
4174
4175- def _get_data(self):
4176- if not on_scaleway():
4177- return False
4178-
4179+ def _crawl_metadata(self):
4180 resp = url_helper.readurl(self.metadata_address,
4181 timeout=self.timeout,
4182 retries=self.retries)
4183@@ -203,9 +204,48 @@ class DataSourceScaleway(sources.DataSource):
4184 'vendor-data', self.vendordata_address,
4185 self.retries, self.timeout
4186 )
4187+
4188+ def _get_data(self):
4189+ if not on_scaleway():
4190+ return False
4191+
4192+ if self._fallback_interface is None:
4193+ self._fallback_interface = net.find_fallback_nic()
4194+ try:
4195+ with EphemeralDHCPv4(self._fallback_interface):
4196+ util.log_time(
4197+ logfunc=LOG.debug, msg='Crawl of metadata service',
4198+ func=self._crawl_metadata)
4199+ except (NoDHCPLeaseError) as e:
4200+ util.logexc(LOG, str(e))
4201+ return False
4202 return True
4203
4204 @property
4205+ def network_config(self):
4206+ """
4207+ Configure networking according to data received from the
4208+ metadata API.
4209+ """
4210+ if self._network_config:
4211+ return self._network_config
4212+
4213+ if self._fallback_interface is None:
4214+ self._fallback_interface = net.find_fallback_nic()
4215+
4216+ netcfg = {'type': 'physical', 'name': '%s' % self._fallback_interface}
4217+ subnets = [{'type': 'dhcp4'}]
4218+ if self.metadata['ipv6']:
4219+ subnets += [{'type': 'static',
4220+ 'address': '%s' % self.metadata['ipv6']['address'],
4221+ 'gateway': '%s' % self.metadata['ipv6']['gateway'],
4222+ 'netmask': '%s' % self.metadata['ipv6']['netmask'],
4223+ }]
4224+ netcfg['subnets'] = subnets
4225+ self._network_config = {'version': 1, 'config': [netcfg]}
4226+ return self._network_config
4227+
4228+ @property
4229 def launch_index(self):
4230 return None
4231
4232@@ -228,7 +268,7 @@ class DataSourceScaleway(sources.DataSource):
4233
4234
4235 datasources = [
4236- (DataSourceScaleway, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
4237+ (DataSourceScaleway, (sources.DEP_FILESYSTEM,)),
4238 ]
4239
4240
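To make the new network_config property concrete, this is roughly the version-1 config it builds when the fallback NIC is, say, 'ens2' and the metadata carries an ipv6 block (interface name and addresses are invented):

    # Sketch of DataSourceScaleway.network_config's return value.
    example_network_config = {
        'version': 1,
        'config': [{
            'type': 'physical',
            'name': 'ens2',
            'subnets': [
                {'type': 'dhcp4'},
                # appended only when self.metadata['ipv6'] is truthy
                {'type': 'static',
                 'address': '2001:db8::2',
                 'gateway': '2001:db8::1',
                 'netmask': '64'},
            ],
        }],
    }
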
4241diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
4242index f92e8b5..593ac91 100644
4243--- a/cloudinit/sources/DataSourceSmartOS.py
4244+++ b/cloudinit/sources/DataSourceSmartOS.py
4245@@ -564,7 +564,7 @@ class JoyentMetadataSerialClient(JoyentMetadataClient):
4246 continue
4247 LOG.warning('Unexpected response "%s" during flush', response)
4248 except JoyentMetadataTimeoutException:
4249- LOG.warning('Timeout while initializing metadata client. ' +
4250+ LOG.warning('Timeout while initializing metadata client. '
4251 'Is the host metadata service running?')
4252 LOG.debug('Got "invalid command". Flush complete.')
4253 self.fp.timeout = timeout
4254@@ -683,6 +683,18 @@ def jmc_client_factory(
4255 raise ValueError("Unknown value for smartos_type: %s" % smartos_type)
4256
4257
4258+def identify_file(content_f):
4259+ cmd = ["file", "--brief", "--mime-type", content_f]
4260+ f_type = None
4261+ try:
4262+ (f_type, _err) = util.subp(cmd)
4263+ LOG.debug("script %s mime type is %s", content_f, f_type)
4264+ except util.ProcessExecutionError as e:
4265+ util.logexc(
4266+ LOG, ("Failed to identify script type for %s" % content_f, e))
4267+ return None if f_type is None else f_type.strip()
4268+
4269+
4270 def write_boot_content(content, content_f, link=None, shebang=False,
4271 mode=0o400):
4272 """
4273@@ -715,18 +727,11 @@ def write_boot_content(content, content_f, link=None, shebang=False,
4274 util.write_file(content_f, content, mode=mode)
4275
4276 if shebang and not content.startswith("#!"):
4277- try:
4278- cmd = ["file", "--brief", "--mime-type", content_f]
4279- (f_type, _err) = util.subp(cmd)
4280- LOG.debug("script %s mime type is %s", content_f, f_type)
4281- if f_type.strip() == "text/plain":
4282- new_content = "\n".join(["#!/bin/bash", content])
4283- util.write_file(content_f, new_content, mode=mode)
4284- LOG.debug("added shebang to file %s", content_f)
4285-
4286- except Exception as e:
4287- util.logexc(LOG, ("Failed to identify script type for %s" %
4288- content_f, e))
4289+ f_type = identify_file(content_f)
4290+ if f_type == "text/plain":
4291+ util.write_file(
4292+ content_f, "\n".join(["#!/bin/bash", content]), mode=mode)
4293+ LOG.debug("added shebang to file %s", content_f)
4294
4295 if link:
4296 try:
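The identify_file() helper factored out above simply wraps file(1); a rough standalone equivalent using the standard library instead of cloud-init's util.subp would be:

    import subprocess

    def identify_file(content_f):
        """Return the mime type of content_f per file(1), or None on error."""
        try:
            out = subprocess.check_output(
                ["file", "--brief", "--mime-type", content_f])
        except (OSError, subprocess.CalledProcessError):
            return None
        return out.decode().strip()

A "text/plain" result is what triggers the shebang insertion in write_boot_content() above.
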
4297diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
4298index f424316..5ac9882 100644
4299--- a/cloudinit/sources/__init__.py
4300+++ b/cloudinit/sources/__init__.py
4301@@ -38,8 +38,17 @@ DEP_FILESYSTEM = "FILESYSTEM"
4302 DEP_NETWORK = "NETWORK"
4303 DS_PREFIX = 'DataSource'
4304
4305-# File in which instance meta-data, user-data and vendor-data is written
4306+EXPERIMENTAL_TEXT = (
4307+ "EXPERIMENTAL: The structure and format of content scoped under the 'ds'"
4308+ " key may change in subsequent releases of cloud-init.")
4309+
4310+
4311+# File in which publicly available instance meta-data is written
4312+# security-sensitive key values are redacted from this world-readable file
4313 INSTANCE_JSON_FILE = 'instance-data.json'
4314+# security-sensitive key values are present in this root-readable file
4315+INSTANCE_JSON_SENSITIVE_FILE = 'instance-data-sensitive.json'
4316+REDACT_SENSITIVE_VALUE = 'redacted for non-root user'
4317
4318 # Key which can be provide a cloud's official product name to cloud-init
4319 METADATA_CLOUD_NAME_KEY = 'cloud-name'
4320@@ -58,26 +67,55 @@ class InvalidMetaDataException(Exception):
4321 pass
4322
4323
4324-def process_base64_metadata(metadata, key_path=''):
4325- """Strip ci-b64 prefix and return metadata with base64-encoded-keys set."""
4326+def process_instance_metadata(metadata, key_path='', sensitive_keys=()):
4327+ """Process all instance metadata cleaning it up for persisting as json.
4328+
4329+ Strip ci-b64 prefix and catalog any 'base64_encoded_keys' as a list
4330+
4331+ @return Dict copy of processed metadata.
4332+ """
4333 md_copy = copy.deepcopy(metadata)
4334- md_copy['base64-encoded-keys'] = []
4335+ md_copy['base64_encoded_keys'] = []
4336+ md_copy['sensitive_keys'] = []
4337 for key, val in metadata.items():
4338 if key_path:
4339 sub_key_path = key_path + '/' + key
4340 else:
4341 sub_key_path = key
4342+ if key in sensitive_keys or sub_key_path in sensitive_keys:
4343+ md_copy['sensitive_keys'].append(sub_key_path)
4344 if isinstance(val, str) and val.startswith('ci-b64:'):
4345- md_copy['base64-encoded-keys'].append(sub_key_path)
4346+ md_copy['base64_encoded_keys'].append(sub_key_path)
4347 md_copy[key] = val.replace('ci-b64:', '')
4348 if isinstance(val, dict):
4349- return_val = process_base64_metadata(val, sub_key_path)
4350- md_copy['base64-encoded-keys'].extend(
4351- return_val.pop('base64-encoded-keys'))
4352+ return_val = process_instance_metadata(
4353+ val, sub_key_path, sensitive_keys)
4354+ md_copy['base64_encoded_keys'].extend(
4355+ return_val.pop('base64_encoded_keys'))
4356+ md_copy['sensitive_keys'].extend(
4357+ return_val.pop('sensitive_keys'))
4358 md_copy[key] = return_val
4359 return md_copy
4360
4361
4362+def redact_sensitive_keys(metadata, redact_value=REDACT_SENSITIVE_VALUE):
4363+ """Redact any sensitive keys from to provided metadata dictionary.
4364+
4365+ Replace the values of any keys listed in 'sensitive_keys' with redact_value.
4366+ """
4367+ if not metadata.get('sensitive_keys', []):
4368+ return metadata
4369+ md_copy = copy.deepcopy(metadata)
4370+ for key_path in metadata.get('sensitive_keys'):
4371+ path_parts = key_path.split('/')
4372+ obj = md_copy
4373+ for path in path_parts:
4374+ if isinstance(obj[path], dict) and path != path_parts[-1]:
4375+ obj = obj[path]
4376+ obj[path] = redact_value
4377+ return md_copy
4378+
4379+
4380 URLParams = namedtuple(
4381 'URLParms', ['max_wait_seconds', 'timeout_seconds', 'num_retries'])
4382
4383@@ -103,14 +141,14 @@ class DataSource(object):
4384 url_timeout = 10 # timeout for each metadata url read attempt
4385 url_retries = 5 # number of times to retry url upon 404
4386
4387- # The datasource defines a list of supported EventTypes during which
4388+ # The datasource defines a set of supported EventTypes during which
4389 # the datasource can react to changes in metadata and regenerate
4390 # network configuration on metadata changes.
4391 # A datasource which supports writing network config on each system boot
4392- # would set update_events = {'network': [EventType.BOOT]}
4393+ # would call update_events['network'].add(EventType.BOOT).
4394
4395 # Default: generate network config on new instance id (first boot).
4396- update_events = {'network': [EventType.BOOT_NEW_INSTANCE]}
4397+ update_events = {'network': set([EventType.BOOT_NEW_INSTANCE])}
4398
4399 # N-tuple listing default values for any metadata-related class
4400 # attributes cached on an instance by a process_data runs. These attribute
4401@@ -122,6 +160,10 @@ class DataSource(object):
4402
4403 _dirty_cache = False
4404
4405+ # N-tuple of keypaths or keynames to redact from instance-data.json for
4406+ # non-root users
4407+ sensitive_metadata_keys = ('security-credentials',)
4408+
4409 def __init__(self, sys_cfg, distro, paths, ud_proc=None):
4410 self.sys_cfg = sys_cfg
4411 self.distro = distro
4412@@ -147,12 +189,24 @@ class DataSource(object):
4413
4414 def _get_standardized_metadata(self):
4415 """Return a dictionary of standardized metadata keys."""
4416- return {'v1': {
4417- 'local-hostname': self.get_hostname(),
4418- 'instance-id': self.get_instance_id(),
4419- 'cloud-name': self.cloud_name,
4420- 'region': self.region,
4421- 'availability-zone': self.availability_zone}}
4422+ local_hostname = self.get_hostname()
4423+ instance_id = self.get_instance_id()
4424+ availability_zone = self.availability_zone
4425+ cloud_name = self.cloud_name
4426+ # When adding new standard keys prefer underscore-delimited instead
4427+ # of hyphen-delimited to support simple variable references in jinja
4428+ # templates.
4429+ return {
4430+ 'v1': {
4431+ 'availability-zone': availability_zone,
4432+ 'availability_zone': availability_zone,
4433+ 'cloud-name': cloud_name,
4434+ 'cloud_name': cloud_name,
4435+ 'instance-id': instance_id,
4436+ 'instance_id': instance_id,
4437+ 'local-hostname': local_hostname,
4438+ 'local_hostname': local_hostname,
4439+ 'region': self.region}}
4440
4441 def clear_cached_attrs(self, attr_defaults=()):
4442 """Reset any cached metadata attributes to datasource defaults.
4443@@ -180,15 +234,22 @@ class DataSource(object):
4444 """
4445 self._dirty_cache = True
4446 return_value = self._get_data()
4447- json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE)
4448 if not return_value:
4449 return return_value
4450+ self.persist_instance_data()
4451+ return return_value
4452+
4453+ def persist_instance_data(self):
4454+ """Process and write INSTANCE_JSON_FILE with all instance metadata.
4455
4456+ Replace any hyphens with underscores in key names for use in template
4457+ processing.
4458+
4459+ @return True on successful write, False otherwise.
4460+ """
4461 instance_data = {
4462- 'ds': {
4463- 'meta-data': self.metadata,
4464- 'user-data': self.get_userdata_raw(),
4465- 'vendor-data': self.get_vendordata_raw()}}
4466+ 'ds': {'_doc': EXPERIMENTAL_TEXT,
4467+ 'meta_data': self.metadata}}
4468 if hasattr(self, 'network_json'):
4469 network_json = getattr(self, 'network_json')
4470 if network_json != UNSET:
4471@@ -202,16 +263,23 @@ class DataSource(object):
4472 try:
4473 # Process content base64encoding unserializable values
4474 content = util.json_dumps(instance_data)
4475- # Strip base64: prefix and return base64-encoded-keys
4476- processed_data = process_base64_metadata(json.loads(content))
4477+ # Strip base64: prefix and set base64_encoded_keys list.
4478+ processed_data = process_instance_metadata(
4479+ json.loads(content),
4480+ sensitive_keys=self.sensitive_metadata_keys)
4481 except TypeError as e:
4482 LOG.warning('Error persisting instance-data.json: %s', str(e))
4483- return return_value
4484+ return False
4485 except UnicodeDecodeError as e:
4486 LOG.warning('Error persisting instance-data.json: %s', str(e))
4487- return return_value
4488- write_json(json_file, processed_data, mode=0o600)
4489- return return_value
4490+ return False
4491+ json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE)
4492+ write_json(json_file, processed_data) # World readable
4493+ json_sensitive_file = os.path.join(self.paths.run_dir,
4494+ INSTANCE_JSON_SENSITIVE_FILE)
4495+ write_json(json_sensitive_file,
4496+ redact_sensitive_keys(processed_data), mode=0o600)
4497+ return True
4498
4499 def _get_data(self):
4500 """Walk metadata sources, process crawled data and save attributes."""
4501@@ -475,8 +543,8 @@ class DataSource(object):
4502 for update_scope, update_events in self.update_events.items():
4503 if event in update_events:
4504 if not supported_events.get(update_scope):
4505- supported_events[update_scope] = []
4506- supported_events[update_scope].append(event)
4507+ supported_events[update_scope] = set()
4508+ supported_events[update_scope].add(event)
4509 for scope, matched_events in supported_events.items():
4510 LOG.debug(
4511 "Update datasource metadata and %s config due to events: %s",
4512@@ -490,6 +558,8 @@ class DataSource(object):
4513 result = self.get_data()
4514 if result:
4515 return True
4516+ LOG.debug("Datasource %s not updated for events: %s", self,
4517+ ', '.join(source_event_types))
4518 return False
4519
4520 def check_instance_id(self, sys_cfg):
4521@@ -669,6 +739,10 @@ def convert_vendordata(data, recurse=True):
4522 raise ValueError("Unknown data type for vendordata: %s" % type(data))
4523
4524
4525+class BrokenMetadata(IOError):
4526+ pass
4527+
4528+
4529 # 'depends' is a list of dependencies (DEP_FILESYSTEM)
4530 # ds_list is a list of 2 item lists
4531 # ds_list = [
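A small worked example of the redaction flow added above, with invented values: process_instance_metadata() catalogs key paths matching sensitive_metadata_keys, then redact_sensitive_keys() blanks those paths in the copy written to the world-readable instance-data.json:

    md = {'ds': {'meta_data': {
        'some': {'security-credentials': {'cred1': 'sekret'}}}}}
    # process_instance_metadata(md, sensitive_keys=('security-credentials',))
    # yields the catalog alongside an otherwise unchanged copy:
    processed = {
        'base64_encoded_keys': [],
        'sensitive_keys': ['ds/meta_data/some/security-credentials'],
        'ds': {'meta_data': {
            'some': {'security-credentials': {'cred1': 'sekret'}}}}}
    # redact_sensitive_keys(processed) then replaces the flagged subtree:
    # ... 'security-credentials': 'redacted for non-root user' ...
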
4532diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
4533index a4cf066..9c29cea 100644
4534--- a/cloudinit/sources/helpers/openstack.py
4535+++ b/cloudinit/sources/helpers/openstack.py
4536@@ -21,6 +21,8 @@ from cloudinit import sources
4537 from cloudinit import url_helper
4538 from cloudinit import util
4539
4540+from cloudinit.sources import BrokenMetadata
4541+
4542 # See https://docs.openstack.org/user-guide/cli-config-drive.html
4543
4544 LOG = logging.getLogger(__name__)
4545@@ -36,21 +38,38 @@ KEY_COPIES = (
4546 ('local-hostname', 'hostname', False),
4547 ('instance-id', 'uuid', True),
4548 )
4549+
4550+# Versions and names taken from nova source nova/api/metadata/base.py
4551 OS_LATEST = 'latest'
4552 OS_FOLSOM = '2012-08-10'
4553 OS_GRIZZLY = '2013-04-04'
4554 OS_HAVANA = '2013-10-17'
4555 OS_LIBERTY = '2015-10-15'
4556+# NEWTON_ONE adds 'devices' to md (sriov-pf-passthrough-neutron-port-vlan)
4557+OS_NEWTON_ONE = '2016-06-30'
4558+# NEWTON_TWO adds vendor_data2.json (vendordata-reboot)
4559+OS_NEWTON_TWO = '2016-10-06'
4560+# OS_OCATA adds 'vif' field to devices (sriov-pf-passthrough-neutron-port-vlan)
4561+OS_OCATA = '2017-02-22'
4562+# OS_ROCKY adds a vf_trusted field to devices (sriov-trusted-vfs)
4563+OS_ROCKY = '2018-08-27'
4564+
4565+
4566 # keep this in chronological order. new supported versions go at the end.
4567 OS_VERSIONS = (
4568 OS_FOLSOM,
4569 OS_GRIZZLY,
4570 OS_HAVANA,
4571 OS_LIBERTY,
4572+ OS_NEWTON_ONE,
4573+ OS_NEWTON_TWO,
4574+ OS_OCATA,
4575+ OS_ROCKY,
4576 )
4577
4578 PHYSICAL_TYPES = (
4579 None,
4580+ 'bgpovs', # not present in OpenStack upstream but used on OVH cloud.
4581 'bridge',
4582 'dvs',
4583 'ethernet',
4584@@ -68,10 +87,6 @@ class NonReadable(IOError):
4585 pass
4586
4587
4588-class BrokenMetadata(IOError):
4589- pass
4590-
4591-
4592 class SourceMixin(object):
4593 def _ec2_name_to_device(self, name):
4594 if not self.ec2_metadata:
4595@@ -441,7 +456,7 @@ class MetadataReader(BaseReader):
4596 return self._versions
4597 found = []
4598 version_path = self._path_join(self.base_path, "openstack")
4599- content = self._path_read(version_path)
4600+ content = self._path_read(version_path, decode=True)
4601 for line in content.splitlines():
4602 line = line.strip()
4603 if not line:
4604@@ -589,6 +604,8 @@ def convert_net_json(network_json=None, known_macs=None):
4605 cfg.update({'type': 'physical', 'mac_address': link_mac_addr})
4606 elif link['type'] in ['bond']:
4607 params = {}
4608+ if link_mac_addr:
4609+ params['mac_address'] = link_mac_addr
4610 for k, v in link.items():
4611 if k == 'bond_links':
4612 continue
4613@@ -658,6 +675,17 @@ def convert_net_json(network_json=None, known_macs=None):
4614 else:
4615 cfg[key] = fmt % link_id_info[target]['name']
4616
4617+ # Infiniband interfaces may be referenced in network_data.json by a 6 byte
4618+ # Ethernet MAC-style address, and we use that address to look up the
4619+ # interface name above. Now ensure that the hardware address is set to the
4620+ # full 20 byte address.
4621+ ib_known_hwaddrs = net.get_ib_hwaddrs_by_interface()
4622+ if ib_known_hwaddrs:
4623+ for cfg in config:
4624+ if cfg['name'] in ib_known_hwaddrs:
4625+ cfg['mac_address'] = ib_known_hwaddrs[cfg['name']]
4626+ cfg['type'] = 'infiniband'
4627+
4628 for service in services:
4629 cfg = service
4630 cfg.update({'type': 'nameserver'})
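The Infiniband fixup at the end of convert_net_json() reads as the following self-contained sketch (mapping and addresses invented; in the real code net.get_ib_hwaddrs_by_interface() supplies the mapping):

    # Interfaces were matched above by their 6-byte Ethernet-style MAC;
    # swap in the full 20-byte Infiniband hardware address and retype them.
    ib_known_hwaddrs = {
        'ib0': 'a0:00:02:20:fe:80:00:00:00:00:'
               '00:00:00:11:22:33:44:55:66:77'}
    config = [{'name': 'ib0', 'type': 'physical',
               'mac_address': '00:11:22:33:44:55'}]
    for cfg in config:
        if cfg['name'] in ib_known_hwaddrs:
            cfg['mac_address'] = ib_known_hwaddrs[cfg['name']]
            cfg['type'] = 'infiniband'
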
4631diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
4632index 3ef8c62..e1890e2 100644
4633--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
4634+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
4635@@ -164,7 +164,7 @@ class NicConfigurator(object):
4636 return ([subnet], route_list)
4637
4638 # Add routes if there is no primary nic
4639- if not self._primaryNic:
4640+ if not self._primaryNic and v4.gateways:
4641 route_list.extend(self.gen_ipv4_route(nic,
4642 v4.gateways,
4643 v4.netmask))
4644diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py
4645index dcd221b..8082019 100644
4646--- a/cloudinit/sources/tests/test_init.py
4647+++ b/cloudinit/sources/tests/test_init.py
4648@@ -1,5 +1,6 @@
4649 # This file is part of cloud-init. See LICENSE file for license information.
4650
4651+import copy
4652 import inspect
4653 import os
4654 import six
4655@@ -9,7 +10,8 @@ from cloudinit.event import EventType
4656 from cloudinit.helpers import Paths
4657 from cloudinit import importer
4658 from cloudinit.sources import (
4659- INSTANCE_JSON_FILE, DataSource, UNSET)
4660+ EXPERIMENTAL_TEXT, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE,
4661+ REDACT_SENSITIVE_VALUE, UNSET, DataSource, redact_sensitive_keys)
4662 from cloudinit.tests.helpers import CiTestCase, skipIf, mock
4663 from cloudinit.user_data import UserDataProcessor
4664 from cloudinit import util
4665@@ -20,24 +22,30 @@ class DataSourceTestSubclassNet(DataSource):
4666 dsname = 'MyTestSubclass'
4667 url_max_wait = 55
4668
4669- def __init__(self, sys_cfg, distro, paths, custom_userdata=None):
4670+ def __init__(self, sys_cfg, distro, paths, custom_metadata=None,
4671+ custom_userdata=None, get_data_retval=True):
4672 super(DataSourceTestSubclassNet, self).__init__(
4673 sys_cfg, distro, paths)
4674 self._custom_userdata = custom_userdata
4675+ self._custom_metadata = custom_metadata
4676+ self._get_data_retval = get_data_retval
4677
4678 def _get_cloud_name(self):
4679 return 'SubclassCloudName'
4680
4681 def _get_data(self):
4682- self.metadata = {'availability_zone': 'myaz',
4683- 'local-hostname': 'test-subclass-hostname',
4684- 'region': 'myregion'}
4685+ if self._custom_metadata:
4686+ self.metadata = self._custom_metadata
4687+ else:
4688+ self.metadata = {'availability_zone': 'myaz',
4689+ 'local-hostname': 'test-subclass-hostname',
4690+ 'region': 'myregion'}
4691 if self._custom_userdata:
4692 self.userdata_raw = self._custom_userdata
4693 else:
4694 self.userdata_raw = 'userdata_raw'
4695 self.vendordata_raw = 'vendordata_raw'
4696- return True
4697+ return self._get_data_retval
4698
4699
4700 class InvalidDataSourceTestSubclassNet(DataSource):
4701@@ -264,8 +272,19 @@ class TestDataSource(CiTestCase):
4702 self.assertEqual('fqdnhostname.domain.com',
4703 datasource.get_hostname(fqdn=True))
4704
4705- def test_get_data_write_json_instance_data(self):
4706- """get_data writes INSTANCE_JSON_FILE to run_dir as readonly root."""
4707+ def test_get_data_does_not_write_instance_data_on_failure(self):
4708+ """get_data does not write INSTANCE_JSON_FILE on get_data False."""
4709+ tmp = self.tmp_dir()
4710+ datasource = DataSourceTestSubclassNet(
4711+ self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
4712+ get_data_retval=False)
4713+ self.assertFalse(datasource.get_data())
4714+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
4715+ self.assertFalse(
4716+ os.path.exists(json_file), 'Found unexpected file %s' % json_file)
4717+
4718+ def test_get_data_writes_json_instance_data_on_success(self):
4719+ """get_data writes INSTANCE_JSON_FILE to run_dir as world readable."""
4720 tmp = self.tmp_dir()
4721 datasource = DataSourceTestSubclassNet(
4722 self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
4723@@ -273,40 +292,126 @@ class TestDataSource(CiTestCase):
4724 json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
4725 content = util.load_file(json_file)
4726 expected = {
4727- 'base64-encoded-keys': [],
4728+ 'base64_encoded_keys': [],
4729+ 'sensitive_keys': [],
4730 'v1': {
4731 'availability-zone': 'myaz',
4732+ 'availability_zone': 'myaz',
4733 'cloud-name': 'subclasscloudname',
4734+ 'cloud_name': 'subclasscloudname',
4735 'instance-id': 'iid-datasource',
4736+ 'instance_id': 'iid-datasource',
4737 'local-hostname': 'test-subclass-hostname',
4738+ 'local_hostname': 'test-subclass-hostname',
4739 'region': 'myregion'},
4740 'ds': {
4741- 'meta-data': {'availability_zone': 'myaz',
4742+ '_doc': EXPERIMENTAL_TEXT,
4743+ 'meta_data': {'availability_zone': 'myaz',
4744 'local-hostname': 'test-subclass-hostname',
4745- 'region': 'myregion'},
4746- 'user-data': 'userdata_raw',
4747- 'vendor-data': 'vendordata_raw'}}
4748+ 'region': 'myregion'}}}
4749 self.assertEqual(expected, util.load_json(content))
4750 file_stat = os.stat(json_file)
4751+ self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode))
4752+ self.assertEqual(expected, util.load_json(content))
4753+
4754+ def test_get_data_writes_json_instance_data_sensitive(self):
4755+ """get_data writes INSTANCE_JSON_SENSITIVE_FILE as readonly root."""
4756+ tmp = self.tmp_dir()
4757+ datasource = DataSourceTestSubclassNet(
4758+ self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
4759+ custom_metadata={
4760+ 'availability_zone': 'myaz',
4761+ 'local-hostname': 'test-subclass-hostname',
4762+ 'region': 'myregion',
4763+ 'some': {'security-credentials': {
4764+ 'cred1': 'sekret', 'cred2': 'othersekret'}}})
4765+ self.assertEqual(
4766+ ('security-credentials',), datasource.sensitive_metadata_keys)
4767+ datasource.get_data()
4768+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
4769+ sensitive_json_file = self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, tmp)
4770+ redacted = util.load_json(util.load_file(json_file))
4771+ self.assertEqual(
4772+ {'cred1': 'sekret', 'cred2': 'othersekret'},
4773+ redacted['ds']['meta_data']['some']['security-credentials'])
4774+ content = util.load_file(sensitive_json_file)
4775+ expected = {
4776+ 'base64_encoded_keys': [],
4777+ 'sensitive_keys': ['ds/meta_data/some/security-credentials'],
4778+ 'v1': {
4779+ 'availability-zone': 'myaz',
4780+ 'availability_zone': 'myaz',
4781+ 'cloud-name': 'subclasscloudname',
4782+ 'cloud_name': 'subclasscloudname',
4783+ 'instance-id': 'iid-datasource',
4784+ 'instance_id': 'iid-datasource',
4785+ 'local-hostname': 'test-subclass-hostname',
4786+ 'local_hostname': 'test-subclass-hostname',
4787+ 'region': 'myregion'},
4788+ 'ds': {
4789+ '_doc': EXPERIMENTAL_TEXT,
4790+ 'meta_data': {
4791+ 'availability_zone': 'myaz',
4792+ 'local-hostname': 'test-subclass-hostname',
4793+ 'region': 'myregion',
4794+ 'some': {'security-credentials': REDACT_SENSITIVE_VALUE}}}
4795+ }
4796+ self.maxDiff = None
4797+ self.assertEqual(expected, util.load_json(content))
4798+ file_stat = os.stat(sensitive_json_file)
4799 self.assertEqual(0o600, stat.S_IMODE(file_stat.st_mode))
4800+ self.assertEqual(expected, util.load_json(content))
4801
4802 def test_get_data_handles_redacted_unserializable_content(self):
4803 """get_data warns unserializable content in INSTANCE_JSON_FILE."""
4804 tmp = self.tmp_dir()
4805 datasource = DataSourceTestSubclassNet(
4806 self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
4807- custom_userdata={'key1': 'val1', 'key2': {'key2.1': self.paths}})
4808- self.assertTrue(datasource.get_data())
4809+ custom_metadata={'key1': 'val1', 'key2': {'key2.1': self.paths}})
4810+ datasource.get_data()
4811 json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
4812 content = util.load_file(json_file)
4813- expected_userdata = {
4814+ expected_metadata = {
4815 'key1': 'val1',
4816 'key2': {
4817 'key2.1': "Warning: redacted unserializable type <class"
4818 " 'cloudinit.helpers.Paths'>"}}
4819 instance_json = util.load_json(content)
4820 self.assertEqual(
4821- expected_userdata, instance_json['ds']['user-data'])
4822+ expected_metadata, instance_json['ds']['meta_data'])
4823+
4824+ def test_persist_instance_data_writes_ec2_metadata_when_set(self):
4825+ """When ec2_metadata class attribute is set, persist to json."""
4826+ tmp = self.tmp_dir()
4827+ datasource = DataSourceTestSubclassNet(
4828+ self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
4829+ datasource.ec2_metadata = UNSET
4830+ datasource.get_data()
4831+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
4832+ instance_data = util.load_json(util.load_file(json_file))
4833+ self.assertNotIn('ec2_metadata', instance_data['ds'])
4834+ datasource.ec2_metadata = {'ec2stuff': 'is good'}
4835+ datasource.persist_instance_data()
4836+ instance_data = util.load_json(util.load_file(json_file))
4837+ self.assertEqual(
4838+ {'ec2stuff': 'is good'},
4839+ instance_data['ds']['ec2_metadata'])
4840+
4841+ def test_persist_instance_data_writes_network_json_when_set(self):
4842+ """When network_data.json class attribute is set, persist to json."""
4843+ tmp = self.tmp_dir()
4844+ datasource = DataSourceTestSubclassNet(
4845+ self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
4846+ datasource.get_data()
4847+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
4848+ instance_data = util.load_json(util.load_file(json_file))
4849+ self.assertNotIn('network_json', instance_data['ds'])
4850+ datasource.network_json = {'network_json': 'is good'}
4851+ datasource.persist_instance_data()
4852+ instance_data = util.load_json(util.load_file(json_file))
4853+ self.assertEqual(
4854+ {'network_json': 'is good'},
4855+ instance_data['ds']['network_json'])
4856
4857 @skipIf(not six.PY3, "json serialization on <= py2.7 handles bytes")
4858 def test_get_data_base64encodes_unserializable_bytes(self):
4859@@ -314,17 +419,17 @@ class TestDataSource(CiTestCase):
4860 tmp = self.tmp_dir()
4861 datasource = DataSourceTestSubclassNet(
4862 self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
4863- custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}})
4864+ custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}})
4865 self.assertTrue(datasource.get_data())
4866 json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
4867 content = util.load_file(json_file)
4868 instance_json = util.load_json(content)
4869- self.assertEqual(
4870- ['ds/user-data/key2/key2.1'],
4871- instance_json['base64-encoded-keys'])
4872+ self.assertItemsEqual(
4873+ ['ds/meta_data/key2/key2.1'],
4874+ instance_json['base64_encoded_keys'])
4875 self.assertEqual(
4876 {'key1': 'val1', 'key2': {'key2.1': 'EjM='}},
4877- instance_json['ds']['user-data'])
4878+ instance_json['ds']['meta_data'])
4879
4880 @skipIf(not six.PY2, "json serialization on <= py2.7 handles bytes")
4881 def test_get_data_handles_bytes_values(self):
4882@@ -332,15 +437,15 @@ class TestDataSource(CiTestCase):
4883 tmp = self.tmp_dir()
4884 datasource = DataSourceTestSubclassNet(
4885 self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
4886- custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}})
4887+ custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}})
4888 self.assertTrue(datasource.get_data())
4889 json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
4890 content = util.load_file(json_file)
4891 instance_json = util.load_json(content)
4892- self.assertEqual([], instance_json['base64-encoded-keys'])
4893+ self.assertEqual([], instance_json['base64_encoded_keys'])
4894 self.assertEqual(
4895 {'key1': 'val1', 'key2': {'key2.1': '\x123'}},
4896- instance_json['ds']['user-data'])
4897+ instance_json['ds']['meta_data'])
4898
4899 @skipIf(not six.PY2, "Only python2 hits UnicodeDecodeErrors on non-utf8")
4900 def test_non_utf8_encoding_logs_warning(self):
4901@@ -348,7 +453,7 @@ class TestDataSource(CiTestCase):
4902 tmp = self.tmp_dir()
4903 datasource = DataSourceTestSubclassNet(
4904 self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
4905- custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'ab\xaadef'}})
4906+ custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'ab\xaadef'}})
4907 self.assertTrue(datasource.get_data())
4908 json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
4909 self.assertFalse(os.path.exists(json_file))
4910@@ -429,8 +534,9 @@ class TestDataSource(CiTestCase):
4911
4912 def test_update_metadata_only_acts_on_supported_update_events(self):
4913 """update_metadata won't get_data on unsupported update events."""
4914+ self.datasource.update_events['network'].discard(EventType.BOOT)
4915 self.assertEqual(
4916- {'network': [EventType.BOOT_NEW_INSTANCE]},
4917+ {'network': set([EventType.BOOT_NEW_INSTANCE])},
4918 self.datasource.update_events)
4919
4920 def fake_get_data():
4921@@ -461,4 +567,36 @@ class TestDataSource(CiTestCase):
4922 self.logs.getvalue())
4923
4924
4925+class TestRedactSensitiveData(CiTestCase):
4926+
4927+ def test_redact_sensitive_data_noop_when_no_sensitive_keys_present(self):
4928+ """When sensitive_keys is absent or empty from metadata do nothing."""
4929+ md = {'my': 'data'}
4930+ self.assertEqual(
4931+ md, redact_sensitive_keys(md, redact_value='redacted'))
4932+ md['sensitive_keys'] = []
4933+ self.assertEqual(
4934+ md, redact_sensitive_keys(md, redact_value='redacted'))
4935+
4936+ def test_redact_sensitive_data_redacts_exact_match_name(self):
4937+ """Only exact matched sensitive_keys are redacted from metadata."""
4938+ md = {'sensitive_keys': ['md/secure'],
4939+ 'md': {'secure': 's3kr1t', 'insecure': 'publik'}}
4940+ secure_md = copy.deepcopy(md)
4941+ secure_md['md']['secure'] = 'redacted'
4942+ self.assertEqual(
4943+ secure_md,
4944+ redact_sensitive_keys(md, redact_value='redacted'))
4945+
4946+ def test_redact_sensitive_data_redacts_with_default_string(self):
4947+ """When redact_value is absent, REDACT_SENSITIVE_VALUE is used."""
4948+ md = {'sensitive_keys': ['md/secure'],
4949+ 'md': {'secure': 's3kr1t', 'insecure': 'publik'}}
4950+ secure_md = copy.deepcopy(md)
4951+ secure_md['md']['secure'] = 'redacted for non-root user'
4952+ self.assertEqual(
4953+ secure_md,
4954+ redact_sensitive_keys(md))
4955+
4956+
4957 # vi: ts=4 expandtab
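The update_events assertions above reflect the list-to-set change in cloudinit/sources/__init__.py; under the new semantics a datasource opting into per-boot network regeneration would look roughly like this (class name invented):

    from cloudinit.event import EventType
    from cloudinit.sources import DataSource

    class ExampleDataSource(DataSource):
        dsname = 'Example'
        # Sets keep event registration idempotent and make membership
        # checks in update_metadata() cheap, unlike the old lists.
        update_events = {'network': set(
            [EventType.BOOT_NEW_INSTANCE, EventType.BOOT])}
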
4958diff --git a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py
4959new file mode 100644
4960index 0000000..7599126
4961--- /dev/null
4962+++ b/cloudinit/sources/tests/test_oracle.py
4963@@ -0,0 +1,331 @@
4964+# This file is part of cloud-init. See LICENSE file for license information.
4965+
4966+from cloudinit.sources import DataSourceOracle as oracle
4967+from cloudinit.sources import BrokenMetadata
4968+from cloudinit import helpers
4969+
4970+from cloudinit.tests import helpers as test_helpers
4971+
4972+from textwrap import dedent
4973+import argparse
4974+import httpretty
4975+import json
4976+import mock
4977+import os
4978+import six
4979+import uuid
4980+
4981+DS_PATH = "cloudinit.sources.DataSourceOracle"
4982+MD_VER = "2013-10-17"
4983+
4984+
4985+class TestDataSourceOracle(test_helpers.CiTestCase):
4986+ """Test datasource DataSourceOracle."""
4987+
4988+ ds_class = oracle.DataSourceOracle
4989+
4990+ my_uuid = str(uuid.uuid4())
4991+ my_md = {"uuid": "ocid1.instance.oc1.phx.abyhqlj",
4992+ "name": "ci-vm1", "availability_zone": "phx-ad-3",
4993+ "hostname": "ci-vm1hostname",
4994+ "launch_index": 0, "files": [],
4995+ "public_keys": {"0": "ssh-rsa AAAAB3N...== user@host"},
4996+ "meta": {}}
4997+
4998+ def _patch_instance(self, inst, patches):
4999+ """Patch an instance of a class 'inst'.
5000+ for each name, kwargs in patches:
The diff has been truncated for viewing.
