Merge ~chad.smith/cloud-init:ubuntu/devel into cloud-init:ubuntu/devel
Status: Merged
Merged at revision: 5e4916bdc4f7be6bcbf5650e93ea8835cc495b95
Proposed branch: ~chad.smith/cloud-init:ubuntu/devel
Merge into: cloud-init:ubuntu/devel
Diff against target: 1433 lines (+661/-139), 21 files modified
  cloudinit/cmd/devel/render.py (+24/-11)
  cloudinit/cmd/devel/tests/test_render.py (+44/-1)
  cloudinit/cmd/query.py (+24/-12)
  cloudinit/cmd/tests/test_query.py (+71/-5)
  cloudinit/config/cc_disk_setup.py (+1/-1)
  cloudinit/handlers/jinja_template.py (+9/-1)
  cloudinit/net/dhcp.py (+32/-10)
  cloudinit/sources/DataSourceAzure.py (+46/-33)
  cloudinit/tests/test_url_helper.py (+24/-1)
  cloudinit/tests/test_util.py (+66/-17)
  cloudinit/url_helper.py (+25/-6)
  cloudinit/util.py (+4/-3)
  debian/changelog (+28/-0)
  doc/rtd/topics/datasources/azure.rst (+46/-0)
  packages/redhat/cloud-init.spec.in (+1/-0)
  packages/suse/cloud-init.spec.in (+1/-0)
  systemd/cloud-init.service.tmpl (+1/-2)
  tests/unittests/test_builtin_handlers.py (+25/-0)
  tests/unittests/test_datasource/test_azure.py (+148/-19)
  tests/unittests/test_datasource/test_ec2.py (+24/-16)
  udev/66-azure-ephemeral.rules (+17/-1)
Related bugs:
Reviewer | Review Type | Date Requested | Status
---|---|---|---
Server Team CI bot | continuous-integration | | Approve
Ryan Harper | | | Approve
Commit message
New upstream snapshot for upload into Disco. Note the changed series represented in debian/changelog.
Description of the change

Server Team CI bot (server-team-bot) wrote:

Ryan Harper (raharper) wrote:
I get the same as you, but something is causing a typo in the LP: of the first changelog entry:
+cloud-init (18.4-22-
+
+ * New upstream snapshot.
+ - azure: retry imds polling on requests.Timeout (LP: LP:1800223)
I think we can fix up in the changelog commit even if the original commit has a busted comment.

Chad Smith (chad.smith) wrote:
+1, I had forgotten to --force on my push which did just that. fixed.
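For reference, the corrected entry in debian/changelog (visible in the preview diff below) uses a single LP: # prefix, the form Launchpad recognizes for linking the bug to the upload:

 * New upstream snapshot.
   - azure: retry imds polling on requests.Timeout (LP: #1800223)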

Server Team CI bot (server-team-bot) wrote:
PASSED: Continuous integration, rev:5e4916bdc4f
https:/
Executed test runs:
SUCCESS: Checkout
SUCCESS: Unit & Style Tests
SUCCESS: Ubuntu LTS: Build
SUCCESS: Ubuntu LTS: Integration
IN_PROGRESS: Declarative: Post Actions
Click here to trigger a rebuild:
https:/
Preview Diff
1 | diff --git a/cloudinit/cmd/devel/render.py b/cloudinit/cmd/devel/render.py |
2 | index 2ba6b68..1bc2240 100755 |
3 | --- a/cloudinit/cmd/devel/render.py |
4 | +++ b/cloudinit/cmd/devel/render.py |
5 | @@ -8,11 +8,10 @@ import sys |
6 | |
7 | from cloudinit.handlers.jinja_template import render_jinja_payload_from_file |
8 | from cloudinit import log |
9 | -from cloudinit.sources import INSTANCE_JSON_FILE |
10 | +from cloudinit.sources import INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE |
11 | from . import addLogHandlerCLI, read_cfg_paths |
12 | |
13 | NAME = 'render' |
14 | -DEFAULT_INSTANCE_DATA = '/run/cloud-init/instance-data.json' |
15 | |
16 | LOG = log.getLogger(NAME) |
17 | |
18 | @@ -47,12 +46,22 @@ def handle_args(name, args): |
19 | @return 0 on success, 1 on failure. |
20 | """ |
21 | addLogHandlerCLI(LOG, log.DEBUG if args.debug else log.WARNING) |
22 | - if not args.instance_data: |
23 | - paths = read_cfg_paths() |
24 | - instance_data_fn = os.path.join( |
25 | - paths.run_dir, INSTANCE_JSON_FILE) |
26 | - else: |
27 | + if args.instance_data: |
28 | instance_data_fn = args.instance_data |
29 | + else: |
30 | + paths = read_cfg_paths() |
31 | + uid = os.getuid() |
32 | + redacted_data_fn = os.path.join(paths.run_dir, INSTANCE_JSON_FILE) |
33 | + if uid == 0: |
34 | + instance_data_fn = os.path.join( |
35 | + paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE) |
36 | + if not os.path.exists(instance_data_fn): |
37 | + LOG.warning( |
38 | + 'Missing root-readable %s. Using redacted %s instead.', |
39 | + instance_data_fn, redacted_data_fn) |
40 | + instance_data_fn = redacted_data_fn |
41 | + else: |
42 | + instance_data_fn = redacted_data_fn |
43 | if not os.path.exists(instance_data_fn): |
44 | LOG.error('Missing instance-data.json file: %s', instance_data_fn) |
45 | return 1 |
46 | @@ -62,10 +71,14 @@ def handle_args(name, args): |
47 | except IOError: |
48 | LOG.error('Missing user-data file: %s', args.user_data) |
49 | return 1 |
50 | - rendered_payload = render_jinja_payload_from_file( |
51 | - payload=user_data, payload_fn=args.user_data, |
52 | - instance_data_file=instance_data_fn, |
53 | - debug=True if args.debug else False) |
54 | + try: |
55 | + rendered_payload = render_jinja_payload_from_file( |
56 | + payload=user_data, payload_fn=args.user_data, |
57 | + instance_data_file=instance_data_fn, |
58 | + debug=True if args.debug else False) |
59 | + except RuntimeError as e: |
60 | + LOG.error('Cannot render from instance data: %s', str(e)) |
61 | + return 1 |
62 | if not rendered_payload: |
63 | LOG.error('Unable to render user-data file: %s', args.user_data) |
64 | return 1 |
65 | diff --git a/cloudinit/cmd/devel/tests/test_render.py b/cloudinit/cmd/devel/tests/test_render.py |
66 | index fc5d2c0..988bba0 100644 |
67 | --- a/cloudinit/cmd/devel/tests/test_render.py |
68 | +++ b/cloudinit/cmd/devel/tests/test_render.py |
69 | @@ -6,7 +6,7 @@ import os |
70 | from collections import namedtuple |
71 | from cloudinit.cmd.devel import render |
72 | from cloudinit.helpers import Paths |
73 | -from cloudinit.sources import INSTANCE_JSON_FILE |
74 | +from cloudinit.sources import INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE |
75 | from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJinja |
76 | from cloudinit.util import ensure_dir, write_file |
77 | |
78 | @@ -63,6 +63,49 @@ class TestRender(CiTestCase): |
79 | 'Missing instance-data.json file: %s' % json_file, |
80 | self.logs.getvalue()) |
81 | |
82 | + def test_handle_args_root_fallback_from_sensitive_instance_data(self): |
83 | + """When root user defaults to sensitive.json.""" |
84 | + user_data = self.tmp_path('user-data', dir=self.tmp) |
85 | + run_dir = self.tmp_path('run_dir', dir=self.tmp) |
86 | + ensure_dir(run_dir) |
87 | + paths = Paths({'run_dir': run_dir}) |
88 | + self.add_patch('cloudinit.cmd.devel.render.read_cfg_paths', 'm_paths') |
89 | + self.m_paths.return_value = paths |
90 | + args = self.args( |
91 | + user_data=user_data, instance_data=None, debug=False) |
92 | + with mock.patch('sys.stderr', new_callable=StringIO): |
93 | + with mock.patch('os.getuid') as m_getuid: |
94 | + m_getuid.return_value = 0 |
95 | + self.assertEqual(1, render.handle_args('anyname', args)) |
96 | + json_file = os.path.join(run_dir, INSTANCE_JSON_FILE) |
97 | + json_sensitive = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE) |
98 | + self.assertIn( |
99 | + 'WARNING: Missing root-readable %s. Using redacted %s' % ( |
100 | + json_sensitive, json_file), self.logs.getvalue()) |
101 | + self.assertIn( |
102 | + 'ERROR: Missing instance-data.json file: %s' % json_file, |
103 | + self.logs.getvalue()) |
104 | + |
105 | + def test_handle_args_root_uses_sensitive_instance_data(self): |
106 | + """When root user, and no instance-data arg, use sensitive.json.""" |
107 | + user_data = self.tmp_path('user-data', dir=self.tmp) |
108 | + write_file(user_data, '##template: jinja\nrendering: {{ my_var }}') |
109 | + run_dir = self.tmp_path('run_dir', dir=self.tmp) |
110 | + ensure_dir(run_dir) |
111 | + json_sensitive = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE) |
112 | + write_file(json_sensitive, '{"my-var": "jinja worked"}') |
113 | + paths = Paths({'run_dir': run_dir}) |
114 | + self.add_patch('cloudinit.cmd.devel.render.read_cfg_paths', 'm_paths') |
115 | + self.m_paths.return_value = paths |
116 | + args = self.args( |
117 | + user_data=user_data, instance_data=None, debug=False) |
118 | + with mock.patch('sys.stderr', new_callable=StringIO): |
119 | + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: |
120 | + with mock.patch('os.getuid') as m_getuid: |
121 | + m_getuid.return_value = 0 |
122 | + self.assertEqual(0, render.handle_args('anyname', args)) |
123 | + self.assertIn('rendering: jinja worked', m_stdout.getvalue()) |
124 | + |
125 | @skipUnlessJinja() |
126 | def test_handle_args_renders_instance_data_vars_in_template(self): |
127 | """If user_data file is a jinja template render instance-data vars.""" |
128 | diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py |
129 | index 7d2d4fe..1d888b9 100644 |
130 | --- a/cloudinit/cmd/query.py |
131 | +++ b/cloudinit/cmd/query.py |
132 | @@ -3,6 +3,7 @@ |
133 | """Query standardized instance metadata from the command line.""" |
134 | |
135 | import argparse |
136 | +from errno import EACCES |
137 | import os |
138 | import six |
139 | import sys |
140 | @@ -79,27 +80,38 @@ def handle_args(name, args): |
141 | uid = os.getuid() |
142 | if not all([args.instance_data, args.user_data, args.vendor_data]): |
143 | paths = read_cfg_paths() |
144 | - if not args.instance_data: |
145 | + if args.instance_data: |
146 | + instance_data_fn = args.instance_data |
147 | + else: |
148 | + redacted_data_fn = os.path.join(paths.run_dir, INSTANCE_JSON_FILE) |
149 | if uid == 0: |
150 | - default_json_fn = INSTANCE_JSON_SENSITIVE_FILE |
151 | + sensitive_data_fn = os.path.join( |
152 | + paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE) |
153 | + if os.path.exists(sensitive_data_fn): |
154 | + instance_data_fn = sensitive_data_fn |
155 | + else: |
156 | + LOG.warning( |
157 | + 'Missing root-readable %s. Using redacted %s instead.', |
158 | + sensitive_data_fn, redacted_data_fn) |
159 | + instance_data_fn = redacted_data_fn |
160 | else: |
161 | - default_json_fn = INSTANCE_JSON_FILE # World readable |
162 | - instance_data_fn = os.path.join(paths.run_dir, default_json_fn) |
163 | + instance_data_fn = redacted_data_fn |
164 | + if args.user_data: |
165 | + user_data_fn = args.user_data |
166 | else: |
167 | - instance_data_fn = args.instance_data |
168 | - if not args.user_data: |
169 | user_data_fn = os.path.join(paths.instance_link, 'user-data.txt') |
170 | + if args.vendor_data: |
171 | + vendor_data_fn = args.vendor_data |
172 | else: |
173 | - user_data_fn = args.user_data |
174 | - if not args.vendor_data: |
175 | vendor_data_fn = os.path.join(paths.instance_link, 'vendor-data.txt') |
176 | - else: |
177 | - vendor_data_fn = args.vendor_data |
178 | |
179 | try: |
180 | instance_json = util.load_file(instance_data_fn) |
181 | - except IOError: |
182 | - LOG.error('Missing instance-data.json file: %s', instance_data_fn) |
183 | + except (IOError, OSError) as e: |
184 | + if e.errno == EACCES: |
185 | + LOG.error("No read permission on '%s'. Try sudo", instance_data_fn) |
186 | + else: |
187 | + LOG.error('Missing instance-data file: %s', instance_data_fn) |
188 | return 1 |
189 | |
190 | instance_data = util.load_json(instance_json) |
191 | diff --git a/cloudinit/cmd/tests/test_query.py b/cloudinit/cmd/tests/test_query.py |
192 | index fb87c6a..28738b1 100644 |
193 | --- a/cloudinit/cmd/tests/test_query.py |
194 | +++ b/cloudinit/cmd/tests/test_query.py |
195 | @@ -1,5 +1,6 @@ |
196 | # This file is part of cloud-init. See LICENSE file for license information. |
197 | |
198 | +import errno |
199 | from six import StringIO |
200 | from textwrap import dedent |
201 | import os |
202 | @@ -7,7 +8,8 @@ import os |
203 | from collections import namedtuple |
204 | from cloudinit.cmd import query |
205 | from cloudinit.helpers import Paths |
206 | -from cloudinit.sources import REDACT_SENSITIVE_VALUE, INSTANCE_JSON_FILE |
207 | +from cloudinit.sources import ( |
208 | + REDACT_SENSITIVE_VALUE, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE) |
209 | from cloudinit.tests.helpers import CiTestCase, mock |
210 | from cloudinit.util import ensure_dir, write_file |
211 | |
212 | @@ -50,10 +52,28 @@ class TestQuery(CiTestCase): |
213 | with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: |
214 | self.assertEqual(1, query.handle_args('anyname', args)) |
215 | self.assertIn( |
216 | - 'ERROR: Missing instance-data.json file: %s' % absent_fn, |
217 | + 'ERROR: Missing instance-data file: %s' % absent_fn, |
218 | self.logs.getvalue()) |
219 | self.assertIn( |
220 | - 'ERROR: Missing instance-data.json file: %s' % absent_fn, |
221 | + 'ERROR: Missing instance-data file: %s' % absent_fn, |
222 | + m_stderr.getvalue()) |
223 | + |
224 | + def test_handle_args_error_when_no_read_permission_instance_data(self): |
225 | + """When instance_data file is unreadable, log an error.""" |
226 | + noread_fn = self.tmp_path('unreadable', dir=self.tmp) |
227 | + write_file(noread_fn, 'thou shall not pass') |
228 | + args = self.args( |
229 | + debug=False, dump_all=True, format=None, instance_data=noread_fn, |
230 | + list_keys=False, user_data='ud', vendor_data='vd', varname=None) |
231 | + with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: |
232 | + with mock.patch('cloudinit.cmd.query.util.load_file') as m_load: |
233 | + m_load.side_effect = OSError(errno.EACCES, 'Not allowed') |
234 | + self.assertEqual(1, query.handle_args('anyname', args)) |
235 | + self.assertIn( |
236 | + "ERROR: No read permission on '%s'. Try sudo" % noread_fn, |
237 | + self.logs.getvalue()) |
238 | + self.assertIn( |
239 | + "ERROR: No read permission on '%s'. Try sudo" % noread_fn, |
240 | m_stderr.getvalue()) |
241 | |
242 | def test_handle_args_defaults_instance_data(self): |
243 | @@ -70,12 +90,58 @@ class TestQuery(CiTestCase): |
244 | self.assertEqual(1, query.handle_args('anyname', args)) |
245 | json_file = os.path.join(run_dir, INSTANCE_JSON_FILE) |
246 | self.assertIn( |
247 | - 'ERROR: Missing instance-data.json file: %s' % json_file, |
248 | + 'ERROR: Missing instance-data file: %s' % json_file, |
249 | self.logs.getvalue()) |
250 | self.assertIn( |
251 | - 'ERROR: Missing instance-data.json file: %s' % json_file, |
252 | + 'ERROR: Missing instance-data file: %s' % json_file, |
253 | m_stderr.getvalue()) |
254 | |
255 | + def test_handle_args_root_fallsback_to_instance_data(self): |
256 | + """When no instance_data argument, root falls back to redacted json.""" |
257 | + args = self.args( |
258 | + debug=False, dump_all=True, format=None, instance_data=None, |
259 | + list_keys=False, user_data=None, vendor_data=None, varname=None) |
260 | + run_dir = self.tmp_path('run_dir', dir=self.tmp) |
261 | + ensure_dir(run_dir) |
262 | + paths = Paths({'run_dir': run_dir}) |
263 | + self.add_patch('cloudinit.cmd.query.read_cfg_paths', 'm_paths') |
264 | + self.m_paths.return_value = paths |
265 | + with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: |
266 | + with mock.patch('os.getuid') as m_getuid: |
267 | + m_getuid.return_value = 0 |
268 | + self.assertEqual(1, query.handle_args('anyname', args)) |
269 | + json_file = os.path.join(run_dir, INSTANCE_JSON_FILE) |
270 | + sensitive_file = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE) |
271 | + self.assertIn( |
272 | + 'WARNING: Missing root-readable %s. Using redacted %s instead.' % ( |
273 | + sensitive_file, json_file), |
274 | + m_stderr.getvalue()) |
275 | + |
276 | + def test_handle_args_root_uses_instance_sensitive_data(self): |
277 | + """When no instance_data argument, root uses semsitive json.""" |
278 | + user_data = self.tmp_path('user-data', dir=self.tmp) |
279 | + vendor_data = self.tmp_path('vendor-data', dir=self.tmp) |
280 | + write_file(user_data, 'ud') |
281 | + write_file(vendor_data, 'vd') |
282 | + run_dir = self.tmp_path('run_dir', dir=self.tmp) |
283 | + sensitive_file = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE) |
284 | + write_file(sensitive_file, '{"my-var": "it worked"}') |
285 | + ensure_dir(run_dir) |
286 | + paths = Paths({'run_dir': run_dir}) |
287 | + self.add_patch('cloudinit.cmd.query.read_cfg_paths', 'm_paths') |
288 | + self.m_paths.return_value = paths |
289 | + args = self.args( |
290 | + debug=False, dump_all=True, format=None, instance_data=None, |
291 | + list_keys=False, user_data=vendor_data, vendor_data=vendor_data, |
292 | + varname=None) |
293 | + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: |
294 | + with mock.patch('os.getuid') as m_getuid: |
295 | + m_getuid.return_value = 0 |
296 | + self.assertEqual(0, query.handle_args('anyname', args)) |
297 | + self.assertEqual( |
298 | + '{\n "my_var": "it worked",\n "userdata": "vd",\n ' |
299 | + '"vendordata": "vd"\n}\n', m_stdout.getvalue()) |
300 | + |
301 | def test_handle_args_dumps_all_instance_data(self): |
302 | """When --all is specified query will dump all instance data vars.""" |
303 | write_file(self.instance_data, '{"my-var": "it worked"}') |
304 | diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py |
305 | index 943089e..29e192e 100644 |
306 | --- a/cloudinit/config/cc_disk_setup.py |
307 | +++ b/cloudinit/config/cc_disk_setup.py |
308 | @@ -743,7 +743,7 @@ def assert_and_settle_device(device): |
309 | util.udevadm_settle() |
310 | if not os.path.exists(device): |
311 | raise RuntimeError("Device %s did not exist and was not created " |
312 | - "with a udevamd settle." % device) |
313 | + "with a udevadm settle." % device) |
314 | |
315 | # Whether or not the device existed above, it is possible that udev |
316 | # events that would populate udev database (for reading by lsdname) have |
317 | diff --git a/cloudinit/handlers/jinja_template.py b/cloudinit/handlers/jinja_template.py |
318 | index 3fa4097..ce3accf 100644 |
319 | --- a/cloudinit/handlers/jinja_template.py |
320 | +++ b/cloudinit/handlers/jinja_template.py |
321 | @@ -1,5 +1,6 @@ |
322 | # This file is part of cloud-init. See LICENSE file for license information. |
323 | |
324 | +from errno import EACCES |
325 | import os |
326 | import re |
327 | |
328 | @@ -76,7 +77,14 @@ def render_jinja_payload_from_file( |
329 | raise RuntimeError( |
330 | 'Cannot render jinja template vars. Instance data not yet' |
331 | ' present at %s' % instance_data_file) |
332 | - instance_data = load_json(load_file(instance_data_file)) |
333 | + try: |
334 | + instance_data = load_json(load_file(instance_data_file)) |
335 | + except (IOError, OSError) as e: |
336 | + if e.errno == EACCES: |
337 | + raise RuntimeError( |
338 | + 'Cannot render jinja template vars. No read permission on' |
339 | + " '%s'. Try sudo" % instance_data_file) |
340 | + |
341 | rendered_payload = render_jinja_payload( |
342 | payload, payload_fn, instance_data, debug) |
343 | if not rendered_payload: |
344 | diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py |
345 | index 12cf509..bdc5799 100644 |
346 | --- a/cloudinit/net/dhcp.py |
347 | +++ b/cloudinit/net/dhcp.py |
348 | @@ -40,34 +40,56 @@ class EphemeralDHCPv4(object): |
349 | def __init__(self, iface=None): |
350 | self.iface = iface |
351 | self._ephipv4 = None |
352 | + self.lease = None |
353 | |
354 | def __enter__(self): |
355 | + """Setup sandboxed dhcp context.""" |
356 | + return self.obtain_lease() |
357 | + |
358 | + def __exit__(self, excp_type, excp_value, excp_traceback): |
359 | + """Teardown sandboxed dhcp context.""" |
360 | + self.clean_network() |
361 | + |
362 | + def clean_network(self): |
363 | + """Exit _ephipv4 context to teardown of ip configuration performed.""" |
364 | + if self.lease: |
365 | + self.lease = None |
366 | + if not self._ephipv4: |
367 | + return |
368 | + self._ephipv4.__exit__(None, None, None) |
369 | + |
370 | + def obtain_lease(self): |
371 | + """Perform dhcp discovery in a sandboxed environment if possible. |
372 | + |
373 | + @return: A dict representing dhcp options on the most recent lease |
374 | + obtained from the dhclient discovery if run, otherwise an error |
375 | + is raised. |
376 | + |
377 | + @raises: NoDHCPLeaseError if no leases could be obtained. |
378 | + """ |
379 | + if self.lease: |
380 | + return self.lease |
381 | try: |
382 | leases = maybe_perform_dhcp_discovery(self.iface) |
383 | except InvalidDHCPLeaseFileError: |
384 | raise NoDHCPLeaseError() |
385 | if not leases: |
386 | raise NoDHCPLeaseError() |
387 | - lease = leases[-1] |
388 | + self.lease = leases[-1] |
389 | LOG.debug("Received dhcp lease on %s for %s/%s", |
390 | - lease['interface'], lease['fixed-address'], |
391 | - lease['subnet-mask']) |
392 | + self.lease['interface'], self.lease['fixed-address'], |
393 | + self.lease['subnet-mask']) |
394 | nmap = {'interface': 'interface', 'ip': 'fixed-address', |
395 | 'prefix_or_mask': 'subnet-mask', |
396 | 'broadcast': 'broadcast-address', |
397 | 'router': 'routers'} |
398 | - kwargs = dict([(k, lease.get(v)) for k, v in nmap.items()]) |
399 | + kwargs = dict([(k, self.lease.get(v)) for k, v in nmap.items()]) |
400 | if not kwargs['broadcast']: |
401 | kwargs['broadcast'] = bcip(kwargs['prefix_or_mask'], kwargs['ip']) |
402 | ephipv4 = EphemeralIPv4Network(**kwargs) |
403 | ephipv4.__enter__() |
404 | self._ephipv4 = ephipv4 |
405 | - return lease |
406 | - |
407 | - def __exit__(self, excp_type, excp_value, excp_traceback): |
408 | - if not self._ephipv4: |
409 | - return |
410 | - self._ephipv4.__exit__(excp_type, excp_value, excp_traceback) |
411 | + return self.lease |
412 | |
413 | |
414 | def maybe_perform_dhcp_discovery(nic=None): |
415 | diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py |
416 | index 39391d0..9e8a1a8 100644 |
417 | --- a/cloudinit/sources/DataSourceAzure.py |
418 | +++ b/cloudinit/sources/DataSourceAzure.py |
419 | @@ -22,7 +22,7 @@ from cloudinit.event import EventType |
420 | from cloudinit.net.dhcp import EphemeralDHCPv4 |
421 | from cloudinit import sources |
422 | from cloudinit.sources.helpers.azure import get_metadata_from_fabric |
423 | -from cloudinit.url_helper import readurl, UrlError |
424 | +from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc |
425 | from cloudinit import util |
426 | |
427 | LOG = logging.getLogger(__name__) |
428 | @@ -57,7 +57,7 @@ IMDS_URL = "http://169.254.169.254/metadata/" |
429 | # List of static scripts and network config artifacts created by |
430 | # stock ubuntu suported images. |
431 | UBUNTU_EXTENDED_NETWORK_SCRIPTS = [ |
432 | - '/etc/netplan/90-azure-hotplug.yaml', |
433 | + '/etc/netplan/90-hotplug-azure.yaml', |
434 | '/usr/local/sbin/ephemeral_eth.sh', |
435 | '/etc/udev/rules.d/10-net-device-added.rules', |
436 | '/run/network/interfaces.ephemeral.d', |
437 | @@ -207,7 +207,9 @@ BUILTIN_DS_CONFIG = { |
438 | }, |
439 | 'disk_aliases': {'ephemeral0': RESOURCE_DISK_PATH}, |
440 | 'dhclient_lease_file': LEASE_FILE, |
441 | + 'apply_network_config': True, # Use IMDS published network configuration |
442 | } |
443 | +# RELEASE_BLOCKER: Xenial and earlier apply_network_config default is False |
444 | |
445 | BUILTIN_CLOUD_CONFIG = { |
446 | 'disk_setup': { |
447 | @@ -278,6 +280,7 @@ class DataSourceAzure(sources.DataSource): |
448 | self._network_config = None |
449 | # Regenerate network config new_instance boot and every boot |
450 | self.update_events['network'].add(EventType.BOOT) |
451 | + self._ephemeral_dhcp_ctx = None |
452 | |
453 | def __str__(self): |
454 | root = sources.DataSource.__str__(self) |
455 | @@ -404,7 +407,8 @@ class DataSourceAzure(sources.DataSource): |
456 | LOG.warning("%s was not mountable", cdev) |
457 | continue |
458 | |
459 | - if reprovision or self._should_reprovision(ret): |
460 | + perform_reprovision = reprovision or self._should_reprovision(ret) |
461 | + if perform_reprovision: |
462 | ret = self._reprovision() |
463 | imds_md = get_metadata_from_imds( |
464 | self.fallback_interface, retries=3) |
465 | @@ -432,6 +436,18 @@ class DataSourceAzure(sources.DataSource): |
466 | crawled_data['metadata']['random_seed'] = seed |
467 | crawled_data['metadata']['instance-id'] = util.read_dmi_data( |
468 | 'system-uuid') |
469 | + |
470 | + if perform_reprovision: |
471 | + LOG.info("Reporting ready to Azure after getting ReprovisionData") |
472 | + use_cached_ephemeral = (net.is_up(self.fallback_interface) and |
473 | + getattr(self, '_ephemeral_dhcp_ctx', None)) |
474 | + if use_cached_ephemeral: |
475 | + self._report_ready(lease=self._ephemeral_dhcp_ctx.lease) |
476 | + self._ephemeral_dhcp_ctx.clean_network() # Teardown ephemeral |
477 | + else: |
478 | + with EphemeralDHCPv4() as lease: |
479 | + self._report_ready(lease=lease) |
480 | + |
481 | return crawled_data |
482 | |
483 | def _is_platform_viable(self): |
484 | @@ -458,7 +474,8 @@ class DataSourceAzure(sources.DataSource): |
485 | except sources.InvalidMetaDataException as e: |
486 | LOG.warning('Could not crawl Azure metadata: %s', e) |
487 | return False |
488 | - if self.distro and self.distro.name == 'ubuntu': |
489 | + if (self.distro and self.distro.name == 'ubuntu' and |
490 | + self.ds_cfg.get('apply_network_config')): |
491 | maybe_remove_ubuntu_network_config_scripts() |
492 | |
493 | # Process crawled data and augment with various config defaults |
494 | @@ -509,32 +526,29 @@ class DataSourceAzure(sources.DataSource): |
495 | report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE)) |
496 | LOG.debug("Start polling IMDS") |
497 | |
498 | - def exc_cb(msg, exception): |
499 | - if isinstance(exception, UrlError) and exception.code == 404: |
500 | - return True |
501 | - # If we get an exception while trying to call IMDS, we |
502 | - # call DHCP and setup the ephemeral network to acquire the new IP. |
503 | - return False |
504 | - |
505 | while True: |
506 | try: |
507 | - with EphemeralDHCPv4() as lease: |
508 | - if report_ready: |
509 | - path = REPORTED_READY_MARKER_FILE |
510 | - LOG.info( |
511 | - "Creating a marker file to report ready: %s", path) |
512 | - util.write_file(path, "{pid}: {time}\n".format( |
513 | - pid=os.getpid(), time=time())) |
514 | - self._report_ready(lease=lease) |
515 | - report_ready = False |
516 | - return readurl(url, timeout=1, headers=headers, |
517 | - exception_cb=exc_cb, infinite=True).contents |
518 | + # Save our EphemeralDHCPv4 context so we avoid repeated dhcp |
519 | + self._ephemeral_dhcp_ctx = EphemeralDHCPv4() |
520 | + lease = self._ephemeral_dhcp_ctx.obtain_lease() |
521 | + if report_ready: |
522 | + path = REPORTED_READY_MARKER_FILE |
523 | + LOG.info( |
524 | + "Creating a marker file to report ready: %s", path) |
525 | + util.write_file(path, "{pid}: {time}\n".format( |
526 | + pid=os.getpid(), time=time())) |
527 | + self._report_ready(lease=lease) |
528 | + report_ready = False |
529 | + return readurl(url, timeout=1, headers=headers, |
530 | + exception_cb=retry_on_url_exc, infinite=True, |
531 | + log_req_resp=False).contents |
532 | except UrlError: |
533 | + # Teardown our EphemeralDHCPv4 context on failure as we retry |
534 | + self._ephemeral_dhcp_ctx.clean_network() |
535 | pass |
536 | |
537 | def _report_ready(self, lease): |
538 | - """Tells the fabric provisioning has completed |
539 | - before we go into our polling loop.""" |
540 | + """Tells the fabric provisioning has completed """ |
541 | try: |
542 | get_metadata_from_fabric(None, lease['unknown-245']) |
543 | except Exception: |
544 | @@ -619,7 +633,11 @@ class DataSourceAzure(sources.DataSource): |
545 | the blacklisted devices. |
546 | """ |
547 | if not self._network_config: |
548 | - self._network_config = parse_network_config(self._metadata_imds) |
549 | + if self.ds_cfg.get('apply_network_config'): |
550 | + nc_src = self._metadata_imds |
551 | + else: |
552 | + nc_src = None |
553 | + self._network_config = parse_network_config(nc_src) |
554 | return self._network_config |
555 | |
556 | |
557 | @@ -700,7 +718,7 @@ def can_dev_be_reformatted(devpath, preserve_ntfs): |
558 | file_count = util.mount_cb(cand_path, count_files, mtype="ntfs", |
559 | update_env_for_mount={'LANG': 'C'}) |
560 | except util.MountFailedError as e: |
561 | - if "mount: unknown filesystem type 'ntfs'" in str(e): |
562 | + if "unknown filesystem type 'ntfs'" in str(e): |
563 | return True, (bmsg + ' but this system cannot mount NTFS,' |
564 | ' assuming there are no important files.' |
565 | ' Formatting allowed.') |
566 | @@ -1162,17 +1180,12 @@ def get_metadata_from_imds(fallback_nic, retries): |
567 | |
568 | def _get_metadata_from_imds(retries): |
569 | |
570 | - def retry_on_url_error(msg, exception): |
571 | - if isinstance(exception, UrlError) and exception.code == 404: |
572 | - return True # Continue retries |
573 | - return False # Stop retries on all other exceptions |
574 | - |
575 | url = IMDS_URL + "instance?api-version=2017-12-01" |
576 | headers = {"Metadata": "true"} |
577 | try: |
578 | response = readurl( |
579 | url, timeout=1, headers=headers, retries=retries, |
580 | - exception_cb=retry_on_url_error) |
581 | + exception_cb=retry_on_url_exc) |
582 | except Exception as e: |
583 | LOG.debug('Ignoring IMDS instance metadata: %s', e) |
584 | return {} |
585 | @@ -1195,7 +1208,7 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None): |
586 | additional interfaces which get attached by a customer at some point |
587 | after initial boot. Since the Azure datasource can now regenerate |
588 | network configuration as metadata reports these new devices, we no longer |
589 | - want the udev rules or netplan's 90-azure-hotplug.yaml to configure |
590 | + want the udev rules or netplan's 90-hotplug-azure.yaml to configure |
591 | networking on eth1 or greater as it might collide with cloud-init's |
592 | configuration. |
593 | |
594 | diff --git a/cloudinit/tests/test_url_helper.py b/cloudinit/tests/test_url_helper.py |
595 | index 113249d..aa9f3ec 100644 |
596 | --- a/cloudinit/tests/test_url_helper.py |
597 | +++ b/cloudinit/tests/test_url_helper.py |
598 | @@ -1,10 +1,12 @@ |
599 | # This file is part of cloud-init. See LICENSE file for license information. |
600 | |
601 | -from cloudinit.url_helper import oauth_headers, read_file_or_url |
602 | +from cloudinit.url_helper import ( |
603 | + NOT_FOUND, UrlError, oauth_headers, read_file_or_url, retry_on_url_exc) |
604 | from cloudinit.tests.helpers import CiTestCase, mock, skipIf |
605 | from cloudinit import util |
606 | |
607 | import httpretty |
608 | +import requests |
609 | |
610 | |
611 | try: |
612 | @@ -64,3 +66,24 @@ class TestReadFileOrUrl(CiTestCase): |
613 | result = read_file_or_url(url) |
614 | self.assertEqual(result.contents, data) |
615 | self.assertEqual(str(result), data.decode('utf-8')) |
616 | + |
617 | + |
618 | +class TestRetryOnUrlExc(CiTestCase): |
619 | + |
620 | + def test_do_not_retry_non_urlerror(self): |
621 | + """When exception is not UrlError return False.""" |
622 | + myerror = IOError('something unexcpected') |
623 | + self.assertFalse(retry_on_url_exc(msg='', exc=myerror)) |
624 | + |
625 | + def test_perform_retries_on_not_found(self): |
626 | + """When exception is UrlError with a 404 status code return True.""" |
627 | + myerror = UrlError(cause=RuntimeError( |
628 | + 'something was not found'), code=NOT_FOUND) |
629 | + self.assertTrue(retry_on_url_exc(msg='', exc=myerror)) |
630 | + |
631 | + def test_perform_retries_on_timeout(self): |
632 | + """When exception is a requests.Timout return True.""" |
633 | + myerror = UrlError(cause=requests.Timeout('something timed out')) |
634 | + self.assertTrue(retry_on_url_exc(msg='', exc=myerror)) |
635 | + |
636 | +# vi: ts=4 expandtab |
637 | diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py |
638 | index 749a384..e3d2dba 100644 |
639 | --- a/cloudinit/tests/test_util.py |
640 | +++ b/cloudinit/tests/test_util.py |
641 | @@ -18,25 +18,51 @@ MOUNT_INFO = [ |
642 | ] |
643 | |
644 | OS_RELEASE_SLES = dedent("""\ |
645 | - NAME="SLES"\n |
646 | - VERSION="12-SP3"\n |
647 | - VERSION_ID="12.3"\n |
648 | - PRETTY_NAME="SUSE Linux Enterprise Server 12 SP3"\n |
649 | - ID="sles"\nANSI_COLOR="0;32"\n |
650 | - CPE_NAME="cpe:/o:suse:sles:12:sp3"\n |
651 | + NAME="SLES" |
652 | + VERSION="12-SP3" |
653 | + VERSION_ID="12.3" |
654 | + PRETTY_NAME="SUSE Linux Enterprise Server 12 SP3" |
655 | + ID="sles" |
656 | + ANSI_COLOR="0;32" |
657 | + CPE_NAME="cpe:/o:suse:sles:12:sp3" |
658 | """) |
659 | |
660 | OS_RELEASE_OPENSUSE = dedent("""\ |
661 | -NAME="openSUSE Leap" |
662 | -VERSION="42.3" |
663 | -ID=opensuse |
664 | -ID_LIKE="suse" |
665 | -VERSION_ID="42.3" |
666 | -PRETTY_NAME="openSUSE Leap 42.3" |
667 | -ANSI_COLOR="0;32" |
668 | -CPE_NAME="cpe:/o:opensuse:leap:42.3" |
669 | -BUG_REPORT_URL="https://bugs.opensuse.org" |
670 | -HOME_URL="https://www.opensuse.org/" |
671 | + NAME="openSUSE Leap" |
672 | + VERSION="42.3" |
673 | + ID=opensuse |
674 | + ID_LIKE="suse" |
675 | + VERSION_ID="42.3" |
676 | + PRETTY_NAME="openSUSE Leap 42.3" |
677 | + ANSI_COLOR="0;32" |
678 | + CPE_NAME="cpe:/o:opensuse:leap:42.3" |
679 | + BUG_REPORT_URL="https://bugs.opensuse.org" |
680 | + HOME_URL="https://www.opensuse.org/" |
681 | +""") |
682 | + |
683 | +OS_RELEASE_OPENSUSE_L15 = dedent("""\ |
684 | + NAME="openSUSE Leap" |
685 | + VERSION="15.0" |
686 | + ID="opensuse-leap" |
687 | + ID_LIKE="suse opensuse" |
688 | + VERSION_ID="15.0" |
689 | + PRETTY_NAME="openSUSE Leap 15.0" |
690 | + ANSI_COLOR="0;32" |
691 | + CPE_NAME="cpe:/o:opensuse:leap:15.0" |
692 | + BUG_REPORT_URL="https://bugs.opensuse.org" |
693 | + HOME_URL="https://www.opensuse.org/" |
694 | +""") |
695 | + |
696 | +OS_RELEASE_OPENSUSE_TW = dedent("""\ |
697 | + NAME="openSUSE Tumbleweed" |
698 | + ID="opensuse-tumbleweed" |
699 | + ID_LIKE="opensuse suse" |
700 | + VERSION_ID="20180920" |
701 | + PRETTY_NAME="openSUSE Tumbleweed" |
702 | + ANSI_COLOR="0;32" |
703 | + CPE_NAME="cpe:/o:opensuse:tumbleweed:20180920" |
704 | + BUG_REPORT_URL="https://bugs.opensuse.org" |
705 | + HOME_URL="https://www.opensuse.org/" |
706 | """) |
707 | |
708 | OS_RELEASE_CENTOS = dedent("""\ |
709 | @@ -447,12 +473,35 @@ class TestGetLinuxDistro(CiTestCase): |
710 | |
711 | @mock.patch('cloudinit.util.load_file') |
712 | def test_get_linux_opensuse(self, m_os_release, m_path_exists): |
713 | - """Verify we get the correct name and machine arch on OpenSUSE.""" |
714 | + """Verify we get the correct name and machine arch on openSUSE |
715 | + prior to openSUSE Leap 15. |
716 | + """ |
717 | m_os_release.return_value = OS_RELEASE_OPENSUSE |
718 | m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists |
719 | dist = util.get_linux_distro() |
720 | self.assertEqual(('opensuse', '42.3', platform.machine()), dist) |
721 | |
722 | + @mock.patch('cloudinit.util.load_file') |
723 | + def test_get_linux_opensuse_l15(self, m_os_release, m_path_exists): |
724 | + """Verify we get the correct name and machine arch on openSUSE |
725 | + for openSUSE Leap 15.0 and later. |
726 | + """ |
727 | + m_os_release.return_value = OS_RELEASE_OPENSUSE_L15 |
728 | + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists |
729 | + dist = util.get_linux_distro() |
730 | + self.assertEqual(('opensuse-leap', '15.0', platform.machine()), dist) |
731 | + |
732 | + @mock.patch('cloudinit.util.load_file') |
733 | + def test_get_linux_opensuse_tw(self, m_os_release, m_path_exists): |
734 | + """Verify we get the correct name and machine arch on openSUSE |
735 | + for openSUSE Tumbleweed |
736 | + """ |
737 | + m_os_release.return_value = OS_RELEASE_OPENSUSE_TW |
738 | + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists |
739 | + dist = util.get_linux_distro() |
740 | + self.assertEqual( |
741 | + ('opensuse-tumbleweed', '20180920', platform.machine()), dist) |
742 | + |
743 | @mock.patch('platform.dist') |
744 | def test_get_linux_distro_no_data(self, m_platform_dist, m_path_exists): |
745 | """Verify we get no information if os-release does not exist""" |
746 | diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py |
747 | index 8067979..396d69a 100644 |
748 | --- a/cloudinit/url_helper.py |
749 | +++ b/cloudinit/url_helper.py |
750 | @@ -199,7 +199,7 @@ def _get_ssl_args(url, ssl_details): |
751 | def readurl(url, data=None, timeout=None, retries=0, sec_between=1, |
752 | headers=None, headers_cb=None, ssl_details=None, |
753 | check_status=True, allow_redirects=True, exception_cb=None, |
754 | - session=None, infinite=False): |
755 | + session=None, infinite=False, log_req_resp=True): |
756 | url = _cleanurl(url) |
757 | req_args = { |
758 | 'url': url, |
759 | @@ -256,9 +256,11 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, |
760 | continue |
761 | filtered_req_args[k] = v |
762 | try: |
763 | - LOG.debug("[%s/%s] open '%s' with %s configuration", i, |
764 | - "infinite" if infinite else manual_tries, url, |
765 | - filtered_req_args) |
766 | + |
767 | + if log_req_resp: |
768 | + LOG.debug("[%s/%s] open '%s' with %s configuration", i, |
769 | + "infinite" if infinite else manual_tries, url, |
770 | + filtered_req_args) |
771 | |
772 | if session is None: |
773 | session = requests.Session() |
774 | @@ -294,8 +296,11 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, |
775 | break |
776 | if (infinite and sec_between > 0) or \ |
777 | (i + 1 < manual_tries and sec_between > 0): |
778 | - LOG.debug("Please wait %s seconds while we wait to try again", |
779 | - sec_between) |
780 | + |
781 | + if log_req_resp: |
782 | + LOG.debug( |
783 | + "Please wait %s seconds while we wait to try again", |
784 | + sec_between) |
785 | time.sleep(sec_between) |
786 | if excps: |
787 | raise excps[-1] |
788 | @@ -549,4 +554,18 @@ def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret, |
789 | _uri, signed_headers, _body = client.sign(url) |
790 | return signed_headers |
791 | |
792 | + |
793 | +def retry_on_url_exc(msg, exc): |
794 | + """readurl exception_cb that will retry on NOT_FOUND and Timeout. |
795 | + |
796 | + Returns False to raise the exception from readurl, True to retry. |
797 | + """ |
798 | + if not isinstance(exc, UrlError): |
799 | + return False |
800 | + if exc.code == NOT_FOUND: |
801 | + return True |
802 | + if exc.cause and isinstance(exc.cause, requests.Timeout): |
803 | + return True |
804 | + return False |
805 | + |
806 | # vi: ts=4 expandtab |
807 | diff --git a/cloudinit/util.py b/cloudinit/util.py |
808 | index c67d6be..7800f7b 100644 |
809 | --- a/cloudinit/util.py |
810 | +++ b/cloudinit/util.py |
811 | @@ -615,8 +615,8 @@ def get_linux_distro(): |
812 | distro_name = os_release.get('ID', '') |
813 | distro_version = os_release.get('VERSION_ID', '') |
814 | if 'sles' in distro_name or 'suse' in distro_name: |
815 | - # RELEASE_BLOCKER: We will drop this sles ivergent behavior in |
816 | - # before 18.4 so that get_linux_distro returns a named tuple |
817 | + # RELEASE_BLOCKER: We will drop this sles divergent behavior in |
818 | + # the future so that get_linux_distro returns a named tuple |
819 | # which will include both version codename and architecture |
820 | # on all distributions. |
821 | flavor = platform.machine() |
822 | @@ -668,7 +668,8 @@ def system_info(): |
823 | var = 'ubuntu' |
824 | elif linux_dist == 'redhat': |
825 | var = 'rhel' |
826 | - elif linux_dist in ('opensuse', 'sles'): |
827 | + elif linux_dist in ( |
828 | + 'opensuse', 'opensuse-tumbleweed', 'opensuse-leap', 'sles'): |
829 | var = 'suse' |
830 | else: |
831 | var = 'linux' |
832 | diff --git a/debian/changelog b/debian/changelog |
833 | index 117fd16..a85c8cc 100644 |
834 | --- a/debian/changelog |
835 | +++ b/debian/changelog |
836 | @@ -1,3 +1,31 @@ |
837 | +cloud-init (18.4-22-g6062595b-0ubuntu1) disco; urgency=medium |
838 | + |
839 | + * New upstream snapshot. |
840 | + - azure: retry imds polling on requests.Timeout (LP: #1800223) |
841 | + - azure: Accept variation in error msg from mount for ntfs volumes |
842 | + [Jason Zions] (LP: #1799338) |
843 | + - azure: fix regression introduced when persisting ephemeral dhcp lease |
844 | + [Aswin Rajamannar] |
845 | + - azure: add udev rules to create cloud-init Gen2 disk name symlinks |
846 | + (LP: #1797480) |
847 | + - tests: ec2 mock missing httpretty user-data and instance-identity routes |
848 | + - azure: remove /etc/netplan/90-hotplug-azure.yaml when net from IMDS |
849 | + - azure: report ready to fabric after reprovision and reduce logging |
850 | + [Aswin Rajamannar] (LP: #1799594) |
851 | + - query: better error when missing read permission on instance-data |
852 | + - instance-data: fallback to instance-data.json if sensitive is absent. |
853 | + (LP: #1798189) |
854 | + - docs: remove colon from network v1 config example. [Tomer Cohen] |
855 | + - Add cloud-id binary to packages for SUSE [Jason Zions] |
856 | + - systemd: On SUSE ensure cloud-init.service runs before wicked |
857 | + [Robert Schweikert] (LP: #1799709) |
858 | + - update detection of openSUSE variants [Robert Schweikert] |
859 | + - azure: Add apply_network_config option to disable network from IMDS |
860 | + (LP: #1798424) |
861 | + - Correct spelling in an error message (udevadm). [Katie McLaughlin] |
862 | + |
863 | + -- Chad Smith <chad.smith@canonical.com> Mon, 12 Nov 2018 20:33:12 -0700 |
864 | + |
865 | cloud-init (18.4-7-g4652b196-0ubuntu1) cosmic; urgency=medium |
866 | |
867 | * New upstream snapshot. |
868 | diff --git a/doc/rtd/topics/datasources/azure.rst b/doc/rtd/topics/datasources/azure.rst |
869 | index 559011e..f73c369 100644 |
870 | --- a/doc/rtd/topics/datasources/azure.rst |
871 | +++ b/doc/rtd/topics/datasources/azure.rst |
872 | @@ -57,6 +57,52 @@ in order to use waagent.conf with cloud-init, the following settings are recomme |
873 | ResourceDisk.MountPoint=/mnt |
874 | |
875 | |
876 | +Configuration |
877 | +------------- |
878 | +The following configuration can be set for the datasource in system |
879 | +configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`). |
880 | + |
881 | +The settings that may be configured are: |
882 | + |
883 | + * **agent_command**: Either __builtin__ (default) or a command to run to getcw |
884 | + metadata. If __builtin__, get metadata from walinuxagent. Otherwise run the |
885 | + provided command to obtain metadata. |
886 | + * **apply_network_config**: Boolean set to True to use network configuration |
887 | + described by Azure's IMDS endpoint instead of fallback network config of |
888 | + dhcp on eth0. Default is True. For Ubuntu 16.04 or earlier, default is False. |
889 | + * **data_dir**: Path used to read metadata files and write crawled data. |
890 | + * **dhclient_lease_file**: The fallback lease file to source when looking for |
891 | + custom DHCP option 245 from Azure fabric. |
892 | + * **disk_aliases**: A dictionary defining which device paths should be |
893 | + interpreted as ephemeral images. See cc_disk_setup module for more info. |
894 | + * **hostname_bounce**: A dictionary Azure hostname bounce behavior to react to |
895 | + metadata changes. |
896 | + * **hostname_bounce**: A dictionary Azure hostname bounce behavior to react to |
897 | + metadata changes. Azure will throttle ifup/down in some cases after metadata |
898 | + has been updated to inform dhcp server about updated hostnames. |
899 | + * **set_hostname**: Boolean set to True when we want Azure to set the hostname |
900 | + based on metadata. |
901 | + |
902 | +An example configuration with the default values is provided below: |
903 | + |
904 | +.. sourcecode:: yaml |
905 | + |
906 | + datasource: |
907 | + Azure: |
908 | + agent_command: __builtin__ |
909 | + apply_network_config: true |
910 | + data_dir: /var/lib/waagent |
911 | + dhclient_lease_file: /var/lib/dhcp/dhclient.eth0.leases |
912 | + disk_aliases: |
913 | + ephemeral0: /dev/disk/cloud/azure_resource |
914 | + hostname_bounce: |
915 | + interface: eth0 |
916 | + command: builtin |
917 | + policy: true |
918 | + hostname_command: hostname |
919 | + set_hostname: true |
920 | + |
921 | + |
922 | Userdata |
923 | -------- |
924 | Userdata is provided to cloud-init inside the ovf-env.xml file. Cloud-init |
925 | diff --git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in |
926 | index a3a6d1e..6b2022b 100644 |
927 | --- a/packages/redhat/cloud-init.spec.in |
928 | +++ b/packages/redhat/cloud-init.spec.in |
929 | @@ -191,6 +191,7 @@ fi |
930 | |
931 | # Program binaries |
932 | %{_bindir}/cloud-init* |
933 | +%{_bindir}/cloud-id* |
934 | |
935 | # Docs |
936 | %doc LICENSE ChangeLog TODO.rst requirements.txt |
937 | diff --git a/packages/suse/cloud-init.spec.in b/packages/suse/cloud-init.spec.in |
938 | index e781d74..26894b3 100644 |
939 | --- a/packages/suse/cloud-init.spec.in |
940 | +++ b/packages/suse/cloud-init.spec.in |
941 | @@ -93,6 +93,7 @@ version_pys=$(cd "%{buildroot}" && find . -name version.py -type f) |
942 | |
943 | # Program binaries |
944 | %{_bindir}/cloud-init* |
945 | +%{_bindir}/cloud-id* |
946 | |
947 | # systemd files |
948 | /usr/lib/systemd/system-generators/* |
949 | diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init.service.tmpl |
950 | index b92e8ab..5cb0037 100644 |
951 | --- a/systemd/cloud-init.service.tmpl |
952 | +++ b/systemd/cloud-init.service.tmpl |
953 | @@ -14,8 +14,7 @@ After=networking.service |
954 | After=network.service |
955 | {% endif %} |
956 | {% if variant in ["suse"] %} |
957 | -Requires=wicked.service |
958 | -After=wicked.service |
959 | +Before=wicked.service |
960 | # setting hostname via hostnamectl depends on dbus, which otherwise |
961 | # would not be guaranteed at this point. |
962 | After=dbus.service |
963 | diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py |
964 | index abe820e..b92ffc7 100644 |
965 | --- a/tests/unittests/test_builtin_handlers.py |
966 | +++ b/tests/unittests/test_builtin_handlers.py |
967 | @@ -3,6 +3,7 @@ |
968 | """Tests of the built-in user data handlers.""" |
969 | |
970 | import copy |
971 | +import errno |
972 | import os |
973 | import shutil |
974 | import tempfile |
975 | @@ -202,6 +203,30 @@ class TestJinjaTemplatePartHandler(CiTestCase): |
976 | os.path.exists(script_file), |
977 | 'Unexpected file created %s' % script_file) |
978 | |
979 | + def test_jinja_template_handle_errors_on_unreadable_instance_data(self): |
980 | + """If instance-data is unreadable, raise an error from handle_part.""" |
981 | + script_handler = ShellScriptPartHandler(self.paths) |
982 | + instance_json = os.path.join(self.run_dir, 'instance-data.json') |
983 | + util.write_file(instance_json, util.json_dumps({})) |
984 | + h = JinjaTemplatePartHandler( |
985 | + self.paths, sub_handlers=[script_handler]) |
986 | + with mock.patch(self.mpath + 'load_file') as m_load: |
987 | + with self.assertRaises(RuntimeError) as context_manager: |
988 | + m_load.side_effect = OSError(errno.EACCES, 'Not allowed') |
989 | + h.handle_part( |
990 | + data='data', ctype="!" + handlers.CONTENT_START, |
991 | + filename='part01', |
992 | + payload='## template: jinja \n#!/bin/bash\necho himom', |
993 | + frequency='freq', headers='headers') |
994 | + script_file = os.path.join(script_handler.script_dir, 'part01') |
995 | + self.assertEqual( |
996 | + 'Cannot render jinja template vars. No read permission on' |
997 | + " '{rdir}/instance-data.json'. Try sudo".format(rdir=self.run_dir), |
998 | + str(context_manager.exception)) |
999 | + self.assertFalse( |
1000 | + os.path.exists(script_file), |
1001 | + 'Unexpected file created %s' % script_file) |
1002 | + |
1003 | @skipUnlessJinja() |
1004 | def test_jinja_template_handle_renders_jinja_content(self): |
1005 | """When present, render jinja variables from instance-data.json.""" |
1006 | diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py |
1007 | index 0f4b7bf..56484b2 100644 |
1008 | --- a/tests/unittests/test_datasource/test_azure.py |
1009 | +++ b/tests/unittests/test_datasource/test_azure.py |
1010 | @@ -17,6 +17,7 @@ import crypt |
1011 | import httpretty |
1012 | import json |
1013 | import os |
1014 | +import requests |
1015 | import stat |
1016 | import xml.etree.ElementTree as ET |
1017 | import yaml |
1018 | @@ -184,6 +185,35 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): |
1019 | "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time |
1020 | self.logs.getvalue()) |
1021 | |
1022 | + @mock.patch('requests.Session.request') |
1023 | + @mock.patch('cloudinit.url_helper.time.sleep') |
1024 | + @mock.patch(MOCKPATH + 'net.is_up') |
1025 | + def test_get_metadata_from_imds_retries_on_timeout( |
1026 | + self, m_net_is_up, m_sleep, m_request): |
1027 | + """Retry IMDS network metadata on timeout errors.""" |
1028 | + |
1029 | + self.attempt = 0 |
1030 | + m_request.side_effect = requests.Timeout('Fake Connection Timeout') |
1031 | + |
1032 | + def retry_callback(request, uri, headers): |
1033 | + self.attempt += 1 |
1034 | + raise requests.Timeout('Fake connection timeout') |
1035 | + |
1036 | + httpretty.register_uri( |
1037 | + httpretty.GET, |
1038 | + dsaz.IMDS_URL + 'instance?api-version=2017-12-01', |
1039 | + body=retry_callback) |
1040 | + |
1041 | + m_net_is_up.return_value = True # skips dhcp |
1042 | + |
1043 | + self.assertEqual({}, dsaz.get_metadata_from_imds('eth9', retries=3)) |
1044 | + |
1045 | + m_net_is_up.assert_called_with('eth9') |
1046 | + self.assertEqual([mock.call(1)]*3, m_sleep.call_args_list) |
1047 | + self.assertIn( |
1048 | + "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time |
1049 | + self.logs.getvalue()) |
1050 | + |
1051 | |
1052 | class TestAzureDataSource(CiTestCase): |
1053 | |
1054 | @@ -256,7 +286,8 @@ scbus-1 on xpt0 bus 0 |
1055 | ]) |
1056 | return dsaz |
1057 | |
1058 | - def _get_ds(self, data, agent_command=None, distro=None): |
1059 | + def _get_ds(self, data, agent_command=None, distro=None, |
1060 | + apply_network=None): |
1061 | |
1062 | def dsdevs(): |
1063 | return data.get('dsdevs', []) |
1064 | @@ -312,6 +343,8 @@ scbus-1 on xpt0 bus 0 |
1065 | data.get('sys_cfg', {}), distro=distro, paths=self.paths) |
1066 | if agent_command is not None: |
1067 | dsrc.ds_cfg['agent_command'] = agent_command |
1068 | + if apply_network is not None: |
1069 | + dsrc.ds_cfg['apply_network_config'] = apply_network |
1070 | |
1071 | return dsrc |
1072 | |
1073 | @@ -434,14 +467,26 @@ fdescfs /dev/fd fdescfs rw 0 0 |
1074 | |
1075 | def test_get_data_on_ubuntu_will_remove_network_scripts(self): |
1076 | """get_data will remove ubuntu net scripts on Ubuntu distro.""" |
1077 | + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} |
1078 | odata = {'HostName': "myhost", 'UserName': "myuser"} |
1079 | data = {'ovfcontent': construct_valid_ovf_env(data=odata), |
1080 | - 'sys_cfg': {}} |
1081 | + 'sys_cfg': sys_cfg} |
1082 | |
1083 | dsrc = self._get_ds(data, distro='ubuntu') |
1084 | dsrc.get_data() |
1085 | self.m_remove_ubuntu_network_scripts.assert_called_once_with() |
1086 | |
1087 | + def test_get_data_on_ubuntu_will_not_remove_network_scripts_disabled(self): |
1088 | + """When apply_network_config false, do not remove scripts on Ubuntu.""" |
1089 | + sys_cfg = {'datasource': {'Azure': {'apply_network_config': False}}} |
1090 | + odata = {'HostName': "myhost", 'UserName': "myuser"} |
1091 | + data = {'ovfcontent': construct_valid_ovf_env(data=odata), |
1092 | + 'sys_cfg': sys_cfg} |
1093 | + |
1094 | + dsrc = self._get_ds(data, distro='ubuntu') |
1095 | + dsrc.get_data() |
1096 | + self.m_remove_ubuntu_network_scripts.assert_not_called() |
1097 | + |
1098 | def test_crawl_metadata_returns_structured_data_and_caches_nothing(self): |
1099 | """Return all structured metadata and cache no class attributes.""" |
1100 | yaml_cfg = "{agent_command: my_command}\n" |
1101 | @@ -498,6 +543,58 @@ fdescfs /dev/fd fdescfs rw 0 0 |
1102 | dsrc.crawl_metadata() |
1103 | self.assertEqual(str(cm.exception), error_msg) |
1104 | |
1105 | + @mock.patch('cloudinit.sources.DataSourceAzure.EphemeralDHCPv4') |
1106 | + @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') |
1107 | + @mock.patch( |
1108 | + 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') |
1109 | + @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds') |
1110 | + def test_crawl_metadata_on_reprovision_reports_ready( |
1111 | + self, poll_imds_func, |
1112 | + report_ready_func, |
1113 | + m_write, m_dhcp): |
1114 | + """If reprovisioning, report ready at the end""" |
1115 | + ovfenv = construct_valid_ovf_env( |
1116 | + platform_settings={"PreprovisionedVm": "True"}) |
1117 | + |
1118 | + data = {'ovfcontent': ovfenv, |
1119 | + 'sys_cfg': {}} |
1120 | + dsrc = self._get_ds(data) |
1121 | + poll_imds_func.return_value = ovfenv |
1122 | + dsrc.crawl_metadata() |
1123 | + self.assertEqual(1, report_ready_func.call_count) |
1124 | + |
1125 | + @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') |
1126 | + @mock.patch( |
1127 | + 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') |
1128 | + @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') |
1129 | + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') |
1130 | + @mock.patch('cloudinit.sources.DataSourceAzure.readurl') |
1131 | + def test_crawl_metadata_on_reprovision_reports_ready_using_lease( |
1132 | + self, m_readurl, m_dhcp, |
1133 | + m_net, report_ready_func, |
1134 | + m_write): |
1135 | + """If reprovisioning, report ready using the obtained lease""" |
1136 | + ovfenv = construct_valid_ovf_env( |
1137 | + platform_settings={"PreprovisionedVm": "True"}) |
1138 | + |
1139 | + data = {'ovfcontent': ovfenv, |
1140 | + 'sys_cfg': {}} |
1141 | + dsrc = self._get_ds(data) |
1142 | + |
1143 | + lease = { |
1144 | + 'interface': 'eth9', 'fixed-address': '192.168.2.9', |
1145 | + 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', |
1146 | + 'unknown-245': '624c3620'} |
1147 | + m_dhcp.return_value = [lease] |
1148 | + |
1149 | + reprovision_ovfenv = construct_valid_ovf_env() |
1150 | + m_readurl.return_value = url_helper.StringResponse( |
1151 | + reprovision_ovfenv.encode('utf-8')) |
1152 | + |
1153 | + dsrc.crawl_metadata() |
1154 | + self.assertEqual(2, report_ready_func.call_count) |
1155 | + report_ready_func.assert_called_with(lease=lease) |
1156 | + |
1157 | def test_waagent_d_has_0700_perms(self): |
1158 | # we expect /var/lib/waagent to be created 0700 |
1159 | dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) |
1160 | @@ -523,8 +620,10 @@ fdescfs /dev/fd fdescfs rw 0 0 |
1161 | |
1162 | def test_network_config_set_from_imds(self): |
1163 | """Datasource.network_config returns IMDS network data.""" |
1164 | + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} |
1165 | odata = {} |
1166 | - data = {'ovfcontent': construct_valid_ovf_env(data=odata)} |
1167 | + data = {'ovfcontent': construct_valid_ovf_env(data=odata), |
1168 | + 'sys_cfg': sys_cfg} |
1169 | expected_network_config = { |
1170 | 'ethernets': { |
1171 | 'eth0': {'set-name': 'eth0', |
1172 | @@ -803,9 +902,10 @@ fdescfs /dev/fd fdescfs rw 0 0 |
1173 | @mock.patch('cloudinit.net.generate_fallback_config') |
1174 | def test_imds_network_config(self, mock_fallback): |
1175 | """Network config is generated from IMDS network data when present.""" |
1176 | + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} |
1177 | odata = {'HostName': "myhost", 'UserName': "myuser"} |
1178 | data = {'ovfcontent': construct_valid_ovf_env(data=odata), |
1179 | - 'sys_cfg': {}} |
1180 | + 'sys_cfg': sys_cfg} |
1181 | |
1182 | dsrc = self._get_ds(data) |
1183 | ret = dsrc.get_data() |
1184 | @@ -825,6 +925,36 @@ fdescfs /dev/fd fdescfs rw 0 0 |
1185 | @mock.patch('cloudinit.net.get_devicelist') |
1186 | @mock.patch('cloudinit.net.device_driver') |
1187 | @mock.patch('cloudinit.net.generate_fallback_config') |
1188 | + def test_imds_network_ignored_when_apply_network_config_false( |
1189 | + self, mock_fallback, mock_dd, mock_devlist, mock_get_mac): |
1190 | + """When apply_network_config is False, use fallback instead of IMDS.""" |
1191 | + sys_cfg = {'datasource': {'Azure': {'apply_network_config': False}}} |
1192 | + odata = {'HostName': "myhost", 'UserName': "myuser"} |
1193 | + data = {'ovfcontent': construct_valid_ovf_env(data=odata), |
1194 | + 'sys_cfg': sys_cfg} |
1195 | + fallback_config = { |
1196 | + 'version': 1, |
1197 | + 'config': [{ |
1198 | + 'type': 'physical', 'name': 'eth0', |
1199 | + 'mac_address': '00:11:22:33:44:55', |
1200 | + 'params': {'driver': 'hv_netsvc'}, |
1201 | + 'subnets': [{'type': 'dhcp'}], |
1202 | + }] |
1203 | + } |
1204 | + mock_fallback.return_value = fallback_config |
1205 | + |
1206 | + mock_devlist.return_value = ['eth0'] |
1207 | + mock_dd.return_value = ['hv_netsvc'] |
1208 | + mock_get_mac.return_value = '00:11:22:33:44:55' |
1209 | + |
1210 | + dsrc = self._get_ds(data) |
1211 | + self.assertTrue(dsrc.get_data()) |
1212 | + self.assertEqual(dsrc.network_config, fallback_config) |
1213 | + |
1214 | + @mock.patch('cloudinit.net.get_interface_mac') |
1215 | + @mock.patch('cloudinit.net.get_devicelist') |
1216 | + @mock.patch('cloudinit.net.device_driver') |
1217 | + @mock.patch('cloudinit.net.generate_fallback_config') |
1218 | def test_fallback_network_config(self, mock_fallback, mock_dd, |
1219 | mock_devlist, mock_get_mac): |
1220 | """On absent IMDS network data, generate network fallback config.""" |
1221 | @@ -1411,21 +1541,20 @@ class TestCanDevBeReformatted(CiTestCase): |
1222 | '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []} |
1223 | }}}) |
1224 | |
1225 | - err = ("Unexpected error while running command.\n", |
1226 | - "Command: ['mount', '-o', 'ro,sync', '-t', 'auto', ", |
1227 | - "'/dev/sda1', '/fake-tmp/dir']\n" |
1228 | - "Exit code: 32\n" |
1229 | - "Reason: -\n" |
1230 | - "Stdout: -\n" |
1231 | - "Stderr: mount: unknown filesystem type 'ntfs'") |
1232 | - self.m_mount_cb.side_effect = MountFailedError( |
1233 | - 'Failed mounting %s to %s due to: %s' % |
1234 | - ('/dev/sda', '/fake-tmp/dir', err)) |
1235 | - |
1236 | - value, msg = dsaz.can_dev_be_reformatted('/dev/sda', |
1237 | - preserve_ntfs=False) |
1238 | - self.assertTrue(value) |
1239 | - self.assertIn('cannot mount NTFS, assuming', msg) |
1240 | + error_msgs = [ |
1241 | + "Stderr: mount: unknown filesystem type 'ntfs'", # RHEL |
1242 | + "Stderr: mount: /dev/sdb1: unknown filesystem type 'ntfs'" # SLES |
1243 | + ] |
1244 | + |
1245 | + for err_msg in error_msgs: |
1246 | + self.m_mount_cb.side_effect = MountFailedError( |
1247 | + "Failed mounting %s to %s due to: \nUnexpected.\n%s" % |
1248 | + ('/dev/sda', '/fake-tmp/dir', err_msg)) |
1249 | + |
1250 | + value, msg = dsaz.can_dev_be_reformatted('/dev/sda', |
1251 | + preserve_ntfs=False) |
1252 | + self.assertTrue(value) |
1253 | + self.assertIn('cannot mount NTFS, assuming', msg) |
1254 | |
1255 | def test_never_destroy_ntfs_config_false(self): |
1256 | """Normally formattable situation with never_destroy_ntfs set.""" |
1257 | diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py |
1258 | index 9f81255..1a5956d 100644 |
1259 | --- a/tests/unittests/test_datasource/test_ec2.py |
1260 | +++ b/tests/unittests/test_datasource/test_ec2.py |
1261 | @@ -211,9 +211,9 @@ class TestEc2(test_helpers.HttprettyTestCase): |
1262 | self.metadata_addr = self.datasource.metadata_urls[0] |
1263 | self.tmp = self.tmp_dir() |
1264 | |
1265 | - def data_url(self, version): |
1266 | + def data_url(self, version, data_item='meta-data'): |
1267 | """Return a metadata url based on the version provided.""" |
1268 | - return '/'.join([self.metadata_addr, version, 'meta-data', '']) |
1269 | + return '/'.join([self.metadata_addr, version, data_item]) |
1270 | |
1271 | def _patch_add_cleanup(self, mpath, *args, **kwargs): |
1272 | p = mock.patch(mpath, *args, **kwargs) |
1273 | @@ -238,10 +238,18 @@ class TestEc2(test_helpers.HttprettyTestCase): |
1274 | all_versions = ( |
1275 | [ds.min_metadata_version] + ds.extended_metadata_versions) |
1276 | for version in all_versions: |
1277 | - metadata_url = self.data_url(version) |
1278 | + metadata_url = self.data_url(version) + '/' |
1279 | if version == md_version: |
1280 | # Register all metadata for desired version |
1281 | - register_mock_metaserver(metadata_url, md) |
1282 | + register_mock_metaserver( |
1283 | + metadata_url, md.get('md', DEFAULT_METADATA)) |
1284 | + userdata_url = self.data_url( |
1285 | + version, data_item='user-data') |
1286 | + register_mock_metaserver(userdata_url, md.get('ud', '')) |
1287 | + identity_url = self.data_url( |
1288 | + version, data_item='dynamic/instance-identity') |
1289 | + register_mock_metaserver( |
1290 | + identity_url, md.get('id', DYNAMIC_METADATA)) |
1291 | else: |
1292 | instance_id_url = metadata_url + 'instance-id' |
1293 | if version == ds.min_metadata_version: |
1294 | @@ -261,7 +269,7 @@ class TestEc2(test_helpers.HttprettyTestCase): |
1295 | ds = self._setup_ds( |
1296 | platform_data=self.valid_platform_data, |
1297 | sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, |
1298 | - md=DEFAULT_METADATA) |
1299 | + md={'md': DEFAULT_METADATA}) |
1300 | find_fallback_path = ( |
1301 | 'cloudinit.sources.DataSourceEc2.net.find_fallback_nic') |
1302 | with mock.patch(find_fallback_path) as m_find_fallback: |
1303 | @@ -293,7 +301,7 @@ class TestEc2(test_helpers.HttprettyTestCase): |
1304 | ds = self._setup_ds( |
1305 | platform_data=self.valid_platform_data, |
1306 | sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, |
1307 | - md=DEFAULT_METADATA) |
1308 | + md={'md': DEFAULT_METADATA}) |
1309 | find_fallback_path = ( |
1310 | 'cloudinit.sources.DataSourceEc2.net.find_fallback_nic') |
1311 | with mock.patch(find_fallback_path) as m_find_fallback: |
1312 | @@ -322,7 +330,7 @@ class TestEc2(test_helpers.HttprettyTestCase): |
1313 | ds = self._setup_ds( |
1314 | platform_data=self.valid_platform_data, |
1315 | sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, |
1316 | - md=DEFAULT_METADATA) |
1317 | + md={'md': DEFAULT_METADATA}) |
1318 | ds._network_config = {'cached': 'data'} |
1319 | self.assertEqual({'cached': 'data'}, ds.network_config) |
1320 | |
1321 | @@ -338,7 +346,7 @@ class TestEc2(test_helpers.HttprettyTestCase): |
1322 | ds = self._setup_ds( |
1323 | platform_data=self.valid_platform_data, |
1324 | sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, |
1325 | - md=old_metadata) |
1326 | + md={'md': old_metadata}) |
1327 | self.assertTrue(ds.get_data()) |
1328 | # Provide new revision of metadata that contains network data |
1329 | register_mock_metaserver( |
1330 | @@ -372,7 +380,7 @@ class TestEc2(test_helpers.HttprettyTestCase): |
1331 | ds = self._setup_ds( |
1332 | platform_data=self.valid_platform_data, |
1333 | sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, |
1334 | - md=DEFAULT_METADATA) |
1335 | + md={'md': DEFAULT_METADATA}) |
1336 | # Mock 404s on all versions except latest |
1337 | all_versions = ( |
1338 | [ds.min_metadata_version] + ds.extended_metadata_versions) |
1339 | @@ -399,7 +407,7 @@ class TestEc2(test_helpers.HttprettyTestCase): |
1340 | ds = self._setup_ds( |
1341 | platform_data=self.valid_platform_data, |
1342 | sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, |
1343 | - md=DEFAULT_METADATA) |
1344 | + md={'md': DEFAULT_METADATA}) |
1345 | ret = ds.get_data() |
1346 | self.assertTrue(ret) |
1347 | self.assertEqual(0, m_dhcp.call_count) |
1348 | @@ -412,7 +420,7 @@ class TestEc2(test_helpers.HttprettyTestCase): |
1349 | ds = self._setup_ds( |
1350 | platform_data=self.valid_platform_data, |
1351 | sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, |
1352 | - md=DEFAULT_METADATA) |
1353 | + md={'md': DEFAULT_METADATA}) |
1354 | ret = ds.get_data() |
1355 | self.assertTrue(ret) |
1356 | |
1357 | @@ -422,7 +430,7 @@ class TestEc2(test_helpers.HttprettyTestCase): |
1358 | ds = self._setup_ds( |
1359 | platform_data={'uuid': uuid, 'uuid_source': 'dmi', 'serial': ''}, |
1360 | sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, |
1361 | - md=DEFAULT_METADATA) |
1362 | + md={'md': DEFAULT_METADATA}) |
1363 | ret = ds.get_data() |
1364 | self.assertFalse(ret) |
1365 | |
1366 | @@ -432,7 +440,7 @@ class TestEc2(test_helpers.HttprettyTestCase): |
1367 | ds = self._setup_ds( |
1368 | platform_data={'uuid': uuid, 'uuid_source': 'dmi', 'serial': ''}, |
1369 | sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, |
1370 | - md=DEFAULT_METADATA) |
1371 | + md={'md': DEFAULT_METADATA}) |
1372 | ret = ds.get_data() |
1373 | self.assertTrue(ret) |
1374 | |
1375 | @@ -442,7 +450,7 @@ class TestEc2(test_helpers.HttprettyTestCase): |
1376 | ds = self._setup_ds( |
1377 | platform_data=self.valid_platform_data, |
1378 | sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, |
1379 | - md=DEFAULT_METADATA) |
1380 | + md={'md': DEFAULT_METADATA}) |
1381 | platform_attrs = [ |
1382 | attr for attr in ec2.CloudNames.__dict__.keys() |
1383 | if not attr.startswith('__')] |
1384 | @@ -469,7 +477,7 @@ class TestEc2(test_helpers.HttprettyTestCase): |
1385 | ds = self._setup_ds( |
1386 | platform_data=self.valid_platform_data, |
1387 | sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, |
1388 | - md=DEFAULT_METADATA) |
1389 | + md={'md': DEFAULT_METADATA}) |
1390 | ret = ds.get_data() |
1391 | self.assertFalse(ret) |
1392 | self.assertIn( |
1393 | @@ -499,7 +507,7 @@ class TestEc2(test_helpers.HttprettyTestCase): |
1394 | ds = self._setup_ds( |
1395 | platform_data=self.valid_platform_data, |
1396 | sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, |
1397 | - md=DEFAULT_METADATA) |
1398 | + md={'md': DEFAULT_METADATA}) |
1399 | |
1400 | ret = ds.get_data() |
1401 | self.assertTrue(ret) |
1402 | diff --git a/udev/66-azure-ephemeral.rules b/udev/66-azure-ephemeral.rules |
1403 | index b9c5c3e..3032f7e 100644 |
1404 | --- a/udev/66-azure-ephemeral.rules |
1405 | +++ b/udev/66-azure-ephemeral.rules |
1406 | @@ -4,10 +4,26 @@ SUBSYSTEM!="block", GOTO="cloud_init_end" |
1407 | ATTRS{ID_VENDOR}!="Msft", GOTO="cloud_init_end" |
1408 | ATTRS{ID_MODEL}!="Virtual_Disk", GOTO="cloud_init_end" |
1409 | |
1410 | -# Root has a GUID of 0000 as the second value |
1411 | +# Root has a GUID of 0000 as the second value on Gen1 instances |
1412 | # The resource/resource has GUID of 0001 as the second value |
1413 | ATTRS{device_id}=="?00000000-0000-*", ENV{fabric_name}="azure_root", GOTO="ci_azure_names" |
1414 | ATTRS{device_id}=="?00000000-0001-*", ENV{fabric_name}="azure_resource", GOTO="ci_azure_names" |
1415 | + |
1416 | +# Azure well known SCSI controllers on Gen2 instances |
1417 | +ATTRS{device_id}=="{f8b3781a-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi0", GOTO="azure_datadisk" |
1418 | +# Do not create symlinks for scsi[1-3] or unmatched device_ids |
1419 | +ATTRS{device_id}=="{f8b3781b-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi1", GOTO="cloud_init_end" |
1420 | +ATTRS{device_id}=="{f8b3781c-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi2", GOTO="cloud_init_end" |
1421 | +ATTRS{device_id}=="{f8b3781d-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi3", GOTO="cloud_init_end" |
1422 | +GOTO="cloud_init_end" |
1423 | + |
1424 | +# Map scsi#/lun# fabric_name to azure_root|resource on Gen2 instances |
1425 | +LABEL="azure_datadisk" |
1426 | +ENV{DEVTYPE}=="partition", PROGRAM="/bin/sh -c 'readlink /sys/class/block/%k/../device|cut -d: -f4'", ENV{fabric_name}="$env{fabric_scsi_controller}/lun$result" |
1427 | +ENV{DEVTYPE}=="disk", PROGRAM="/bin/sh -c 'readlink /sys/class/block/%k/device|cut -d: -f4'", ENV{fabric_name}="$env{fabric_scsi_controller}/lun$result" |
1428 | + |
1429 | +ENV{fabric_name}=="scsi0/lun0", ENV{fabric_name}="azure_root", GOTO="ci_azure_names" |
1430 | +ENV{fabric_name}=="scsi0/lun1", ENV{fabric_name}="azure_resource", GOTO="ci_azure_names" |
1431 | GOTO="cloud_init_end" |
1432 | |
1433 | # Create the symlinks |
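
Note on the Azure network-config tests above: they exercise the datasource option datasource.Azure.apply_network_config, which decides whether the network config is built from IMDS metadata or from the kernel fallback config. The snippet below is only a minimal, hypothetical sketch of that decision for readers skimming the diff; the helper name pick_network_config and its arguments are invented for illustration, and the assumed default of True is not taken from this diff (the real logic lives in cloudinit/sources/DataSourceAzure.py):

    # Hypothetical sketch, not the datasource's actual implementation.
    def pick_network_config(sys_cfg, imds_config, fallback_config):
        """Return the IMDS-derived config only when the Azure datasource
        option apply_network_config is enabled; otherwise use fallback."""
        ds_cfg = sys_cfg.get('datasource', {}).get('Azure', {})
        # Default of True is an assumption made for this sketch.
        if ds_cfg.get('apply_network_config', True) and imds_config:
            return imds_config
        return fallback_config

This mirrors what the new tests assert: with apply_network_config set to True the 'ethernets' config derived from IMDS is used, and with it set to False the mocked fallback config is returned unchanged.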
PASSED: Continuous integration, rev:db78de17b180d531624fd1e296246692d3db18bb
https://jenkins.ubuntu.com/server/job/cloud-init-ci/438/
Executed test runs:
SUCCESS: Checkout
SUCCESS: Unit & Style Tests
SUCCESS: Ubuntu LTS: Build
SUCCESS: Ubuntu LTS: Integration
IN_PROGRESS: Declarative: Post Actions
Click here to trigger a rebuild:
https://jenkins.ubuntu.com/server/job/cloud-init-ci/438/rebuild