Merge ~chad.smith/cloud-init:ubuntu/devel into cloud-init:ubuntu/devel
- Git
- lp:~chad.smith/cloud-init
- ubuntu/devel
- Merge into ubuntu/devel
Status: Merged
Merged at revision: 5e4916bdc4f7be6bcbf5650e93ea8835cc495b95
Proposed branch: ~chad.smith/cloud-init:ubuntu/devel
Merge into: cloud-init:ubuntu/devel
Diff against target: 1433 lines (+661/-139), 21 files modified:
cloudinit/cmd/devel/render.py (+24/-11), cloudinit/cmd/devel/tests/test_render.py (+44/-1), cloudinit/cmd/query.py (+24/-12), cloudinit/cmd/tests/test_query.py (+71/-5), cloudinit/config/cc_disk_setup.py (+1/-1), cloudinit/handlers/jinja_template.py (+9/-1), cloudinit/net/dhcp.py (+32/-10), cloudinit/sources/DataSourceAzure.py (+46/-33), cloudinit/tests/test_url_helper.py (+24/-1), cloudinit/tests/test_util.py (+66/-17), cloudinit/url_helper.py (+25/-6), cloudinit/util.py (+4/-3), debian/changelog (+28/-0), doc/rtd/topics/datasources/azure.rst (+46/-0), packages/redhat/cloud-init.spec.in (+1/-0), packages/suse/cloud-init.spec.in (+1/-0), systemd/cloud-init.service.tmpl (+1/-2), tests/unittests/test_builtin_handlers.py (+25/-0), tests/unittests/test_datasource/test_azure.py (+148/-19), tests/unittests/test_datasource/test_ec2.py (+24/-16), udev/66-azure-ephemeral.rules (+17/-1)
Related bugs: none listed
Reviewer | Review Type | Status
---|---|---
Server Team CI bot | continuous-integration | Approve
Ryan Harper | | Approve

Review via email: mp+358684@code.launchpad.net
Commit message
new upstream snapshot for upload into Disco. Note changed series represented in debian/changelog
Description of the change
Server Team CI bot (server-team-bot) wrote:
Ryan Harper (raharper) wrote:
I get the same as you, but something is causing a typo in the LP: of the first changelog entry:
+cloud-init (18.4-22-
+
+ * New upstream snapshot.
+ - azure: retry imds polling on requests.Timeout (LP: LP:1800223)
I think we can fix up in the changelog commit even if the original commit has a busted comment.
Chad Smith (chad.smith) wrote:
+1, I had forgotten to --force on my push which did just that. fixed.
Server Team CI bot (server-team-bot) wrote:
PASSED: Continuous integration, rev:5e4916bdc4f
https:/ [CI results URL truncated in page extraction]
Executed test runs:
SUCCESS: Checkout
SUCCESS: Unit & Style Tests
SUCCESS: Ubuntu LTS: Build
SUCCESS: Ubuntu LTS: Integration
IN_PROGRESS: Declarative: Post Actions
Click here to trigger a rebuild:
https:/ [rebuild-trigger URL truncated in page extraction]
Preview Diff
1 | diff --git a/cloudinit/cmd/devel/render.py b/cloudinit/cmd/devel/render.py | |||
2 | index 2ba6b68..1bc2240 100755 | |||
3 | --- a/cloudinit/cmd/devel/render.py | |||
4 | +++ b/cloudinit/cmd/devel/render.py | |||
5 | @@ -8,11 +8,10 @@ import sys | |||
6 | 8 | 8 | ||
7 | 9 | from cloudinit.handlers.jinja_template import render_jinja_payload_from_file | 9 | from cloudinit.handlers.jinja_template import render_jinja_payload_from_file |
8 | 10 | from cloudinit import log | 10 | from cloudinit import log |
10 | 11 | from cloudinit.sources import INSTANCE_JSON_FILE | 11 | from cloudinit.sources import INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE |
11 | 12 | from . import addLogHandlerCLI, read_cfg_paths | 12 | from . import addLogHandlerCLI, read_cfg_paths |
12 | 13 | 13 | ||
13 | 14 | NAME = 'render' | 14 | NAME = 'render' |
14 | 15 | DEFAULT_INSTANCE_DATA = '/run/cloud-init/instance-data.json' | ||
15 | 16 | 15 | ||
16 | 17 | LOG = log.getLogger(NAME) | 16 | LOG = log.getLogger(NAME) |
17 | 18 | 17 | ||
18 | @@ -47,12 +46,22 @@ def handle_args(name, args): | |||
19 | 47 | @return 0 on success, 1 on failure. | 46 | @return 0 on success, 1 on failure. |
20 | 48 | """ | 47 | """ |
21 | 49 | addLogHandlerCLI(LOG, log.DEBUG if args.debug else log.WARNING) | 48 | addLogHandlerCLI(LOG, log.DEBUG if args.debug else log.WARNING) |
27 | 50 | if not args.instance_data: | 49 | if args.instance_data: |
23 | 51 | paths = read_cfg_paths() | ||
24 | 52 | instance_data_fn = os.path.join( | ||
25 | 53 | paths.run_dir, INSTANCE_JSON_FILE) | ||
26 | 54 | else: | ||
28 | 55 | instance_data_fn = args.instance_data | 50 | instance_data_fn = args.instance_data |
29 | 51 | else: | ||
30 | 52 | paths = read_cfg_paths() | ||
31 | 53 | uid = os.getuid() | ||
32 | 54 | redacted_data_fn = os.path.join(paths.run_dir, INSTANCE_JSON_FILE) | ||
33 | 55 | if uid == 0: | ||
34 | 56 | instance_data_fn = os.path.join( | ||
35 | 57 | paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE) | ||
36 | 58 | if not os.path.exists(instance_data_fn): | ||
37 | 59 | LOG.warning( | ||
38 | 60 | 'Missing root-readable %s. Using redacted %s instead.', | ||
39 | 61 | instance_data_fn, redacted_data_fn) | ||
40 | 62 | instance_data_fn = redacted_data_fn | ||
41 | 63 | else: | ||
42 | 64 | instance_data_fn = redacted_data_fn | ||
43 | 56 | if not os.path.exists(instance_data_fn): | 65 | if not os.path.exists(instance_data_fn): |
44 | 57 | LOG.error('Missing instance-data.json file: %s', instance_data_fn) | 66 | LOG.error('Missing instance-data.json file: %s', instance_data_fn) |
45 | 58 | return 1 | 67 | return 1 |
46 | @@ -62,10 +71,14 @@ def handle_args(name, args): | |||
47 | 62 | except IOError: | 71 | except IOError: |
48 | 63 | LOG.error('Missing user-data file: %s', args.user_data) | 72 | LOG.error('Missing user-data file: %s', args.user_data) |
49 | 64 | return 1 | 73 | return 1 |
54 | 65 | rendered_payload = render_jinja_payload_from_file( | 74 | try: |
55 | 66 | payload=user_data, payload_fn=args.user_data, | 75 | rendered_payload = render_jinja_payload_from_file( |
56 | 67 | instance_data_file=instance_data_fn, | 76 | payload=user_data, payload_fn=args.user_data, |
57 | 68 | debug=True if args.debug else False) | 77 | instance_data_file=instance_data_fn, |
58 | 78 | debug=True if args.debug else False) | ||
59 | 79 | except RuntimeError as e: | ||
60 | 80 | LOG.error('Cannot render from instance data: %s', str(e)) | ||
61 | 81 | return 1 | ||
62 | 69 | if not rendered_payload: | 82 | if not rendered_payload: |
63 | 70 | LOG.error('Unable to render user-data file: %s', args.user_data) | 83 | LOG.error('Unable to render user-data file: %s', args.user_data) |
64 | 71 | return 1 | 84 | return 1 |
65 | diff --git a/cloudinit/cmd/devel/tests/test_render.py b/cloudinit/cmd/devel/tests/test_render.py | |||
66 | index fc5d2c0..988bba0 100644 | |||
67 | --- a/cloudinit/cmd/devel/tests/test_render.py | |||
68 | +++ b/cloudinit/cmd/devel/tests/test_render.py | |||
69 | @@ -6,7 +6,7 @@ import os | |||
70 | 6 | from collections import namedtuple | 6 | from collections import namedtuple |
71 | 7 | from cloudinit.cmd.devel import render | 7 | from cloudinit.cmd.devel import render |
72 | 8 | from cloudinit.helpers import Paths | 8 | from cloudinit.helpers import Paths |
74 | 9 | from cloudinit.sources import INSTANCE_JSON_FILE | 9 | from cloudinit.sources import INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE |
75 | 10 | from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJinja | 10 | from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJinja |
76 | 11 | from cloudinit.util import ensure_dir, write_file | 11 | from cloudinit.util import ensure_dir, write_file |
77 | 12 | 12 | ||
78 | @@ -63,6 +63,49 @@ class TestRender(CiTestCase): | |||
79 | 63 | 'Missing instance-data.json file: %s' % json_file, | 63 | 'Missing instance-data.json file: %s' % json_file, |
80 | 64 | self.logs.getvalue()) | 64 | self.logs.getvalue()) |
81 | 65 | 65 | ||
82 | 66 | def test_handle_args_root_fallback_from_sensitive_instance_data(self): | ||
83 | 67 | """When root user defaults to sensitive.json.""" | ||
84 | 68 | user_data = self.tmp_path('user-data', dir=self.tmp) | ||
85 | 69 | run_dir = self.tmp_path('run_dir', dir=self.tmp) | ||
86 | 70 | ensure_dir(run_dir) | ||
87 | 71 | paths = Paths({'run_dir': run_dir}) | ||
88 | 72 | self.add_patch('cloudinit.cmd.devel.render.read_cfg_paths', 'm_paths') | ||
89 | 73 | self.m_paths.return_value = paths | ||
90 | 74 | args = self.args( | ||
91 | 75 | user_data=user_data, instance_data=None, debug=False) | ||
92 | 76 | with mock.patch('sys.stderr', new_callable=StringIO): | ||
93 | 77 | with mock.patch('os.getuid') as m_getuid: | ||
94 | 78 | m_getuid.return_value = 0 | ||
95 | 79 | self.assertEqual(1, render.handle_args('anyname', args)) | ||
96 | 80 | json_file = os.path.join(run_dir, INSTANCE_JSON_FILE) | ||
97 | 81 | json_sensitive = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE) | ||
98 | 82 | self.assertIn( | ||
99 | 83 | 'WARNING: Missing root-readable %s. Using redacted %s' % ( | ||
100 | 84 | json_sensitive, json_file), self.logs.getvalue()) | ||
101 | 85 | self.assertIn( | ||
102 | 86 | 'ERROR: Missing instance-data.json file: %s' % json_file, | ||
103 | 87 | self.logs.getvalue()) | ||
104 | 88 | |||
105 | 89 | def test_handle_args_root_uses_sensitive_instance_data(self): | ||
106 | 90 | """When root user, and no instance-data arg, use sensitive.json.""" | ||
107 | 91 | user_data = self.tmp_path('user-data', dir=self.tmp) | ||
108 | 92 | write_file(user_data, '##template: jinja\nrendering: {{ my_var }}') | ||
109 | 93 | run_dir = self.tmp_path('run_dir', dir=self.tmp) | ||
110 | 94 | ensure_dir(run_dir) | ||
111 | 95 | json_sensitive = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE) | ||
112 | 96 | write_file(json_sensitive, '{"my-var": "jinja worked"}') | ||
113 | 97 | paths = Paths({'run_dir': run_dir}) | ||
114 | 98 | self.add_patch('cloudinit.cmd.devel.render.read_cfg_paths', 'm_paths') | ||
115 | 99 | self.m_paths.return_value = paths | ||
116 | 100 | args = self.args( | ||
117 | 101 | user_data=user_data, instance_data=None, debug=False) | ||
118 | 102 | with mock.patch('sys.stderr', new_callable=StringIO): | ||
119 | 103 | with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: | ||
120 | 104 | with mock.patch('os.getuid') as m_getuid: | ||
121 | 105 | m_getuid.return_value = 0 | ||
122 | 106 | self.assertEqual(0, render.handle_args('anyname', args)) | ||
123 | 107 | self.assertIn('rendering: jinja worked', m_stdout.getvalue()) | ||
124 | 108 | |||
125 | 66 | @skipUnlessJinja() | 109 | @skipUnlessJinja() |
126 | 67 | def test_handle_args_renders_instance_data_vars_in_template(self): | 110 | def test_handle_args_renders_instance_data_vars_in_template(self): |
127 | 68 | """If user_data file is a jinja template render instance-data vars.""" | 111 | """If user_data file is a jinja template render instance-data vars.""" |
128 | diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py | |||
129 | index 7d2d4fe..1d888b9 100644 | |||
130 | --- a/cloudinit/cmd/query.py | |||
131 | +++ b/cloudinit/cmd/query.py | |||
132 | @@ -3,6 +3,7 @@ | |||
133 | 3 | """Query standardized instance metadata from the command line.""" | 3 | """Query standardized instance metadata from the command line.""" |
134 | 4 | 4 | ||
135 | 5 | import argparse | 5 | import argparse |
136 | 6 | from errno import EACCES | ||
137 | 6 | import os | 7 | import os |
138 | 7 | import six | 8 | import six |
139 | 8 | import sys | 9 | import sys |
140 | @@ -79,27 +80,38 @@ def handle_args(name, args): | |||
141 | 79 | uid = os.getuid() | 80 | uid = os.getuid() |
142 | 80 | if not all([args.instance_data, args.user_data, args.vendor_data]): | 81 | if not all([args.instance_data, args.user_data, args.vendor_data]): |
143 | 81 | paths = read_cfg_paths() | 82 | paths = read_cfg_paths() |
145 | 82 | if not args.instance_data: | 83 | if args.instance_data: |
146 | 84 | instance_data_fn = args.instance_data | ||
147 | 85 | else: | ||
148 | 86 | redacted_data_fn = os.path.join(paths.run_dir, INSTANCE_JSON_FILE) | ||
149 | 83 | if uid == 0: | 87 | if uid == 0: |
151 | 84 | default_json_fn = INSTANCE_JSON_SENSITIVE_FILE | 88 | sensitive_data_fn = os.path.join( |
152 | 89 | paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE) | ||
153 | 90 | if os.path.exists(sensitive_data_fn): | ||
154 | 91 | instance_data_fn = sensitive_data_fn | ||
155 | 92 | else: | ||
156 | 93 | LOG.warning( | ||
157 | 94 | 'Missing root-readable %s. Using redacted %s instead.', | ||
158 | 95 | sensitive_data_fn, redacted_data_fn) | ||
159 | 96 | instance_data_fn = redacted_data_fn | ||
160 | 85 | else: | 97 | else: |
163 | 86 | default_json_fn = INSTANCE_JSON_FILE # World readable | 98 | instance_data_fn = redacted_data_fn |
164 | 87 | instance_data_fn = os.path.join(paths.run_dir, default_json_fn) | 99 | if args.user_data: |
165 | 100 | user_data_fn = args.user_data | ||
166 | 88 | else: | 101 | else: |
167 | 89 | instance_data_fn = args.instance_data | ||
168 | 90 | if not args.user_data: | ||
169 | 91 | user_data_fn = os.path.join(paths.instance_link, 'user-data.txt') | 102 | user_data_fn = os.path.join(paths.instance_link, 'user-data.txt') |
170 | 103 | if args.vendor_data: | ||
171 | 104 | vendor_data_fn = args.vendor_data | ||
172 | 92 | else: | 105 | else: |
173 | 93 | user_data_fn = args.user_data | ||
174 | 94 | if not args.vendor_data: | ||
175 | 95 | vendor_data_fn = os.path.join(paths.instance_link, 'vendor-data.txt') | 106 | vendor_data_fn = os.path.join(paths.instance_link, 'vendor-data.txt') |
176 | 96 | else: | ||
177 | 97 | vendor_data_fn = args.vendor_data | ||
178 | 98 | 107 | ||
179 | 99 | try: | 108 | try: |
180 | 100 | instance_json = util.load_file(instance_data_fn) | 109 | instance_json = util.load_file(instance_data_fn) |
183 | 101 | except IOError: | 110 | except (IOError, OSError) as e: |
184 | 102 | LOG.error('Missing instance-data.json file: %s', instance_data_fn) | 111 | if e.errno == EACCES: |
185 | 112 | LOG.error("No read permission on '%s'. Try sudo", instance_data_fn) | ||
186 | 113 | else: | ||
187 | 114 | LOG.error('Missing instance-data file: %s', instance_data_fn) | ||
188 | 103 | return 1 | 115 | return 1 |
189 | 104 | 116 | ||
190 | 105 | instance_data = util.load_json(instance_json) | 117 | instance_data = util.load_json(instance_json) |
191 | diff --git a/cloudinit/cmd/tests/test_query.py b/cloudinit/cmd/tests/test_query.py | |||
192 | index fb87c6a..28738b1 100644 | |||
193 | --- a/cloudinit/cmd/tests/test_query.py | |||
194 | +++ b/cloudinit/cmd/tests/test_query.py | |||
195 | @@ -1,5 +1,6 @@ | |||
196 | 1 | # This file is part of cloud-init. See LICENSE file for license information. | 1 | # This file is part of cloud-init. See LICENSE file for license information. |
197 | 2 | 2 | ||
198 | 3 | import errno | ||
199 | 3 | from six import StringIO | 4 | from six import StringIO |
200 | 4 | from textwrap import dedent | 5 | from textwrap import dedent |
201 | 5 | import os | 6 | import os |
202 | @@ -7,7 +8,8 @@ import os | |||
203 | 7 | from collections import namedtuple | 8 | from collections import namedtuple |
204 | 8 | from cloudinit.cmd import query | 9 | from cloudinit.cmd import query |
205 | 9 | from cloudinit.helpers import Paths | 10 | from cloudinit.helpers import Paths |
207 | 10 | from cloudinit.sources import REDACT_SENSITIVE_VALUE, INSTANCE_JSON_FILE | 11 | from cloudinit.sources import ( |
208 | 12 | REDACT_SENSITIVE_VALUE, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE) | ||
209 | 11 | from cloudinit.tests.helpers import CiTestCase, mock | 13 | from cloudinit.tests.helpers import CiTestCase, mock |
210 | 12 | from cloudinit.util import ensure_dir, write_file | 14 | from cloudinit.util import ensure_dir, write_file |
211 | 13 | 15 | ||
212 | @@ -50,10 +52,28 @@ class TestQuery(CiTestCase): | |||
213 | 50 | with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: | 52 | with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: |
214 | 51 | self.assertEqual(1, query.handle_args('anyname', args)) | 53 | self.assertEqual(1, query.handle_args('anyname', args)) |
215 | 52 | self.assertIn( | 54 | self.assertIn( |
217 | 53 | 'ERROR: Missing instance-data.json file: %s' % absent_fn, | 55 | 'ERROR: Missing instance-data file: %s' % absent_fn, |
218 | 54 | self.logs.getvalue()) | 56 | self.logs.getvalue()) |
219 | 55 | self.assertIn( | 57 | self.assertIn( |
221 | 56 | 'ERROR: Missing instance-data.json file: %s' % absent_fn, | 58 | 'ERROR: Missing instance-data file: %s' % absent_fn, |
222 | 59 | m_stderr.getvalue()) | ||
223 | 60 | |||
224 | 61 | def test_handle_args_error_when_no_read_permission_instance_data(self): | ||
225 | 62 | """When instance_data file is unreadable, log an error.""" | ||
226 | 63 | noread_fn = self.tmp_path('unreadable', dir=self.tmp) | ||
227 | 64 | write_file(noread_fn, 'thou shall not pass') | ||
228 | 65 | args = self.args( | ||
229 | 66 | debug=False, dump_all=True, format=None, instance_data=noread_fn, | ||
230 | 67 | list_keys=False, user_data='ud', vendor_data='vd', varname=None) | ||
231 | 68 | with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: | ||
232 | 69 | with mock.patch('cloudinit.cmd.query.util.load_file') as m_load: | ||
233 | 70 | m_load.side_effect = OSError(errno.EACCES, 'Not allowed') | ||
234 | 71 | self.assertEqual(1, query.handle_args('anyname', args)) | ||
235 | 72 | self.assertIn( | ||
236 | 73 | "ERROR: No read permission on '%s'. Try sudo" % noread_fn, | ||
237 | 74 | self.logs.getvalue()) | ||
238 | 75 | self.assertIn( | ||
239 | 76 | "ERROR: No read permission on '%s'. Try sudo" % noread_fn, | ||
240 | 57 | m_stderr.getvalue()) | 77 | m_stderr.getvalue()) |
241 | 58 | 78 | ||
242 | 59 | def test_handle_args_defaults_instance_data(self): | 79 | def test_handle_args_defaults_instance_data(self): |
243 | @@ -70,12 +90,58 @@ class TestQuery(CiTestCase): | |||
244 | 70 | self.assertEqual(1, query.handle_args('anyname', args)) | 90 | self.assertEqual(1, query.handle_args('anyname', args)) |
245 | 71 | json_file = os.path.join(run_dir, INSTANCE_JSON_FILE) | 91 | json_file = os.path.join(run_dir, INSTANCE_JSON_FILE) |
246 | 72 | self.assertIn( | 92 | self.assertIn( |
248 | 73 | 'ERROR: Missing instance-data.json file: %s' % json_file, | 93 | 'ERROR: Missing instance-data file: %s' % json_file, |
249 | 74 | self.logs.getvalue()) | 94 | self.logs.getvalue()) |
250 | 75 | self.assertIn( | 95 | self.assertIn( |
252 | 76 | 'ERROR: Missing instance-data.json file: %s' % json_file, | 96 | 'ERROR: Missing instance-data file: %s' % json_file, |
253 | 77 | m_stderr.getvalue()) | 97 | m_stderr.getvalue()) |
254 | 78 | 98 | ||
255 | 99 | def test_handle_args_root_fallsback_to_instance_data(self): | ||
256 | 100 | """When no instance_data argument, root falls back to redacted json.""" | ||
257 | 101 | args = self.args( | ||
258 | 102 | debug=False, dump_all=True, format=None, instance_data=None, | ||
259 | 103 | list_keys=False, user_data=None, vendor_data=None, varname=None) | ||
260 | 104 | run_dir = self.tmp_path('run_dir', dir=self.tmp) | ||
261 | 105 | ensure_dir(run_dir) | ||
262 | 106 | paths = Paths({'run_dir': run_dir}) | ||
263 | 107 | self.add_patch('cloudinit.cmd.query.read_cfg_paths', 'm_paths') | ||
264 | 108 | self.m_paths.return_value = paths | ||
265 | 109 | with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: | ||
266 | 110 | with mock.patch('os.getuid') as m_getuid: | ||
267 | 111 | m_getuid.return_value = 0 | ||
268 | 112 | self.assertEqual(1, query.handle_args('anyname', args)) | ||
269 | 113 | json_file = os.path.join(run_dir, INSTANCE_JSON_FILE) | ||
270 | 114 | sensitive_file = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE) | ||
271 | 115 | self.assertIn( | ||
272 | 116 | 'WARNING: Missing root-readable %s. Using redacted %s instead.' % ( | ||
273 | 117 | sensitive_file, json_file), | ||
274 | 118 | m_stderr.getvalue()) | ||
275 | 119 | |||
276 | 120 | def test_handle_args_root_uses_instance_sensitive_data(self): | ||
277 | 121 | """When no instance_data argument, root uses semsitive json.""" | ||
278 | 122 | user_data = self.tmp_path('user-data', dir=self.tmp) | ||
279 | 123 | vendor_data = self.tmp_path('vendor-data', dir=self.tmp) | ||
280 | 124 | write_file(user_data, 'ud') | ||
281 | 125 | write_file(vendor_data, 'vd') | ||
282 | 126 | run_dir = self.tmp_path('run_dir', dir=self.tmp) | ||
283 | 127 | sensitive_file = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE) | ||
284 | 128 | write_file(sensitive_file, '{"my-var": "it worked"}') | ||
285 | 129 | ensure_dir(run_dir) | ||
286 | 130 | paths = Paths({'run_dir': run_dir}) | ||
287 | 131 | self.add_patch('cloudinit.cmd.query.read_cfg_paths', 'm_paths') | ||
288 | 132 | self.m_paths.return_value = paths | ||
289 | 133 | args = self.args( | ||
290 | 134 | debug=False, dump_all=True, format=None, instance_data=None, | ||
291 | 135 | list_keys=False, user_data=vendor_data, vendor_data=vendor_data, | ||
292 | 136 | varname=None) | ||
293 | 137 | with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: | ||
294 | 138 | with mock.patch('os.getuid') as m_getuid: | ||
295 | 139 | m_getuid.return_value = 0 | ||
296 | 140 | self.assertEqual(0, query.handle_args('anyname', args)) | ||
297 | 141 | self.assertEqual( | ||
298 | 142 | '{\n "my_var": "it worked",\n "userdata": "vd",\n ' | ||
299 | 143 | '"vendordata": "vd"\n}\n', m_stdout.getvalue()) | ||
300 | 144 | |||
301 | 79 | def test_handle_args_dumps_all_instance_data(self): | 145 | def test_handle_args_dumps_all_instance_data(self): |
302 | 80 | """When --all is specified query will dump all instance data vars.""" | 146 | """When --all is specified query will dump all instance data vars.""" |
303 | 81 | write_file(self.instance_data, '{"my-var": "it worked"}') | 147 | write_file(self.instance_data, '{"my-var": "it worked"}') |
304 | diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py | |||
305 | index 943089e..29e192e 100644 | |||
306 | --- a/cloudinit/config/cc_disk_setup.py | |||
307 | +++ b/cloudinit/config/cc_disk_setup.py | |||
308 | @@ -743,7 +743,7 @@ def assert_and_settle_device(device): | |||
309 | 743 | util.udevadm_settle() | 743 | util.udevadm_settle() |
310 | 744 | if not os.path.exists(device): | 744 | if not os.path.exists(device): |
311 | 745 | raise RuntimeError("Device %s did not exist and was not created " | 745 | raise RuntimeError("Device %s did not exist and was not created " |
313 | 746 | "with a udevamd settle." % device) | 746 | "with a udevadm settle." % device) |
314 | 747 | 747 | ||
315 | 748 | # Whether or not the device existed above, it is possible that udev | 748 | # Whether or not the device existed above, it is possible that udev |
316 | 749 | # events that would populate udev database (for reading by lsdname) have | 749 | # events that would populate udev database (for reading by lsdname) have |
317 | diff --git a/cloudinit/handlers/jinja_template.py b/cloudinit/handlers/jinja_template.py | |||
318 | index 3fa4097..ce3accf 100644 | |||
319 | --- a/cloudinit/handlers/jinja_template.py | |||
320 | +++ b/cloudinit/handlers/jinja_template.py | |||
321 | @@ -1,5 +1,6 @@ | |||
322 | 1 | # This file is part of cloud-init. See LICENSE file for license information. | 1 | # This file is part of cloud-init. See LICENSE file for license information. |
323 | 2 | 2 | ||
324 | 3 | from errno import EACCES | ||
325 | 3 | import os | 4 | import os |
326 | 4 | import re | 5 | import re |
327 | 5 | 6 | ||
328 | @@ -76,7 +77,14 @@ def render_jinja_payload_from_file( | |||
329 | 76 | raise RuntimeError( | 77 | raise RuntimeError( |
330 | 77 | 'Cannot render jinja template vars. Instance data not yet' | 78 | 'Cannot render jinja template vars. Instance data not yet' |
331 | 78 | ' present at %s' % instance_data_file) | 79 | ' present at %s' % instance_data_file) |
333 | 79 | instance_data = load_json(load_file(instance_data_file)) | 80 | try: |
334 | 81 | instance_data = load_json(load_file(instance_data_file)) | ||
335 | 82 | except (IOError, OSError) as e: | ||
336 | 83 | if e.errno == EACCES: | ||
337 | 84 | raise RuntimeError( | ||
338 | 85 | 'Cannot render jinja template vars. No read permission on' | ||
339 | 86 | " '%s'. Try sudo" % instance_data_file) | ||
340 | 87 | |||
341 | 80 | rendered_payload = render_jinja_payload( | 88 | rendered_payload = render_jinja_payload( |
342 | 81 | payload, payload_fn, instance_data, debug) | 89 | payload, payload_fn, instance_data, debug) |
343 | 82 | if not rendered_payload: | 90 | if not rendered_payload: |
344 | diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py | |||
345 | index 12cf509..bdc5799 100644 | |||
346 | --- a/cloudinit/net/dhcp.py | |||
347 | +++ b/cloudinit/net/dhcp.py | |||
348 | @@ -40,34 +40,56 @@ class EphemeralDHCPv4(object): | |||
349 | 40 | def __init__(self, iface=None): | 40 | def __init__(self, iface=None): |
350 | 41 | self.iface = iface | 41 | self.iface = iface |
351 | 42 | self._ephipv4 = None | 42 | self._ephipv4 = None |
352 | 43 | self.lease = None | ||
353 | 43 | 44 | ||
354 | 44 | def __enter__(self): | 45 | def __enter__(self): |
355 | 46 | """Setup sandboxed dhcp context.""" | ||
356 | 47 | return self.obtain_lease() | ||
357 | 48 | |||
358 | 49 | def __exit__(self, excp_type, excp_value, excp_traceback): | ||
359 | 50 | """Teardown sandboxed dhcp context.""" | ||
360 | 51 | self.clean_network() | ||
361 | 52 | |||
362 | 53 | def clean_network(self): | ||
363 | 54 | """Exit _ephipv4 context to teardown of ip configuration performed.""" | ||
364 | 55 | if self.lease: | ||
365 | 56 | self.lease = None | ||
366 | 57 | if not self._ephipv4: | ||
367 | 58 | return | ||
368 | 59 | self._ephipv4.__exit__(None, None, None) | ||
369 | 60 | |||
370 | 61 | def obtain_lease(self): | ||
371 | 62 | """Perform dhcp discovery in a sandboxed environment if possible. | ||
372 | 63 | |||
373 | 64 | @return: A dict representing dhcp options on the most recent lease | ||
374 | 65 | obtained from the dhclient discovery if run, otherwise an error | ||
375 | 66 | is raised. | ||
376 | 67 | |||
377 | 68 | @raises: NoDHCPLeaseError if no leases could be obtained. | ||
378 | 69 | """ | ||
379 | 70 | if self.lease: | ||
380 | 71 | return self.lease | ||
381 | 45 | try: | 72 | try: |
382 | 46 | leases = maybe_perform_dhcp_discovery(self.iface) | 73 | leases = maybe_perform_dhcp_discovery(self.iface) |
383 | 47 | except InvalidDHCPLeaseFileError: | 74 | except InvalidDHCPLeaseFileError: |
384 | 48 | raise NoDHCPLeaseError() | 75 | raise NoDHCPLeaseError() |
385 | 49 | if not leases: | 76 | if not leases: |
386 | 50 | raise NoDHCPLeaseError() | 77 | raise NoDHCPLeaseError() |
388 | 51 | lease = leases[-1] | 78 | self.lease = leases[-1] |
389 | 52 | LOG.debug("Received dhcp lease on %s for %s/%s", | 79 | LOG.debug("Received dhcp lease on %s for %s/%s", |
392 | 53 | lease['interface'], lease['fixed-address'], | 80 | self.lease['interface'], self.lease['fixed-address'], |
393 | 54 | lease['subnet-mask']) | 81 | self.lease['subnet-mask']) |
394 | 55 | nmap = {'interface': 'interface', 'ip': 'fixed-address', | 82 | nmap = {'interface': 'interface', 'ip': 'fixed-address', |
395 | 56 | 'prefix_or_mask': 'subnet-mask', | 83 | 'prefix_or_mask': 'subnet-mask', |
396 | 57 | 'broadcast': 'broadcast-address', | 84 | 'broadcast': 'broadcast-address', |
397 | 58 | 'router': 'routers'} | 85 | 'router': 'routers'} |
399 | 59 | kwargs = dict([(k, lease.get(v)) for k, v in nmap.items()]) | 86 | kwargs = dict([(k, self.lease.get(v)) for k, v in nmap.items()]) |
400 | 60 | if not kwargs['broadcast']: | 87 | if not kwargs['broadcast']: |
401 | 61 | kwargs['broadcast'] = bcip(kwargs['prefix_or_mask'], kwargs['ip']) | 88 | kwargs['broadcast'] = bcip(kwargs['prefix_or_mask'], kwargs['ip']) |
402 | 62 | ephipv4 = EphemeralIPv4Network(**kwargs) | 89 | ephipv4 = EphemeralIPv4Network(**kwargs) |
403 | 63 | ephipv4.__enter__() | 90 | ephipv4.__enter__() |
404 | 64 | self._ephipv4 = ephipv4 | 91 | self._ephipv4 = ephipv4 |
411 | 65 | return lease | 92 | return self.lease |
406 | 66 | |||
407 | 67 | def __exit__(self, excp_type, excp_value, excp_traceback): | ||
408 | 68 | if not self._ephipv4: | ||
409 | 69 | return | ||
410 | 70 | self._ephipv4.__exit__(excp_type, excp_value, excp_traceback) | ||
412 | 71 | 93 | ||
413 | 72 | 94 | ||
414 | 73 | def maybe_perform_dhcp_discovery(nic=None): | 95 | def maybe_perform_dhcp_discovery(nic=None): |
415 | diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py | |||
416 | index 39391d0..9e8a1a8 100644 | |||
417 | --- a/cloudinit/sources/DataSourceAzure.py | |||
418 | +++ b/cloudinit/sources/DataSourceAzure.py | |||
419 | @@ -22,7 +22,7 @@ from cloudinit.event import EventType | |||
420 | 22 | from cloudinit.net.dhcp import EphemeralDHCPv4 | 22 | from cloudinit.net.dhcp import EphemeralDHCPv4 |
421 | 23 | from cloudinit import sources | 23 | from cloudinit import sources |
422 | 24 | from cloudinit.sources.helpers.azure import get_metadata_from_fabric | 24 | from cloudinit.sources.helpers.azure import get_metadata_from_fabric |
424 | 25 | from cloudinit.url_helper import readurl, UrlError | 25 | from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc |
425 | 26 | from cloudinit import util | 26 | from cloudinit import util |
426 | 27 | 27 | ||
427 | 28 | LOG = logging.getLogger(__name__) | 28 | LOG = logging.getLogger(__name__) |
428 | @@ -57,7 +57,7 @@ IMDS_URL = "http://169.254.169.254/metadata/" | |||
429 | 57 | # List of static scripts and network config artifacts created by | 57 | # List of static scripts and network config artifacts created by |
430 | 58 | # stock ubuntu suported images. | 58 | # stock ubuntu suported images. |
431 | 59 | UBUNTU_EXTENDED_NETWORK_SCRIPTS = [ | 59 | UBUNTU_EXTENDED_NETWORK_SCRIPTS = [ |
433 | 60 | '/etc/netplan/90-azure-hotplug.yaml', | 60 | '/etc/netplan/90-hotplug-azure.yaml', |
434 | 61 | '/usr/local/sbin/ephemeral_eth.sh', | 61 | '/usr/local/sbin/ephemeral_eth.sh', |
435 | 62 | '/etc/udev/rules.d/10-net-device-added.rules', | 62 | '/etc/udev/rules.d/10-net-device-added.rules', |
436 | 63 | '/run/network/interfaces.ephemeral.d', | 63 | '/run/network/interfaces.ephemeral.d', |
437 | @@ -207,7 +207,9 @@ BUILTIN_DS_CONFIG = { | |||
438 | 207 | }, | 207 | }, |
439 | 208 | 'disk_aliases': {'ephemeral0': RESOURCE_DISK_PATH}, | 208 | 'disk_aliases': {'ephemeral0': RESOURCE_DISK_PATH}, |
440 | 209 | 'dhclient_lease_file': LEASE_FILE, | 209 | 'dhclient_lease_file': LEASE_FILE, |
441 | 210 | 'apply_network_config': True, # Use IMDS published network configuration | ||
442 | 210 | } | 211 | } |
443 | 212 | # RELEASE_BLOCKER: Xenial and earlier apply_network_config default is False | ||
444 | 211 | 213 | ||
445 | 212 | BUILTIN_CLOUD_CONFIG = { | 214 | BUILTIN_CLOUD_CONFIG = { |
446 | 213 | 'disk_setup': { | 215 | 'disk_setup': { |
447 | @@ -278,6 +280,7 @@ class DataSourceAzure(sources.DataSource): | |||
448 | 278 | self._network_config = None | 280 | self._network_config = None |
449 | 279 | # Regenerate network config new_instance boot and every boot | 281 | # Regenerate network config new_instance boot and every boot |
450 | 280 | self.update_events['network'].add(EventType.BOOT) | 282 | self.update_events['network'].add(EventType.BOOT) |
451 | 283 | self._ephemeral_dhcp_ctx = None | ||
452 | 281 | 284 | ||
453 | 282 | def __str__(self): | 285 | def __str__(self): |
454 | 283 | root = sources.DataSource.__str__(self) | 286 | root = sources.DataSource.__str__(self) |
455 | @@ -404,7 +407,8 @@ class DataSourceAzure(sources.DataSource): | |||
456 | 404 | LOG.warning("%s was not mountable", cdev) | 407 | LOG.warning("%s was not mountable", cdev) |
457 | 405 | continue | 408 | continue |
458 | 406 | 409 | ||
460 | 407 | if reprovision or self._should_reprovision(ret): | 410 | perform_reprovision = reprovision or self._should_reprovision(ret) |
461 | 411 | if perform_reprovision: | ||
462 | 408 | ret = self._reprovision() | 412 | ret = self._reprovision() |
463 | 409 | imds_md = get_metadata_from_imds( | 413 | imds_md = get_metadata_from_imds( |
464 | 410 | self.fallback_interface, retries=3) | 414 | self.fallback_interface, retries=3) |
465 | @@ -432,6 +436,18 @@ class DataSourceAzure(sources.DataSource): | |||
466 | 432 | crawled_data['metadata']['random_seed'] = seed | 436 | crawled_data['metadata']['random_seed'] = seed |
467 | 433 | crawled_data['metadata']['instance-id'] = util.read_dmi_data( | 437 | crawled_data['metadata']['instance-id'] = util.read_dmi_data( |
468 | 434 | 'system-uuid') | 438 | 'system-uuid') |
469 | 439 | |||
470 | 440 | if perform_reprovision: | ||
471 | 441 | LOG.info("Reporting ready to Azure after getting ReprovisionData") | ||
472 | 442 | use_cached_ephemeral = (net.is_up(self.fallback_interface) and | ||
473 | 443 | getattr(self, '_ephemeral_dhcp_ctx', None)) | ||
474 | 444 | if use_cached_ephemeral: | ||
475 | 445 | self._report_ready(lease=self._ephemeral_dhcp_ctx.lease) | ||
476 | 446 | self._ephemeral_dhcp_ctx.clean_network() # Teardown ephemeral | ||
477 | 447 | else: | ||
478 | 448 | with EphemeralDHCPv4() as lease: | ||
479 | 449 | self._report_ready(lease=lease) | ||
480 | 450 | |||
481 | 435 | return crawled_data | 451 | return crawled_data |
482 | 436 | 452 | ||
483 | 437 | def _is_platform_viable(self): | 453 | def _is_platform_viable(self): |
484 | @@ -458,7 +474,8 @@ class DataSourceAzure(sources.DataSource): | |||
485 | 458 | except sources.InvalidMetaDataException as e: | 474 | except sources.InvalidMetaDataException as e: |
486 | 459 | LOG.warning('Could not crawl Azure metadata: %s', e) | 475 | LOG.warning('Could not crawl Azure metadata: %s', e) |
487 | 460 | return False | 476 | return False |
489 | 461 | if self.distro and self.distro.name == 'ubuntu': | 477 | if (self.distro and self.distro.name == 'ubuntu' and |
490 | 478 | self.ds_cfg.get('apply_network_config')): | ||
491 | 462 | maybe_remove_ubuntu_network_config_scripts() | 479 | maybe_remove_ubuntu_network_config_scripts() |
492 | 463 | 480 | ||
493 | 464 | # Process crawled data and augment with various config defaults | 481 | # Process crawled data and augment with various config defaults |
494 | @@ -509,32 +526,29 @@ class DataSourceAzure(sources.DataSource): | |||
495 | 509 | report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE)) | 526 | report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE)) |
496 | 510 | LOG.debug("Start polling IMDS") | 527 | LOG.debug("Start polling IMDS") |
497 | 511 | 528 | ||
498 | 512 | def exc_cb(msg, exception): | ||
499 | 513 | if isinstance(exception, UrlError) and exception.code == 404: | ||
500 | 514 | return True | ||
501 | 515 | # If we get an exception while trying to call IMDS, we | ||
502 | 516 | # call DHCP and setup the ephemeral network to acquire the new IP. | ||
503 | 517 | return False | ||
504 | 518 | |||
505 | 519 | while True: | 529 | while True: |
506 | 520 | try: | 530 | try: |
518 | 521 | with EphemeralDHCPv4() as lease: | 531 | # Save our EphemeralDHCPv4 context so we avoid repeated dhcp |
519 | 522 | if report_ready: | 532 | self._ephemeral_dhcp_ctx = EphemeralDHCPv4() |
520 | 523 | path = REPORTED_READY_MARKER_FILE | 533 | lease = self._ephemeral_dhcp_ctx.obtain_lease() |
521 | 524 | LOG.info( | 534 | if report_ready: |
522 | 525 | "Creating a marker file to report ready: %s", path) | 535 | path = REPORTED_READY_MARKER_FILE |
523 | 526 | util.write_file(path, "{pid}: {time}\n".format( | 536 | LOG.info( |
524 | 527 | pid=os.getpid(), time=time())) | 537 | "Creating a marker file to report ready: %s", path) |
525 | 528 | self._report_ready(lease=lease) | 538 | util.write_file(path, "{pid}: {time}\n".format( |
526 | 529 | report_ready = False | 539 | pid=os.getpid(), time=time())) |
527 | 530 | return readurl(url, timeout=1, headers=headers, | 540 | self._report_ready(lease=lease) |
528 | 531 | exception_cb=exc_cb, infinite=True).contents | 541 | report_ready = False |
529 | 542 | return readurl(url, timeout=1, headers=headers, | ||
530 | 543 | exception_cb=retry_on_url_exc, infinite=True, | ||
531 | 544 | log_req_resp=False).contents | ||
532 | 532 | except UrlError: | 545 | except UrlError: |
533 | 546 | # Teardown our EphemeralDHCPv4 context on failure as we retry | ||
534 | 547 | self._ephemeral_dhcp_ctx.clean_network() | ||
535 | 533 | pass | 548 | pass |
536 | 534 | 549 | ||
537 | 535 | def _report_ready(self, lease): | 550 | def _report_ready(self, lease): |
540 | 536 | """Tells the fabric provisioning has completed | 551 | """Tells the fabric provisioning has completed """ |
539 | 537 | before we go into our polling loop.""" | ||
541 | 538 | try: | 552 | try: |
542 | 539 | get_metadata_from_fabric(None, lease['unknown-245']) | 553 | get_metadata_from_fabric(None, lease['unknown-245']) |
543 | 540 | except Exception: | 554 | except Exception: |
544 | @@ -619,7 +633,11 @@ class DataSourceAzure(sources.DataSource): | |||
545 | 619 | the blacklisted devices. | 633 | the blacklisted devices. |
546 | 620 | """ | 634 | """ |
547 | 621 | if not self._network_config: | 635 | if not self._network_config: |
549 | 622 | self._network_config = parse_network_config(self._metadata_imds) | 636 | if self.ds_cfg.get('apply_network_config'): |
550 | 637 | nc_src = self._metadata_imds | ||
551 | 638 | else: | ||
552 | 639 | nc_src = None | ||
553 | 640 | self._network_config = parse_network_config(nc_src) | ||
554 | 623 | return self._network_config | 641 | return self._network_config |
555 | 624 | 642 | ||
556 | 625 | 643 | ||
557 | @@ -700,7 +718,7 @@ def can_dev_be_reformatted(devpath, preserve_ntfs): | |||
558 | 700 | file_count = util.mount_cb(cand_path, count_files, mtype="ntfs", | 718 | file_count = util.mount_cb(cand_path, count_files, mtype="ntfs", |
559 | 701 | update_env_for_mount={'LANG': 'C'}) | 719 | update_env_for_mount={'LANG': 'C'}) |
560 | 702 | except util.MountFailedError as e: | 720 | except util.MountFailedError as e: |
562 | 703 | if "mount: unknown filesystem type 'ntfs'" in str(e): | 721 | if "unknown filesystem type 'ntfs'" in str(e): |
563 | 704 | return True, (bmsg + ' but this system cannot mount NTFS,' | 722 | return True, (bmsg + ' but this system cannot mount NTFS,' |
564 | 705 | ' assuming there are no important files.' | 723 | ' assuming there are no important files.' |
565 | 706 | ' Formatting allowed.') | 724 | ' Formatting allowed.') |
566 | @@ -1162,17 +1180,12 @@ def get_metadata_from_imds(fallback_nic, retries): | |||
567 | 1162 | 1180 | ||
568 | 1163 | def _get_metadata_from_imds(retries): | 1181 | def _get_metadata_from_imds(retries): |
569 | 1164 | 1182 | ||
570 | 1165 | def retry_on_url_error(msg, exception): | ||
571 | 1166 | if isinstance(exception, UrlError) and exception.code == 404: | ||
572 | 1167 | return True # Continue retries | ||
573 | 1168 | return False # Stop retries on all other exceptions | ||
574 | 1169 | |||
575 | 1170 | url = IMDS_URL + "instance?api-version=2017-12-01" | 1183 | url = IMDS_URL + "instance?api-version=2017-12-01" |
576 | 1171 | headers = {"Metadata": "true"} | 1184 | headers = {"Metadata": "true"} |
577 | 1172 | try: | 1185 | try: |
578 | 1173 | response = readurl( | 1186 | response = readurl( |
579 | 1174 | url, timeout=1, headers=headers, retries=retries, | 1187 | url, timeout=1, headers=headers, retries=retries, |
581 | 1175 | exception_cb=retry_on_url_error) | 1188 | exception_cb=retry_on_url_exc) |
582 | 1176 | except Exception as e: | 1189 | except Exception as e: |
583 | 1177 | LOG.debug('Ignoring IMDS instance metadata: %s', e) | 1190 | LOG.debug('Ignoring IMDS instance metadata: %s', e) |
584 | 1178 | return {} | 1191 | return {} |
585 | @@ -1195,7 +1208,7 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None): | |||
586 | 1195 | additional interfaces which get attached by a customer at some point | 1208 | additional interfaces which get attached by a customer at some point |
587 | 1196 | after initial boot. Since the Azure datasource can now regenerate | 1209 | after initial boot. Since the Azure datasource can now regenerate |
588 | 1197 | network configuration as metadata reports these new devices, we no longer | 1210 | network configuration as metadata reports these new devices, we no longer |
590 | 1198 | want the udev rules or netplan's 90-azure-hotplug.yaml to configure | 1211 | want the udev rules or netplan's 90-hotplug-azure.yaml to configure |
591 | 1199 | networking on eth1 or greater as it might collide with cloud-init's | 1212 | networking on eth1 or greater as it might collide with cloud-init's |
592 | 1200 | configuration. | 1213 | configuration. |
593 | 1201 | 1214 | ||
594 | diff --git a/cloudinit/tests/test_url_helper.py b/cloudinit/tests/test_url_helper.py | |||
595 | index 113249d..aa9f3ec 100644 | |||
596 | --- a/cloudinit/tests/test_url_helper.py | |||
597 | +++ b/cloudinit/tests/test_url_helper.py | |||
598 | @@ -1,10 +1,12 @@ | |||
599 | 1 | # This file is part of cloud-init. See LICENSE file for license information. | 1 | # This file is part of cloud-init. See LICENSE file for license information. |
600 | 2 | 2 | ||
602 | 3 | from cloudinit.url_helper import oauth_headers, read_file_or_url | 3 | from cloudinit.url_helper import ( |
603 | 4 | NOT_FOUND, UrlError, oauth_headers, read_file_or_url, retry_on_url_exc) | ||
604 | 4 | from cloudinit.tests.helpers import CiTestCase, mock, skipIf | 5 | from cloudinit.tests.helpers import CiTestCase, mock, skipIf |
605 | 5 | from cloudinit import util | 6 | from cloudinit import util |
606 | 6 | 7 | ||
607 | 7 | import httpretty | 8 | import httpretty |
608 | 9 | import requests | ||
609 | 8 | 10 | ||
610 | 9 | 11 | ||
611 | 10 | try: | 12 | try: |
612 | @@ -64,3 +66,24 @@ class TestReadFileOrUrl(CiTestCase): | |||
613 | 64 | result = read_file_or_url(url) | 66 | result = read_file_or_url(url) |
614 | 65 | self.assertEqual(result.contents, data) | 67 | self.assertEqual(result.contents, data) |
615 | 66 | self.assertEqual(str(result), data.decode('utf-8')) | 68 | self.assertEqual(str(result), data.decode('utf-8')) |
616 | 69 | |||
617 | 70 | |||
618 | 71 | class TestRetryOnUrlExc(CiTestCase): | ||
619 | 72 | |||
620 | 73 | def test_do_not_retry_non_urlerror(self): | ||
621 | 74 | """When exception is not UrlError return False.""" | ||
622 | 75 | myerror = IOError('something unexpected') | ||
623 | 76 | self.assertFalse(retry_on_url_exc(msg='', exc=myerror)) | ||
624 | 77 | |||
625 | 78 | def test_perform_retries_on_not_found(self): | ||
626 | 79 | """When exception is UrlError with a 404 status code return True.""" | ||
627 | 80 | myerror = UrlError(cause=RuntimeError( | ||
628 | 81 | 'something was not found'), code=NOT_FOUND) | ||
629 | 82 | self.assertTrue(retry_on_url_exc(msg='', exc=myerror)) | ||
630 | 83 | |||
631 | 84 | def test_perform_retries_on_timeout(self): | ||
632 | 85 | """When exception is a requests.Timeout return True.""" | ||
633 | 86 | myerror = UrlError(cause=requests.Timeout('something timed out')) | ||
634 | 87 | self.assertTrue(retry_on_url_exc(msg='', exc=myerror)) | ||
635 | 88 | |||
636 | 89 | # vi: ts=4 expandtab | ||
637 | diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py | |||
638 | index 749a384..e3d2dba 100644 | |||
639 | --- a/cloudinit/tests/test_util.py | |||
640 | +++ b/cloudinit/tests/test_util.py | |||
641 | @@ -18,25 +18,51 @@ MOUNT_INFO = [ | |||
642 | 18 | ] | 18 | ] |
643 | 19 | 19 | ||
644 | 20 | OS_RELEASE_SLES = dedent("""\ | 20 | OS_RELEASE_SLES = dedent("""\ |
651 | 21 | NAME="SLES"\n | 21 | NAME="SLES" |
652 | 22 | VERSION="12-SP3"\n | 22 | VERSION="12-SP3" |
653 | 23 | VERSION_ID="12.3"\n | 23 | VERSION_ID="12.3" |
654 | 24 | PRETTY_NAME="SUSE Linux Enterprise Server 12 SP3"\n | 24 | PRETTY_NAME="SUSE Linux Enterprise Server 12 SP3" |
655 | 25 | ID="sles"\nANSI_COLOR="0;32"\n | 25 | ID="sles" |
656 | 26 | CPE_NAME="cpe:/o:suse:sles:12:sp3"\n | 26 | ANSI_COLOR="0;32" |
657 | 27 | CPE_NAME="cpe:/o:suse:sles:12:sp3" | ||
658 | 27 | """) | 28 | """) |
659 | 28 | 29 | ||
660 | 29 | OS_RELEASE_OPENSUSE = dedent("""\ | 30 | OS_RELEASE_OPENSUSE = dedent("""\ |
671 | 30 | NAME="openSUSE Leap" | 31 | NAME="openSUSE Leap" |
672 | 31 | VERSION="42.3" | 32 | VERSION="42.3" |
673 | 32 | ID=opensuse | 33 | ID=opensuse |
674 | 33 | ID_LIKE="suse" | 34 | ID_LIKE="suse" |
675 | 34 | VERSION_ID="42.3" | 35 | VERSION_ID="42.3" |
676 | 35 | PRETTY_NAME="openSUSE Leap 42.3" | 36 | PRETTY_NAME="openSUSE Leap 42.3" |
677 | 36 | ANSI_COLOR="0;32" | 37 | ANSI_COLOR="0;32" |
678 | 37 | CPE_NAME="cpe:/o:opensuse:leap:42.3" | 38 | CPE_NAME="cpe:/o:opensuse:leap:42.3" |
679 | 38 | BUG_REPORT_URL="https://bugs.opensuse.org" | 39 | BUG_REPORT_URL="https://bugs.opensuse.org" |
680 | 39 | HOME_URL="https://www.opensuse.org/" | 40 | HOME_URL="https://www.opensuse.org/" |
681 | 41 | """) | ||
682 | 42 | |||
683 | 43 | OS_RELEASE_OPENSUSE_L15 = dedent("""\ | ||
684 | 44 | NAME="openSUSE Leap" | ||
685 | 45 | VERSION="15.0" | ||
686 | 46 | ID="opensuse-leap" | ||
687 | 47 | ID_LIKE="suse opensuse" | ||
688 | 48 | VERSION_ID="15.0" | ||
689 | 49 | PRETTY_NAME="openSUSE Leap 15.0" | ||
690 | 50 | ANSI_COLOR="0;32" | ||
691 | 51 | CPE_NAME="cpe:/o:opensuse:leap:15.0" | ||
692 | 52 | BUG_REPORT_URL="https://bugs.opensuse.org" | ||
693 | 53 | HOME_URL="https://www.opensuse.org/" | ||
694 | 54 | """) | ||
695 | 55 | |||
696 | 56 | OS_RELEASE_OPENSUSE_TW = dedent("""\ | ||
697 | 57 | NAME="openSUSE Tumbleweed" | ||
698 | 58 | ID="opensuse-tumbleweed" | ||
699 | 59 | ID_LIKE="opensuse suse" | ||
700 | 60 | VERSION_ID="20180920" | ||
701 | 61 | PRETTY_NAME="openSUSE Tumbleweed" | ||
702 | 62 | ANSI_COLOR="0;32" | ||
703 | 63 | CPE_NAME="cpe:/o:opensuse:tumbleweed:20180920" | ||
704 | 64 | BUG_REPORT_URL="https://bugs.opensuse.org" | ||
705 | 65 | HOME_URL="https://www.opensuse.org/" | ||
706 | 40 | """) | 66 | """) |
707 | 41 | 67 | ||
708 | 42 | OS_RELEASE_CENTOS = dedent("""\ | 68 | OS_RELEASE_CENTOS = dedent("""\ |
709 | @@ -447,12 +473,35 @@ class TestGetLinuxDistro(CiTestCase): | |||
710 | 447 | 473 | ||
711 | 448 | @mock.patch('cloudinit.util.load_file') | 474 | @mock.patch('cloudinit.util.load_file') |
712 | 449 | def test_get_linux_opensuse(self, m_os_release, m_path_exists): | 475 | def test_get_linux_opensuse(self, m_os_release, m_path_exists): |
714 | 450 | """Verify we get the correct name and machine arch on OpenSUSE.""" | 476 | """Verify we get the correct name and machine arch on openSUSE |
715 | 477 | prior to openSUSE Leap 15. | ||
716 | 478 | """ | ||
717 | 451 | m_os_release.return_value = OS_RELEASE_OPENSUSE | 479 | m_os_release.return_value = OS_RELEASE_OPENSUSE |
718 | 452 | m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists | 480 | m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists |
719 | 453 | dist = util.get_linux_distro() | 481 | dist = util.get_linux_distro() |
720 | 454 | self.assertEqual(('opensuse', '42.3', platform.machine()), dist) | 482 | self.assertEqual(('opensuse', '42.3', platform.machine()), dist) |
721 | 455 | 483 | ||
722 | 484 | @mock.patch('cloudinit.util.load_file') | ||
723 | 485 | def test_get_linux_opensuse_l15(self, m_os_release, m_path_exists): | ||
724 | 486 | """Verify we get the correct name and machine arch on openSUSE | ||
725 | 487 | for openSUSE Leap 15.0 and later. | ||
726 | 488 | """ | ||
727 | 489 | m_os_release.return_value = OS_RELEASE_OPENSUSE_L15 | ||
728 | 490 | m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists | ||
729 | 491 | dist = util.get_linux_distro() | ||
730 | 492 | self.assertEqual(('opensuse-leap', '15.0', platform.machine()), dist) | ||
731 | 493 | |||
732 | 494 | @mock.patch('cloudinit.util.load_file') | ||
733 | 495 | def test_get_linux_opensuse_tw(self, m_os_release, m_path_exists): | ||
734 | 496 | """Verify we get the correct name and machine arch on openSUSE | ||
735 | 497 | for openSUSE Tumbleweed | ||
736 | 498 | """ | ||
737 | 499 | m_os_release.return_value = OS_RELEASE_OPENSUSE_TW | ||
738 | 500 | m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists | ||
739 | 501 | dist = util.get_linux_distro() | ||
740 | 502 | self.assertEqual( | ||
741 | 503 | ('opensuse-tumbleweed', '20180920', platform.machine()), dist) | ||
742 | 504 | |||
743 | 456 | @mock.patch('platform.dist') | 505 | @mock.patch('platform.dist') |
744 | 457 | def test_get_linux_distro_no_data(self, m_platform_dist, m_path_exists): | 506 | def test_get_linux_distro_no_data(self, m_platform_dist, m_path_exists): |
745 | 458 | """Verify we get no information if os-release does not exist""" | 507 | """Verify we get no information if os-release does not exist""" |
746 | diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py | |||
747 | index 8067979..396d69a 100644 | |||
748 | --- a/cloudinit/url_helper.py | |||
749 | +++ b/cloudinit/url_helper.py | |||
750 | @@ -199,7 +199,7 @@ def _get_ssl_args(url, ssl_details): | |||
751 | 199 | def readurl(url, data=None, timeout=None, retries=0, sec_between=1, | 199 | def readurl(url, data=None, timeout=None, retries=0, sec_between=1, |
752 | 200 | headers=None, headers_cb=None, ssl_details=None, | 200 | headers=None, headers_cb=None, ssl_details=None, |
753 | 201 | check_status=True, allow_redirects=True, exception_cb=None, | 201 | check_status=True, allow_redirects=True, exception_cb=None, |
755 | 202 | session=None, infinite=False): | 202 | session=None, infinite=False, log_req_resp=True): |
756 | 203 | url = _cleanurl(url) | 203 | url = _cleanurl(url) |
757 | 204 | req_args = { | 204 | req_args = { |
758 | 205 | 'url': url, | 205 | 'url': url, |
759 | @@ -256,9 +256,11 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, | |||
760 | 256 | continue | 256 | continue |
761 | 257 | filtered_req_args[k] = v | 257 | filtered_req_args[k] = v |
762 | 258 | try: | 258 | try: |
766 | 259 | LOG.debug("[%s/%s] open '%s' with %s configuration", i, | 259 | |
767 | 260 | "infinite" if infinite else manual_tries, url, | 260 | if log_req_resp: |
768 | 261 | filtered_req_args) | 261 | LOG.debug("[%s/%s] open '%s' with %s configuration", i, |
769 | 262 | "infinite" if infinite else manual_tries, url, | ||
770 | 263 | filtered_req_args) | ||
771 | 262 | 264 | ||
772 | 263 | if session is None: | 265 | if session is None: |
773 | 264 | session = requests.Session() | 266 | session = requests.Session() |
774 | @@ -294,8 +296,11 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, | |||
775 | 294 | break | 296 | break |
776 | 295 | if (infinite and sec_between > 0) or \ | 297 | if (infinite and sec_between > 0) or \ |
777 | 296 | (i + 1 < manual_tries and sec_between > 0): | 298 | (i + 1 < manual_tries and sec_between > 0): |
780 | 297 | LOG.debug("Please wait %s seconds while we wait to try again", | 299 | |
781 | 298 | sec_between) | 300 | if log_req_resp: |
782 | 301 | LOG.debug( | ||
783 | 302 | "Please wait %s seconds while we wait to try again", | ||
784 | 303 | sec_between) | ||
785 | 299 | time.sleep(sec_between) | 304 | time.sleep(sec_between) |
786 | 300 | if excps: | 305 | if excps: |
787 | 301 | raise excps[-1] | 306 | raise excps[-1] |
788 | @@ -549,4 +554,18 @@ def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret, | |||
789 | 549 | _uri, signed_headers, _body = client.sign(url) | 554 | _uri, signed_headers, _body = client.sign(url) |
790 | 550 | return signed_headers | 555 | return signed_headers |
791 | 551 | 556 | ||
792 | 557 | |||
793 | 558 | def retry_on_url_exc(msg, exc): | ||
794 | 559 | """readurl exception_cb that will retry on NOT_FOUND and Timeout. | ||
795 | 560 | |||
796 | 561 | Returns False to raise the exception from readurl, True to retry. | ||
797 | 562 | """ | ||
798 | 563 | if not isinstance(exc, UrlError): | ||
799 | 564 | return False | ||
800 | 565 | if exc.code == NOT_FOUND: | ||
801 | 566 | return True | ||
802 | 567 | if exc.cause and isinstance(exc.cause, requests.Timeout): | ||
803 | 568 | return True | ||
804 | 569 | return False | ||
805 | 570 | |||
806 | 552 | # vi: ts=4 expandtab | 571 | # vi: ts=4 expandtab |
807 | diff --git a/cloudinit/util.py b/cloudinit/util.py | |||
808 | index c67d6be..7800f7b 100644 | |||
809 | --- a/cloudinit/util.py | |||
810 | +++ b/cloudinit/util.py | |||
811 | @@ -615,8 +615,8 @@ def get_linux_distro(): | |||
812 | 615 | distro_name = os_release.get('ID', '') | 615 | distro_name = os_release.get('ID', '') |
813 | 616 | distro_version = os_release.get('VERSION_ID', '') | 616 | distro_version = os_release.get('VERSION_ID', '') |
814 | 617 | if 'sles' in distro_name or 'suse' in distro_name: | 617 | if 'sles' in distro_name or 'suse' in distro_name: |
817 | 618 | # RELEASE_BLOCKER: We will drop this sles ivergent behavior in | 618 | # RELEASE_BLOCKER: We will drop this sles divergent behavior in |
818 | 619 | # before 18.4 so that get_linux_distro returns a named tuple | 619 | # the future so that get_linux_distro returns a named tuple |
819 | 620 | # which will include both version codename and architecture | 620 | # which will include both version codename and architecture |
820 | 621 | # on all distributions. | 621 | # on all distributions. |
821 | 622 | flavor = platform.machine() | 622 | flavor = platform.machine() |
822 | @@ -668,7 +668,8 @@ def system_info(): | |||
823 | 668 | var = 'ubuntu' | 668 | var = 'ubuntu' |
824 | 669 | elif linux_dist == 'redhat': | 669 | elif linux_dist == 'redhat': |
825 | 670 | var = 'rhel' | 670 | var = 'rhel' |
827 | 671 | elif linux_dist in ('opensuse', 'sles'): | 671 | elif linux_dist in ( |
828 | 672 | 'opensuse', 'opensuse-tumbleweed', 'opensuse-leap', 'sles'): | ||
829 | 672 | var = 'suse' | 673 | var = 'suse' |
830 | 673 | else: | 674 | else: |
831 | 674 | var = 'linux' | 675 | var = 'linux' |
832 | diff --git a/debian/changelog b/debian/changelog | |||
833 | index 117fd16..a85c8cc 100644 | |||
834 | --- a/debian/changelog | |||
835 | +++ b/debian/changelog | |||
836 | @@ -1,3 +1,31 @@ | |||
837 | 1 | cloud-init (18.4-22-g6062595b-0ubuntu1) disco; urgency=medium | ||
838 | 2 | |||
839 | 3 | * New upstream snapshot. | ||
840 | 4 | - azure: retry imds polling on requests.Timeout (LP: #1800223) | ||
841 | 5 | - azure: Accept variation in error msg from mount for ntfs volumes | ||
842 | 6 | [Jason Zions] (LP: #1799338) | ||
843 | 7 | - azure: fix regression introduced when persisting ephemeral dhcp lease | ||
844 | 8 | [Aswin Rajamannar] | ||
845 | 9 | - azure: add udev rules to create cloud-init Gen2 disk name symlinks | ||
846 | 10 | (LP: #1797480) | ||
847 | 11 | - tests: ec2 mock missing httpretty user-data and instance-identity routes | ||
848 | 12 | - azure: remove /etc/netplan/90-hotplug-azure.yaml when net from IMDS | ||
849 | 13 | - azure: report ready to fabric after reprovision and reduce logging | ||
850 | 14 | [Aswin Rajamannar] (LP: #1799594) | ||
851 | 15 | - query: better error when missing read permission on instance-data | ||
852 | 16 | - instance-data: fallback to instance-data.json if sensitive is absent. | ||
853 | 17 | (LP: #1798189) | ||
854 | 18 | - docs: remove colon from network v1 config example. [Tomer Cohen] | ||
855 | 19 | - Add cloud-id binary to packages for SUSE [Jason Zions] | ||
856 | 20 | - systemd: On SUSE ensure cloud-init.service runs before wicked | ||
857 | 21 | [Robert Schweikert] (LP: #1799709) | ||
858 | 22 | - update detection of openSUSE variants [Robert Schweikert] | ||
859 | 23 | - azure: Add apply_network_config option to disable network from IMDS | ||
860 | 24 | (LP: #1798424) | ||
861 | 25 | - Correct spelling in an error message (udevadm). [Katie McLaughlin] | ||
862 | 26 | |||
863 | 27 | -- Chad Smith <chad.smith@canonical.com> Mon, 12 Nov 2018 20:33:12 -0700 | ||
864 | 28 | |||
865 | 1 | cloud-init (18.4-7-g4652b196-0ubuntu1) cosmic; urgency=medium | 29 | cloud-init (18.4-7-g4652b196-0ubuntu1) cosmic; urgency=medium |
866 | 2 | 30 | ||
867 | 3 | * New upstream snapshot. | 31 | * New upstream snapshot. |
868 | diff --git a/doc/rtd/topics/datasources/azure.rst b/doc/rtd/topics/datasources/azure.rst | |||
869 | index 559011e..f73c369 100644 | |||
870 | --- a/doc/rtd/topics/datasources/azure.rst | |||
871 | +++ b/doc/rtd/topics/datasources/azure.rst | |||
872 | @@ -57,6 +57,52 @@ in order to use waagent.conf with cloud-init, the following settings are recomme | |||
873 | 57 | ResourceDisk.MountPoint=/mnt | 57 | ResourceDisk.MountPoint=/mnt |
874 | 58 | 58 | ||
875 | 59 | 59 | ||
876 | 60 | Configuration | ||
877 | 61 | ------------- | ||
878 | 62 | The following configuration can be set for the datasource in system | ||
879 | 63 | configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`). | ||
880 | 64 | |||
881 | 65 | The settings that may be configured are: | ||
882 | 66 | |||
883 | 67 | * **agent_command**: Either __builtin__ (default) or a command to run to getcw | ||
884 | 68 | metadata. If __builtin__, get metadata from walinuxagent. Otherwise run the | ||
885 | 69 | provided command to obtain metadata. | ||
886 | 70 | * **apply_network_config**: Boolean set to True to use network configuration | ||
887 | 71 | described by Azure's IMDS endpoint instead of fallback network config of | ||
888 | 72 | dhcp on eth0. Default is True. For Ubuntu 16.04 or earlier, default is False. | ||
889 | 73 | * **data_dir**: Path used to read metadata files and write crawled data. | ||
890 | 74 | * **dhclient_lease_file**: The fallback lease file to source when looking for | ||
891 | 75 | custom DHCP option 245 from Azure fabric. | ||
892 | 76 | * **disk_aliases**: A dictionary defining which device paths should be | ||
893 | 77 | interpreted as ephemeral images. See cc_disk_setup module for more info. | ||
894 | 78 | * **hostname_bounce**: A dictionary Azure hostname bounce behavior to react to | ||
895 | 79 | metadata changes. | ||
896 | 80 | * **hostname_bounce**: A dictionary Azure hostname bounce behavior to react to | ||
897 | 81 | metadata changes. Azure will throttle ifup/down in some cases after metadata | ||
898 | 82 | has been updated to inform dhcp server about updated hostnames. | ||
899 | 83 | * **set_hostname**: Boolean set to True when we want Azure to set the hostname | ||
900 | 84 | based on metadata. | ||
901 | 85 | |||
902 | 86 | An example configuration with the default values is provided below: | ||
903 | 87 | |||
904 | 88 | .. sourcecode:: yaml | ||
905 | 89 | |||
906 | 90 | datasource: | ||
907 | 91 | Azure: | ||
908 | 92 | agent_command: __builtin__ | ||
909 | 93 | apply_network_config: true | ||
910 | 94 | data_dir: /var/lib/waagent | ||
911 | 95 | dhclient_lease_file: /var/lib/dhcp/dhclient.eth0.leases | ||
912 | 96 | disk_aliases: | ||
913 | 97 | ephemeral0: /dev/disk/cloud/azure_resource | ||
914 | 98 | hostname_bounce: | ||
915 | 99 | interface: eth0 | ||
916 | 100 | command: builtin | ||
917 | 101 | policy: true | ||
918 | 102 | hostname_command: hostname | ||
919 | 103 | set_hostname: true | ||
920 | 104 | |||
921 | 105 | |||
922 | 60 | Userdata | 106 | Userdata |
923 | 61 | -------- | 107 | -------- |
924 | 62 | Userdata is provided to cloud-init inside the ovf-env.xml file. Cloud-init | 108 | Userdata is provided to cloud-init inside the ovf-env.xml file. Cloud-init |
925 | diff --git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in | |||
926 | index a3a6d1e..6b2022b 100644 | |||
927 | --- a/packages/redhat/cloud-init.spec.in | |||
928 | +++ b/packages/redhat/cloud-init.spec.in | |||
929 | @@ -191,6 +191,7 @@ fi | |||
930 | 191 | 191 | ||
931 | 192 | # Program binaries | 192 | # Program binaries |
932 | 193 | %{_bindir}/cloud-init* | 193 | %{_bindir}/cloud-init* |
933 | 194 | %{_bindir}/cloud-id* | ||
934 | 194 | 195 | ||
935 | 195 | # Docs | 196 | # Docs |
936 | 196 | %doc LICENSE ChangeLog TODO.rst requirements.txt | 197 | %doc LICENSE ChangeLog TODO.rst requirements.txt |
937 | diff --git a/packages/suse/cloud-init.spec.in b/packages/suse/cloud-init.spec.in | |||
938 | index e781d74..26894b3 100644 | |||
939 | --- a/packages/suse/cloud-init.spec.in | |||
940 | +++ b/packages/suse/cloud-init.spec.in | |||
941 | @@ -93,6 +93,7 @@ version_pys=$(cd "%{buildroot}" && find . -name version.py -type f) | |||
942 | 93 | 93 | ||
943 | 94 | # Program binaries | 94 | # Program binaries |
944 | 95 | %{_bindir}/cloud-init* | 95 | %{_bindir}/cloud-init* |
945 | 96 | %{_bindir}/cloud-id* | ||
946 | 96 | 97 | ||
947 | 97 | # systemd files | 98 | # systemd files |
948 | 98 | /usr/lib/systemd/system-generators/* | 99 | /usr/lib/systemd/system-generators/* |
949 | diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init.service.tmpl | |||
950 | index b92e8ab..5cb0037 100644 | |||
951 | --- a/systemd/cloud-init.service.tmpl | |||
952 | +++ b/systemd/cloud-init.service.tmpl | |||
953 | @@ -14,8 +14,7 @@ After=networking.service | |||
954 | 14 | After=network.service | 14 | After=network.service |
955 | 15 | {% endif %} | 15 | {% endif %} |
956 | 16 | {% if variant in ["suse"] %} | 16 | {% if variant in ["suse"] %} |
959 | 17 | Requires=wicked.service | 17 | Before=wicked.service |
958 | 18 | After=wicked.service | ||
960 | 19 | # setting hostname via hostnamectl depends on dbus, which otherwise | 18 | # setting hostname via hostnamectl depends on dbus, which otherwise |
961 | 20 | # would not be guaranteed at this point. | 19 | # would not be guaranteed at this point. |
962 | 21 | After=dbus.service | 20 | After=dbus.service |
963 | diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py | |||
964 | index abe820e..b92ffc7 100644 | |||
965 | --- a/tests/unittests/test_builtin_handlers.py | |||
966 | +++ b/tests/unittests/test_builtin_handlers.py | |||
967 | @@ -3,6 +3,7 @@ | |||
968 | 3 | """Tests of the built-in user data handlers.""" | 3 | """Tests of the built-in user data handlers.""" |
969 | 4 | 4 | ||
970 | 5 | import copy | 5 | import copy |
971 | 6 | import errno | ||
972 | 6 | import os | 7 | import os |
973 | 7 | import shutil | 8 | import shutil |
974 | 8 | import tempfile | 9 | import tempfile |
975 | @@ -202,6 +203,30 @@ class TestJinjaTemplatePartHandler(CiTestCase): | |||
976 | 202 | os.path.exists(script_file), | 203 | os.path.exists(script_file), |
977 | 203 | 'Unexpected file created %s' % script_file) | 204 | 'Unexpected file created %s' % script_file) |
978 | 204 | 205 | ||
979 | 206 | def test_jinja_template_handle_errors_on_unreadable_instance_data(self): | ||
980 | 207 | """If instance-data is unreadable, raise an error from handle_part.""" | ||
981 | 208 | script_handler = ShellScriptPartHandler(self.paths) | ||
982 | 209 | instance_json = os.path.join(self.run_dir, 'instance-data.json') | ||
983 | 210 | util.write_file(instance_json, util.json_dumps({})) | ||
984 | 211 | h = JinjaTemplatePartHandler( | ||
985 | 212 | self.paths, sub_handlers=[script_handler]) | ||
986 | 213 | with mock.patch(self.mpath + 'load_file') as m_load: | ||
987 | 214 | with self.assertRaises(RuntimeError) as context_manager: | ||
988 | 215 | m_load.side_effect = OSError(errno.EACCES, 'Not allowed') | ||
989 | 216 | h.handle_part( | ||
990 | 217 | data='data', ctype="!" + handlers.CONTENT_START, | ||
991 | 218 | filename='part01', | ||
992 | 219 | payload='## template: jinja \n#!/bin/bash\necho himom', | ||
993 | 220 | frequency='freq', headers='headers') | ||
994 | 221 | script_file = os.path.join(script_handler.script_dir, 'part01') | ||
995 | 222 | self.assertEqual( | ||
996 | 223 | 'Cannot render jinja template vars. No read permission on' | ||
997 | 224 | " '{rdir}/instance-data.json'. Try sudo".format(rdir=self.run_dir), | ||
998 | 225 | str(context_manager.exception)) | ||
999 | 226 | self.assertFalse( | ||
1000 | 227 | os.path.exists(script_file), | ||
1001 | 228 | 'Unexpected file created %s' % script_file) | ||
1002 | 229 | |||
1003 | 205 | @skipUnlessJinja() | 230 | @skipUnlessJinja() |
1004 | 206 | def test_jinja_template_handle_renders_jinja_content(self): | 231 | def test_jinja_template_handle_renders_jinja_content(self): |
1005 | 207 | """When present, render jinja variables from instance-data.json.""" | 232 | """When present, render jinja variables from instance-data.json.""" |
1006 | diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py | |||
1007 | index 0f4b7bf..56484b2 100644 | |||
1008 | --- a/tests/unittests/test_datasource/test_azure.py | |||
1009 | +++ b/tests/unittests/test_datasource/test_azure.py | |||
1010 | @@ -17,6 +17,7 @@ import crypt | |||
1011 | 17 | import httpretty | 17 | import httpretty |
1012 | 18 | import json | 18 | import json |
1013 | 19 | import os | 19 | import os |
1014 | 20 | import requests | ||
1015 | 20 | import stat | 21 | import stat |
1016 | 21 | import xml.etree.ElementTree as ET | 22 | import xml.etree.ElementTree as ET |
1017 | 22 | import yaml | 23 | import yaml |
1018 | @@ -184,6 +185,35 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): | |||
1019 | 184 | "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time | 185 | "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time |
1020 | 185 | self.logs.getvalue()) | 186 | self.logs.getvalue()) |
1021 | 186 | 187 | ||
1022 | 188 | @mock.patch('requests.Session.request') | ||
1023 | 189 | @mock.patch('cloudinit.url_helper.time.sleep') | ||
1024 | 190 | @mock.patch(MOCKPATH + 'net.is_up') | ||
1025 | 191 | def test_get_metadata_from_imds_retries_on_timeout( | ||
1026 | 192 | self, m_net_is_up, m_sleep, m_request): | ||
1027 | 193 | """Retry IMDS network metadata on timeout errors.""" | ||
1028 | 194 | |||
1029 | 195 | self.attempt = 0 | ||
1030 | 196 | m_request.side_effect = requests.Timeout('Fake Connection Timeout') | ||
1031 | 197 | |||
1032 | 198 | def retry_callback(request, uri, headers): | ||
1033 | 199 | self.attempt += 1 | ||
1034 | 200 | raise requests.Timeout('Fake connection timeout') | ||
1035 | 201 | |||
1036 | 202 | httpretty.register_uri( | ||
1037 | 203 | httpretty.GET, | ||
1038 | 204 | dsaz.IMDS_URL + 'instance?api-version=2017-12-01', | ||
1039 | 205 | body=retry_callback) | ||
1040 | 206 | |||
1041 | 207 | m_net_is_up.return_value = True # skips dhcp | ||
1042 | 208 | |||
1043 | 209 | self.assertEqual({}, dsaz.get_metadata_from_imds('eth9', retries=3)) | ||
1044 | 210 | |||
1045 | 211 | m_net_is_up.assert_called_with('eth9') | ||
1046 | 212 | self.assertEqual([mock.call(1)]*3, m_sleep.call_args_list) | ||
1047 | 213 | self.assertIn( | ||
1048 | 214 | "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time | ||
1049 | 215 | self.logs.getvalue()) | ||
1050 | 216 | |||
1051 | 187 | 217 | ||
1052 | 188 | class TestAzureDataSource(CiTestCase): | 218 | class TestAzureDataSource(CiTestCase): |
1053 | 189 | 219 | ||
1054 | @@ -256,7 +286,8 @@ scbus-1 on xpt0 bus 0 | |||
1055 | 256 | ]) | 286 | ]) |
1056 | 257 | return dsaz | 287 | return dsaz |
1057 | 258 | 288 | ||
1059 | 259 | def _get_ds(self, data, agent_command=None, distro=None): | 289 | def _get_ds(self, data, agent_command=None, distro=None, |
1060 | 290 | apply_network=None): | ||
1061 | 260 | 291 | ||
1062 | 261 | def dsdevs(): | 292 | def dsdevs(): |
1063 | 262 | return data.get('dsdevs', []) | 293 | return data.get('dsdevs', []) |
1064 | @@ -312,6 +343,8 @@ scbus-1 on xpt0 bus 0 | |||
1065 | 312 | data.get('sys_cfg', {}), distro=distro, paths=self.paths) | 343 | data.get('sys_cfg', {}), distro=distro, paths=self.paths) |
1066 | 313 | if agent_command is not None: | 344 | if agent_command is not None: |
1067 | 314 | dsrc.ds_cfg['agent_command'] = agent_command | 345 | dsrc.ds_cfg['agent_command'] = agent_command |
1068 | 346 | if apply_network is not None: | ||
1069 | 347 | dsrc.ds_cfg['apply_network_config'] = apply_network | ||
1070 | 315 | 348 | ||
1071 | 316 | return dsrc | 349 | return dsrc |
1072 | 317 | 350 | ||
1073 | @@ -434,14 +467,26 @@ fdescfs /dev/fd fdescfs rw 0 0 | |||
1074 | 434 | 467 | ||
1075 | 435 | def test_get_data_on_ubuntu_will_remove_network_scripts(self): | 468 | def test_get_data_on_ubuntu_will_remove_network_scripts(self): |
1076 | 436 | """get_data will remove ubuntu net scripts on Ubuntu distro.""" | 469 | """get_data will remove ubuntu net scripts on Ubuntu distro.""" |
1077 | 470 | sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} | ||
1078 | 437 | odata = {'HostName': "myhost", 'UserName': "myuser"} | 471 | odata = {'HostName': "myhost", 'UserName': "myuser"} |
1079 | 438 | data = {'ovfcontent': construct_valid_ovf_env(data=odata), | 472 | data = {'ovfcontent': construct_valid_ovf_env(data=odata), |
1081 | 439 | 'sys_cfg': {}} | 473 | 'sys_cfg': sys_cfg} |
1082 | 440 | 474 | ||
1083 | 441 | dsrc = self._get_ds(data, distro='ubuntu') | 475 | dsrc = self._get_ds(data, distro='ubuntu') |
1084 | 442 | dsrc.get_data() | 476 | dsrc.get_data() |
1085 | 443 | self.m_remove_ubuntu_network_scripts.assert_called_once_with() | 477 | self.m_remove_ubuntu_network_scripts.assert_called_once_with() |
1086 | 444 | 478 | ||
1087 | 479 | def test_get_data_on_ubuntu_will_not_remove_network_scripts_disabled(self): | ||
1088 | 480 | """When apply_network_config false, do not remove scripts on Ubuntu.""" | ||
1089 | 481 | sys_cfg = {'datasource': {'Azure': {'apply_network_config': False}}} | ||
1090 | 482 | odata = {'HostName': "myhost", 'UserName': "myuser"} | ||
1091 | 483 | data = {'ovfcontent': construct_valid_ovf_env(data=odata), | ||
1092 | 484 | 'sys_cfg': sys_cfg} | ||
1093 | 485 | |||
1094 | 486 | dsrc = self._get_ds(data, distro='ubuntu') | ||
1095 | 487 | dsrc.get_data() | ||
1096 | 488 | self.m_remove_ubuntu_network_scripts.assert_not_called() | ||
1097 | 489 | |||
1098 | 445 | def test_crawl_metadata_returns_structured_data_and_caches_nothing(self): | 490 | def test_crawl_metadata_returns_structured_data_and_caches_nothing(self): |
1099 | 446 | """Return all structured metadata and cache no class attributes.""" | 491 | """Return all structured metadata and cache no class attributes.""" |
1100 | 447 | yaml_cfg = "{agent_command: my_command}\n" | 492 | yaml_cfg = "{agent_command: my_command}\n" |
1101 | @@ -498,6 +543,58 @@ fdescfs /dev/fd fdescfs rw 0 0 | |||
1102 | 498 | dsrc.crawl_metadata() | 543 | dsrc.crawl_metadata() |
1103 | 499 | self.assertEqual(str(cm.exception), error_msg) | 544 | self.assertEqual(str(cm.exception), error_msg) |
1104 | 500 | 545 | ||
1105 | 546 | @mock.patch('cloudinit.sources.DataSourceAzure.EphemeralDHCPv4') | ||
1106 | 547 | @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') | ||
1107 | 548 | @mock.patch( | ||
1108 | 549 | 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') | ||
1109 | 550 | @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds') | ||
1110 | 551 | def test_crawl_metadata_on_reprovision_reports_ready( | ||
1111 | 552 | self, poll_imds_func, | ||
1112 | 553 | report_ready_func, | ||
1113 | 554 | m_write, m_dhcp): | ||
1114 | 555 | """If reprovisioning, report ready at the end""" | ||
1115 | 556 | ovfenv = construct_valid_ovf_env( | ||
1116 | 557 | platform_settings={"PreprovisionedVm": "True"}) | ||
1117 | 558 | |||
1118 | 559 | data = {'ovfcontent': ovfenv, | ||
1119 | 560 | 'sys_cfg': {}} | ||
1120 | 561 | dsrc = self._get_ds(data) | ||
1121 | 562 | poll_imds_func.return_value = ovfenv | ||
1122 | 563 | dsrc.crawl_metadata() | ||
1123 | 564 | self.assertEqual(1, report_ready_func.call_count) | ||
1124 | 565 | |||
1125 | 566 | @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') | ||
1126 | 567 | @mock.patch( | ||
1127 | 568 | 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') | ||
1128 | 569 | @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') | ||
1129 | 570 | @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') | ||
1130 | 571 | @mock.patch('cloudinit.sources.DataSourceAzure.readurl') | ||
1131 | 572 | def test_crawl_metadata_on_reprovision_reports_ready_using_lease( | ||
1132 | 573 | self, m_readurl, m_dhcp, | ||
1133 | 574 | m_net, report_ready_func, | ||
1134 | 575 | m_write): | ||
1135 | 576 | """If reprovisioning, report ready using the obtained lease""" | ||
1136 | 577 | ovfenv = construct_valid_ovf_env( | ||
1137 | 578 | platform_settings={"PreprovisionedVm": "True"}) | ||
1138 | 579 | |||
1139 | 580 | data = {'ovfcontent': ovfenv, | ||
1140 | 581 | 'sys_cfg': {}} | ||
1141 | 582 | dsrc = self._get_ds(data) | ||
1142 | 583 | |||
1143 | 584 | lease = { | ||
1144 | 585 | 'interface': 'eth9', 'fixed-address': '192.168.2.9', | ||
1145 | 586 | 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', | ||
1146 | 587 | 'unknown-245': '624c3620'} | ||
1147 | 588 | m_dhcp.return_value = [lease] | ||
1148 | 589 | |||
1149 | 590 | reprovision_ovfenv = construct_valid_ovf_env() | ||
1150 | 591 | m_readurl.return_value = url_helper.StringResponse( | ||
1151 | 592 | reprovision_ovfenv.encode('utf-8')) | ||
1152 | 593 | |||
1153 | 594 | dsrc.crawl_metadata() | ||
1154 | 595 | self.assertEqual(2, report_ready_func.call_count) | ||
1155 | 596 | report_ready_func.assert_called_with(lease=lease) | ||
1156 | 597 | |||
1157 | 501 | def test_waagent_d_has_0700_perms(self): | 598 | def test_waagent_d_has_0700_perms(self): |
1158 | 502 | # we expect /var/lib/waagent to be created 0700 | 599 | # we expect /var/lib/waagent to be created 0700 |
1159 | 503 | dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) | 600 | dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) |
1160 | @@ -523,8 +620,10 @@ fdescfs /dev/fd fdescfs rw 0 0 | |||
1161 | 523 | 620 | ||
1162 | 524 | def test_network_config_set_from_imds(self): | 621 | def test_network_config_set_from_imds(self): |
1163 | 525 | """Datasource.network_config returns IMDS network data.""" | 622 | """Datasource.network_config returns IMDS network data.""" |
1164 | 623 | sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} | ||
1165 | 526 | odata = {} | 624 | odata = {} |
1167 | 527 | data = {'ovfcontent': construct_valid_ovf_env(data=odata)} | 625 | data = {'ovfcontent': construct_valid_ovf_env(data=odata), |
1168 | 626 | 'sys_cfg': sys_cfg} | ||
1169 | 528 | expected_network_config = { | 627 | expected_network_config = { |
1170 | 529 | 'ethernets': { | 628 | 'ethernets': { |
1171 | 530 | 'eth0': {'set-name': 'eth0', | 629 | 'eth0': {'set-name': 'eth0', |
1172 | @@ -803,9 +902,10 @@ fdescfs /dev/fd fdescfs rw 0 0 | |||
1173 | 803 | @mock.patch('cloudinit.net.generate_fallback_config') | 902 | @mock.patch('cloudinit.net.generate_fallback_config') |
1174 | 804 | def test_imds_network_config(self, mock_fallback): | 903 | def test_imds_network_config(self, mock_fallback): |
1175 | 805 | """Network config is generated from IMDS network data when present.""" | 904 | """Network config is generated from IMDS network data when present.""" |
1176 | 905 | sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} | ||
1177 | 806 | odata = {'HostName': "myhost", 'UserName': "myuser"} | 906 | odata = {'HostName': "myhost", 'UserName': "myuser"} |
1178 | 807 | data = {'ovfcontent': construct_valid_ovf_env(data=odata), | 907 | data = {'ovfcontent': construct_valid_ovf_env(data=odata), |
1180 | 808 | 'sys_cfg': {}} | 908 | 'sys_cfg': sys_cfg} |
1181 | 809 | 909 | ||
1182 | 810 | dsrc = self._get_ds(data) | 910 | dsrc = self._get_ds(data) |
1183 | 811 | ret = dsrc.get_data() | 911 | ret = dsrc.get_data() |
1184 | @@ -825,6 +925,36 @@ fdescfs /dev/fd fdescfs rw 0 0 | |||
1185 | 825 | @mock.patch('cloudinit.net.get_devicelist') | 925 | @mock.patch('cloudinit.net.get_devicelist') |
1186 | 826 | @mock.patch('cloudinit.net.device_driver') | 926 | @mock.patch('cloudinit.net.device_driver') |
1187 | 827 | @mock.patch('cloudinit.net.generate_fallback_config') | 927 | @mock.patch('cloudinit.net.generate_fallback_config') |
1188 | 928 | def test_imds_network_ignored_when_apply_network_config_false( | ||
1189 | 929 | self, mock_fallback, mock_dd, mock_devlist, mock_get_mac): | ||
1190 | 930 | """When apply_network_config is False, use fallback instead of IMDS.""" | ||
1191 | 931 | sys_cfg = {'datasource': {'Azure': {'apply_network_config': False}}} | ||
1192 | 932 | odata = {'HostName': "myhost", 'UserName': "myuser"} | ||
1193 | 933 | data = {'ovfcontent': construct_valid_ovf_env(data=odata), | ||
1194 | 934 | 'sys_cfg': sys_cfg} | ||
1195 | 935 | fallback_config = { | ||
1196 | 936 | 'version': 1, | ||
1197 | 937 | 'config': [{ | ||
1198 | 938 | 'type': 'physical', 'name': 'eth0', | ||
1199 | 939 | 'mac_address': '00:11:22:33:44:55', | ||
1200 | 940 | 'params': {'driver': 'hv_netsvc'}, | ||
1201 | 941 | 'subnets': [{'type': 'dhcp'}], | ||
1202 | 942 | }] | ||
1203 | 943 | } | ||
1204 | 944 | mock_fallback.return_value = fallback_config | ||
1205 | 945 | |||
1206 | 946 | mock_devlist.return_value = ['eth0'] | ||
1207 | 947 | mock_dd.return_value = ['hv_netsvc'] | ||
1208 | 948 | mock_get_mac.return_value = '00:11:22:33:44:55' | ||
1209 | 949 | |||
1210 | 950 | dsrc = self._get_ds(data) | ||
1211 | 951 | self.assertTrue(dsrc.get_data()) | ||
1212 | 952 | self.assertEqual(dsrc.network_config, fallback_config) | ||
1213 | 953 | |||
1214 | 954 | @mock.patch('cloudinit.net.get_interface_mac') | ||
1215 | 955 | @mock.patch('cloudinit.net.get_devicelist') | ||
1216 | 956 | @mock.patch('cloudinit.net.device_driver') | ||
1217 | 957 | @mock.patch('cloudinit.net.generate_fallback_config') | ||
1218 | 828 | def test_fallback_network_config(self, mock_fallback, mock_dd, | 958 | def test_fallback_network_config(self, mock_fallback, mock_dd, |
1219 | 829 | mock_devlist, mock_get_mac): | 959 | mock_devlist, mock_get_mac): |
1220 | 830 | """On absent IMDS network data, generate network fallback config.""" | 960 | """On absent IMDS network data, generate network fallback config.""" |
1221 | @@ -1411,21 +1541,20 @@ class TestCanDevBeReformatted(CiTestCase): | |||
1222 | 1411 | '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []} | 1541 | '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []} |
1223 | 1412 | }}}) | 1542 | }}}) |
1224 | 1413 | 1543 | ||
1240 | 1414 | err = ("Unexpected error while running command.\n", | 1544 | error_msgs = [ |
1241 | 1415 | "Command: ['mount', '-o', 'ro,sync', '-t', 'auto', ", | 1545 | "Stderr: mount: unknown filesystem type 'ntfs'", # RHEL |
1242 | 1416 | "'/dev/sda1', '/fake-tmp/dir']\n" | 1546 | "Stderr: mount: /dev/sdb1: unknown filesystem type 'ntfs'" # SLES |
1243 | 1417 | "Exit code: 32\n" | 1547 | ] |
1244 | 1418 | "Reason: -\n" | 1548 | |
1245 | 1419 | "Stdout: -\n" | 1549 | for err_msg in error_msgs: |
1246 | 1420 | "Stderr: mount: unknown filesystem type 'ntfs'") | 1550 | self.m_mount_cb.side_effect = MountFailedError( |
1247 | 1421 | self.m_mount_cb.side_effect = MountFailedError( | 1551 | "Failed mounting %s to %s due to: \nUnexpected.\n%s" % |
1248 | 1422 | 'Failed mounting %s to %s due to: %s' % | 1552 | ('/dev/sda', '/fake-tmp/dir', err_msg)) |
1249 | 1423 | ('/dev/sda', '/fake-tmp/dir', err)) | 1553 | |
1250 | 1424 | 1554 | value, msg = dsaz.can_dev_be_reformatted('/dev/sda', | |
1251 | 1425 | value, msg = dsaz.can_dev_be_reformatted('/dev/sda', | 1555 | preserve_ntfs=False) |
1252 | 1426 | preserve_ntfs=False) | 1556 | self.assertTrue(value) |
1253 | 1427 | self.assertTrue(value) | 1557 | self.assertIn('cannot mount NTFS, assuming', msg) |
1239 | 1428 | self.assertIn('cannot mount NTFS, assuming', msg) | ||
1254 | 1429 | 1558 | ||
1255 | 1430 | def test_never_destroy_ntfs_config_false(self): | 1559 | def test_never_destroy_ntfs_config_false(self): |
1256 | 1431 | """Normally formattable situation with never_destroy_ntfs set.""" | 1560 | """Normally formattable situation with never_destroy_ntfs set.""" |
1257 | diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py | |||
1258 | index 9f81255..1a5956d 100644 | |||
1259 | --- a/tests/unittests/test_datasource/test_ec2.py | |||
1260 | +++ b/tests/unittests/test_datasource/test_ec2.py | |||
1261 | @@ -211,9 +211,9 @@ class TestEc2(test_helpers.HttprettyTestCase): | |||
1262 | 211 | self.metadata_addr = self.datasource.metadata_urls[0] | 211 | self.metadata_addr = self.datasource.metadata_urls[0] |
1263 | 212 | self.tmp = self.tmp_dir() | 212 | self.tmp = self.tmp_dir() |
1264 | 213 | 213 | ||
1266 | 214 | def data_url(self, version): | 214 | def data_url(self, version, data_item='meta-data'): |
1267 | 215 | """Return a metadata url based on the version provided.""" | 215 | """Return a metadata url based on the version provided.""" |
1269 | 216 | return '/'.join([self.metadata_addr, version, 'meta-data', '']) | 216 | return '/'.join([self.metadata_addr, version, data_item]) |
1270 | 217 | 217 | ||
1271 | 218 | def _patch_add_cleanup(self, mpath, *args, **kwargs): | 218 | def _patch_add_cleanup(self, mpath, *args, **kwargs): |
1272 | 219 | p = mock.patch(mpath, *args, **kwargs) | 219 | p = mock.patch(mpath, *args, **kwargs) |
1273 | @@ -238,10 +238,18 @@ class TestEc2(test_helpers.HttprettyTestCase): | |||
1274 | 238 | all_versions = ( | 238 | all_versions = ( |
1275 | 239 | [ds.min_metadata_version] + ds.extended_metadata_versions) | 239 | [ds.min_metadata_version] + ds.extended_metadata_versions) |
1276 | 240 | for version in all_versions: | 240 | for version in all_versions: |
1278 | 241 | metadata_url = self.data_url(version) | 241 | metadata_url = self.data_url(version) + '/' |
1279 | 242 | if version == md_version: | 242 | if version == md_version: |
1280 | 243 | # Register all metadata for desired version | 243 | # Register all metadata for desired version |
1282 | 244 | register_mock_metaserver(metadata_url, md) | 244 | register_mock_metaserver( |
1283 | 245 | metadata_url, md.get('md', DEFAULT_METADATA)) | ||
1284 | 246 | userdata_url = self.data_url( | ||
1285 | 247 | version, data_item='user-data') | ||
1286 | 248 | register_mock_metaserver(userdata_url, md.get('ud', '')) | ||
1287 | 249 | identity_url = self.data_url( | ||
1288 | 250 | version, data_item='dynamic/instance-identity') | ||
1289 | 251 | register_mock_metaserver( | ||
1290 | 252 | identity_url, md.get('id', DYNAMIC_METADATA)) | ||
1291 | 245 | else: | 253 | else: |
1292 | 246 | instance_id_url = metadata_url + 'instance-id' | 254 | instance_id_url = metadata_url + 'instance-id' |
1293 | 247 | if version == ds.min_metadata_version: | 255 | if version == ds.min_metadata_version: |
1294 | @@ -261,7 +269,7 @@ class TestEc2(test_helpers.HttprettyTestCase): | |||
1295 | 261 | ds = self._setup_ds( | 269 | ds = self._setup_ds( |
1296 | 262 | platform_data=self.valid_platform_data, | 270 | platform_data=self.valid_platform_data, |
1297 | 263 | sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, | 271 | sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, |
1299 | 264 | md=DEFAULT_METADATA) | 272 | md={'md': DEFAULT_METADATA}) |
1300 | 265 | find_fallback_path = ( | 273 | find_fallback_path = ( |
1301 | 266 | 'cloudinit.sources.DataSourceEc2.net.find_fallback_nic') | 274 | 'cloudinit.sources.DataSourceEc2.net.find_fallback_nic') |
1302 | 267 | with mock.patch(find_fallback_path) as m_find_fallback: | 275 | with mock.patch(find_fallback_path) as m_find_fallback: |
1303 | @@ -293,7 +301,7 @@ class TestEc2(test_helpers.HttprettyTestCase): | |||
1304 | 293 | ds = self._setup_ds( | 301 | ds = self._setup_ds( |
1305 | 294 | platform_data=self.valid_platform_data, | 302 | platform_data=self.valid_platform_data, |
1306 | 295 | sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, | 303 | sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, |
1308 | 296 | md=DEFAULT_METADATA) | 304 | md={'md': DEFAULT_METADATA}) |
1309 | 297 | find_fallback_path = ( | 305 | find_fallback_path = ( |
1310 | 298 | 'cloudinit.sources.DataSourceEc2.net.find_fallback_nic') | 306 | 'cloudinit.sources.DataSourceEc2.net.find_fallback_nic') |
1311 | 299 | with mock.patch(find_fallback_path) as m_find_fallback: | 307 | with mock.patch(find_fallback_path) as m_find_fallback: |
1312 | @@ -322,7 +330,7 @@ class TestEc2(test_helpers.HttprettyTestCase): | |||
1313 | 322 | ds = self._setup_ds( | 330 | ds = self._setup_ds( |
1314 | 323 | platform_data=self.valid_platform_data, | 331 | platform_data=self.valid_platform_data, |
1315 | 324 | sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, | 332 | sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, |
1317 | 325 | md=DEFAULT_METADATA) | 333 | md={'md': DEFAULT_METADATA}) |
1318 | 326 | ds._network_config = {'cached': 'data'} | 334 | ds._network_config = {'cached': 'data'} |
1319 | 327 | self.assertEqual({'cached': 'data'}, ds.network_config) | 335 | self.assertEqual({'cached': 'data'}, ds.network_config) |
1320 | 328 | 336 | ||
1321 | @@ -338,7 +346,7 @@ class TestEc2(test_helpers.HttprettyTestCase): | |||
1322 | 338 | ds = self._setup_ds( | 346 | ds = self._setup_ds( |
1323 | 339 | platform_data=self.valid_platform_data, | 347 | platform_data=self.valid_platform_data, |
1324 | 340 | sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, | 348 | sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, |
1326 | 341 | md=old_metadata) | 349 | md={'md': old_metadata}) |
1327 | 342 | self.assertTrue(ds.get_data()) | 350 | self.assertTrue(ds.get_data()) |
1328 | 343 | # Provide new revision of metadata that contains network data | 351 | # Provide new revision of metadata that contains network data |
1329 | 344 | register_mock_metaserver( | 352 | register_mock_metaserver( |
1330 | @@ -372,7 +380,7 @@ class TestEc2(test_helpers.HttprettyTestCase): | |||
1331 | 372 | ds = self._setup_ds( | 380 | ds = self._setup_ds( |
1332 | 373 | platform_data=self.valid_platform_data, | 381 | platform_data=self.valid_platform_data, |
1333 | 374 | sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, | 382 | sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, |
1335 | 375 | md=DEFAULT_METADATA) | 383 | md={'md': DEFAULT_METADATA}) |
1336 | 376 | # Mock 404s on all versions except latest | 384 | # Mock 404s on all versions except latest |
1337 | 377 | all_versions = ( | 385 | all_versions = ( |
1338 | 378 | [ds.min_metadata_version] + ds.extended_metadata_versions) | 386 | [ds.min_metadata_version] + ds.extended_metadata_versions) |
1339 | @@ -399,7 +407,7 @@ class TestEc2(test_helpers.HttprettyTestCase): | |||
1340 | 399 | ds = self._setup_ds( | 407 | ds = self._setup_ds( |
1341 | 400 | platform_data=self.valid_platform_data, | 408 | platform_data=self.valid_platform_data, |
1342 | 401 | sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, | 409 | sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, |
1344 | 402 | md=DEFAULT_METADATA) | 410 | md={'md': DEFAULT_METADATA}) |
1345 | 403 | ret = ds.get_data() | 411 | ret = ds.get_data() |
1346 | 404 | self.assertTrue(ret) | 412 | self.assertTrue(ret) |
1347 | 405 | self.assertEqual(0, m_dhcp.call_count) | 413 | self.assertEqual(0, m_dhcp.call_count) |
1348 | @@ -412,7 +420,7 @@ class TestEc2(test_helpers.HttprettyTestCase): | |||
1349 | 412 | ds = self._setup_ds( | 420 | ds = self._setup_ds( |
1350 | 413 | platform_data=self.valid_platform_data, | 421 | platform_data=self.valid_platform_data, |
1351 | 414 | sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, | 422 | sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, |
1353 | 415 | md=DEFAULT_METADATA) | 423 | md={'md': DEFAULT_METADATA}) |
1354 | 416 | ret = ds.get_data() | 424 | ret = ds.get_data() |
1355 | 417 | self.assertTrue(ret) | 425 | self.assertTrue(ret) |
1356 | 418 | 426 | ||
1357 | @@ -422,7 +430,7 @@ class TestEc2(test_helpers.HttprettyTestCase): | |||
1358 | 422 | ds = self._setup_ds( | 430 | ds = self._setup_ds( |
1359 | 423 | platform_data={'uuid': uuid, 'uuid_source': 'dmi', 'serial': ''}, | 431 | platform_data={'uuid': uuid, 'uuid_source': 'dmi', 'serial': ''}, |
1360 | 424 | sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, | 432 | sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, |
1362 | 425 | md=DEFAULT_METADATA) | 433 | md={'md': DEFAULT_METADATA}) |
1363 | 426 | ret = ds.get_data() | 434 | ret = ds.get_data() |
1364 | 427 | self.assertFalse(ret) | 435 | self.assertFalse(ret) |
1365 | 428 | 436 | ||
1366 | @@ -432,7 +440,7 @@ class TestEc2(test_helpers.HttprettyTestCase): | |||
1367 | 432 | ds = self._setup_ds( | 440 | ds = self._setup_ds( |
1368 | 433 | platform_data={'uuid': uuid, 'uuid_source': 'dmi', 'serial': ''}, | 441 | platform_data={'uuid': uuid, 'uuid_source': 'dmi', 'serial': ''}, |
1369 | 434 | sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, | 442 | sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, |
1371 | 435 | md=DEFAULT_METADATA) | 443 | md={'md': DEFAULT_METADATA}) |
1372 | 436 | ret = ds.get_data() | 444 | ret = ds.get_data() |
1373 | 437 | self.assertTrue(ret) | 445 | self.assertTrue(ret) |
1374 | 438 | 446 | ||
1375 | @@ -442,7 +450,7 @@ class TestEc2(test_helpers.HttprettyTestCase): | |||
1376 | 442 | ds = self._setup_ds( | 450 | ds = self._setup_ds( |
1377 | 443 | platform_data=self.valid_platform_data, | 451 | platform_data=self.valid_platform_data, |
1378 | 444 | sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, | 452 | sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, |
1380 | 445 | md=DEFAULT_METADATA) | 453 | md={'md': DEFAULT_METADATA}) |
1381 | 446 | platform_attrs = [ | 454 | platform_attrs = [ |
1382 | 447 | attr for attr in ec2.CloudNames.__dict__.keys() | 455 | attr for attr in ec2.CloudNames.__dict__.keys() |
1383 | 448 | if not attr.startswith('__')] | 456 | if not attr.startswith('__')] |
1384 | @@ -469,7 +477,7 @@ class TestEc2(test_helpers.HttprettyTestCase): | |||
1385 | 469 | ds = self._setup_ds( | 477 | ds = self._setup_ds( |
1386 | 470 | platform_data=self.valid_platform_data, | 478 | platform_data=self.valid_platform_data, |
1387 | 471 | sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, | 479 | sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, |
1389 | 472 | md=DEFAULT_METADATA) | 480 | md={'md': DEFAULT_METADATA}) |
1390 | 473 | ret = ds.get_data() | 481 | ret = ds.get_data() |
1391 | 474 | self.assertFalse(ret) | 482 | self.assertFalse(ret) |
1392 | 475 | self.assertIn( | 483 | self.assertIn( |
1393 | @@ -499,7 +507,7 @@ class TestEc2(test_helpers.HttprettyTestCase): | |||
1394 | 499 | ds = self._setup_ds( | 507 | ds = self._setup_ds( |
1395 | 500 | platform_data=self.valid_platform_data, | 508 | platform_data=self.valid_platform_data, |
1396 | 501 | sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, | 509 | sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, |
1398 | 502 | md=DEFAULT_METADATA) | 510 | md={'md': DEFAULT_METADATA}) |
1399 | 503 | 511 | ||
1400 | 504 | ret = ds.get_data() | 512 | ret = ds.get_data() |
1401 | 505 | self.assertTrue(ret) | 513 | self.assertTrue(ret) |
1402 | diff --git a/udev/66-azure-ephemeral.rules b/udev/66-azure-ephemeral.rules | |||
1403 | index b9c5c3e..3032f7e 100644 | |||
1404 | --- a/udev/66-azure-ephemeral.rules | |||
1405 | +++ b/udev/66-azure-ephemeral.rules | |||
1406 | @@ -4,10 +4,26 @@ SUBSYSTEM!="block", GOTO="cloud_init_end" | |||
1407 | 4 | ATTRS{ID_VENDOR}!="Msft", GOTO="cloud_init_end" | 4 | ATTRS{ID_VENDOR}!="Msft", GOTO="cloud_init_end" |
1408 | 5 | ATTRS{ID_MODEL}!="Virtual_Disk", GOTO="cloud_init_end" | 5 | ATTRS{ID_MODEL}!="Virtual_Disk", GOTO="cloud_init_end" |
1409 | 6 | 6 | ||
1411 | 7 | # Root has a GUID of 0000 as the second value | 7 | # Root has a GUID of 0000 as the second value on Gen1 instances |
1412 | 8 | # The resource/resource has GUID of 0001 as the second value | 8 | # The resource/resource has GUID of 0001 as the second value |
1413 | 9 | ATTRS{device_id}=="?00000000-0000-*", ENV{fabric_name}="azure_root", GOTO="ci_azure_names" | 9 | ATTRS{device_id}=="?00000000-0000-*", ENV{fabric_name}="azure_root", GOTO="ci_azure_names" |
1414 | 10 | ATTRS{device_id}=="?00000000-0001-*", ENV{fabric_name}="azure_resource", GOTO="ci_azure_names" | 10 | ATTRS{device_id}=="?00000000-0001-*", ENV{fabric_name}="azure_resource", GOTO="ci_azure_names" |
1415 | 11 | |||
1416 | 12 | # Azure well known SCSI controllers on Gen2 instances | ||
1417 | 13 | ATTRS{device_id}=="{f8b3781a-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi0", GOTO="azure_datadisk" | ||
1418 | 14 | # Do not create symlinks for scsi[1-3] or unmatched device_ids | ||
1419 | 15 | ATTRS{device_id}=="{f8b3781b-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi1", GOTO="cloud_init_end" | ||
1420 | 16 | ATTRS{device_id}=="{f8b3781c-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi2", GOTO="cloud_init_end" | ||
1421 | 17 | ATTRS{device_id}=="{f8b3781d-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi3", GOTO="cloud_init_end" | ||
1422 | 18 | GOTO="cloud_init_end" | ||
1423 | 19 | |||
1424 | 20 | # Map scsi#/lun# fabric_name to azure_root|resource on Gen2 instances | ||
1425 | 21 | LABEL="azure_datadisk" | ||
1426 | 22 | ENV{DEVTYPE}=="partition", PROGRAM="/bin/sh -c 'readlink /sys/class/block/%k/../device|cut -d: -f4'", ENV{fabric_name}="$env{fabric_scsi_controller}/lun$result" | ||
1427 | 23 | ENV{DEVTYPE}=="disk", PROGRAM="/bin/sh -c 'readlink /sys/class/block/%k/device|cut -d: -f4'", ENV{fabric_name}="$env{fabric_scsi_controller}/lun$result" | ||
1428 | 24 | |||
1429 | 25 | ENV{fabric_name}=="scsi0/lun0", ENV{fabric_name}="azure_root", GOTO="ci_azure_names" | ||
1430 | 26 | ENV{fabric_name}=="scsi0/lun1", ENV{fabric_name}="azure_resource", GOTO="ci_azure_names" | ||
1431 | 11 | GOTO="cloud_init_end" | 27 | GOTO="cloud_init_end" |
1432 | 12 | 28 | ||
1433 | 13 | # Create the symlinks | 29 | # Create the symlinks |
PASSED: Continuous integration, rev:db78de17b180d531624fd1e296246692d3db18bb
https://jenkins.ubuntu.com/server/job/cloud-init-ci/438/
Executed test runs:
SUCCESS: Checkout
SUCCESS: Unit & Style Tests
SUCCESS: Ubuntu LTS: Build
SUCCESS: Ubuntu LTS: Integration
IN_PROGRESS: Declarative: Post Actions
Click here to trigger a rebuild:
https://jenkins.ubuntu.com/server/job/cloud-init-ci/438/rebuild