Merge ~chad.smith/cloud-init:ubuntu/artful into cloud-init:ubuntu/artful
Proposed by: Chad Smith
Status: Merged
Merged at revision: 9ddf54f3ca597b416d00ebb43450159013fcd3e8
Proposed branch: ~chad.smith/cloud-init:ubuntu/artful
Merge into: cloud-init:ubuntu/artful
Diff against target: 722 lines (+396/-58), 11 files modified
  cloudinit/cmd/status.py (+4/-1)
  cloudinit/cmd/tests/test_status.py (+28/-7)
  cloudinit/net/__init__.py (+46/-17)
  cloudinit/net/tests/test_init.py (+91/-0)
  cloudinit/sources/DataSourceEc2.py (+6/-0)
  cloudinit/ssh_util.py (+1/-4)
  debian/changelog (+11/-0)
  tests/cloud_tests/collect.py (+3/-2)
  tests/cloud_tests/platforms/lxd/instance.py (+106/-26)
  tests/unittests/test_datasource/test_ec2.py (+58/-1)
  tests/unittests/test_sshutil.py (+42/-0)
Related bugs: (none listed)
Reviewer: Scott Moser — Pending
Commit message
Description of the change
Sync tip of master including SRU fix for release into artful.
Preview Diff
1 | diff --git a/cloudinit/cmd/status.py b/cloudinit/cmd/status.py |
2 | index d7aaee9..ea79a85 100644 |
3 | --- a/cloudinit/cmd/status.py |
4 | +++ b/cloudinit/cmd/status.py |
5 | @@ -105,12 +105,12 @@ def _get_status_details(paths): |
6 | |
7 | Values are obtained from parsing paths.run_dir/status.json. |
8 | """ |
9 | - |
10 | status = STATUS_ENABLED_NOT_RUN |
11 | status_detail = '' |
12 | status_v1 = {} |
13 | |
14 | status_file = os.path.join(paths.run_dir, 'status.json') |
15 | + result_file = os.path.join(paths.run_dir, 'result.json') |
16 | |
17 | (is_disabled, reason) = _is_cloudinit_disabled( |
18 | CLOUDINIT_DISABLED_FILE, paths) |
19 | @@ -118,12 +118,15 @@ def _get_status_details(paths): |
20 | status = STATUS_DISABLED |
21 | status_detail = reason |
22 | if os.path.exists(status_file): |
23 | + if not os.path.exists(result_file): |
24 | + status = STATUS_RUNNING |
25 | status_v1 = load_json(load_file(status_file)).get('v1', {}) |
26 | errors = [] |
27 | latest_event = 0 |
28 | for key, value in sorted(status_v1.items()): |
29 | if key == 'stage': |
30 | if value: |
31 | + status = STATUS_RUNNING |
32 | status_detail = 'Running in stage: {0}'.format(value) |
33 | elif key == 'datasource': |
34 | status_detail = value |
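The gist of the status.py change above: cloud-init is reported as running whenever status.json exists but result.json has not yet been written, or a stage is still in progress. A minimal standalone sketch of that decision, using stand-in labels rather than the module's real STATUS_* constants:

    import os

    # Stand-ins for cloud-init's STATUS_* constants (names here are illustrative only).
    NOT_RUN, RUNNING, DONE = 'not run', 'running', 'done'

    def guess_status(run_dir):
        """Rough sketch of the decision made in _get_status_details()."""
        status_file = os.path.join(run_dir, 'status.json')
        result_file = os.path.join(run_dir, 'result.json')
        if not os.path.exists(status_file):
            return NOT_RUN
        # status.json without result.json means boot stages are still running.
        if not os.path.exists(result_file):
            return RUNNING
        return DONE

    print(guess_status('/run/cloud-init'))

This is only a sketch of the condition being tested; the real helper also parses status.json for the current stage, datasource and errors, as the hunk shows.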
35 | diff --git a/cloudinit/cmd/tests/test_status.py b/cloudinit/cmd/tests/test_status.py |
36 | index a7c0a91..4a5a8c0 100644 |
37 | --- a/cloudinit/cmd/tests/test_status.py |
38 | +++ b/cloudinit/cmd/tests/test_status.py |
39 | @@ -7,7 +7,7 @@ from textwrap import dedent |
40 | |
41 | from cloudinit.atomic_helper import write_json |
42 | from cloudinit.cmd import status |
43 | -from cloudinit.util import write_file |
44 | +from cloudinit.util import ensure_file |
45 | from cloudinit.tests.helpers import CiTestCase, wrap_and_call, mock |
46 | |
47 | mypaths = namedtuple('MyPaths', 'run_dir') |
48 | @@ -36,7 +36,7 @@ class TestStatus(CiTestCase): |
49 | |
50 | def test__is_cloudinit_disabled_false_on_sysvinit(self): |
51 | '''When not in an environment using systemd, return False.''' |
52 | - write_file(self.disable_file, '') # Create the ignored disable file |
53 | + ensure_file(self.disable_file) # Create the ignored disable file |
54 | (is_disabled, reason) = wrap_and_call( |
55 | 'cloudinit.cmd.status', |
56 | {'uses_systemd': False}, |
57 | @@ -47,7 +47,7 @@ class TestStatus(CiTestCase): |
58 | |
59 | def test__is_cloudinit_disabled_true_on_disable_file(self): |
60 | '''When using systemd and disable_file is present return disabled.''' |
61 | - write_file(self.disable_file, '') # Create observed disable file |
62 | + ensure_file(self.disable_file) # Create observed disable file |
63 | (is_disabled, reason) = wrap_and_call( |
64 | 'cloudinit.cmd.status', |
65 | {'uses_systemd': True}, |
66 | @@ -58,7 +58,7 @@ class TestStatus(CiTestCase): |
67 | |
68 | def test__is_cloudinit_disabled_false_on_kernel_cmdline_enable(self): |
69 | '''Not disabled when using systemd and enabled via commandline.''' |
70 | - write_file(self.disable_file, '') # Create ignored disable file |
71 | + ensure_file(self.disable_file) # Create ignored disable file |
72 | (is_disabled, reason) = wrap_and_call( |
73 | 'cloudinit.cmd.status', |
74 | {'uses_systemd': True, |
75 | @@ -96,7 +96,7 @@ class TestStatus(CiTestCase): |
76 | def test__is_cloudinit_disabled_false_when_enabled_in_systemd(self): |
77 | '''Report enabled when systemd generator creates the enabled file.''' |
78 | enabled_file = os.path.join(self.paths.run_dir, 'enabled') |
79 | - write_file(enabled_file, '') |
80 | + ensure_file(enabled_file) |
81 | (is_disabled, reason) = wrap_and_call( |
82 | 'cloudinit.cmd.status', |
83 | {'uses_systemd': True, |
84 | @@ -149,8 +149,25 @@ class TestStatus(CiTestCase): |
85 | ''') |
86 | self.assertEqual(expected, m_stdout.getvalue()) |
87 | |
88 | + def test_status_returns_running_on_no_results_json(self): |
89 | + '''Report running when status.json exists but result.json does not.''' |
90 | + result_file = self.tmp_path('result.json', self.new_root) |
91 | + write_json(self.status_file, {}) |
92 | + self.assertFalse( |
93 | + os.path.exists(result_file), 'Unexpected result.json found') |
94 | + cmdargs = myargs(long=False, wait=False) |
95 | + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: |
96 | + retcode = wrap_and_call( |
97 | + 'cloudinit.cmd.status', |
98 | + {'_is_cloudinit_disabled': (False, ''), |
99 | + 'Init': {'side_effect': self.init_class}}, |
100 | + status.handle_status_args, 'ignored', cmdargs) |
101 | + self.assertEqual(0, retcode) |
102 | + self.assertEqual('status: running\n', m_stdout.getvalue()) |
103 | + |
104 | def test_status_returns_running(self): |
105 | '''Report running when status exists with an unfinished stage.''' |
106 | + ensure_file(self.tmp_path('result.json', self.new_root)) |
107 | write_json(self.status_file, |
108 | {'v1': {'init': {'start': 1, 'finished': None}}}) |
109 | cmdargs = myargs(long=False, wait=False) |
110 | @@ -164,10 +181,11 @@ class TestStatus(CiTestCase): |
111 | self.assertEqual('status: running\n', m_stdout.getvalue()) |
112 | |
113 | def test_status_returns_done(self): |
114 | - '''Reports done when stage is None and all stages are finished.''' |
115 | + '''Report done results.json exists no stages are unfinished.''' |
116 | + ensure_file(self.tmp_path('result.json', self.new_root)) |
117 | write_json( |
118 | self.status_file, |
119 | - {'v1': {'stage': None, |
120 | + {'v1': {'stage': None, # No current stage running |
121 | 'datasource': ( |
122 | 'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]' |
123 | '[dsmode=net]'), |
124 | @@ -187,6 +205,7 @@ class TestStatus(CiTestCase): |
125 | |
126 | def test_status_returns_done_long(self): |
127 | '''Long format of done status includes datasource info.''' |
128 | + ensure_file(self.tmp_path('result.json', self.new_root)) |
129 | write_json( |
130 | self.status_file, |
131 | {'v1': {'stage': None, |
132 | @@ -303,6 +322,8 @@ class TestStatus(CiTestCase): |
133 | write_json(self.status_file, running_json) |
134 | elif self.sleep_calls == 3: |
135 | write_json(self.status_file, done_json) |
136 | + result_file = self.tmp_path('result.json', self.new_root) |
137 | + ensure_file(result_file) |
138 | |
139 | cmdargs = myargs(long=False, wait=True) |
140 | with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: |
141 | diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py |
142 | index c015e79..f69c0ef 100644 |
143 | --- a/cloudinit/net/__init__.py |
144 | +++ b/cloudinit/net/__init__.py |
145 | @@ -274,23 +274,52 @@ def apply_network_config_names(netcfg, strict_present=True, strict_busy=True): |
146 | renames are only attempted for interfaces of type 'physical'. It is |
147 | expected that the network system will create other devices with the |
148 | correct name in place.""" |
149 | - renames = [] |
150 | - for ent in netcfg.get('config', {}): |
151 | - if ent.get('type') != 'physical': |
152 | - continue |
153 | - mac = ent.get('mac_address') |
154 | - if not mac: |
155 | - continue |
156 | - name = ent.get('name') |
157 | - driver = ent.get('params', {}).get('driver') |
158 | - device_id = ent.get('params', {}).get('device_id') |
159 | - if not driver: |
160 | - driver = device_driver(name) |
161 | - if not device_id: |
162 | - device_id = device_devid(name) |
163 | - renames.append([mac, name, driver, device_id]) |
164 | - |
165 | - return _rename_interfaces(renames) |
166 | + |
167 | + def _version_1(netcfg): |
168 | + renames = [] |
169 | + for ent in netcfg.get('config', {}): |
170 | + if ent.get('type') != 'physical': |
171 | + continue |
172 | + mac = ent.get('mac_address') |
173 | + if not mac: |
174 | + continue |
175 | + name = ent.get('name') |
176 | + driver = ent.get('params', {}).get('driver') |
177 | + device_id = ent.get('params', {}).get('device_id') |
178 | + if not driver: |
179 | + driver = device_driver(name) |
180 | + if not device_id: |
181 | + device_id = device_devid(name) |
182 | + renames.append([mac, name, driver, device_id]) |
183 | + return renames |
184 | + |
185 | + def _version_2(netcfg): |
186 | + renames = [] |
187 | + for key, ent in netcfg.get('ethernets', {}).items(): |
188 | + # only rename if configured to do so |
189 | + name = ent.get('set-name') |
190 | + if not name: |
191 | + continue |
192 | + # cloud-init requires macaddress for renaming |
193 | + mac = ent.get('match', {}).get('macaddress') |
194 | + if not mac: |
195 | + continue |
196 | + driver = ent.get('match', {}).get('driver') |
197 | + device_id = ent.get('match', {}).get('device_id') |
198 | + if not driver: |
199 | + driver = device_driver(name) |
200 | + if not device_id: |
201 | + device_id = device_devid(name) |
202 | + renames.append([mac, name, driver, device_id]) |
203 | + return renames |
204 | + |
205 | + if netcfg.get('version') == 1: |
206 | + return _rename_interfaces(_version_1(netcfg)) |
207 | + elif netcfg.get('version') == 2: |
208 | + return _rename_interfaces(_version_2(netcfg)) |
209 | + |
210 | + raise RuntimeError('Failed to apply network config names. Found bad' |
211 | + ' network config version: %s' % netcfg.get('version')) |
212 | |
213 | |
214 | def interface_has_own_mac(ifname, strict=False): |
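To illustrate the new version-2 path in apply_network_config_names: a netplan 'ethernets' entry contributes a rename only when it carries both set-name and match.macaddress. A rough standalone re-implementation of that extraction, independent of cloud-init (so the driver/device_id lookups done by the real code are omitted):

    def v2_renames(netcfg):
        """Collect [mac, name] pairs from a netplan (version 2) dict."""
        renames = []
        for _key, ent in netcfg.get('ethernets', {}).items():
            name = ent.get('set-name')
            mac = ent.get('match', {}).get('macaddress')
            if name and mac:  # both are required before a rename is attempted
                renames.append([mac, name])
        return renames

    cfg = {
        'version': 2,
        'ethernets': {
            'interface0': {
                'match': {'macaddress': '52:54:00:12:34:00'},
                'set-name': 'interface0',
            },
        },
    }
    print(v2_renames(cfg))  # [['52:54:00:12:34:00', 'interface0']]

Entries without set-name or without a matched MAC are simply skipped, which is exactly what the V2_CONFIG_NO_SETNAME and V2_CONFIG_NO_MAC tests below assert.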
215 | diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py |
216 | index 8cb4114..276556e 100644 |
217 | --- a/cloudinit/net/tests/test_init.py |
218 | +++ b/cloudinit/net/tests/test_init.py |
219 | @@ -4,6 +4,8 @@ import copy |
220 | import errno |
221 | import mock |
222 | import os |
223 | +import textwrap |
224 | +import yaml |
225 | |
226 | import cloudinit.net as net |
227 | from cloudinit.util import ensure_file, write_file, ProcessExecutionError |
228 | @@ -520,3 +522,92 @@ class TestEphemeralIPV4Network(CiTestCase): |
229 | with net.EphemeralIPv4Network(**params): |
230 | self.assertEqual(expected_setup_calls, m_subp.call_args_list) |
231 | m_subp.assert_has_calls(expected_teardown_calls) |
232 | + |
233 | + |
234 | +class TestApplyNetworkCfgNames(CiTestCase): |
235 | + V1_CONFIG = textwrap.dedent("""\ |
236 | + version: 1 |
237 | + config: |
238 | + - type: physical |
239 | + name: interface0 |
240 | + mac_address: "52:54:00:12:34:00" |
241 | + subnets: |
242 | + - type: static |
243 | + address: 10.0.2.15 |
244 | + netmask: 255.255.255.0 |
245 | + gateway: 10.0.2.2 |
246 | + """) |
247 | + V2_CONFIG = textwrap.dedent("""\ |
248 | + version: 2 |
249 | + ethernets: |
250 | + interface0: |
251 | + match: |
252 | + macaddress: "52:54:00:12:34:00" |
253 | + addresses: |
254 | + - 10.0.2.15/24 |
255 | + gateway4: 10.0.2.2 |
256 | + set-name: interface0 |
257 | + """) |
258 | + |
259 | + V2_CONFIG_NO_SETNAME = textwrap.dedent("""\ |
260 | + version: 2 |
261 | + ethernets: |
262 | + interface0: |
263 | + match: |
264 | + macaddress: "52:54:00:12:34:00" |
265 | + addresses: |
266 | + - 10.0.2.15/24 |
267 | + gateway4: 10.0.2.2 |
268 | + """) |
269 | + |
270 | + V2_CONFIG_NO_MAC = textwrap.dedent("""\ |
271 | + version: 2 |
272 | + ethernets: |
273 | + interface0: |
274 | + match: |
275 | + driver: virtio-net |
276 | + addresses: |
277 | + - 10.0.2.15/24 |
278 | + gateway4: 10.0.2.2 |
279 | + set-name: interface0 |
280 | + """) |
281 | + |
282 | + @mock.patch('cloudinit.net.device_devid') |
283 | + @mock.patch('cloudinit.net.device_driver') |
284 | + @mock.patch('cloudinit.net._rename_interfaces') |
285 | + def test_apply_v1_renames(self, m_rename_interfaces, m_device_driver, |
286 | + m_device_devid): |
287 | + m_device_driver.return_value = 'virtio_net' |
288 | + m_device_devid.return_value = '0x15d8' |
289 | + |
290 | + net.apply_network_config_names(yaml.load(self.V1_CONFIG)) |
291 | + |
292 | + call = ['52:54:00:12:34:00', 'interface0', 'virtio_net', '0x15d8'] |
293 | + m_rename_interfaces.assert_called_with([call]) |
294 | + |
295 | + @mock.patch('cloudinit.net.device_devid') |
296 | + @mock.patch('cloudinit.net.device_driver') |
297 | + @mock.patch('cloudinit.net._rename_interfaces') |
298 | + def test_apply_v2_renames(self, m_rename_interfaces, m_device_driver, |
299 | + m_device_devid): |
300 | + m_device_driver.return_value = 'virtio_net' |
301 | + m_device_devid.return_value = '0x15d8' |
302 | + |
303 | + net.apply_network_config_names(yaml.load(self.V2_CONFIG)) |
304 | + |
305 | + call = ['52:54:00:12:34:00', 'interface0', 'virtio_net', '0x15d8'] |
306 | + m_rename_interfaces.assert_called_with([call]) |
307 | + |
308 | + @mock.patch('cloudinit.net._rename_interfaces') |
309 | + def test_apply_v2_renames_skips_without_setname(self, m_rename_interfaces): |
310 | + net.apply_network_config_names(yaml.load(self.V2_CONFIG_NO_SETNAME)) |
311 | + m_rename_interfaces.assert_called_with([]) |
312 | + |
313 | + @mock.patch('cloudinit.net._rename_interfaces') |
314 | + def test_apply_v2_renames_skips_without_mac(self, m_rename_interfaces): |
315 | + net.apply_network_config_names(yaml.load(self.V2_CONFIG_NO_MAC)) |
316 | + m_rename_interfaces.assert_called_with([]) |
317 | + |
318 | + def test_apply_v2_renames_raises_runtime_error_on_unknown_version(self): |
319 | + with self.assertRaises(RuntimeError): |
320 | + net.apply_network_config_names(yaml.load("version: 3")) |
321 | diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py |
322 | index e14553b..21e9ef8 100644 |
323 | --- a/cloudinit/sources/DataSourceEc2.py |
324 | +++ b/cloudinit/sources/DataSourceEc2.py |
325 | @@ -147,6 +147,12 @@ class DataSourceEc2(sources.DataSource): |
326 | def get_instance_id(self): |
327 | if self.cloud_platform == Platforms.AWS: |
328 | # Prefer the ID from the instance identity document, but fall back |
329 | + if not getattr(self, 'identity', None): |
330 | + # If re-using cached datasource, it's get_data run didn't |
331 | + # setup self.identity. So we need to do that now. |
332 | + api_version = self.get_metadata_api_version() |
333 | + self.identity = ec2.get_instance_identity( |
334 | + api_version, self.metadata_address).get('document', {}) |
335 | return self.identity.get( |
336 | 'instanceId', self.metadata['instance-id']) |
337 | else: |
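The EC2 change follows a common pattern for attributes introduced after objects may already have been pickled: if the unpickled instance lacks the new attribute, recompute it lazily instead of tracing. A generic sketch of that pattern with a hypothetical class (not the real datasource, whose identity document comes from the metadata service):

    class CachedDataSource:
        """Stand-in for a datasource unpickled from an older release."""

        def __init__(self):
            self.metadata = {'instance-id': 'i-fallback'}
            # Note: no self.identity -- older pickles never set it.

        def _fetch_identity(self):
            # Stub for ec2.get_instance_identity(...).get('document', {}).
            return {'instanceId': 'i-from-identity-doc'}

        def get_instance_id(self):
            if not getattr(self, 'identity', None):
                # Attribute missing on a cached object: populate it on demand.
                self.identity = self._fetch_identity()
            return self.identity.get('instanceId', self.metadata['instance-id'])

    print(CachedDataSource().get_instance_id())  # i-from-identity-doc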
338 | diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py |
339 | index b95b956..882517f 100644 |
340 | --- a/cloudinit/ssh_util.py |
341 | +++ b/cloudinit/ssh_util.py |
342 | @@ -171,16 +171,13 @@ def parse_authorized_keys(fname): |
343 | |
344 | |
345 | def update_authorized_keys(old_entries, keys): |
346 | - to_add = list(keys) |
347 | - |
348 | + to_add = list([k for k in keys if k.valid()]) |
349 | for i in range(0, len(old_entries)): |
350 | ent = old_entries[i] |
351 | if not ent.valid(): |
352 | continue |
353 | # Replace those with the same base64 |
354 | for k in keys: |
355 | - if not ent.valid(): |
356 | - continue |
357 | if k.base64 == ent.base64: |
358 | # Replace it with our better one |
359 | ent = k |
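The ssh_util fix boils down to validating the *new* keys up front (previously only the old entries were checked), so malformed additions never reach authorized_keys. A simplified sketch with a toy validity rule standing in for AuthKeyLine.valid(); the real function works on parsed AuthKeyLine objects and returns the joined file content rather than a list:

    class Key:
        def __init__(self, base64, comment=''):
            self.base64, self.comment = base64, comment

        def valid(self):
            # Toy rule; the real AuthKeyLine checks keytype and base64 fields.
            return bool(self.base64)

    def update_authorized_keys(old_entries, keys):
        to_add = [k for k in keys if k.valid()]  # drop invalid new keys up front
        merged = []
        for ent in old_entries:
            if ent.valid():
                for k in keys:
                    if k.base64 == ent.base64:
                        ent = k  # replace the matching entry with the newer one
                        if k in to_add:
                            to_add.remove(k)
            merged.append(ent)
        merged.extend(to_add)
        return merged

    old = [Key('AAAA1', 'orig'), Key('AAAA2', 'orig')]
    new = [Key('AAAA1', 'new'), Key('', 'broken')]
    print([k.comment for k in update_authorized_keys(old, new)])  # ['new', 'orig']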
360 | diff --git a/debian/changelog b/debian/changelog |
361 | index 35cabb1..fe1b23d 100644 |
362 | --- a/debian/changelog |
363 | +++ b/debian/changelog |
364 | @@ -1,3 +1,14 @@ |
365 | +cloud-init (17.2-35-gf576b2a2-0ubuntu1~17.10.1) artful-proposed; urgency=medium |
366 | + |
367 | + * New upstream snapshot. (LP: #1747059) |
368 | + - tests: add support for logs with lxd from snap and future lxd 3. |
369 | + - EC2: Fix get_instance_id called against cached datasource pickle. |
370 | + - cli: fix cloud-init status to report running when before result.json |
371 | + - net: accept network-config in netplan format for renaming interfaces |
372 | + - Fix ssh keys validation in ssh_util [Tatiana Kholkina] |
373 | + |
374 | + -- Chad Smith <chad.smith@canonical.com> Mon, 12 Feb 2018 10:16:42 -0700 |
375 | + |
376 | cloud-init (17.2-30-gf7deaf15-0ubuntu1~17.10.1) artful-proposed; urgency=medium |
377 | |
378 | * New upstream snapshot. (LP: #1747059) |
379 | diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py |
380 | index 5ea88e5..d4f9135 100644 |
381 | --- a/tests/cloud_tests/collect.py |
382 | +++ b/tests/cloud_tests/collect.py |
383 | @@ -44,8 +44,9 @@ def collect_console(instance, base_dir): |
384 | LOG.debug('getting console log for %s to %s', instance, logfile) |
385 | try: |
386 | data = instance.console_log() |
387 | - except NotImplementedError: |
388 | - data = b'instance.console_log: not implemented' |
389 | + except NotImplementedError as e: |
390 | + # args[0] is hacky, but thats all I see to get at the message. |
391 | + data = b'NotImplementedError:' + e.args[0].encode() |
392 | with open(logfile, "wb") as fp: |
393 | fp.write(data) |
394 | |
395 | diff --git a/tests/cloud_tests/platforms/lxd/instance.py b/tests/cloud_tests/platforms/lxd/instance.py |
396 | index d2d2a1f..0488da5 100644 |
397 | --- a/tests/cloud_tests/platforms/lxd/instance.py |
398 | +++ b/tests/cloud_tests/platforms/lxd/instance.py |
399 | @@ -6,7 +6,9 @@ import os |
400 | import shutil |
401 | from tempfile import mkdtemp |
402 | |
403 | -from cloudinit.util import subp, ProcessExecutionError |
404 | +from cloudinit.util import load_yaml, subp, ProcessExecutionError, which |
405 | +from tests.cloud_tests import LOG |
406 | +from tests.cloud_tests.util import PlatformError |
407 | |
408 | from ..instances import Instance |
409 | |
410 | @@ -15,6 +17,8 @@ class LXDInstance(Instance): |
411 | """LXD container backed instance.""" |
412 | |
413 | platform_name = "lxd" |
414 | + _console_log_method = None |
415 | + _console_log_file = None |
416 | |
417 | def __init__(self, platform, name, properties, config, features, |
418 | pylxd_container): |
419 | @@ -30,8 +34,8 @@ class LXDInstance(Instance): |
420 | super(LXDInstance, self).__init__( |
421 | platform, name, properties, config, features) |
422 | self.tmpd = mkdtemp(prefix="%s-%s" % (type(self).__name__, name)) |
423 | - self._setup_console_log() |
424 | self.name = name |
425 | + self._setup_console_log() |
426 | |
427 | @property |
428 | def pylxd_container(self): |
429 | @@ -39,21 +43,6 @@ class LXDInstance(Instance): |
430 | self._pylxd_container.sync() |
431 | return self._pylxd_container |
432 | |
433 | - def _setup_console_log(self): |
434 | - logf = os.path.join(self.tmpd, "console.log") |
435 | - |
436 | - # doing this ensures we can read it. Otherwise it ends up root:root. |
437 | - with open(logf, "w") as fp: |
438 | - fp.write("# %s\n" % self.name) |
439 | - |
440 | - cfg = "lxc.console.logfile=%s" % logf |
441 | - orig = self._pylxd_container.config.get('raw.lxc', "") |
442 | - if orig: |
443 | - orig += "\n" |
444 | - self._pylxd_container.config['raw.lxc'] = orig + cfg |
445 | - self._pylxd_container.save() |
446 | - self._console_log_file = logf |
447 | - |
448 | def _execute(self, command, stdin=None, env=None): |
449 | if env is None: |
450 | env = {} |
451 | @@ -97,19 +86,80 @@ class LXDInstance(Instance): |
452 | """ |
453 | self.pylxd_container.files.put(remote_path, data) |
454 | |
455 | + @property |
456 | + def console_log_method(self): |
457 | + if self._console_log_method is not None: |
458 | + return self._console_log_method |
459 | + |
460 | + client = which('lxc') |
461 | + if not client: |
462 | + raise PlatformError("No 'lxc' client.") |
463 | + |
464 | + elif _has_proper_console_support(): |
465 | + self._console_log_method = 'show-log' |
466 | + elif client.startswith("/snap"): |
467 | + self._console_log_method = 'logfile-snap' |
468 | + else: |
469 | + self._console_log_method = 'logfile-tmp' |
470 | + |
471 | + LOG.debug("Set console log method to %s", self._console_log_method) |
472 | + return self._console_log_method |
473 | + |
474 | + def _setup_console_log(self): |
475 | + method = self.console_log_method |
476 | + if not method.startswith("logfile-"): |
477 | + return |
478 | + |
479 | + if method == "logfile-snap": |
480 | + log_dir = "/var/snap/lxd/common/consoles" |
481 | + if not os.path.exists(log_dir): |
482 | + raise PlatformError( |
483 | + "Unable to log with snap lxc. Please run:\n" |
484 | + " sudo mkdir --mode=1777 -p %s" % log_dir) |
485 | + elif method == "logfile-tmp": |
486 | + log_dir = "/tmp" |
487 | + else: |
488 | + raise PlatformError( |
489 | + "Unexpected value for console method: %s" % method) |
490 | + |
491 | + # doing this ensures we can read it. Otherwise it ends up root:root. |
492 | + log_file = os.path.join(log_dir, self.name) |
493 | + with open(log_file, "w") as fp: |
494 | + fp.write("# %s\n" % self.name) |
495 | + |
496 | + cfg = "lxc.console.logfile=%s" % log_file |
497 | + orig = self._pylxd_container.config.get('raw.lxc', "") |
498 | + if orig: |
499 | + orig += "\n" |
500 | + self._pylxd_container.config['raw.lxc'] = orig + cfg |
501 | + self._pylxd_container.save() |
502 | + self._console_log_file = log_file |
503 | + |
504 | def console_log(self): |
505 | """Console log. |
506 | |
507 | - @return_value: bytes of this instance’s console |
508 | + @return_value: bytes of this instance's console |
509 | """ |
510 | - if not os.path.exists(self._console_log_file): |
511 | - raise NotImplementedError( |
512 | - "Console log '%s' does not exist. If this is a remote " |
513 | - "lxc, then this is really NotImplementedError. If it is " |
514 | - "A local lxc, then this is a RuntimeError." |
515 | - "https://github.com/lxc/lxd/issues/1129") |
516 | - with open(self._console_log_file, "rb") as fp: |
517 | - return fp.read() |
518 | + |
519 | + if self._console_log_file: |
520 | + if not os.path.exists(self._console_log_file): |
521 | + raise NotImplementedError( |
522 | + "Console log '%s' does not exist. If this is a remote " |
523 | + "lxc, then this is really NotImplementedError. If it is " |
524 | + "A local lxc, then this is a RuntimeError." |
525 | + "https://github.com/lxc/lxd/issues/1129") |
526 | + with open(self._console_log_file, "rb") as fp: |
527 | + return fp.read() |
528 | + |
529 | + try: |
530 | + stdout, stderr = subp( |
531 | + ['lxc', 'console', '--show-log', self.name], decode=False) |
532 | + return stdout |
533 | + except ProcessExecutionError as e: |
534 | + raise PlatformError( |
535 | + "console log", |
536 | + "Console log failed [%d]: stdout=%s stderr=%s" % ( |
537 | + e.exit_code, e.stdout, e.stderr)) |
538 | |
539 | def reboot(self, wait=True): |
540 | """Reboot instance.""" |
541 | @@ -146,7 +196,37 @@ class LXDInstance(Instance): |
542 | if self.platform.container_exists(self.name): |
543 | raise OSError('container {} was not properly removed' |
544 | .format(self.name)) |
545 | + if self._console_log_file and os.path.exists(self._console_log_file): |
546 | + os.unlink(self._console_log_file) |
547 | shutil.rmtree(self.tmpd) |
548 | super(LXDInstance, self).destroy() |
549 | |
550 | + |
551 | +def _has_proper_console_support(): |
552 | + stdout, _ = subp(['lxc', 'info']) |
553 | + info = load_yaml(stdout) |
554 | + reason = None |
555 | + if 'console' not in info.get('api_extensions', []): |
556 | + reason = "LXD server does not support console api extension" |
557 | + else: |
558 | + dver = info.get('environment', {}).get('driver_version', "") |
559 | + if dver.startswith("2.") or dver.startwith("1."): |
560 | + reason = "LXD Driver version not 3.x+ (%s)" % dver |
561 | + else: |
562 | + try: |
563 | + stdout, stderr = subp(['lxc', 'console', '--help'], |
564 | + decode=False) |
565 | + if not (b'console' in stdout and b'log' in stdout): |
566 | + reason = "no '--log' in lxc console --help" |
567 | + except ProcessExecutionError as e: |
568 | + reason = "no 'console' command in lxc client" |
569 | + |
570 | + if reason: |
571 | + LOG.debug("no console-support: %s", reason) |
572 | + return False |
573 | + else: |
574 | + LOG.debug("console-support looks good") |
575 | + return True |
576 | + |
577 | + |
578 | # vi: ts=4 expandtab |
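The new console_log_method property picks one of three strategies: 'show-log' when the LXD server advertises the console API extension on a 3.x driver, otherwise a logfile under /var/snap/lxd/common/consoles (snap client) or /tmp. A minimal standalone version of the resulting fallback in console_log(), using subprocess directly instead of cloudinit.util.subp and a hypothetical container name:

    import os
    import subprocess

    def console_log(name, log_file=None):
        """Return console bytes from a pre-configured logfile, else via `lxc console --show-log`."""
        if log_file and os.path.exists(log_file):
            with open(log_file, 'rb') as fp:
                return fp.read()
        # Fall back to the lxd 3.x-style client call used in the diff above.
        return subprocess.check_output(['lxc', 'console', '--show-log', name])

    # Requires an lxc client and a running container, e.g.:
    # print(console_log('my-container'))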
579 | diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py |
580 | index 0f7267b..dff8b1e 100644 |
581 | --- a/tests/unittests/test_datasource/test_ec2.py |
582 | +++ b/tests/unittests/test_datasource/test_ec2.py |
583 | @@ -2,6 +2,7 @@ |
584 | |
585 | import copy |
586 | import httpretty |
587 | +import json |
588 | import mock |
589 | |
590 | from cloudinit import helpers |
591 | @@ -9,6 +10,29 @@ from cloudinit.sources import DataSourceEc2 as ec2 |
592 | from cloudinit.tests import helpers as test_helpers |
593 | |
594 | |
595 | +DYNAMIC_METADATA = { |
596 | + "instance-identity": { |
597 | + "document": json.dumps({ |
598 | + "devpayProductCodes": None, |
599 | + "marketplaceProductCodes": ["1abc2defghijklm3nopqrs4tu"], |
600 | + "availabilityZone": "us-west-2b", |
601 | + "privateIp": "10.158.112.84", |
602 | + "version": "2017-09-30", |
603 | + "instanceId": "my-identity-id", |
604 | + "billingProducts": None, |
605 | + "instanceType": "t2.micro", |
606 | + "accountId": "123456789012", |
607 | + "imageId": "ami-5fb8c835", |
608 | + "pendingTime": "2016-11-19T16:32:11Z", |
609 | + "architecture": "x86_64", |
610 | + "kernelId": None, |
611 | + "ramdiskId": None, |
612 | + "region": "us-west-2" |
613 | + }) |
614 | + } |
615 | +} |
616 | + |
617 | + |
618 | # collected from api version 2016-09-02/ with |
619 | # python3 -c 'import json |
620 | # from cloudinit.ec2_utils import get_instance_metadata as gm |
621 | @@ -85,7 +109,7 @@ DEFAULT_METADATA = { |
622 | "public-keys": {"brickies": ["ssh-rsa AAAAB3Nz....w== brickies"]}, |
623 | "reservation-id": "r-01efbc9996bac1bd6", |
624 | "security-groups": "my-wide-open", |
625 | - "services": {"domain": "amazonaws.com", "partition": "aws"} |
626 | + "services": {"domain": "amazonaws.com", "partition": "aws"}, |
627 | } |
628 | |
629 | |
630 | @@ -341,6 +365,39 @@ class TestEc2(test_helpers.HttprettyTestCase): |
631 | self.assertEqual(expected, ds.network_config) |
632 | |
633 | @httpretty.activate |
634 | + def test_ec2_get_instance_id_refreshes_identity_on_upgrade(self): |
635 | + """get_instance-id gets DataSourceEc2Local.identity if not present. |
636 | + |
637 | + This handles an upgrade case where the old pickled datasource didn't |
638 | + set up self.identity, but 'systemctl cloud-init init' runs |
639 | + get_instance_id which traces on missing self.identity. lp:1748354. |
640 | + """ |
641 | + self.datasource = ec2.DataSourceEc2Local |
642 | + ds = self._setup_ds( |
643 | + platform_data=self.valid_platform_data, |
644 | + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, |
645 | + md=DEFAULT_METADATA) |
646 | + # Mock 404s on all versions except latest |
647 | + all_versions = ( |
648 | + [ds.min_metadata_version] + ds.extended_metadata_versions) |
649 | + for ver in all_versions[:-1]: |
650 | + register_mock_metaserver( |
651 | + 'http://169.254.169.254/{0}/meta-data/instance-id'.format(ver), |
652 | + None) |
653 | + ds.metadata_address = 'http://169.254.169.254' |
654 | + register_mock_metaserver( |
655 | + '{0}/{1}/meta-data/'.format(ds.metadata_address, all_versions[-1]), |
656 | + DEFAULT_METADATA) |
657 | + # Register dynamic/instance-identity document which we now read. |
658 | + register_mock_metaserver( |
659 | + '{0}/{1}/dynamic/'.format(ds.metadata_address, all_versions[-1]), |
660 | + DYNAMIC_METADATA) |
661 | + ds._cloud_platform = ec2.Platforms.AWS |
662 | + # Setup cached metadata on the Datasource |
663 | + ds.metadata = DEFAULT_METADATA |
664 | + self.assertEqual('my-identity-id', ds.get_instance_id()) |
665 | + |
666 | + @httpretty.activate |
667 | @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') |
668 | def test_valid_platform_with_strict_true(self, m_dhcp): |
669 | """Valid platform data should return true with strict_id true.""" |
670 | diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py |
671 | index 2a8e6ab..4c62c8b 100644 |
672 | --- a/tests/unittests/test_sshutil.py |
673 | +++ b/tests/unittests/test_sshutil.py |
674 | @@ -126,6 +126,48 @@ class TestAuthKeyLineParser(test_helpers.TestCase): |
675 | self.assertFalse(key.valid()) |
676 | |
677 | |
678 | +class TestUpdateAuthorizedKeys(test_helpers.TestCase): |
679 | + |
680 | + def test_new_keys_replace(self): |
681 | + """new entries with the same base64 should replace old.""" |
682 | + orig_entries = [ |
683 | + ' '.join(('rsa', VALID_CONTENT['rsa'], 'orig_comment1')), |
684 | + ' '.join(('dsa', VALID_CONTENT['dsa'], 'orig_comment2'))] |
685 | + |
686 | + new_entries = [ |
687 | + ' '.join(('rsa', VALID_CONTENT['rsa'], 'new_comment1')), ] |
688 | + |
689 | + expected = '\n'.join([new_entries[0], orig_entries[1]]) + '\n' |
690 | + |
691 | + parser = ssh_util.AuthKeyLineParser() |
692 | + found = ssh_util.update_authorized_keys( |
693 | + [parser.parse(p) for p in orig_entries], |
694 | + [parser.parse(p) for p in new_entries]) |
695 | + |
696 | + self.assertEqual(expected, found) |
697 | + |
698 | + def test_new_invalid_keys_are_ignored(self): |
699 | + """new entries that are invalid should be skipped.""" |
700 | + orig_entries = [ |
701 | + ' '.join(('rsa', VALID_CONTENT['rsa'], 'orig_comment1')), |
702 | + ' '.join(('dsa', VALID_CONTENT['dsa'], 'orig_comment2'))] |
703 | + |
704 | + new_entries = [ |
705 | + ' '.join(('rsa', VALID_CONTENT['rsa'], 'new_comment1')), |
706 | + 'xxx-invalid-thing1', |
707 | + 'xxx-invalid-blob2' |
708 | + ] |
709 | + |
710 | + expected = '\n'.join([new_entries[0], orig_entries[1]]) + '\n' |
711 | + |
712 | + parser = ssh_util.AuthKeyLineParser() |
713 | + found = ssh_util.update_authorized_keys( |
714 | + [parser.parse(p) for p in orig_entries], |
715 | + [parser.parse(p) for p in new_entries]) |
716 | + |
717 | + self.assertEqual(expected, found) |
718 | + |
719 | + |
720 | class TestParseSSHConfig(test_helpers.TestCase): |
721 | |
722 | def setUp(self): |