Merge ~chad.smith/cloud-init:ubuntu/xenial into cloud-init:ubuntu/xenial

Proposed by Chad Smith
Status: Merged
Merged at revision: 8730a9c4cc04f4cc7f21d22ec3474a10110c3f28
Proposed branch: ~chad.smith/cloud-init:ubuntu/xenial
Merge into: cloud-init:ubuntu/xenial
Diff against target: 722 lines (+396/-58)
11 files modified
cloudinit/cmd/status.py (+4/-1)
cloudinit/cmd/tests/test_status.py (+28/-7)
cloudinit/net/__init__.py (+46/-17)
cloudinit/net/tests/test_init.py (+91/-0)
cloudinit/sources/DataSourceEc2.py (+6/-0)
cloudinit/ssh_util.py (+1/-4)
debian/changelog (+11/-0)
tests/cloud_tests/collect.py (+3/-2)
tests/cloud_tests/platforms/lxd/instance.py (+106/-26)
tests/unittests/test_datasource/test_ec2.py (+58/-1)
tests/unittests/test_sshutil.py (+42/-0)
Reviewer: Scott Moser (status: Pending)
Review via email: mp+337515@code.launchpad.net

Description of the change

Sync to the tip of upstream, including the SRU fix, into xenial for release.


Preview Diff

diff --git a/cloudinit/cmd/status.py b/cloudinit/cmd/status.py
index d7aaee9..ea79a85 100644
--- a/cloudinit/cmd/status.py
+++ b/cloudinit/cmd/status.py
@@ -105,12 +105,12 @@ def _get_status_details(paths):
 
     Values are obtained from parsing paths.run_dir/status.json.
     """
-
     status = STATUS_ENABLED_NOT_RUN
     status_detail = ''
     status_v1 = {}
 
     status_file = os.path.join(paths.run_dir, 'status.json')
+    result_file = os.path.join(paths.run_dir, 'result.json')
 
     (is_disabled, reason) = _is_cloudinit_disabled(
         CLOUDINIT_DISABLED_FILE, paths)
@@ -118,12 +118,15 @@ def _get_status_details(paths):
         status = STATUS_DISABLED
         status_detail = reason
     if os.path.exists(status_file):
+        if not os.path.exists(result_file):
+            status = STATUS_RUNNING
         status_v1 = load_json(load_file(status_file)).get('v1', {})
     errors = []
     latest_event = 0
     for key, value in sorted(status_v1.items()):
         if key == 'stage':
             if value:
+                status = STATUS_RUNNING
                 status_detail = 'Running in stage: {0}'.format(value)
         elif key == 'datasource':
             status_detail = value
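
For illustration, the status resolution that the hunk above adds can be sketched standalone as below (the STATUS_* values and run_dir path are assumptions for the example, not cloud-init's API):

    import os

    # Illustrative constants; cloud-init defines its own STATUS_* values.
    STATUS_ENABLED_NOT_RUN = 'not run'
    STATUS_RUNNING = 'running'
    STATUS_DONE = 'done'

    def sketch_status(run_dir='/run/cloud-init'):
        # status.json present but result.json absent now means boot is still
        # in progress, so report running rather than done.
        status_file = os.path.join(run_dir, 'status.json')
        result_file = os.path.join(run_dir, 'result.json')
        if not os.path.exists(status_file):
            return STATUS_ENABLED_NOT_RUN
        if not os.path.exists(result_file):
            return STATUS_RUNNING
        return STATUS_DONE

    print(sketch_status())
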
diff --git a/cloudinit/cmd/tests/test_status.py b/cloudinit/cmd/tests/test_status.py
index a7c0a91..4a5a8c0 100644
--- a/cloudinit/cmd/tests/test_status.py
+++ b/cloudinit/cmd/tests/test_status.py
@@ -7,7 +7,7 @@ from textwrap import dedent
 
 from cloudinit.atomic_helper import write_json
 from cloudinit.cmd import status
-from cloudinit.util import write_file
+from cloudinit.util import ensure_file
 from cloudinit.tests.helpers import CiTestCase, wrap_and_call, mock
 
 mypaths = namedtuple('MyPaths', 'run_dir')
@@ -36,7 +36,7 @@ class TestStatus(CiTestCase):
 
     def test__is_cloudinit_disabled_false_on_sysvinit(self):
         '''When not in an environment using systemd, return False.'''
-        write_file(self.disable_file, '')  # Create the ignored disable file
+        ensure_file(self.disable_file)  # Create the ignored disable file
         (is_disabled, reason) = wrap_and_call(
             'cloudinit.cmd.status',
             {'uses_systemd': False},
@@ -47,7 +47,7 @@ class TestStatus(CiTestCase):
 
     def test__is_cloudinit_disabled_true_on_disable_file(self):
         '''When using systemd and disable_file is present return disabled.'''
-        write_file(self.disable_file, '')  # Create observed disable file
+        ensure_file(self.disable_file)  # Create observed disable file
         (is_disabled, reason) = wrap_and_call(
             'cloudinit.cmd.status',
             {'uses_systemd': True},
@@ -58,7 +58,7 @@ class TestStatus(CiTestCase):
 
     def test__is_cloudinit_disabled_false_on_kernel_cmdline_enable(self):
         '''Not disabled when using systemd and enabled via commandline.'''
-        write_file(self.disable_file, '')  # Create ignored disable file
+        ensure_file(self.disable_file)  # Create ignored disable file
         (is_disabled, reason) = wrap_and_call(
             'cloudinit.cmd.status',
             {'uses_systemd': True,
@@ -96,7 +96,7 @@ class TestStatus(CiTestCase):
     def test__is_cloudinit_disabled_false_when_enabled_in_systemd(self):
         '''Report enabled when systemd generator creates the enabled file.'''
         enabled_file = os.path.join(self.paths.run_dir, 'enabled')
-        write_file(enabled_file, '')
+        ensure_file(enabled_file)
         (is_disabled, reason) = wrap_and_call(
             'cloudinit.cmd.status',
             {'uses_systemd': True,
@@ -149,8 +149,25 @@ class TestStatus(CiTestCase):
         ''')
         self.assertEqual(expected, m_stdout.getvalue())
 
+    def test_status_returns_running_on_no_results_json(self):
+        '''Report running when status.json exists but result.json does not.'''
+        result_file = self.tmp_path('result.json', self.new_root)
+        write_json(self.status_file, {})
+        self.assertFalse(
+            os.path.exists(result_file), 'Unexpected result.json found')
+        cmdargs = myargs(long=False, wait=False)
+        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+            retcode = wrap_and_call(
+                'cloudinit.cmd.status',
+                {'_is_cloudinit_disabled': (False, ''),
+                 'Init': {'side_effect': self.init_class}},
+                status.handle_status_args, 'ignored', cmdargs)
+        self.assertEqual(0, retcode)
+        self.assertEqual('status: running\n', m_stdout.getvalue())
+
     def test_status_returns_running(self):
         '''Report running when status exists with an unfinished stage.'''
+        ensure_file(self.tmp_path('result.json', self.new_root))
         write_json(self.status_file,
                    {'v1': {'init': {'start': 1, 'finished': None}}})
         cmdargs = myargs(long=False, wait=False)
@@ -164,10 +181,11 @@ class TestStatus(CiTestCase):
         self.assertEqual('status: running\n', m_stdout.getvalue())
 
     def test_status_returns_done(self):
-        '''Reports done when stage is None and all stages are finished.'''
+        '''Report done results.json exists no stages are unfinished.'''
+        ensure_file(self.tmp_path('result.json', self.new_root))
         write_json(
             self.status_file,
-            {'v1': {'stage': None,
+            {'v1': {'stage': None,  # No current stage running
                     'datasource': (
                         'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]'
                         '[dsmode=net]'),
@@ -187,6 +205,7 @@ class TestStatus(CiTestCase):
 
     def test_status_returns_done_long(self):
         '''Long format of done status includes datasource info.'''
+        ensure_file(self.tmp_path('result.json', self.new_root))
         write_json(
             self.status_file,
             {'v1': {'stage': None,
@@ -303,6 +322,8 @@ class TestStatus(CiTestCase):
                 write_json(self.status_file, running_json)
             elif self.sleep_calls == 3:
                 write_json(self.status_file, done_json)
+                result_file = self.tmp_path('result.json', self.new_root)
+                ensure_file(result_file)
 
         cmdargs = myargs(long=False, wait=True)
         with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index c015e79..f69c0ef 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -274,23 +274,52 @@ def apply_network_config_names(netcfg, strict_present=True, strict_busy=True):
     renames are only attempted for interfaces of type 'physical'. It is
     expected that the network system will create other devices with the
     correct name in place."""
-    renames = []
-    for ent in netcfg.get('config', {}):
-        if ent.get('type') != 'physical':
-            continue
-        mac = ent.get('mac_address')
-        if not mac:
-            continue
-        name = ent.get('name')
-        driver = ent.get('params', {}).get('driver')
-        device_id = ent.get('params', {}).get('device_id')
-        if not driver:
-            driver = device_driver(name)
-        if not device_id:
-            device_id = device_devid(name)
-        renames.append([mac, name, driver, device_id])
-
-    return _rename_interfaces(renames)
+
+    def _version_1(netcfg):
+        renames = []
+        for ent in netcfg.get('config', {}):
+            if ent.get('type') != 'physical':
+                continue
+            mac = ent.get('mac_address')
+            if not mac:
+                continue
+            name = ent.get('name')
+            driver = ent.get('params', {}).get('driver')
+            device_id = ent.get('params', {}).get('device_id')
+            if not driver:
+                driver = device_driver(name)
+            if not device_id:
+                device_id = device_devid(name)
+            renames.append([mac, name, driver, device_id])
+        return renames
+
+    def _version_2(netcfg):
+        renames = []
+        for key, ent in netcfg.get('ethernets', {}).items():
+            # only rename if configured to do so
+            name = ent.get('set-name')
+            if not name:
+                continue
+            # cloud-init requires macaddress for renaming
+            mac = ent.get('match', {}).get('macaddress')
+            if not mac:
+                continue
+            driver = ent.get('match', {}).get('driver')
+            device_id = ent.get('match', {}).get('device_id')
+            if not driver:
+                driver = device_driver(name)
+            if not device_id:
+                device_id = device_devid(name)
+            renames.append([mac, name, driver, device_id])
+        return renames
+
+    if netcfg.get('version') == 1:
+        return _rename_interfaces(_version_1(netcfg))
+    elif netcfg.get('version') == 2:
+        return _rename_interfaces(_version_2(netcfg))
+
+    raise RuntimeError('Failed to apply network config names. Found bad'
+                       ' network config version: %s' % netcfg.get('version'))
 
 
 def interface_has_own_mac(ifname, strict=False):
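
As a rough illustration of the new version-2 handling, a netplan-style config carrying both set-name and match.macaddress (values below are hypothetical) is what now yields a rename candidate; entries missing either key are skipped:

    # Hypothetical netplan-style (version 2) network config as a dict.
    netcfg = {
        'version': 2,
        'ethernets': {
            'interface0': {
                'match': {'macaddress': '52:54:00:12:34:00'},
                'set-name': 'interface0',
                'addresses': ['10.0.2.15/24'],
            },
        },
    }

    # Mirrors the selection in _version_2() above: both 'set-name' and
    # 'match.macaddress' are required before a rename is attempted.
    renames = []
    for _key, ent in netcfg.get('ethernets', {}).items():
        name = ent.get('set-name')
        mac = ent.get('match', {}).get('macaddress')
        if name and mac:
            renames.append([mac, name])

    print(renames)  # [['52:54:00:12:34:00', 'interface0']]
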
diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py
index 8cb4114..276556e 100644
--- a/cloudinit/net/tests/test_init.py
+++ b/cloudinit/net/tests/test_init.py
@@ -4,6 +4,8 @@ import copy
 import errno
 import mock
 import os
+import textwrap
+import yaml
 
 import cloudinit.net as net
 from cloudinit.util import ensure_file, write_file, ProcessExecutionError
@@ -520,3 +522,92 @@ class TestEphemeralIPV4Network(CiTestCase):
         with net.EphemeralIPv4Network(**params):
             self.assertEqual(expected_setup_calls, m_subp.call_args_list)
         m_subp.assert_has_calls(expected_teardown_calls)
+
+
+class TestApplyNetworkCfgNames(CiTestCase):
+    V1_CONFIG = textwrap.dedent("""\
+        version: 1
+        config:
+            - type: physical
+              name: interface0
+              mac_address: "52:54:00:12:34:00"
+              subnets:
+                  - type: static
+                    address: 10.0.2.15
+                    netmask: 255.255.255.0
+                    gateway: 10.0.2.2
+        """)
+    V2_CONFIG = textwrap.dedent("""\
+        version: 2
+        ethernets:
+            interface0:
+                match:
+                    macaddress: "52:54:00:12:34:00"
+                addresses:
+                    - 10.0.2.15/24
+                gateway4: 10.0.2.2
+                set-name: interface0
+        """)
+
+    V2_CONFIG_NO_SETNAME = textwrap.dedent("""\
+        version: 2
+        ethernets:
+            interface0:
+                match:
+                    macaddress: "52:54:00:12:34:00"
+                addresses:
+                    - 10.0.2.15/24
+                gateway4: 10.0.2.2
+        """)
+
+    V2_CONFIG_NO_MAC = textwrap.dedent("""\
+        version: 2
+        ethernets:
+            interface0:
+                match:
+                    driver: virtio-net
+                addresses:
+                    - 10.0.2.15/24
+                gateway4: 10.0.2.2
+                set-name: interface0
+        """)
+
+    @mock.patch('cloudinit.net.device_devid')
+    @mock.patch('cloudinit.net.device_driver')
+    @mock.patch('cloudinit.net._rename_interfaces')
+    def test_apply_v1_renames(self, m_rename_interfaces, m_device_driver,
+                              m_device_devid):
+        m_device_driver.return_value = 'virtio_net'
+        m_device_devid.return_value = '0x15d8'
+
+        net.apply_network_config_names(yaml.load(self.V1_CONFIG))
+
+        call = ['52:54:00:12:34:00', 'interface0', 'virtio_net', '0x15d8']
+        m_rename_interfaces.assert_called_with([call])
+
+    @mock.patch('cloudinit.net.device_devid')
+    @mock.patch('cloudinit.net.device_driver')
+    @mock.patch('cloudinit.net._rename_interfaces')
+    def test_apply_v2_renames(self, m_rename_interfaces, m_device_driver,
+                              m_device_devid):
+        m_device_driver.return_value = 'virtio_net'
+        m_device_devid.return_value = '0x15d8'
+
+        net.apply_network_config_names(yaml.load(self.V2_CONFIG))
+
+        call = ['52:54:00:12:34:00', 'interface0', 'virtio_net', '0x15d8']
+        m_rename_interfaces.assert_called_with([call])
+
+    @mock.patch('cloudinit.net._rename_interfaces')
+    def test_apply_v2_renames_skips_without_setname(self, m_rename_interfaces):
+        net.apply_network_config_names(yaml.load(self.V2_CONFIG_NO_SETNAME))
+        m_rename_interfaces.assert_called_with([])
+
+    @mock.patch('cloudinit.net._rename_interfaces')
+    def test_apply_v2_renames_skips_without_mac(self, m_rename_interfaces):
+        net.apply_network_config_names(yaml.load(self.V2_CONFIG_NO_MAC))
+        m_rename_interfaces.assert_called_with([])
+
+    def test_apply_v2_renames_raises_runtime_error_on_unknown_version(self):
+        with self.assertRaises(RuntimeError):
+            net.apply_network_config_names(yaml.load("version: 3"))
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index e14553b..21e9ef8 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -147,6 +147,12 @@ class DataSourceEc2(sources.DataSource):
     def get_instance_id(self):
         if self.cloud_platform == Platforms.AWS:
             # Prefer the ID from the instance identity document, but fall back
+            if not getattr(self, 'identity', None):
+                # If re-using cached datasource, it's get_data run didn't
+                # setup self.identity. So we need to do that now.
+                api_version = self.get_metadata_api_version()
+                self.identity = ec2.get_instance_identity(
+                    api_version, self.metadata_address).get('document', {})
             return self.identity.get(
                 'instanceId', self.metadata['instance-id'])
         else:
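
A minimal sketch of the upgrade scenario this guards against (the class and helper below are illustrative stand-ins, not cloud-init's API): a datasource restored from a pre-upgrade pickle has no identity attribute, so it is now populated lazily on first use instead of raising AttributeError:

    class CachedSource(object):
        # Stand-in for a datasource unpickled from an old cache.

        def __init__(self, metadata):
            self.metadata = metadata  # old get_data never set self.identity

        def fetch_identity(self):
            # Stand-in for ec2.get_instance_identity(...)['document'].
            return {'instanceId': 'i-from-identity-doc'}

        def get_instance_id(self):
            if not getattr(self, 'identity', None):
                # Same lazy refresh as the hunk above.
                self.identity = self.fetch_identity()
            return self.identity.get(
                'instanceId', self.metadata['instance-id'])

    print(CachedSource({'instance-id': 'i-fallback'}).get_instance_id())
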
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
index b95b956..882517f 100644
--- a/cloudinit/ssh_util.py
+++ b/cloudinit/ssh_util.py
@@ -171,16 +171,13 @@ def parse_authorized_keys(fname):
 
 
 def update_authorized_keys(old_entries, keys):
-    to_add = list(keys)
-
+    to_add = list([k for k in keys if k.valid()])
     for i in range(0, len(old_entries)):
         ent = old_entries[i]
         if not ent.valid():
             continue
         # Replace those with the same base64
         for k in keys:
-            if not ent.valid():
-                continue
             if k.base64 == ent.base64:
                 # Replace it with our better one
                 ent = k
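
The effect of the ssh_util fix, as a small self-contained sketch (a namedtuple stands in for cloud-init's parsed key entries): invalid new keys are filtered before merging, so they can no longer end up in authorized_keys:

    from collections import namedtuple

    # Toy stand-in for a parsed authorized_keys entry.
    Key = namedtuple('Key', 'base64 ok')

    def merge_keys(old_entries, keys):
        # Mirrors the fixed logic: only valid new keys are candidates to add.
        to_add = [k for k in keys if k.ok]
        merged = []
        for ent in old_entries:
            if not ent.ok:
                continue
            for k in keys:
                if k.base64 == ent.base64:
                    ent = k  # replace the old entry with the newer one
                    if k in to_add:
                        to_add.remove(k)
            merged.append(ent)
        return merged + to_add

    old = [Key('AAAA', True)]
    new = [Key('AAAA', True), Key('broken', False)]
    print(merge_keys(old, new))  # only the valid replacement survives
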
diff --git a/debian/changelog b/debian/changelog
index 474c9ed..cddeb6c 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,14 @@
+cloud-init (17.2-35-gf576b2a2-0ubuntu1~16.04.1) xenial-proposed; urgency=medium
+
+  * New upstream snapshot. (LP: #1747059)
+    - tests: add support for logs with lxd from snap and future lxd 3.
+    - EC2: Fix get_instance_id called against cached datasource pickle.
+    - cli: fix cloud-init status to report running when before result.json
+    - net: accept network-config in netplan format for renaming interfaces
+    - Fix ssh keys validation in ssh_util [Tatiana Kholkina]
+
+ -- Chad Smith <chad.smith@canonical.com>  Mon, 12 Feb 2018 10:18:13 -0700
+
 cloud-init (17.2-30-gf7deaf15-0ubuntu1~16.04.1) xenial-proposed; urgency=medium
 
   * debian/patches/ds-identify-behavior-xenial.patch: refresh patch.
diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py
index 5ea88e5..d4f9135 100644
--- a/tests/cloud_tests/collect.py
+++ b/tests/cloud_tests/collect.py
@@ -44,8 +44,9 @@ def collect_console(instance, base_dir):
     LOG.debug('getting console log for %s to %s', instance, logfile)
     try:
         data = instance.console_log()
-    except NotImplementedError:
-        data = b'instance.console_log: not implemented'
+    except NotImplementedError as e:
+        # args[0] is hacky, but thats all I see to get at the message.
+        data = b'NotImplementedError:' + e.args[0].encode()
     with open(logfile, "wb") as fp:
         fp.write(data)
 
diff --git a/tests/cloud_tests/platforms/lxd/instance.py b/tests/cloud_tests/platforms/lxd/instance.py
index d2d2a1f..0488da5 100644
--- a/tests/cloud_tests/platforms/lxd/instance.py
+++ b/tests/cloud_tests/platforms/lxd/instance.py
@@ -6,7 +6,9 @@ import os
 import shutil
 from tempfile import mkdtemp
 
-from cloudinit.util import subp, ProcessExecutionError
+from cloudinit.util import load_yaml, subp, ProcessExecutionError, which
+from tests.cloud_tests import LOG
+from tests.cloud_tests.util import PlatformError
 
 from ..instances import Instance
 
@@ -15,6 +17,8 @@ class LXDInstance(Instance):
     """LXD container backed instance."""
 
     platform_name = "lxd"
+    _console_log_method = None
+    _console_log_file = None
 
     def __init__(self, platform, name, properties, config, features,
                  pylxd_container):
@@ -30,8 +34,8 @@
         super(LXDInstance, self).__init__(
             platform, name, properties, config, features)
         self.tmpd = mkdtemp(prefix="%s-%s" % (type(self).__name__, name))
-        self._setup_console_log()
         self.name = name
+        self._setup_console_log()
 
     @property
     def pylxd_container(self):
@@ -39,21 +43,6 @@
         self._pylxd_container.sync()
         return self._pylxd_container
 
-    def _setup_console_log(self):
-        logf = os.path.join(self.tmpd, "console.log")
-
-        # doing this ensures we can read it. Otherwise it ends up root:root.
-        with open(logf, "w") as fp:
-            fp.write("# %s\n" % self.name)
-
-        cfg = "lxc.console.logfile=%s" % logf
-        orig = self._pylxd_container.config.get('raw.lxc', "")
-        if orig:
-            orig += "\n"
-        self._pylxd_container.config['raw.lxc'] = orig + cfg
-        self._pylxd_container.save()
-        self._console_log_file = logf
-
     def _execute(self, command, stdin=None, env=None):
         if env is None:
             env = {}
@@ -97,19 +86,80 @@
         """
         self.pylxd_container.files.put(remote_path, data)
 
+    @property
+    def console_log_method(self):
+        if self._console_log_method is not None:
+            return self._console_log_method
+
+        client = which('lxc')
+        if not client:
+            raise PlatformError("No 'lxc' client.")
+
+        elif _has_proper_console_support():
+            self._console_log_method = 'show-log'
+        elif client.startswith("/snap"):
+            self._console_log_method = 'logfile-snap'
+        else:
+            self._console_log_method = 'logfile-tmp'
+
+        LOG.debug("Set console log method to %s", self._console_log_method)
+        return self._console_log_method
+
+    def _setup_console_log(self):
+        method = self.console_log_method
+        if not method.startswith("logfile-"):
+            return
+
+        if method == "logfile-snap":
+            log_dir = "/var/snap/lxd/common/consoles"
+            if not os.path.exists(log_dir):
+                raise PlatformError(
+                    "Unable to log with snap lxc. Please run:\n"
+                    " sudo mkdir --mode=1777 -p %s" % log_dir)
+        elif method == "logfile-tmp":
+            log_dir = "/tmp"
+        else:
+            raise PlatformError(
+                "Unexpected value for console method: %s" % method)
+
+        # doing this ensures we can read it. Otherwise it ends up root:root.
+        log_file = os.path.join(log_dir, self.name)
+        with open(log_file, "w") as fp:
+            fp.write("# %s\n" % self.name)
+
+        cfg = "lxc.console.logfile=%s" % log_file
+        orig = self._pylxd_container.config.get('raw.lxc', "")
+        if orig:
+            orig += "\n"
+        self._pylxd_container.config['raw.lxc'] = orig + cfg
+        self._pylxd_container.save()
+        self._console_log_file = log_file
+
     def console_log(self):
         """Console log.
 
-        @return_value: bytes of this instance’s console
+        @return_value: bytes of this instance's console
         """
-        if not os.path.exists(self._console_log_file):
-            raise NotImplementedError(
-                "Console log '%s' does not exist. If this is a remote "
-                "lxc, then this is really NotImplementedError. If it is "
-                "A local lxc, then this is a RuntimeError."
-                "https://github.com/lxc/lxd/issues/1129")
-        with open(self._console_log_file, "rb") as fp:
-            return fp.read()
+
+        if self._console_log_file:
+            if not os.path.exists(self._console_log_file):
+                raise NotImplementedError(
+                    "Console log '%s' does not exist. If this is a remote "
+                    "lxc, then this is really NotImplementedError. If it is "
+                    "A local lxc, then this is a RuntimeError."
+                    "https://github.com/lxc/lxd/issues/1129")
+            with open(self._console_log_file, "rb") as fp:
+                return fp.read()
+
+        try:
+            stdout, stderr = subp(
+                ['lxc', 'console', '--show-log', self.name], decode=False)
+            return stdout
+        except ProcessExecutionError as e:
+            raise PlatformError(
+                "console log",
+                "Console log failed [%d]: stdout=%s stderr=%s" % (
+                    e.exit_code, e.stdout, e.stderr))
 
     def reboot(self, wait=True):
         """Reboot instance."""
@@ -146,7 +196,37 @@
         if self.platform.container_exists(self.name):
             raise OSError('container {} was not properly removed'
                           .format(self.name))
+        if self._console_log_file and os.path.exists(self._console_log_file):
+            os.unlink(self._console_log_file)
         shutil.rmtree(self.tmpd)
         super(LXDInstance, self).destroy()
 
+
+def _has_proper_console_support():
+    stdout, _ = subp(['lxc', 'info'])
+    info = load_yaml(stdout)
+    reason = None
+    if 'console' not in info.get('api_extensions', []):
+        reason = "LXD server does not support console api extension"
+    else:
+        dver = info.get('environment', {}).get('driver_version', "")
+        if dver.startswith("2.") or dver.startwith("1."):
+            reason = "LXD Driver version not 3.x+ (%s)" % dver
+        else:
+            try:
+                stdout, stderr = subp(['lxc', 'console', '--help'],
+                                      decode=False)
+                if not (b'console' in stdout and b'log' in stdout):
+                    reason = "no '--log' in lxc console --help"
+            except ProcessExecutionError as e:
+                reason = "no 'console' command in lxc client"
+
+    if reason:
+        LOG.debug("no console-support: %s", reason)
+        return False
+    else:
+        LOG.debug("console-support looks good")
+        return True
+
+
 # vi: ts=4 expandtab
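
For reference, the console-log selection added above boils down to roughly this decision order (a hedged sketch; the client path and capability flag are assumptions for the example):

    def pick_console_log_method(client_path, has_console_api):
        # Mirrors console_log_method above.
        if not client_path:
            raise RuntimeError("No 'lxc' client.")
        if has_console_api:
            return 'show-log'      # use 'lxc console --show-log <name>'
        if client_path.startswith('/snap'):
            return 'logfile-snap'  # logfile under /var/snap/lxd/common/consoles
        return 'logfile-tmp'       # fall back to a logfile in /tmp

    print(pick_console_log_method('/snap/bin/lxc', has_console_api=False))
    # logfile-snap
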
diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py
index 0f7267b..dff8b1e 100644
--- a/tests/unittests/test_datasource/test_ec2.py
+++ b/tests/unittests/test_datasource/test_ec2.py
@@ -2,6 +2,7 @@
 
 import copy
 import httpretty
+import json
 import mock
 
 from cloudinit import helpers
@@ -9,6 +10,29 @@ from cloudinit.sources import DataSourceEc2 as ec2
 from cloudinit.tests import helpers as test_helpers
 
 
+DYNAMIC_METADATA = {
+    "instance-identity": {
+        "document": json.dumps({
+            "devpayProductCodes": None,
+            "marketplaceProductCodes": ["1abc2defghijklm3nopqrs4tu"],
+            "availabilityZone": "us-west-2b",
+            "privateIp": "10.158.112.84",
+            "version": "2017-09-30",
+            "instanceId": "my-identity-id",
+            "billingProducts": None,
+            "instanceType": "t2.micro",
+            "accountId": "123456789012",
+            "imageId": "ami-5fb8c835",
+            "pendingTime": "2016-11-19T16:32:11Z",
+            "architecture": "x86_64",
+            "kernelId": None,
+            "ramdiskId": None,
+            "region": "us-west-2"
+        })
+    }
+}
+
+
 # collected from api version 2016-09-02/ with
 # python3 -c 'import json
 # from cloudinit.ec2_utils import get_instance_metadata as gm
@@ -85,7 +109,7 @@ DEFAULT_METADATA = {
     "public-keys": {"brickies": ["ssh-rsa AAAAB3Nz....w== brickies"]},
     "reservation-id": "r-01efbc9996bac1bd6",
     "security-groups": "my-wide-open",
-    "services": {"domain": "amazonaws.com", "partition": "aws"}
+    "services": {"domain": "amazonaws.com", "partition": "aws"},
 }
 
 
@@ -341,6 +365,39 @@ class TestEc2(test_helpers.HttprettyTestCase):
         self.assertEqual(expected, ds.network_config)
 
     @httpretty.activate
+    def test_ec2_get_instance_id_refreshes_identity_on_upgrade(self):
+        """get_instance-id gets DataSourceEc2Local.identity if not present.
+
+        This handles an upgrade case where the old pickled datasource didn't
+        set up self.identity, but 'systemctl cloud-init init' runs
+        get_instance_id which traces on missing self.identity. lp:1748354.
+        """
+        self.datasource = ec2.DataSourceEc2Local
+        ds = self._setup_ds(
+            platform_data=self.valid_platform_data,
+            sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
+            md=DEFAULT_METADATA)
+        # Mock 404s on all versions except latest
+        all_versions = (
+            [ds.min_metadata_version] + ds.extended_metadata_versions)
+        for ver in all_versions[:-1]:
+            register_mock_metaserver(
+                'http://169.254.169.254/{0}/meta-data/instance-id'.format(ver),
+                None)
+        ds.metadata_address = 'http://169.254.169.254'
+        register_mock_metaserver(
+            '{0}/{1}/meta-data/'.format(ds.metadata_address, all_versions[-1]),
+            DEFAULT_METADATA)
+        # Register dynamic/instance-identity document which we now read.
+        register_mock_metaserver(
+            '{0}/{1}/dynamic/'.format(ds.metadata_address, all_versions[-1]),
+            DYNAMIC_METADATA)
+        ds._cloud_platform = ec2.Platforms.AWS
+        # Setup cached metadata on the Datasource
+        ds.metadata = DEFAULT_METADATA
+        self.assertEqual('my-identity-id', ds.get_instance_id())
+
+    @httpretty.activate
     @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
     def test_valid_platform_with_strict_true(self, m_dhcp):
         """Valid platform data should return true with strict_id true."""
diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py
index 2a8e6ab..4c62c8b 100644
--- a/tests/unittests/test_sshutil.py
+++ b/tests/unittests/test_sshutil.py
@@ -126,6 +126,48 @@ class TestAuthKeyLineParser(test_helpers.TestCase):
         self.assertFalse(key.valid())
 
 
+class TestUpdateAuthorizedKeys(test_helpers.TestCase):
+
+    def test_new_keys_replace(self):
+        """new entries with the same base64 should replace old."""
+        orig_entries = [
+            ' '.join(('rsa', VALID_CONTENT['rsa'], 'orig_comment1')),
+            ' '.join(('dsa', VALID_CONTENT['dsa'], 'orig_comment2'))]
+
+        new_entries = [
+            ' '.join(('rsa', VALID_CONTENT['rsa'], 'new_comment1')), ]
+
+        expected = '\n'.join([new_entries[0], orig_entries[1]]) + '\n'
+
+        parser = ssh_util.AuthKeyLineParser()
+        found = ssh_util.update_authorized_keys(
+            [parser.parse(p) for p in orig_entries],
+            [parser.parse(p) for p in new_entries])
+
+        self.assertEqual(expected, found)
+
+    def test_new_invalid_keys_are_ignored(self):
+        """new entries that are invalid should be skipped."""
+        orig_entries = [
+            ' '.join(('rsa', VALID_CONTENT['rsa'], 'orig_comment1')),
+            ' '.join(('dsa', VALID_CONTENT['dsa'], 'orig_comment2'))]
+
+        new_entries = [
+            ' '.join(('rsa', VALID_CONTENT['rsa'], 'new_comment1')),
+            'xxx-invalid-thing1',
+            'xxx-invalid-blob2'
+        ]
+
+        expected = '\n'.join([new_entries[0], orig_entries[1]]) + '\n'
+
+        parser = ssh_util.AuthKeyLineParser()
+        found = ssh_util.update_authorized_keys(
+            [parser.parse(p) for p in orig_entries],
+            [parser.parse(p) for p in new_entries])
+
+        self.assertEqual(expected, found)
+
+
 class TestParseSSHConfig(test_helpers.TestCase):
 
     def setUp(self):
