Merge ~chad.smith/cloud-init:ubuntu/artful into cloud-init:ubuntu/artful
- Git
- lp:~chad.smith/cloud-init
- ubuntu/artful
- Merge into ubuntu/artful
Proposed by
Chad Smith
Status: | Merged | ||||||||||||||||
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Merged at revision: | 9ddf54f3ca597b416d00ebb43450159013fcd3e8 | ||||||||||||||||
Proposed branch: | ~chad.smith/cloud-init:ubuntu/artful | ||||||||||||||||
Merge into: | cloud-init:ubuntu/artful | ||||||||||||||||
Diff against target: |
722 lines (+396/-58) 11 files modified
cloudinit/cmd/status.py (+4/-1) cloudinit/cmd/tests/test_status.py (+28/-7) cloudinit/net/__init__.py (+46/-17) cloudinit/net/tests/test_init.py (+91/-0) cloudinit/sources/DataSourceEc2.py (+6/-0) cloudinit/ssh_util.py (+1/-4) debian/changelog (+11/-0) tests/cloud_tests/collect.py (+3/-2) tests/cloud_tests/platforms/lxd/instance.py (+106/-26) tests/unittests/test_datasource/test_ec2.py (+58/-1) tests/unittests/test_sshutil.py (+42/-0) |
||||||||||||||||
Related bugs: |
|
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Scott Moser | Pending | ||
Review via email:
|
Commit message
Description of the change
Sync tip of master including SRU fix for release into artful.
To post a comment you must log in.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | diff --git a/cloudinit/cmd/status.py b/cloudinit/cmd/status.py | |||
2 | index d7aaee9..ea79a85 100644 | |||
3 | --- a/cloudinit/cmd/status.py | |||
4 | +++ b/cloudinit/cmd/status.py | |||
5 | @@ -105,12 +105,12 @@ def _get_status_details(paths): | |||
6 | 105 | 105 | ||
7 | 106 | Values are obtained from parsing paths.run_dir/status.json. | 106 | Values are obtained from parsing paths.run_dir/status.json. |
8 | 107 | """ | 107 | """ |
9 | 108 | |||
10 | 109 | status = STATUS_ENABLED_NOT_RUN | 108 | status = STATUS_ENABLED_NOT_RUN |
11 | 110 | status_detail = '' | 109 | status_detail = '' |
12 | 111 | status_v1 = {} | 110 | status_v1 = {} |
13 | 112 | 111 | ||
14 | 113 | status_file = os.path.join(paths.run_dir, 'status.json') | 112 | status_file = os.path.join(paths.run_dir, 'status.json') |
15 | 113 | result_file = os.path.join(paths.run_dir, 'result.json') | ||
16 | 114 | 114 | ||
17 | 115 | (is_disabled, reason) = _is_cloudinit_disabled( | 115 | (is_disabled, reason) = _is_cloudinit_disabled( |
18 | 116 | CLOUDINIT_DISABLED_FILE, paths) | 116 | CLOUDINIT_DISABLED_FILE, paths) |
19 | @@ -118,12 +118,15 @@ def _get_status_details(paths): | |||
20 | 118 | status = STATUS_DISABLED | 118 | status = STATUS_DISABLED |
21 | 119 | status_detail = reason | 119 | status_detail = reason |
22 | 120 | if os.path.exists(status_file): | 120 | if os.path.exists(status_file): |
23 | 121 | if not os.path.exists(result_file): | ||
24 | 122 | status = STATUS_RUNNING | ||
25 | 121 | status_v1 = load_json(load_file(status_file)).get('v1', {}) | 123 | status_v1 = load_json(load_file(status_file)).get('v1', {}) |
26 | 122 | errors = [] | 124 | errors = [] |
27 | 123 | latest_event = 0 | 125 | latest_event = 0 |
28 | 124 | for key, value in sorted(status_v1.items()): | 126 | for key, value in sorted(status_v1.items()): |
29 | 125 | if key == 'stage': | 127 | if key == 'stage': |
30 | 126 | if value: | 128 | if value: |
31 | 129 | status = STATUS_RUNNING | ||
32 | 127 | status_detail = 'Running in stage: {0}'.format(value) | 130 | status_detail = 'Running in stage: {0}'.format(value) |
33 | 128 | elif key == 'datasource': | 131 | elif key == 'datasource': |
34 | 129 | status_detail = value | 132 | status_detail = value |
35 | diff --git a/cloudinit/cmd/tests/test_status.py b/cloudinit/cmd/tests/test_status.py | |||
36 | index a7c0a91..4a5a8c0 100644 | |||
37 | --- a/cloudinit/cmd/tests/test_status.py | |||
38 | +++ b/cloudinit/cmd/tests/test_status.py | |||
39 | @@ -7,7 +7,7 @@ from textwrap import dedent | |||
40 | 7 | 7 | ||
41 | 8 | from cloudinit.atomic_helper import write_json | 8 | from cloudinit.atomic_helper import write_json |
42 | 9 | from cloudinit.cmd import status | 9 | from cloudinit.cmd import status |
44 | 10 | from cloudinit.util import write_file | 10 | from cloudinit.util import ensure_file |
45 | 11 | from cloudinit.tests.helpers import CiTestCase, wrap_and_call, mock | 11 | from cloudinit.tests.helpers import CiTestCase, wrap_and_call, mock |
46 | 12 | 12 | ||
47 | 13 | mypaths = namedtuple('MyPaths', 'run_dir') | 13 | mypaths = namedtuple('MyPaths', 'run_dir') |
48 | @@ -36,7 +36,7 @@ class TestStatus(CiTestCase): | |||
49 | 36 | 36 | ||
50 | 37 | def test__is_cloudinit_disabled_false_on_sysvinit(self): | 37 | def test__is_cloudinit_disabled_false_on_sysvinit(self): |
51 | 38 | '''When not in an environment using systemd, return False.''' | 38 | '''When not in an environment using systemd, return False.''' |
53 | 39 | write_file(self.disable_file, '') # Create the ignored disable file | 39 | ensure_file(self.disable_file) # Create the ignored disable file |
54 | 40 | (is_disabled, reason) = wrap_and_call( | 40 | (is_disabled, reason) = wrap_and_call( |
55 | 41 | 'cloudinit.cmd.status', | 41 | 'cloudinit.cmd.status', |
56 | 42 | {'uses_systemd': False}, | 42 | {'uses_systemd': False}, |
57 | @@ -47,7 +47,7 @@ class TestStatus(CiTestCase): | |||
58 | 47 | 47 | ||
59 | 48 | def test__is_cloudinit_disabled_true_on_disable_file(self): | 48 | def test__is_cloudinit_disabled_true_on_disable_file(self): |
60 | 49 | '''When using systemd and disable_file is present return disabled.''' | 49 | '''When using systemd and disable_file is present return disabled.''' |
62 | 50 | write_file(self.disable_file, '') # Create observed disable file | 50 | ensure_file(self.disable_file) # Create observed disable file |
63 | 51 | (is_disabled, reason) = wrap_and_call( | 51 | (is_disabled, reason) = wrap_and_call( |
64 | 52 | 'cloudinit.cmd.status', | 52 | 'cloudinit.cmd.status', |
65 | 53 | {'uses_systemd': True}, | 53 | {'uses_systemd': True}, |
66 | @@ -58,7 +58,7 @@ class TestStatus(CiTestCase): | |||
67 | 58 | 58 | ||
68 | 59 | def test__is_cloudinit_disabled_false_on_kernel_cmdline_enable(self): | 59 | def test__is_cloudinit_disabled_false_on_kernel_cmdline_enable(self): |
69 | 60 | '''Not disabled when using systemd and enabled via commandline.''' | 60 | '''Not disabled when using systemd and enabled via commandline.''' |
71 | 61 | write_file(self.disable_file, '') # Create ignored disable file | 61 | ensure_file(self.disable_file) # Create ignored disable file |
72 | 62 | (is_disabled, reason) = wrap_and_call( | 62 | (is_disabled, reason) = wrap_and_call( |
73 | 63 | 'cloudinit.cmd.status', | 63 | 'cloudinit.cmd.status', |
74 | 64 | {'uses_systemd': True, | 64 | {'uses_systemd': True, |
75 | @@ -96,7 +96,7 @@ class TestStatus(CiTestCase): | |||
76 | 96 | def test__is_cloudinit_disabled_false_when_enabled_in_systemd(self): | 96 | def test__is_cloudinit_disabled_false_when_enabled_in_systemd(self): |
77 | 97 | '''Report enabled when systemd generator creates the enabled file.''' | 97 | '''Report enabled when systemd generator creates the enabled file.''' |
78 | 98 | enabled_file = os.path.join(self.paths.run_dir, 'enabled') | 98 | enabled_file = os.path.join(self.paths.run_dir, 'enabled') |
80 | 99 | write_file(enabled_file, '') | 99 | ensure_file(enabled_file) |
81 | 100 | (is_disabled, reason) = wrap_and_call( | 100 | (is_disabled, reason) = wrap_and_call( |
82 | 101 | 'cloudinit.cmd.status', | 101 | 'cloudinit.cmd.status', |
83 | 102 | {'uses_systemd': True, | 102 | {'uses_systemd': True, |
84 | @@ -149,8 +149,25 @@ class TestStatus(CiTestCase): | |||
85 | 149 | ''') | 149 | ''') |
86 | 150 | self.assertEqual(expected, m_stdout.getvalue()) | 150 | self.assertEqual(expected, m_stdout.getvalue()) |
87 | 151 | 151 | ||
88 | 152 | def test_status_returns_running_on_no_results_json(self): | ||
89 | 153 | '''Report running when status.json exists but result.json does not.''' | ||
90 | 154 | result_file = self.tmp_path('result.json', self.new_root) | ||
91 | 155 | write_json(self.status_file, {}) | ||
92 | 156 | self.assertFalse( | ||
93 | 157 | os.path.exists(result_file), 'Unexpected result.json found') | ||
94 | 158 | cmdargs = myargs(long=False, wait=False) | ||
95 | 159 | with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: | ||
96 | 160 | retcode = wrap_and_call( | ||
97 | 161 | 'cloudinit.cmd.status', | ||
98 | 162 | {'_is_cloudinit_disabled': (False, ''), | ||
99 | 163 | 'Init': {'side_effect': self.init_class}}, | ||
100 | 164 | status.handle_status_args, 'ignored', cmdargs) | ||
101 | 165 | self.assertEqual(0, retcode) | ||
102 | 166 | self.assertEqual('status: running\n', m_stdout.getvalue()) | ||
103 | 167 | |||
104 | 152 | def test_status_returns_running(self): | 168 | def test_status_returns_running(self): |
105 | 153 | '''Report running when status exists with an unfinished stage.''' | 169 | '''Report running when status exists with an unfinished stage.''' |
106 | 170 | ensure_file(self.tmp_path('result.json', self.new_root)) | ||
107 | 154 | write_json(self.status_file, | 171 | write_json(self.status_file, |
108 | 155 | {'v1': {'init': {'start': 1, 'finished': None}}}) | 172 | {'v1': {'init': {'start': 1, 'finished': None}}}) |
109 | 156 | cmdargs = myargs(long=False, wait=False) | 173 | cmdargs = myargs(long=False, wait=False) |
110 | @@ -164,10 +181,11 @@ class TestStatus(CiTestCase): | |||
111 | 164 | self.assertEqual('status: running\n', m_stdout.getvalue()) | 181 | self.assertEqual('status: running\n', m_stdout.getvalue()) |
112 | 165 | 182 | ||
113 | 166 | def test_status_returns_done(self): | 183 | def test_status_returns_done(self): |
115 | 167 | '''Reports done when stage is None and all stages are finished.''' | 184 | '''Report done when results.json exists and no stages are unfinished.''' |
116 | 185 | ensure_file(self.tmp_path('result.json', self.new_root)) | ||
117 | 168 | write_json( | 186 | write_json( |
118 | 169 | self.status_file, | 187 | self.status_file, |
120 | 170 | {'v1': {'stage': None, | 188 | {'v1': {'stage': None, # No current stage running |
121 | 171 | 'datasource': ( | 189 | 'datasource': ( |
122 | 172 | 'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]' | 190 | 'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]' |
123 | 173 | '[dsmode=net]'), | 191 | '[dsmode=net]'), |
124 | @@ -187,6 +205,7 @@ class TestStatus(CiTestCase): | |||
125 | 187 | 205 | ||
126 | 188 | def test_status_returns_done_long(self): | 206 | def test_status_returns_done_long(self): |
127 | 189 | '''Long format of done status includes datasource info.''' | 207 | '''Long format of done status includes datasource info.''' |
128 | 208 | ensure_file(self.tmp_path('result.json', self.new_root)) | ||
129 | 190 | write_json( | 209 | write_json( |
130 | 191 | self.status_file, | 210 | self.status_file, |
131 | 192 | {'v1': {'stage': None, | 211 | {'v1': {'stage': None, |
132 | @@ -303,6 +322,8 @@ class TestStatus(CiTestCase): | |||
133 | 303 | write_json(self.status_file, running_json) | 322 | write_json(self.status_file, running_json) |
134 | 304 | elif self.sleep_calls == 3: | 323 | elif self.sleep_calls == 3: |
135 | 305 | write_json(self.status_file, done_json) | 324 | write_json(self.status_file, done_json) |
136 | 325 | result_file = self.tmp_path('result.json', self.new_root) | ||
137 | 326 | ensure_file(result_file) | ||
138 | 306 | 327 | ||
139 | 307 | cmdargs = myargs(long=False, wait=True) | 328 | cmdargs = myargs(long=False, wait=True) |
140 | 308 | with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: | 329 | with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: |
141 | diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py | |||
142 | index c015e79..f69c0ef 100644 | |||
143 | --- a/cloudinit/net/__init__.py | |||
144 | +++ b/cloudinit/net/__init__.py | |||
145 | @@ -274,23 +274,52 @@ def apply_network_config_names(netcfg, strict_present=True, strict_busy=True): | |||
146 | 274 | renames are only attempted for interfaces of type 'physical'. It is | 274 | renames are only attempted for interfaces of type 'physical'. It is |
147 | 275 | expected that the network system will create other devices with the | 275 | expected that the network system will create other devices with the |
148 | 276 | correct name in place.""" | 276 | correct name in place.""" |
166 | 277 | renames = [] | 277 | |
167 | 278 | for ent in netcfg.get('config', {}): | 278 | def _version_1(netcfg): |
168 | 279 | if ent.get('type') != 'physical': | 279 | renames = [] |
169 | 280 | continue | 280 | for ent in netcfg.get('config', {}): |
170 | 281 | mac = ent.get('mac_address') | 281 | if ent.get('type') != 'physical': |
171 | 282 | if not mac: | 282 | continue |
172 | 283 | continue | 283 | mac = ent.get('mac_address') |
173 | 284 | name = ent.get('name') | 284 | if not mac: |
174 | 285 | driver = ent.get('params', {}).get('driver') | 285 | continue |
175 | 286 | device_id = ent.get('params', {}).get('device_id') | 286 | name = ent.get('name') |
176 | 287 | if not driver: | 287 | driver = ent.get('params', {}).get('driver') |
177 | 288 | driver = device_driver(name) | 288 | device_id = ent.get('params', {}).get('device_id') |
178 | 289 | if not device_id: | 289 | if not driver: |
179 | 290 | device_id = device_devid(name) | 290 | driver = device_driver(name) |
180 | 291 | renames.append([mac, name, driver, device_id]) | 291 | if not device_id: |
181 | 292 | 292 | device_id = device_devid(name) | |
182 | 293 | return _rename_interfaces(renames) | 293 | renames.append([mac, name, driver, device_id]) |
183 | 294 | return renames | ||
184 | 295 | |||
185 | 296 | def _version_2(netcfg): | ||
186 | 297 | renames = [] | ||
187 | 298 | for key, ent in netcfg.get('ethernets', {}).items(): | ||
188 | 299 | # only rename if configured to do so | ||
189 | 300 | name = ent.get('set-name') | ||
190 | 301 | if not name: | ||
191 | 302 | continue | ||
192 | 303 | # cloud-init requires macaddress for renaming | ||
193 | 304 | mac = ent.get('match', {}).get('macaddress') | ||
194 | 305 | if not mac: | ||
195 | 306 | continue | ||
196 | 307 | driver = ent.get('match', {}).get('driver') | ||
197 | 308 | device_id = ent.get('match', {}).get('device_id') | ||
198 | 309 | if not driver: | ||
199 | 310 | driver = device_driver(name) | ||
200 | 311 | if not device_id: | ||
201 | 312 | device_id = device_devid(name) | ||
202 | 313 | renames.append([mac, name, driver, device_id]) | ||
203 | 314 | return renames | ||
204 | 315 | |||
205 | 316 | if netcfg.get('version') == 1: | ||
206 | 317 | return _rename_interfaces(_version_1(netcfg)) | ||
207 | 318 | elif netcfg.get('version') == 2: | ||
208 | 319 | return _rename_interfaces(_version_2(netcfg)) | ||
209 | 320 | |||
210 | 321 | raise RuntimeError('Failed to apply network config names. Found bad' | ||
211 | 322 | ' network config version: %s' % netcfg.get('version')) | ||
212 | 294 | 323 | ||
213 | 295 | 324 | ||
214 | 296 | def interface_has_own_mac(ifname, strict=False): | 325 | def interface_has_own_mac(ifname, strict=False): |
215 | diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py | |||
216 | index 8cb4114..276556e 100644 | |||
217 | --- a/cloudinit/net/tests/test_init.py | |||
218 | +++ b/cloudinit/net/tests/test_init.py | |||
219 | @@ -4,6 +4,8 @@ import copy | |||
220 | 4 | import errno | 4 | import errno |
221 | 5 | import mock | 5 | import mock |
222 | 6 | import os | 6 | import os |
223 | 7 | import textwrap | ||
224 | 8 | import yaml | ||
225 | 7 | 9 | ||
226 | 8 | import cloudinit.net as net | 10 | import cloudinit.net as net |
227 | 9 | from cloudinit.util import ensure_file, write_file, ProcessExecutionError | 11 | from cloudinit.util import ensure_file, write_file, ProcessExecutionError |
228 | @@ -520,3 +522,92 @@ class TestEphemeralIPV4Network(CiTestCase): | |||
229 | 520 | with net.EphemeralIPv4Network(**params): | 522 | with net.EphemeralIPv4Network(**params): |
230 | 521 | self.assertEqual(expected_setup_calls, m_subp.call_args_list) | 523 | self.assertEqual(expected_setup_calls, m_subp.call_args_list) |
231 | 522 | m_subp.assert_has_calls(expected_teardown_calls) | 524 | m_subp.assert_has_calls(expected_teardown_calls) |
232 | 525 | |||
233 | 526 | |||
234 | 527 | class TestApplyNetworkCfgNames(CiTestCase): | ||
235 | 528 | V1_CONFIG = textwrap.dedent("""\ | ||
236 | 529 | version: 1 | ||
237 | 530 | config: | ||
238 | 531 | - type: physical | ||
239 | 532 | name: interface0 | ||
240 | 533 | mac_address: "52:54:00:12:34:00" | ||
241 | 534 | subnets: | ||
242 | 535 | - type: static | ||
243 | 536 | address: 10.0.2.15 | ||
244 | 537 | netmask: 255.255.255.0 | ||
245 | 538 | gateway: 10.0.2.2 | ||
246 | 539 | """) | ||
247 | 540 | V2_CONFIG = textwrap.dedent("""\ | ||
248 | 541 | version: 2 | ||
249 | 542 | ethernets: | ||
250 | 543 | interface0: | ||
251 | 544 | match: | ||
252 | 545 | macaddress: "52:54:00:12:34:00" | ||
253 | 546 | addresses: | ||
254 | 547 | - 10.0.2.15/24 | ||
255 | 548 | gateway4: 10.0.2.2 | ||
256 | 549 | set-name: interface0 | ||
257 | 550 | """) | ||
258 | 551 | |||
259 | 552 | V2_CONFIG_NO_SETNAME = textwrap.dedent("""\ | ||
260 | 553 | version: 2 | ||
261 | 554 | ethernets: | ||
262 | 555 | interface0: | ||
263 | 556 | match: | ||
264 | 557 | macaddress: "52:54:00:12:34:00" | ||
265 | 558 | addresses: | ||
266 | 559 | - 10.0.2.15/24 | ||
267 | 560 | gateway4: 10.0.2.2 | ||
268 | 561 | """) | ||
269 | 562 | |||
270 | 563 | V2_CONFIG_NO_MAC = textwrap.dedent("""\ | ||
271 | 564 | version: 2 | ||
272 | 565 | ethernets: | ||
273 | 566 | interface0: | ||
274 | 567 | match: | ||
275 | 568 | driver: virtio-net | ||
276 | 569 | addresses: | ||
277 | 570 | - 10.0.2.15/24 | ||
278 | 571 | gateway4: 10.0.2.2 | ||
279 | 572 | set-name: interface0 | ||
280 | 573 | """) | ||
281 | 574 | |||
282 | 575 | @mock.patch('cloudinit.net.device_devid') | ||
283 | 576 | @mock.patch('cloudinit.net.device_driver') | ||
284 | 577 | @mock.patch('cloudinit.net._rename_interfaces') | ||
285 | 578 | def test_apply_v1_renames(self, m_rename_interfaces, m_device_driver, | ||
286 | 579 | m_device_devid): | ||
287 | 580 | m_device_driver.return_value = 'virtio_net' | ||
288 | 581 | m_device_devid.return_value = '0x15d8' | ||
289 | 582 | |||
290 | 583 | net.apply_network_config_names(yaml.load(self.V1_CONFIG)) | ||
291 | 584 | |||
292 | 585 | call = ['52:54:00:12:34:00', 'interface0', 'virtio_net', '0x15d8'] | ||
293 | 586 | m_rename_interfaces.assert_called_with([call]) | ||
294 | 587 | |||
295 | 588 | @mock.patch('cloudinit.net.device_devid') | ||
296 | 589 | @mock.patch('cloudinit.net.device_driver') | ||
297 | 590 | @mock.patch('cloudinit.net._rename_interfaces') | ||
298 | 591 | def test_apply_v2_renames(self, m_rename_interfaces, m_device_driver, | ||
299 | 592 | m_device_devid): | ||
300 | 593 | m_device_driver.return_value = 'virtio_net' | ||
301 | 594 | m_device_devid.return_value = '0x15d8' | ||
302 | 595 | |||
303 | 596 | net.apply_network_config_names(yaml.load(self.V2_CONFIG)) | ||
304 | 597 | |||
305 | 598 | call = ['52:54:00:12:34:00', 'interface0', 'virtio_net', '0x15d8'] | ||
306 | 599 | m_rename_interfaces.assert_called_with([call]) | ||
307 | 600 | |||
308 | 601 | @mock.patch('cloudinit.net._rename_interfaces') | ||
309 | 602 | def test_apply_v2_renames_skips_without_setname(self, m_rename_interfaces): | ||
310 | 603 | net.apply_network_config_names(yaml.load(self.V2_CONFIG_NO_SETNAME)) | ||
311 | 604 | m_rename_interfaces.assert_called_with([]) | ||
312 | 605 | |||
313 | 606 | @mock.patch('cloudinit.net._rename_interfaces') | ||
314 | 607 | def test_apply_v2_renames_skips_without_mac(self, m_rename_interfaces): | ||
315 | 608 | net.apply_network_config_names(yaml.load(self.V2_CONFIG_NO_MAC)) | ||
316 | 609 | m_rename_interfaces.assert_called_with([]) | ||
317 | 610 | |||
318 | 611 | def test_apply_v2_renames_raises_runtime_error_on_unknown_version(self): | ||
319 | 612 | with self.assertRaises(RuntimeError): | ||
320 | 613 | net.apply_network_config_names(yaml.load("version: 3")) | ||
321 | diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py | |||
322 | index e14553b..21e9ef8 100644 | |||
323 | --- a/cloudinit/sources/DataSourceEc2.py | |||
324 | +++ b/cloudinit/sources/DataSourceEc2.py | |||
325 | @@ -147,6 +147,12 @@ class DataSourceEc2(sources.DataSource): | |||
326 | 147 | def get_instance_id(self): | 147 | def get_instance_id(self): |
327 | 148 | if self.cloud_platform == Platforms.AWS: | 148 | if self.cloud_platform == Platforms.AWS: |
328 | 149 | # Prefer the ID from the instance identity document, but fall back | 149 | # Prefer the ID from the instance identity document, but fall back |
329 | 150 | if not getattr(self, 'identity', None): | ||
330 | 151 | # If re-using cached datasource, its get_data run didn't | ||
331 | 152 | # setup self.identity. So we need to do that now. | ||
332 | 153 | api_version = self.get_metadata_api_version() | ||
333 | 154 | self.identity = ec2.get_instance_identity( | ||
334 | 155 | api_version, self.metadata_address).get('document', {}) | ||
335 | 150 | return self.identity.get( | 156 | return self.identity.get( |
336 | 151 | 'instanceId', self.metadata['instance-id']) | 157 | 'instanceId', self.metadata['instance-id']) |
337 | 152 | else: | 158 | else: |
338 | diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py | |||
339 | index b95b956..882517f 100644 | |||
340 | --- a/cloudinit/ssh_util.py | |||
341 | +++ b/cloudinit/ssh_util.py | |||
342 | @@ -171,16 +171,13 @@ def parse_authorized_keys(fname): | |||
343 | 171 | 171 | ||
344 | 172 | 172 | ||
345 | 173 | def update_authorized_keys(old_entries, keys): | 173 | def update_authorized_keys(old_entries, keys): |
348 | 174 | to_add = list(keys) | 174 | to_add = list([k for k in keys if k.valid()]) |
347 | 175 | |||
349 | 176 | for i in range(0, len(old_entries)): | 175 | for i in range(0, len(old_entries)): |
350 | 177 | ent = old_entries[i] | 176 | ent = old_entries[i] |
351 | 178 | if not ent.valid(): | 177 | if not ent.valid(): |
352 | 179 | continue | 178 | continue |
353 | 180 | # Replace those with the same base64 | 179 | # Replace those with the same base64 |
354 | 181 | for k in keys: | 180 | for k in keys: |
355 | 182 | if not ent.valid(): | ||
356 | 183 | continue | ||
357 | 184 | if k.base64 == ent.base64: | 181 | if k.base64 == ent.base64: |
358 | 185 | # Replace it with our better one | 182 | # Replace it with our better one |
359 | 186 | ent = k | 183 | ent = k |
360 | diff --git a/debian/changelog b/debian/changelog | |||
361 | index 35cabb1..fe1b23d 100644 | |||
362 | --- a/debian/changelog | |||
363 | +++ b/debian/changelog | |||
364 | @@ -1,3 +1,14 @@ | |||
365 | 1 | cloud-init (17.2-35-gf576b2a2-0ubuntu1~17.10.1) artful-proposed; urgency=medium | ||
366 | 2 | |||
367 | 3 | * New upstream snapshot. (LP: #1747059) | ||
368 | 4 | - tests: add support for logs with lxd from snap and future lxd 3. | ||
369 | 5 | - EC2: Fix get_instance_id called against cached datasource pickle. | ||
370 | 6 | - cli: fix cloud-init status to report running when before result.json | ||
371 | 7 | - net: accept network-config in netplan format for renaming interfaces | ||
372 | 8 | - Fix ssh keys validation in ssh_util [Tatiana Kholkina] | ||
373 | 9 | |||
374 | 10 | -- Chad Smith <chad.smith@canonical.com> Mon, 12 Feb 2018 10:16:42 -0700 | ||
375 | 11 | |||
376 | 1 | cloud-init (17.2-30-gf7deaf15-0ubuntu1~17.10.1) artful-proposed; urgency=medium | 12 | cloud-init (17.2-30-gf7deaf15-0ubuntu1~17.10.1) artful-proposed; urgency=medium |
377 | 2 | 13 | ||
378 | 3 | * New upstream snapshot. (LP: #1747059) | 14 | * New upstream snapshot. (LP: #1747059) |
379 | diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py | |||
380 | index 5ea88e5..d4f9135 100644 | |||
381 | --- a/tests/cloud_tests/collect.py | |||
382 | +++ b/tests/cloud_tests/collect.py | |||
383 | @@ -44,8 +44,9 @@ def collect_console(instance, base_dir): | |||
384 | 44 | LOG.debug('getting console log for %s to %s', instance, logfile) | 44 | LOG.debug('getting console log for %s to %s', instance, logfile) |
385 | 45 | try: | 45 | try: |
386 | 46 | data = instance.console_log() | 46 | data = instance.console_log() |
389 | 47 | except NotImplementedError: | 47 | except NotImplementedError as e: |
390 | 48 | data = b'instance.console_log: not implemented' | 48 | # args[0] is hacky, but that's all I see to get at the message. |
391 | 49 | data = b'NotImplementedError:' + e.args[0].encode() | ||
392 | 49 | with open(logfile, "wb") as fp: | 50 | with open(logfile, "wb") as fp: |
393 | 50 | fp.write(data) | 51 | fp.write(data) |
394 | 51 | 52 | ||
395 | diff --git a/tests/cloud_tests/platforms/lxd/instance.py b/tests/cloud_tests/platforms/lxd/instance.py | |||
396 | index d2d2a1f..0488da5 100644 | |||
397 | --- a/tests/cloud_tests/platforms/lxd/instance.py | |||
398 | +++ b/tests/cloud_tests/platforms/lxd/instance.py | |||
399 | @@ -6,7 +6,9 @@ import os | |||
400 | 6 | import shutil | 6 | import shutil |
401 | 7 | from tempfile import mkdtemp | 7 | from tempfile import mkdtemp |
402 | 8 | 8 | ||
404 | 9 | from cloudinit.util import subp, ProcessExecutionError | 9 | from cloudinit.util import load_yaml, subp, ProcessExecutionError, which |
405 | 10 | from tests.cloud_tests import LOG | ||
406 | 11 | from tests.cloud_tests.util import PlatformError | ||
407 | 10 | 12 | ||
408 | 11 | from ..instances import Instance | 13 | from ..instances import Instance |
409 | 12 | 14 | ||
410 | @@ -15,6 +17,8 @@ class LXDInstance(Instance): | |||
411 | 15 | """LXD container backed instance.""" | 17 | """LXD container backed instance.""" |
412 | 16 | 18 | ||
413 | 17 | platform_name = "lxd" | 19 | platform_name = "lxd" |
414 | 20 | _console_log_method = None | ||
415 | 21 | _console_log_file = None | ||
416 | 18 | 22 | ||
417 | 19 | def __init__(self, platform, name, properties, config, features, | 23 | def __init__(self, platform, name, properties, config, features, |
418 | 20 | pylxd_container): | 24 | pylxd_container): |
419 | @@ -30,8 +34,8 @@ class LXDInstance(Instance): | |||
420 | 30 | super(LXDInstance, self).__init__( | 34 | super(LXDInstance, self).__init__( |
421 | 31 | platform, name, properties, config, features) | 35 | platform, name, properties, config, features) |
422 | 32 | self.tmpd = mkdtemp(prefix="%s-%s" % (type(self).__name__, name)) | 36 | self.tmpd = mkdtemp(prefix="%s-%s" % (type(self).__name__, name)) |
423 | 33 | self._setup_console_log() | ||
424 | 34 | self.name = name | 37 | self.name = name |
425 | 38 | self._setup_console_log() | ||
426 | 35 | 39 | ||
427 | 36 | @property | 40 | @property |
428 | 37 | def pylxd_container(self): | 41 | def pylxd_container(self): |
429 | @@ -39,21 +43,6 @@ class LXDInstance(Instance): | |||
430 | 39 | self._pylxd_container.sync() | 43 | self._pylxd_container.sync() |
431 | 40 | return self._pylxd_container | 44 | return self._pylxd_container |
432 | 41 | 45 | ||
433 | 42 | def _setup_console_log(self): | ||
434 | 43 | logf = os.path.join(self.tmpd, "console.log") | ||
435 | 44 | |||
436 | 45 | # doing this ensures we can read it. Otherwise it ends up root:root. | ||
437 | 46 | with open(logf, "w") as fp: | ||
438 | 47 | fp.write("# %s\n" % self.name) | ||
439 | 48 | |||
440 | 49 | cfg = "lxc.console.logfile=%s" % logf | ||
441 | 50 | orig = self._pylxd_container.config.get('raw.lxc', "") | ||
442 | 51 | if orig: | ||
443 | 52 | orig += "\n" | ||
444 | 53 | self._pylxd_container.config['raw.lxc'] = orig + cfg | ||
445 | 54 | self._pylxd_container.save() | ||
446 | 55 | self._console_log_file = logf | ||
447 | 56 | |||
448 | 57 | def _execute(self, command, stdin=None, env=None): | 46 | def _execute(self, command, stdin=None, env=None): |
449 | 58 | if env is None: | 47 | if env is None: |
450 | 59 | env = {} | 48 | env = {} |
451 | @@ -97,19 +86,80 @@ class LXDInstance(Instance): | |||
452 | 97 | """ | 86 | """ |
453 | 98 | self.pylxd_container.files.put(remote_path, data) | 87 | self.pylxd_container.files.put(remote_path, data) |
454 | 99 | 88 | ||
455 | 89 | @property | ||
456 | 90 | def console_log_method(self): | ||
457 | 91 | if self._console_log_method is not None: | ||
458 | 92 | return self._console_log_method | ||
459 | 93 | |||
460 | 94 | client = which('lxc') | ||
461 | 95 | if not client: | ||
462 | 96 | raise PlatformError("No 'lxc' client.") | ||
463 | 97 | |||
464 | 98 | elif _has_proper_console_support(): | ||
465 | 99 | self._console_log_method = 'show-log' | ||
466 | 100 | elif client.startswith("/snap"): | ||
467 | 101 | self._console_log_method = 'logfile-snap' | ||
468 | 102 | else: | ||
469 | 103 | self._console_log_method = 'logfile-tmp' | ||
470 | 104 | |||
471 | 105 | LOG.debug("Set console log method to %s", self._console_log_method) | ||
472 | 106 | return self._console_log_method | ||
473 | 107 | |||
474 | 108 | def _setup_console_log(self): | ||
475 | 109 | method = self.console_log_method | ||
476 | 110 | if not method.startswith("logfile-"): | ||
477 | 111 | return | ||
478 | 112 | |||
479 | 113 | if method == "logfile-snap": | ||
480 | 114 | log_dir = "/var/snap/lxd/common/consoles" | ||
481 | 115 | if not os.path.exists(log_dir): | ||
482 | 116 | raise PlatformError( | ||
483 | 117 | "Unable to log with snap lxc. Please run:\n" | ||
484 | 118 | " sudo mkdir --mode=1777 -p %s" % log_dir) | ||
485 | 119 | elif method == "logfile-tmp": | ||
486 | 120 | log_dir = "/tmp" | ||
487 | 121 | else: | ||
488 | 122 | raise PlatformError( | ||
489 | 123 | "Unexpected value for console method: %s" % method) | ||
490 | 124 | |||
491 | 125 | # doing this ensures we can read it. Otherwise it ends up root:root. | ||
492 | 126 | log_file = os.path.join(log_dir, self.name) | ||
493 | 127 | with open(log_file, "w") as fp: | ||
494 | 128 | fp.write("# %s\n" % self.name) | ||
495 | 129 | |||
496 | 130 | cfg = "lxc.console.logfile=%s" % log_file | ||
497 | 131 | orig = self._pylxd_container.config.get('raw.lxc', "") | ||
498 | 132 | if orig: | ||
499 | 133 | orig += "\n" | ||
500 | 134 | self._pylxd_container.config['raw.lxc'] = orig + cfg | ||
501 | 135 | self._pylxd_container.save() | ||
502 | 136 | self._console_log_file = log_file | ||
503 | 137 | |||
504 | 100 | def console_log(self): | 138 | def console_log(self): |
505 | 101 | """Console log. | 139 | """Console log. |
506 | 102 | 140 | ||
508 | 103 | @return_value: bytes of this instance’s console | 141 | @return_value: bytes of this instance's console |
509 | 104 | """ | 142 | """ |
518 | 105 | if not os.path.exists(self._console_log_file): | 143 | |
519 | 106 | raise NotImplementedError( | 144 | if self._console_log_file: |
520 | 107 | "Console log '%s' does not exist. If this is a remote " | 145 | if not os.path.exists(self._console_log_file): |
521 | 108 | "lxc, then this is really NotImplementedError. If it is " | 146 | raise NotImplementedError( |
522 | 109 | "A local lxc, then this is a RuntimeError." | 147 | "Console log '%s' does not exist. If this is a remote " |
523 | 110 | "https://github.com/lxc/lxd/issues/1129") | 148 | "lxc, then this is really NotImplementedError. If it is " |
524 | 111 | with open(self._console_log_file, "rb") as fp: | 149 | "A local lxc, then this is a RuntimeError." |
525 | 112 | return fp.read() | 150 | "https://github.com/lxc/lxd/issues/1129") |
526 | 151 | with open(self._console_log_file, "rb") as fp: | ||
527 | 152 | return fp.read() | ||
528 | 153 | |||
529 | 154 | try: | ||
530 | 155 | stdout, stderr = subp( | ||
531 | 156 | ['lxc', 'console', '--show-log', self.name], decode=False) | ||
532 | 157 | return stdout | ||
533 | 158 | except ProcessExecutionError as e: | ||
534 | 159 | raise PlatformError( | ||
535 | 160 | "console log", | ||
536 | 161 | "Console log failed [%d]: stdout=%s stderr=%s" % ( | ||
537 | 162 | e.exit_code, e.stdout, e.stderr)) | ||
538 | 113 | 163 | ||
539 | 114 | def reboot(self, wait=True): | 164 | def reboot(self, wait=True): |
540 | 115 | """Reboot instance.""" | 165 | """Reboot instance.""" |
541 | @@ -146,7 +196,37 @@ class LXDInstance(Instance): | |||
542 | 146 | if self.platform.container_exists(self.name): | 196 | if self.platform.container_exists(self.name): |
543 | 147 | raise OSError('container {} was not properly removed' | 197 | raise OSError('container {} was not properly removed' |
544 | 148 | .format(self.name)) | 198 | .format(self.name)) |
545 | 199 | if self._console_log_file and os.path.exists(self._console_log_file): | ||
546 | 200 | os.unlink(self._console_log_file) | ||
547 | 149 | shutil.rmtree(self.tmpd) | 201 | shutil.rmtree(self.tmpd) |
548 | 150 | super(LXDInstance, self).destroy() | 202 | super(LXDInstance, self).destroy() |
549 | 151 | 203 | ||
550 | 204 | |||
def _has_proper_console_support():
    """Return True if the local lxc/LXD setup can capture console logs.

    Console capture requires all of:
      - the LXD server advertising the 'console' api extension,
      - an LXD driver version of 3.x or newer,
      - an lxc client whose 'console' subcommand supports '--log'.

    Logs the reason at debug level when support is missing.
    """
    stdout, _ = subp(['lxc', 'info'])
    info = load_yaml(stdout)
    reason = None
    if 'console' not in info.get('api_extensions', []):
        reason = "LXD server does not support console api extension"
    else:
        dver = info.get('environment', {}).get('driver_version', "")
        # Fix: original called the non-existent str.startwith() on the
        # second test, raising AttributeError for any 1.x driver version.
        if dver.startswith("2.") or dver.startswith("1."):
            reason = "LXD Driver version not 3.x+ (%s)" % dver
        else:
            try:
                stdout, _ = subp(['lxc', 'console', '--help'],
                                 decode=False)
                if not (b'console' in stdout and b'log' in stdout):
                    reason = "no '--log' in lxc console --help"
            except ProcessExecutionError:
                # exception details unused; absence of the command suffices
                reason = "no 'console' command in lxc client"

    if reason:
        LOG.debug("no console-support: %s", reason)
        return False
    LOG.debug("console-support looks good")
    return True
576 | 230 | |||
577 | 231 | |||
578 | 152 | # vi: ts=4 expandtab | 232 | # vi: ts=4 expandtab |
579 | diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py | |||
580 | index 0f7267b..dff8b1e 100644 | |||
581 | --- a/tests/unittests/test_datasource/test_ec2.py | |||
582 | +++ b/tests/unittests/test_datasource/test_ec2.py | |||
583 | @@ -2,6 +2,7 @@ | |||
584 | 2 | 2 | ||
585 | 3 | import copy | 3 | import copy |
586 | 4 | import httpretty | 4 | import httpretty |
587 | 5 | import json | ||
588 | 5 | import mock | 6 | import mock |
589 | 6 | 7 | ||
590 | 7 | from cloudinit import helpers | 8 | from cloudinit import helpers |
591 | @@ -9,6 +10,29 @@ from cloudinit.sources import DataSourceEc2 as ec2 | |||
592 | 9 | from cloudinit.tests import helpers as test_helpers | 10 | from cloudinit.tests import helpers as test_helpers |
593 | 10 | 11 | ||
594 | 11 | 12 | ||
595 | 13 | DYNAMIC_METADATA = { | ||
596 | 14 | "instance-identity": { | ||
597 | 15 | "document": json.dumps({ | ||
598 | 16 | "devpayProductCodes": None, | ||
599 | 17 | "marketplaceProductCodes": ["1abc2defghijklm3nopqrs4tu"], | ||
600 | 18 | "availabilityZone": "us-west-2b", | ||
601 | 19 | "privateIp": "10.158.112.84", | ||
602 | 20 | "version": "2017-09-30", | ||
603 | 21 | "instanceId": "my-identity-id", | ||
604 | 22 | "billingProducts": None, | ||
605 | 23 | "instanceType": "t2.micro", | ||
606 | 24 | "accountId": "123456789012", | ||
607 | 25 | "imageId": "ami-5fb8c835", | ||
608 | 26 | "pendingTime": "2016-11-19T16:32:11Z", | ||
609 | 27 | "architecture": "x86_64", | ||
610 | 28 | "kernelId": None, | ||
611 | 29 | "ramdiskId": None, | ||
612 | 30 | "region": "us-west-2" | ||
613 | 31 | }) | ||
614 | 32 | } | ||
615 | 33 | } | ||
616 | 34 | |||
617 | 35 | |||
618 | 12 | # collected from api version 2016-09-02/ with | 36 | # collected from api version 2016-09-02/ with |
619 | 13 | # python3 -c 'import json | 37 | # python3 -c 'import json |
620 | 14 | # from cloudinit.ec2_utils import get_instance_metadata as gm | 38 | # from cloudinit.ec2_utils import get_instance_metadata as gm |
621 | @@ -85,7 +109,7 @@ DEFAULT_METADATA = { | |||
622 | 85 | "public-keys": {"brickies": ["ssh-rsa AAAAB3Nz....w== brickies"]}, | 109 | "public-keys": {"brickies": ["ssh-rsa AAAAB3Nz....w== brickies"]}, |
623 | 86 | "reservation-id": "r-01efbc9996bac1bd6", | 110 | "reservation-id": "r-01efbc9996bac1bd6", |
624 | 87 | "security-groups": "my-wide-open", | 111 | "security-groups": "my-wide-open", |
626 | 88 | "services": {"domain": "amazonaws.com", "partition": "aws"} | 112 | "services": {"domain": "amazonaws.com", "partition": "aws"}, |
627 | 89 | } | 113 | } |
628 | 90 | 114 | ||
629 | 91 | 115 | ||
630 | @@ -341,6 +365,39 @@ class TestEc2(test_helpers.HttprettyTestCase): | |||
631 | 341 | self.assertEqual(expected, ds.network_config) | 365 | self.assertEqual(expected, ds.network_config) |
632 | 342 | 366 | ||
633 | 343 | @httpretty.activate | 367 | @httpretty.activate |
634 | 368 | def test_ec2_get_instance_id_refreshes_identity_on_upgrade(self): | ||
635 | 369 | """get_instance-id gets DataSourceEc2Local.identity if not present. | ||
636 | 370 | |||
637 | 371 | This handles an upgrade case where the old pickled datasource didn't | ||
638 | 372 | set up self.identity, but 'systemctl cloud-init init' runs | ||
639 | 373 | get_instance_id which traces on missing self.identity. lp:1748354. | ||
640 | 374 | """ | ||
641 | 375 | self.datasource = ec2.DataSourceEc2Local | ||
642 | 376 | ds = self._setup_ds( | ||
643 | 377 | platform_data=self.valid_platform_data, | ||
644 | 378 | sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, | ||
645 | 379 | md=DEFAULT_METADATA) | ||
646 | 380 | # Mock 404s on all versions except latest | ||
647 | 381 | all_versions = ( | ||
648 | 382 | [ds.min_metadata_version] + ds.extended_metadata_versions) | ||
649 | 383 | for ver in all_versions[:-1]: | ||
650 | 384 | register_mock_metaserver( | ||
651 | 385 | 'http://169.254.169.254/{0}/meta-data/instance-id'.format(ver), | ||
652 | 386 | None) | ||
653 | 387 | ds.metadata_address = 'http://169.254.169.254' | ||
654 | 388 | register_mock_metaserver( | ||
655 | 389 | '{0}/{1}/meta-data/'.format(ds.metadata_address, all_versions[-1]), | ||
656 | 390 | DEFAULT_METADATA) | ||
657 | 391 | # Register dynamic/instance-identity document which we now read. | ||
658 | 392 | register_mock_metaserver( | ||
659 | 393 | '{0}/{1}/dynamic/'.format(ds.metadata_address, all_versions[-1]), | ||
660 | 394 | DYNAMIC_METADATA) | ||
661 | 395 | ds._cloud_platform = ec2.Platforms.AWS | ||
662 | 396 | # Setup cached metadata on the Datasource | ||
663 | 397 | ds.metadata = DEFAULT_METADATA | ||
664 | 398 | self.assertEqual('my-identity-id', ds.get_instance_id()) | ||
665 | 399 | |||
666 | 400 | @httpretty.activate | ||
667 | 344 | @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') | 401 | @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') |
668 | 345 | def test_valid_platform_with_strict_true(self, m_dhcp): | 402 | def test_valid_platform_with_strict_true(self, m_dhcp): |
669 | 346 | """Valid platform data should return true with strict_id true.""" | 403 | """Valid platform data should return true with strict_id true.""" |
670 | diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py | |||
671 | index 2a8e6ab..4c62c8b 100644 | |||
672 | --- a/tests/unittests/test_sshutil.py | |||
673 | +++ b/tests/unittests/test_sshutil.py | |||
674 | @@ -126,6 +126,48 @@ class TestAuthKeyLineParser(test_helpers.TestCase): | |||
675 | 126 | self.assertFalse(key.valid()) | 126 | self.assertFalse(key.valid()) |
676 | 127 | 127 | ||
677 | 128 | 128 | ||
678 | 129 | class TestUpdateAuthorizedKeys(test_helpers.TestCase): | ||
679 | 130 | |||
680 | 131 | def test_new_keys_replace(self): | ||
681 | 132 | """new entries with the same base64 should replace old.""" | ||
682 | 133 | orig_entries = [ | ||
683 | 134 | ' '.join(('rsa', VALID_CONTENT['rsa'], 'orig_comment1')), | ||
684 | 135 | ' '.join(('dsa', VALID_CONTENT['dsa'], 'orig_comment2'))] | ||
685 | 136 | |||
686 | 137 | new_entries = [ | ||
687 | 138 | ' '.join(('rsa', VALID_CONTENT['rsa'], 'new_comment1')), ] | ||
688 | 139 | |||
689 | 140 | expected = '\n'.join([new_entries[0], orig_entries[1]]) + '\n' | ||
690 | 141 | |||
691 | 142 | parser = ssh_util.AuthKeyLineParser() | ||
692 | 143 | found = ssh_util.update_authorized_keys( | ||
693 | 144 | [parser.parse(p) for p in orig_entries], | ||
694 | 145 | [parser.parse(p) for p in new_entries]) | ||
695 | 146 | |||
696 | 147 | self.assertEqual(expected, found) | ||
697 | 148 | |||
698 | 149 | def test_new_invalid_keys_are_ignored(self): | ||
699 | 150 | """new entries that are invalid should be skipped.""" | ||
700 | 151 | orig_entries = [ | ||
701 | 152 | ' '.join(('rsa', VALID_CONTENT['rsa'], 'orig_comment1')), | ||
702 | 153 | ' '.join(('dsa', VALID_CONTENT['dsa'], 'orig_comment2'))] | ||
703 | 154 | |||
704 | 155 | new_entries = [ | ||
705 | 156 | ' '.join(('rsa', VALID_CONTENT['rsa'], 'new_comment1')), | ||
706 | 157 | 'xxx-invalid-thing1', | ||
707 | 158 | 'xxx-invalid-blob2' | ||
708 | 159 | ] | ||
709 | 160 | |||
710 | 161 | expected = '\n'.join([new_entries[0], orig_entries[1]]) + '\n' | ||
711 | 162 | |||
712 | 163 | parser = ssh_util.AuthKeyLineParser() | ||
713 | 164 | found = ssh_util.update_authorized_keys( | ||
714 | 165 | [parser.parse(p) for p in orig_entries], | ||
715 | 166 | [parser.parse(p) for p in new_entries]) | ||
716 | 167 | |||
717 | 168 | self.assertEqual(expected, found) | ||
718 | 169 | |||
719 | 170 | |||
720 | 129 | class TestParseSSHConfig(test_helpers.TestCase): | 171 | class TestParseSSHConfig(test_helpers.TestCase): |
721 | 130 | 172 | ||
722 | 131 | def setUp(self): | 173 | def setUp(self): |