Merge ~oddbloke/cloud-init/+git/cloud-init:ubuntu/devel into cloud-init:ubuntu/devel
- Git
- lp:~oddbloke/cloud-init/+git/cloud-init
- ubuntu/devel
- Merge into ubuntu/devel
Proposed by
Dan Watkins
Status: | Merged |
---|---|
Merged at revision: | 6380e13bb69e77f6684e89ff59c467e59a6b8b7f |
Proposed branch: | ~oddbloke/cloud-init/+git/cloud-init:ubuntu/devel |
Merge into: | cloud-init:ubuntu/devel |
Diff against target: |
2904 lines (+1300/-449) 34 files modified
cloudinit/cmd/main.py (+5/-4) cloudinit/config/cc_ubuntu_advantage.py (+116/-109) cloudinit/config/cc_ubuntu_drivers.py (+112/-0) cloudinit/config/tests/test_ubuntu_advantage.py (+191/-156) cloudinit/config/tests/test_ubuntu_drivers.py (+174/-0) cloudinit/net/eni.py (+11/-5) cloudinit/net/network_state.py (+33/-8) cloudinit/net/sysconfig.py (+25/-9) cloudinit/sources/DataSourceAzure.py (+168/-89) cloudinit/sources/DataSourceEc2.py (+6/-2) cloudinit/sources/DataSourceNoCloud.py (+3/-1) cloudinit/sources/DataSourceScaleway.py (+2/-1) cloudinit/sources/__init__.py (+3/-3) cloudinit/sources/helpers/azure.py (+31/-0) cloudinit/sources/tests/test_init.py (+15/-0) cloudinit/util.py (+15/-0) config/cloud.cfg.tmpl (+3/-0) debian/changelog (+29/-0) doc/rtd/topics/datasources/azure.rst (+35/-22) doc/rtd/topics/datasources/nocloud.rst (+1/-1) doc/rtd/topics/modules.rst (+1/-0) tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml (+1/-2) tests/cloud_tests/testcases/modules/apt_pipelining_os.py (+3/-3) tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml (+4/-5) tests/data/azure/non_unicode_random_string (+1/-0) tests/unittests/test_datasource/test_azure.py (+22/-2) tests/unittests/test_datasource/test_nocloud.py (+42/-0) tests/unittests/test_datasource/test_scaleway.py (+7/-0) tests/unittests/test_distros/test_netconfig.py (+2/-0) tests/unittests/test_ds_identify.py (+17/-0) tests/unittests/test_handler/test_schema.py (+1/-0) tests/unittests/test_net.py (+209/-15) tools/ds-identify (+4/-3) tox.ini (+8/-9) |
Related bugs: |
|
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Server Team CI bot | continuous-integration | Needs Fixing | |
Ryan Harper | Approve | ||
Review via email: mp+365803@code.launchpad.net |
Commit message
Description of the change
To post a comment you must log in.
Revision history for this message
Server Team CI bot (server-team-bot) wrote : | # |
FAILED: Continuous integration, rev:6380e13bb69
No commit message was specified in the merge proposal. Click on the following link and set the commit message (if you want a Jenkins rebuild, you need to trigger it yourself):
https:/
https:/
Executed test runs:
SUCCESS: Checkout
SUCCESS: Unit & Style Tests
SUCCESS: Ubuntu LTS: Build
SUCCESS: Ubuntu LTS: Integration
IN_PROGRESS: Declarative: Post Actions
Click here to trigger a rebuild:
https:/
review:
Needs Fixing
(continuous-integration)
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py | |||
2 | index 933c019..a5446da 100644 | |||
3 | --- a/cloudinit/cmd/main.py | |||
4 | +++ b/cloudinit/cmd/main.py | |||
5 | @@ -632,13 +632,14 @@ def status_wrapper(name, args, data_d=None, link_d=None): | |||
6 | 632 | 'start': None, | 632 | 'start': None, |
7 | 633 | 'finished': None, | 633 | 'finished': None, |
8 | 634 | } | 634 | } |
9 | 635 | |||
10 | 635 | if status is None: | 636 | if status is None: |
11 | 636 | status = {'v1': {}} | 637 | status = {'v1': {}} |
12 | 637 | for m in modes: | ||
13 | 638 | status['v1'][m] = nullstatus.copy() | ||
14 | 639 | status['v1']['datasource'] = None | 638 | status['v1']['datasource'] = None |
17 | 640 | elif mode not in status['v1']: | 639 | |
18 | 641 | status['v1'][mode] = nullstatus.copy() | 640 | for m in modes: |
19 | 641 | if m not in status['v1']: | ||
20 | 642 | status['v1'][m] = nullstatus.copy() | ||
21 | 642 | 643 | ||
22 | 643 | v1 = status['v1'] | 644 | v1 = status['v1'] |
23 | 644 | v1['stage'] = mode | 645 | v1['stage'] = mode |
24 | diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py | |||
25 | index 5e082bd..f488123 100644 | |||
26 | --- a/cloudinit/config/cc_ubuntu_advantage.py | |||
27 | +++ b/cloudinit/config/cc_ubuntu_advantage.py | |||
28 | @@ -1,150 +1,143 @@ | |||
29 | 1 | # Copyright (C) 2018 Canonical Ltd. | ||
30 | 2 | # | ||
31 | 3 | # This file is part of cloud-init. See LICENSE file for license information. | 1 | # This file is part of cloud-init. See LICENSE file for license information. |
32 | 4 | 2 | ||
34 | 5 | """Ubuntu advantage: manage ubuntu-advantage offerings from Canonical.""" | 3 | """ubuntu_advantage: Configure Ubuntu Advantage support services""" |
35 | 6 | 4 | ||
36 | 7 | import sys | ||
37 | 8 | from textwrap import dedent | 5 | from textwrap import dedent |
38 | 9 | 6 | ||
40 | 10 | from cloudinit import log as logging | 7 | import six |
41 | 8 | |||
42 | 11 | from cloudinit.config.schema import ( | 9 | from cloudinit.config.schema import ( |
43 | 12 | get_schema_doc, validate_cloudconfig_schema) | 10 | get_schema_doc, validate_cloudconfig_schema) |
44 | 11 | from cloudinit import log as logging | ||
45 | 13 | from cloudinit.settings import PER_INSTANCE | 12 | from cloudinit.settings import PER_INSTANCE |
46 | 14 | from cloudinit.subp import prepend_base_command | ||
47 | 15 | from cloudinit import util | 13 | from cloudinit import util |
48 | 16 | 14 | ||
49 | 17 | 15 | ||
52 | 18 | distros = ['ubuntu'] | 16 | UA_URL = 'https://ubuntu.com/advantage' |
51 | 19 | frequency = PER_INSTANCE | ||
53 | 20 | 17 | ||
55 | 21 | LOG = logging.getLogger(__name__) | 18 | distros = ['ubuntu'] |
56 | 22 | 19 | ||
57 | 23 | schema = { | 20 | schema = { |
58 | 24 | 'id': 'cc_ubuntu_advantage', | 21 | 'id': 'cc_ubuntu_advantage', |
59 | 25 | 'name': 'Ubuntu Advantage', | 22 | 'name': 'Ubuntu Advantage', |
61 | 26 | 'title': 'Install, configure and manage ubuntu-advantage offerings', | 23 | 'title': 'Configure Ubuntu Advantage support services', |
62 | 27 | 'description': dedent("""\ | 24 | 'description': dedent("""\ |
84 | 28 | This module provides configuration options to setup ubuntu-advantage | 25 | Attach machine to an existing Ubuntu Advantage support contract and |
85 | 29 | subscriptions. | 26 | enable or disable support services such as Livepatch, ESM, |
86 | 30 | 27 | FIPS and FIPS Updates. When attaching a machine to Ubuntu Advantage, | |
87 | 31 | .. note:: | 28 | one can also specify services to enable. When the 'enable' |
88 | 32 | Both ``commands`` value can be either a dictionary or a list. If | 29 | list is present, any named service will be enabled and all absent |
89 | 33 | the configuration provided is a dictionary, the keys are only used | 30 | services will remain disabled. |
90 | 34 | to order the execution of the commands and the dictionary is | 31 | |
91 | 35 | merged with any vendor-data ubuntu-advantage configuration | 32 | Note that when enabling FIPS or FIPS updates you will need to schedule |
92 | 36 | provided. If a ``commands`` is provided as a list, any vendor-data | 33 | a reboot to ensure the machine is running the FIPS-compliant kernel. |
93 | 37 | ubuntu-advantage ``commands`` are ignored. | 34 | See :ref:`Power State Change` for information on how to configure |
94 | 38 | 35 | cloud-init to perform this reboot. | |
74 | 39 | Ubuntu-advantage ``commands`` is a dictionary or list of | ||
75 | 40 | ubuntu-advantage commands to run on the deployed machine. | ||
76 | 41 | These commands can be used to enable or disable subscriptions to | ||
77 | 42 | various ubuntu-advantage products. See 'man ubuntu-advantage' for more | ||
78 | 43 | information on supported subcommands. | ||
79 | 44 | |||
80 | 45 | .. note:: | ||
81 | 46 | Each command item can be a string or list. If the item is a list, | ||
82 | 47 | 'ubuntu-advantage' can be omitted and it will automatically be | ||
83 | 48 | inserted as part of the command. | ||
95 | 49 | """), | 36 | """), |
96 | 50 | 'distros': distros, | 37 | 'distros': distros, |
97 | 51 | 'examples': [dedent("""\ | 38 | 'examples': [dedent("""\ |
99 | 52 | # Enable Extended Security Maintenance using your service auth token | 39 | # Attach the machine to a Ubuntu Advantage support contract with a |
100 | 40 | # UA contract token obtained from %s. | ||
101 | 41 | ubuntu_advantage: | ||
102 | 42 | token: <ua_contract_token> | ||
103 | 43 | """ % UA_URL), dedent("""\ | ||
104 | 44 | # Attach the machine to an Ubuntu Advantage support contract enabling | ||
105 | 45 | # only fips and esm services. Services will only be enabled if | ||
106 | 46 | # the environment supports said service. Otherwise warnings will | ||
107 | 47 | # be logged for incompatible services specified. | ||
108 | 53 | ubuntu-advantage: | 48 | ubuntu-advantage: |
111 | 54 | commands: | 49 | token: <ua_contract_token> |
112 | 55 | 00: ubuntu-advantage enable-esm <token> | 50 | enable: |
113 | 51 | - fips | ||
114 | 52 | - esm | ||
115 | 56 | """), dedent("""\ | 53 | """), dedent("""\ |
117 | 57 | # Enable livepatch by providing your livepatch token | 54 | # Attach the machine to an Ubuntu Advantage support contract and enable |
118 | 55 | # the FIPS service. Perform a reboot once cloud-init has | ||
119 | 56 | # completed. | ||
120 | 57 | power_state: | ||
121 | 58 | mode: reboot | ||
122 | 58 | ubuntu-advantage: | 59 | ubuntu-advantage: |
138 | 59 | commands: | 60 | token: <ua_contract_token> |
139 | 60 | 00: ubuntu-advantage enable-livepatch <livepatch-token> | 61 | enable: |
140 | 61 | 62 | - fips | |
141 | 62 | """), dedent("""\ | 63 | """)], |
127 | 63 | # Convenience: the ubuntu-advantage command can be omitted when | ||
128 | 64 | # specifying commands as a list and 'ubuntu-advantage' will | ||
129 | 65 | # automatically be prepended. | ||
130 | 66 | # The following commands are equivalent | ||
131 | 67 | ubuntu-advantage: | ||
132 | 68 | commands: | ||
133 | 69 | 00: ['enable-livepatch', 'my-token'] | ||
134 | 70 | 01: ['ubuntu-advantage', 'enable-livepatch', 'my-token'] | ||
135 | 71 | 02: ubuntu-advantage enable-livepatch my-token | ||
136 | 72 | 03: 'ubuntu-advantage enable-livepatch my-token' | ||
137 | 73 | """)], | ||
142 | 74 | 'frequency': PER_INSTANCE, | 64 | 'frequency': PER_INSTANCE, |
143 | 75 | 'type': 'object', | 65 | 'type': 'object', |
144 | 76 | 'properties': { | 66 | 'properties': { |
146 | 77 | 'ubuntu-advantage': { | 67 | 'ubuntu_advantage': { |
147 | 78 | 'type': 'object', | 68 | 'type': 'object', |
148 | 79 | 'properties': { | 69 | 'properties': { |
159 | 80 | 'commands': { | 70 | 'enable': { |
160 | 81 | 'type': ['object', 'array'], # Array of strings or dict | 71 | 'type': 'array', |
161 | 82 | 'items': { | 72 | 'items': {'type': 'string'}, |
162 | 83 | 'oneOf': [ | 73 | }, |
163 | 84 | {'type': 'array', 'items': {'type': 'string'}}, | 74 | 'token': { |
164 | 85 | {'type': 'string'}] | 75 | 'type': 'string', |
165 | 86 | }, | 76 | 'description': ( |
166 | 87 | 'additionalItems': False, # Reject non-string & non-list | 77 | 'A contract token obtained from %s.' % UA_URL) |
157 | 88 | 'minItems': 1, | ||
158 | 89 | 'minProperties': 1, | ||
167 | 90 | } | 78 | } |
168 | 91 | }, | 79 | }, |
171 | 92 | 'additionalProperties': False, # Reject keys not in schema | 80 | 'required': ['token'], |
172 | 93 | 'required': ['commands'] | 81 | 'additionalProperties': False |
173 | 94 | } | 82 | } |
174 | 95 | } | 83 | } |
175 | 96 | } | 84 | } |
176 | 97 | 85 | ||
177 | 98 | # TODO schema for 'assertions' and 'commands' are too permissive at the moment. | ||
178 | 99 | # Once python-jsonschema supports schema draft 6 add support for arbitrary | ||
179 | 100 | # object keys with 'patternProperties' constraint to validate string values. | ||
180 | 101 | |||
181 | 102 | __doc__ = get_schema_doc(schema) # Supplement python help() | 86 | __doc__ = get_schema_doc(schema) # Supplement python help() |
182 | 103 | 87 | ||
188 | 104 | UA_CMD = "ubuntu-advantage" | 88 | LOG = logging.getLogger(__name__) |
184 | 105 | |||
185 | 106 | |||
186 | 107 | def run_commands(commands): | ||
187 | 108 | """Run the commands provided in ubuntu-advantage:commands config. | ||
189 | 109 | 89 | ||
190 | 110 | Commands are run individually. Any errors are collected and reported | ||
191 | 111 | after attempting all commands. | ||
192 | 112 | 90 | ||
221 | 113 | @param commands: A list or dict containing commands to run. Keys of a | 91 | def configure_ua(token=None, enable=None): |
222 | 114 | dict will be used to order the commands provided as dict values. | 92 | """Call ua commandline client to attach or enable services.""" |
223 | 115 | """ | 93 | error = None |
224 | 116 | if not commands: | 94 | if not token: |
225 | 117 | return | 95 | error = ('ubuntu_advantage: token must be provided') |
226 | 118 | LOG.debug('Running user-provided ubuntu-advantage commands') | 96 | LOG.error(error) |
227 | 119 | if isinstance(commands, dict): | 97 | raise RuntimeError(error) |
228 | 120 | # Sort commands based on dictionary key | 98 | |
229 | 121 | commands = [v for _, v in sorted(commands.items())] | 99 | if enable is None: |
230 | 122 | elif not isinstance(commands, list): | 100 | enable = [] |
231 | 123 | raise TypeError( | 101 | elif isinstance(enable, six.string_types): |
232 | 124 | 'commands parameter was not a list or dict: {commands}'.format( | 102 | LOG.warning('ubuntu_advantage: enable should be a list, not' |
233 | 125 | commands=commands)) | 103 | ' a string; treating as a single enable') |
234 | 126 | 104 | enable = [enable] | |
235 | 127 | fixed_ua_commands = prepend_base_command('ubuntu-advantage', commands) | 105 | elif not isinstance(enable, list): |
236 | 128 | 106 | LOG.warning('ubuntu_advantage: enable should be a list, not' | |
237 | 129 | cmd_failures = [] | 107 | ' a %s; skipping enabling services', |
238 | 130 | for command in fixed_ua_commands: | 108 | type(enable).__name__) |
239 | 131 | shell = isinstance(command, str) | 109 | enable = [] |
240 | 132 | try: | 110 | |
241 | 133 | util.subp(command, shell=shell, status_cb=sys.stderr.write) | 111 | attach_cmd = ['ua', 'attach', token] |
242 | 134 | except util.ProcessExecutionError as e: | 112 | LOG.debug('Attaching to Ubuntu Advantage. %s', ' '.join(attach_cmd)) |
243 | 135 | cmd_failures.append(str(e)) | 113 | try: |
244 | 136 | if cmd_failures: | 114 | util.subp(attach_cmd) |
245 | 137 | msg = ( | 115 | except util.ProcessExecutionError as e: |
246 | 138 | 'Failures running ubuntu-advantage commands:\n' | 116 | msg = 'Failure attaching Ubuntu Advantage:\n{error}'.format( |
247 | 139 | '{cmd_failures}'.format( | 117 | error=str(e)) |
220 | 140 | cmd_failures=cmd_failures)) | ||
248 | 141 | util.logexc(LOG, msg) | 118 | util.logexc(LOG, msg) |
249 | 142 | raise RuntimeError(msg) | 119 | raise RuntimeError(msg) |
250 | 120 | enable_errors = [] | ||
251 | 121 | for service in enable: | ||
252 | 122 | try: | ||
253 | 123 | cmd = ['ua', 'enable', service] | ||
254 | 124 | util.subp(cmd, capture=True) | ||
255 | 125 | except util.ProcessExecutionError as e: | ||
256 | 126 | enable_errors.append((service, e)) | ||
257 | 127 | if enable_errors: | ||
258 | 128 | for service, error in enable_errors: | ||
259 | 129 | msg = 'Failure enabling "{service}":\n{error}'.format( | ||
260 | 130 | service=service, error=str(error)) | ||
261 | 131 | util.logexc(LOG, msg) | ||
262 | 132 | raise RuntimeError( | ||
263 | 133 | 'Failure enabling Ubuntu Advantage service(s): {}'.format( | ||
264 | 134 | ', '.join('"{}"'.format(service) | ||
265 | 135 | for service, _ in enable_errors))) | ||
266 | 143 | 136 | ||
267 | 144 | 137 | ||
268 | 145 | def maybe_install_ua_tools(cloud): | 138 | def maybe_install_ua_tools(cloud): |
269 | 146 | """Install ubuntu-advantage-tools if not present.""" | 139 | """Install ubuntu-advantage-tools if not present.""" |
271 | 147 | if util.which('ubuntu-advantage'): | 140 | if util.which('ua'): |
272 | 148 | return | 141 | return |
273 | 149 | try: | 142 | try: |
274 | 150 | cloud.distro.update_package_sources() | 143 | cloud.distro.update_package_sources() |
275 | @@ -159,14 +152,28 @@ def maybe_install_ua_tools(cloud): | |||
276 | 159 | 152 | ||
277 | 160 | 153 | ||
278 | 161 | def handle(name, cfg, cloud, log, args): | 154 | def handle(name, cfg, cloud, log, args): |
283 | 162 | cfgin = cfg.get('ubuntu-advantage') | 155 | ua_section = None |
284 | 163 | if cfgin is None: | 156 | if 'ubuntu-advantage' in cfg: |
285 | 164 | LOG.debug(("Skipping module named %s," | 157 | LOG.warning('Deprecated configuration key "ubuntu-advantage" provided.' |
286 | 165 | " no 'ubuntu-advantage' key in configuration"), name) | 158 | ' Expected underscore delimited "ubuntu_advantage"; will' |
287 | 159 | ' attempt to continue.') | ||
288 | 160 | ua_section = cfg['ubuntu-advantage'] | ||
289 | 161 | if 'ubuntu_advantage' in cfg: | ||
290 | 162 | ua_section = cfg['ubuntu_advantage'] | ||
291 | 163 | if ua_section is None: | ||
292 | 164 | LOG.debug("Skipping module named %s," | ||
293 | 165 | " no 'ubuntu_advantage' configuration found", name) | ||
294 | 166 | return | 166 | return |
295 | 167 | |||
296 | 168 | validate_cloudconfig_schema(cfg, schema) | 167 | validate_cloudconfig_schema(cfg, schema) |
297 | 168 | if 'commands' in ua_section: | ||
298 | 169 | msg = ( | ||
299 | 170 | 'Deprecated configuration "ubuntu-advantage: commands" provided.' | ||
300 | 171 | ' Expected "token"') | ||
301 | 172 | LOG.error(msg) | ||
302 | 173 | raise RuntimeError(msg) | ||
303 | 174 | |||
304 | 169 | maybe_install_ua_tools(cloud) | 175 | maybe_install_ua_tools(cloud) |
306 | 170 | run_commands(cfgin.get('commands', [])) | 176 | configure_ua(token=ua_section.get('token'), |
307 | 177 | enable=ua_section.get('enable')) | ||
308 | 171 | 178 | ||
309 | 172 | # vi: ts=4 expandtab | 179 | # vi: ts=4 expandtab |
310 | diff --git a/cloudinit/config/cc_ubuntu_drivers.py b/cloudinit/config/cc_ubuntu_drivers.py | |||
311 | 173 | new file mode 100644 | 180 | new file mode 100644 |
312 | index 0000000..91feb60 | |||
313 | --- /dev/null | |||
314 | +++ b/cloudinit/config/cc_ubuntu_drivers.py | |||
315 | @@ -0,0 +1,112 @@ | |||
316 | 1 | # This file is part of cloud-init. See LICENSE file for license information. | ||
317 | 2 | |||
318 | 3 | """Ubuntu Drivers: Interact with third party drivers in Ubuntu.""" | ||
319 | 4 | |||
320 | 5 | from textwrap import dedent | ||
321 | 6 | |||
322 | 7 | from cloudinit.config.schema import ( | ||
323 | 8 | get_schema_doc, validate_cloudconfig_schema) | ||
324 | 9 | from cloudinit import log as logging | ||
325 | 10 | from cloudinit.settings import PER_INSTANCE | ||
326 | 11 | from cloudinit import type_utils | ||
327 | 12 | from cloudinit import util | ||
328 | 13 | |||
329 | 14 | LOG = logging.getLogger(__name__) | ||
330 | 15 | |||
331 | 16 | frequency = PER_INSTANCE | ||
332 | 17 | distros = ['ubuntu'] | ||
333 | 18 | schema = { | ||
334 | 19 | 'id': 'cc_ubuntu_drivers', | ||
335 | 20 | 'name': 'Ubuntu Drivers', | ||
336 | 21 | 'title': 'Interact with third party drivers in Ubuntu.', | ||
337 | 22 | 'description': dedent("""\ | ||
338 | 23 | This module interacts with the 'ubuntu-drivers' command to install | ||
339 | 24 | third party driver packages."""), | ||
340 | 25 | 'distros': distros, | ||
341 | 26 | 'examples': [dedent("""\ | ||
342 | 27 | drivers: | ||
343 | 28 | nvidia: | ||
344 | 29 | license-accepted: true | ||
345 | 30 | """)], | ||
346 | 31 | 'frequency': frequency, | ||
347 | 32 | 'type': 'object', | ||
348 | 33 | 'properties': { | ||
349 | 34 | 'drivers': { | ||
350 | 35 | 'type': 'object', | ||
351 | 36 | 'additionalProperties': False, | ||
352 | 37 | 'properties': { | ||
353 | 38 | 'nvidia': { | ||
354 | 39 | 'type': 'object', | ||
355 | 40 | 'additionalProperties': False, | ||
356 | 41 | 'required': ['license-accepted'], | ||
357 | 42 | 'properties': { | ||
358 | 43 | 'license-accepted': { | ||
359 | 44 | 'type': 'boolean', | ||
360 | 45 | 'description': ("Do you accept the NVIDIA driver" | ||
361 | 46 | " license?"), | ||
362 | 47 | }, | ||
363 | 48 | 'version': { | ||
364 | 49 | 'type': 'string', | ||
365 | 50 | 'description': ( | ||
366 | 51 | 'The version of the driver to install (e.g.' | ||
367 | 52 | ' "390", "410"). Defaults to the latest' | ||
368 | 53 | ' version.'), | ||
369 | 54 | }, | ||
370 | 55 | }, | ||
371 | 56 | }, | ||
372 | 57 | }, | ||
373 | 58 | }, | ||
374 | 59 | }, | ||
375 | 60 | } | ||
376 | 61 | OLD_UBUNTU_DRIVERS_STDERR_NEEDLE = ( | ||
377 | 62 | "ubuntu-drivers: error: argument <command>: invalid choice: 'install'") | ||
378 | 63 | |||
379 | 64 | __doc__ = get_schema_doc(schema) # Supplement python help() | ||
380 | 65 | |||
381 | 66 | |||
382 | 67 | def install_drivers(cfg, pkg_install_func): | ||
383 | 68 | if not isinstance(cfg, dict): | ||
384 | 69 | raise TypeError( | ||
385 | 70 | "'drivers' config expected dict, found '%s': %s" % | ||
386 | 71 | (type_utils.obj_name(cfg), cfg)) | ||
387 | 72 | |||
388 | 73 | cfgpath = 'nvidia/license-accepted' | ||
389 | 74 | # Call translate_bool to ensure that we treat string values like "yes" as | ||
390 | 75 | # acceptance and _don't_ treat string values like "nah" as acceptance | ||
391 | 76 | # because they're True-ish | ||
392 | 77 | nv_acc = util.translate_bool(util.get_cfg_by_path(cfg, cfgpath)) | ||
393 | 78 | if not nv_acc: | ||
394 | 79 | LOG.debug("Not installing NVIDIA drivers. %s=%s", cfgpath, nv_acc) | ||
395 | 80 | return | ||
396 | 81 | |||
397 | 82 | if not util.which('ubuntu-drivers'): | ||
398 | 83 | LOG.debug("'ubuntu-drivers' command not available. " | ||
399 | 84 | "Installing ubuntu-drivers-common") | ||
400 | 85 | pkg_install_func(['ubuntu-drivers-common']) | ||
401 | 86 | |||
402 | 87 | driver_arg = 'nvidia' | ||
403 | 88 | version_cfg = util.get_cfg_by_path(cfg, 'nvidia/version') | ||
404 | 89 | if version_cfg: | ||
405 | 90 | driver_arg += ':{}'.format(version_cfg) | ||
406 | 91 | |||
407 | 92 | LOG.debug("Installing NVIDIA drivers (%s=%s, version=%s)", | ||
408 | 93 | cfgpath, nv_acc, version_cfg if version_cfg else 'latest') | ||
409 | 94 | |||
410 | 95 | try: | ||
411 | 96 | util.subp(['ubuntu-drivers', 'install', '--gpgpu', driver_arg]) | ||
412 | 97 | except util.ProcessExecutionError as exc: | ||
413 | 98 | if OLD_UBUNTU_DRIVERS_STDERR_NEEDLE in exc.stderr: | ||
414 | 99 | LOG.warning('the available version of ubuntu-drivers is' | ||
415 | 100 | ' too old to perform requested driver installation') | ||
416 | 101 | elif 'No drivers found for installation.' in exc.stdout: | ||
417 | 102 | LOG.warning('ubuntu-drivers found no drivers for installation') | ||
418 | 103 | raise | ||
419 | 104 | |||
420 | 105 | |||
421 | 106 | def handle(name, cfg, cloud, log, _args): | ||
422 | 107 | if "drivers" not in cfg: | ||
423 | 108 | log.debug("Skipping module named %s, no 'drivers' key in config", name) | ||
424 | 109 | return | ||
425 | 110 | |||
426 | 111 | validate_cloudconfig_schema(cfg, schema) | ||
427 | 112 | install_drivers(cfg['drivers'], cloud.distro.install_packages) | ||
428 | diff --git a/cloudinit/config/tests/test_ubuntu_advantage.py b/cloudinit/config/tests/test_ubuntu_advantage.py | |||
429 | index b7cf9be..8c4161e 100644 | |||
430 | --- a/cloudinit/config/tests/test_ubuntu_advantage.py | |||
431 | +++ b/cloudinit/config/tests/test_ubuntu_advantage.py | |||
432 | @@ -1,10 +1,7 @@ | |||
433 | 1 | # This file is part of cloud-init. See LICENSE file for license information. | 1 | # This file is part of cloud-init. See LICENSE file for license information. |
434 | 2 | 2 | ||
435 | 3 | import re | ||
436 | 4 | from six import StringIO | ||
437 | 5 | |||
438 | 6 | from cloudinit.config.cc_ubuntu_advantage import ( | 3 | from cloudinit.config.cc_ubuntu_advantage import ( |
440 | 7 | handle, maybe_install_ua_tools, run_commands, schema) | 4 | configure_ua, handle, maybe_install_ua_tools, schema) |
441 | 8 | from cloudinit.config.schema import validate_cloudconfig_schema | 5 | from cloudinit.config.schema import validate_cloudconfig_schema |
442 | 9 | from cloudinit import util | 6 | from cloudinit import util |
443 | 10 | from cloudinit.tests.helpers import ( | 7 | from cloudinit.tests.helpers import ( |
444 | @@ -20,90 +17,120 @@ class FakeCloud(object): | |||
445 | 20 | self.distro = distro | 17 | self.distro = distro |
446 | 21 | 18 | ||
447 | 22 | 19 | ||
449 | 23 | class TestRunCommands(CiTestCase): | 20 | class TestConfigureUA(CiTestCase): |
450 | 24 | 21 | ||
451 | 25 | with_logs = True | 22 | with_logs = True |
452 | 26 | allowed_subp = [CiTestCase.SUBP_SHELL_TRUE] | 23 | allowed_subp = [CiTestCase.SUBP_SHELL_TRUE] |
453 | 27 | 24 | ||
454 | 28 | def setUp(self): | 25 | def setUp(self): |
456 | 29 | super(TestRunCommands, self).setUp() | 26 | super(TestConfigureUA, self).setUp() |
457 | 30 | self.tmp = self.tmp_dir() | 27 | self.tmp = self.tmp_dir() |
458 | 31 | 28 | ||
459 | 32 | @mock.patch('%s.util.subp' % MPATH) | 29 | @mock.patch('%s.util.subp' % MPATH) |
470 | 33 | def test_run_commands_on_empty_list(self, m_subp): | 30 | def test_configure_ua_attach_error(self, m_subp): |
471 | 34 | """When provided with an empty list, run_commands does nothing.""" | 31 | """Errors from ua attach command are raised.""" |
472 | 35 | run_commands([]) | 32 | m_subp.side_effect = util.ProcessExecutionError( |
473 | 36 | self.assertEqual('', self.logs.getvalue()) | 33 | 'Invalid token SomeToken') |
474 | 37 | m_subp.assert_not_called() | 34 | with self.assertRaises(RuntimeError) as context_manager: |
475 | 38 | 35 | configure_ua(token='SomeToken') | |
466 | 39 | def test_run_commands_on_non_list_or_dict(self): | ||
467 | 40 | """When provided an invalid type, run_commands raises an error.""" | ||
468 | 41 | with self.assertRaises(TypeError) as context_manager: | ||
469 | 42 | run_commands(commands="I'm Not Valid") | ||
476 | 43 | self.assertEqual( | 36 | self.assertEqual( |
478 | 44 | "commands parameter was not a list or dict: I'm Not Valid", | 37 | 'Failure attaching Ubuntu Advantage:\nUnexpected error while' |
479 | 38 | ' running command.\nCommand: -\nExit code: -\nReason: -\n' | ||
480 | 39 | 'Stdout: Invalid token SomeToken\nStderr: -', | ||
481 | 45 | str(context_manager.exception)) | 40 | str(context_manager.exception)) |
482 | 46 | 41 | ||
519 | 47 | def test_run_command_logs_commands_and_exit_codes_to_stderr(self): | 42 | @mock.patch('%s.util.subp' % MPATH) |
520 | 48 | """All exit codes are logged to stderr.""" | 43 | def test_configure_ua_attach_with_token(self, m_subp): |
521 | 49 | outfile = self.tmp_path('output.log', dir=self.tmp) | 44 | """When token is provided, attach the machine to ua using the token.""" |
522 | 50 | 45 | configure_ua(token='SomeToken') | |
523 | 51 | cmd1 = 'echo "HI" >> %s' % outfile | 46 | m_subp.assert_called_once_with(['ua', 'attach', 'SomeToken']) |
524 | 52 | cmd2 = 'bogus command' | 47 | self.assertEqual( |
525 | 53 | cmd3 = 'echo "MOM" >> %s' % outfile | 48 | 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n', |
526 | 54 | commands = [cmd1, cmd2, cmd3] | 49 | self.logs.getvalue()) |
527 | 55 | 50 | ||
528 | 56 | mock_path = '%s.sys.stderr' % MPATH | 51 | @mock.patch('%s.util.subp' % MPATH) |
529 | 57 | with mock.patch(mock_path, new_callable=StringIO) as m_stderr: | 52 | def test_configure_ua_attach_on_service_error(self, m_subp): |
530 | 58 | with self.assertRaises(RuntimeError) as context_manager: | 53 | """all services should be enabled and then any failures raised""" |
495 | 59 | run_commands(commands=commands) | ||
496 | 60 | |||
497 | 61 | self.assertIsNotNone( | ||
498 | 62 | re.search(r'bogus: (command )?not found', | ||
499 | 63 | str(context_manager.exception)), | ||
500 | 64 | msg='Expected bogus command not found') | ||
501 | 65 | expected_stderr_log = '\n'.join([ | ||
502 | 66 | 'Begin run command: {cmd}'.format(cmd=cmd1), | ||
503 | 67 | 'End run command: exit(0)', | ||
504 | 68 | 'Begin run command: {cmd}'.format(cmd=cmd2), | ||
505 | 69 | 'ERROR: End run command: exit(127)', | ||
506 | 70 | 'Begin run command: {cmd}'.format(cmd=cmd3), | ||
507 | 71 | 'End run command: exit(0)\n']) | ||
508 | 72 | self.assertEqual(expected_stderr_log, m_stderr.getvalue()) | ||
509 | 73 | |||
510 | 74 | def test_run_command_as_lists(self): | ||
511 | 75 | """When commands are specified as a list, run them in order.""" | ||
512 | 76 | outfile = self.tmp_path('output.log', dir=self.tmp) | ||
513 | 77 | |||
514 | 78 | cmd1 = 'echo "HI" >> %s' % outfile | ||
515 | 79 | cmd2 = 'echo "MOM" >> %s' % outfile | ||
516 | 80 | commands = [cmd1, cmd2] | ||
517 | 81 | with mock.patch('%s.sys.stderr' % MPATH, new_callable=StringIO): | ||
518 | 82 | run_commands(commands=commands) | ||
531 | 83 | 54 | ||
532 | 55 | def fake_subp(cmd, capture=None): | ||
533 | 56 | fail_cmds = [['ua', 'enable', svc] for svc in ['esm', 'cc']] | ||
534 | 57 | if cmd in fail_cmds and capture: | ||
535 | 58 | svc = cmd[-1] | ||
536 | 59 | raise util.ProcessExecutionError( | ||
537 | 60 | 'Invalid {} credentials'.format(svc.upper())) | ||
538 | 61 | |||
539 | 62 | m_subp.side_effect = fake_subp | ||
540 | 63 | |||
541 | 64 | with self.assertRaises(RuntimeError) as context_manager: | ||
542 | 65 | configure_ua(token='SomeToken', enable=['esm', 'cc', 'fips']) | ||
543 | 66 | self.assertEqual( | ||
544 | 67 | m_subp.call_args_list, | ||
545 | 68 | [mock.call(['ua', 'attach', 'SomeToken']), | ||
546 | 69 | mock.call(['ua', 'enable', 'esm'], capture=True), | ||
547 | 70 | mock.call(['ua', 'enable', 'cc'], capture=True), | ||
548 | 71 | mock.call(['ua', 'enable', 'fips'], capture=True)]) | ||
549 | 84 | self.assertIn( | 72 | self.assertIn( |
551 | 85 | 'DEBUG: Running user-provided ubuntu-advantage commands', | 73 | 'WARNING: Failure enabling "esm":\nUnexpected error' |
552 | 74 | ' while running command.\nCommand: -\nExit code: -\nReason: -\n' | ||
553 | 75 | 'Stdout: Invalid ESM credentials\nStderr: -\n', | ||
554 | 86 | self.logs.getvalue()) | 76 | self.logs.getvalue()) |
555 | 87 | self.assertEqual('HI\nMOM\n', util.load_file(outfile)) | ||
556 | 88 | self.assertIn( | 77 | self.assertIn( |
559 | 89 | 'WARNING: Non-ubuntu-advantage commands in ubuntu-advantage' | 78 | 'WARNING: Failure enabling "cc":\nUnexpected error' |
560 | 90 | ' config:', | 79 | ' while running command.\nCommand: -\nExit code: -\nReason: -\n' |
561 | 80 | 'Stdout: Invalid CC credentials\nStderr: -\n', | ||
562 | 81 | self.logs.getvalue()) | ||
563 | 82 | self.assertEqual( | ||
564 | 83 | 'Failure enabling Ubuntu Advantage service(s): "esm", "cc"', | ||
565 | 84 | str(context_manager.exception)) | ||
566 | 85 | |||
567 | 86 | @mock.patch('%s.util.subp' % MPATH) | ||
568 | 87 | def test_configure_ua_attach_with_empty_services(self, m_subp): | ||
569 | 88 | """When services is an empty list, do not auto-enable attach.""" | ||
570 | 89 | configure_ua(token='SomeToken', enable=[]) | ||
571 | 90 | m_subp.assert_called_once_with(['ua', 'attach', 'SomeToken']) | ||
572 | 91 | self.assertEqual( | ||
573 | 92 | 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n', | ||
574 | 91 | self.logs.getvalue()) | 93 | self.logs.getvalue()) |
575 | 92 | 94 | ||
584 | 93 | def test_run_command_dict_sorted_as_command_script(self): | 95 | @mock.patch('%s.util.subp' % MPATH) |
585 | 94 | """When commands are a dict, sort them and run.""" | 96 | def test_configure_ua_attach_with_specific_services(self, m_subp): |
586 | 95 | outfile = self.tmp_path('output.log', dir=self.tmp) | 97 | """When services a list, only enable specific services.""" |
587 | 96 | cmd1 = 'echo "HI" >> %s' % outfile | 98 | configure_ua(token='SomeToken', enable=['fips']) |
588 | 97 | cmd2 = 'echo "MOM" >> %s' % outfile | 99 | self.assertEqual( |
589 | 98 | commands = {'02': cmd1, '01': cmd2} | 100 | m_subp.call_args_list, |
590 | 99 | with mock.patch('%s.sys.stderr' % MPATH, new_callable=StringIO): | 101 | [mock.call(['ua', 'attach', 'SomeToken']), |
591 | 100 | run_commands(commands=commands) | 102 | mock.call(['ua', 'enable', 'fips'], capture=True)]) |
592 | 103 | self.assertEqual( | ||
593 | 104 | 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n', | ||
594 | 105 | self.logs.getvalue()) | ||
595 | 106 | |||
596 | 107 | @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock()) | ||
597 | 108 | @mock.patch('%s.util.subp' % MPATH) | ||
598 | 109 | def test_configure_ua_attach_with_string_services(self, m_subp): | ||
599 | 110 | """When services a string, treat as singleton list and warn""" | ||
600 | 111 | configure_ua(token='SomeToken', enable='fips') | ||
601 | 112 | self.assertEqual( | ||
602 | 113 | m_subp.call_args_list, | ||
603 | 114 | [mock.call(['ua', 'attach', 'SomeToken']), | ||
604 | 115 | mock.call(['ua', 'enable', 'fips'], capture=True)]) | ||
605 | 116 | self.assertEqual( | ||
606 | 117 | 'WARNING: ubuntu_advantage: enable should be a list, not a' | ||
607 | 118 | ' string; treating as a single enable\n' | ||
608 | 119 | 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n', | ||
609 | 120 | self.logs.getvalue()) | ||
610 | 101 | 121 | ||
616 | 102 | expected_messages = [ | 122 | @mock.patch('%s.util.subp' % MPATH) |
617 | 103 | 'DEBUG: Running user-provided ubuntu-advantage commands'] | 123 | def test_configure_ua_attach_with_weird_services(self, m_subp): |
618 | 104 | for message in expected_messages: | 124 | """When services not string or list, warn but still attach""" |
619 | 105 | self.assertIn(message, self.logs.getvalue()) | 125 | configure_ua(token='SomeToken', enable={'deffo': 'wont work'}) |
620 | 106 | self.assertEqual('MOM\nHI\n', util.load_file(outfile)) | 126 | self.assertEqual( |
621 | 127 | m_subp.call_args_list, | ||
622 | 128 | [mock.call(['ua', 'attach', 'SomeToken'])]) | ||
623 | 129 | self.assertEqual( | ||
624 | 130 | 'WARNING: ubuntu_advantage: enable should be a list, not a' | ||
625 | 131 | ' dict; skipping enabling services\n' | ||
626 | 132 | 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n', | ||
627 | 133 | self.logs.getvalue()) | ||
628 | 107 | 134 | ||
629 | 108 | 135 | ||
630 | 109 | @skipUnlessJsonSchema() | 136 | @skipUnlessJsonSchema() |
631 | @@ -112,90 +139,50 @@ class TestSchema(CiTestCase, SchemaTestCaseMixin): | |||
632 | 112 | with_logs = True | 139 | with_logs = True |
633 | 113 | schema = schema | 140 | schema = schema |
634 | 114 | 141 | ||
638 | 115 | def test_schema_warns_on_ubuntu_advantage_not_as_dict(self): | 142 | @mock.patch('%s.maybe_install_ua_tools' % MPATH) |
639 | 116 | """If ubuntu-advantage configuration is not a dict, emit a warning.""" | 143 | @mock.patch('%s.configure_ua' % MPATH) |
640 | 117 | validate_cloudconfig_schema({'ubuntu-advantage': 'wrong type'}, schema) | 144 | def test_schema_warns_on_ubuntu_advantage_not_dict(self, _cfg, _): |
641 | 145 | """If ubuntu_advantage configuration is not a dict, emit a warning.""" | ||
642 | 146 | validate_cloudconfig_schema({'ubuntu_advantage': 'wrong type'}, schema) | ||
643 | 118 | self.assertEqual( | 147 | self.assertEqual( |
645 | 119 | "WARNING: Invalid config:\nubuntu-advantage: 'wrong type' is not" | 148 | "WARNING: Invalid config:\nubuntu_advantage: 'wrong type' is not" |
646 | 120 | " of type 'object'\n", | 149 | " of type 'object'\n", |
647 | 121 | self.logs.getvalue()) | 150 | self.logs.getvalue()) |
648 | 122 | 151 | ||
652 | 123 | @mock.patch('%s.run_commands' % MPATH) | 152 | @mock.patch('%s.maybe_install_ua_tools' % MPATH) |
653 | 124 | def test_schema_disallows_unknown_keys(self, _): | 153 | @mock.patch('%s.configure_ua' % MPATH) |
654 | 125 | """Unknown keys in ubuntu-advantage configuration emit warnings.""" | 154 | def test_schema_disallows_unknown_keys(self, _cfg, _): |
655 | 155 | """Unknown keys in ubuntu_advantage configuration emit warnings.""" | ||
656 | 126 | validate_cloudconfig_schema( | 156 | validate_cloudconfig_schema( |
658 | 127 | {'ubuntu-advantage': {'commands': ['ls'], 'invalid-key': ''}}, | 157 | {'ubuntu_advantage': {'token': 'winner', 'invalid-key': ''}}, |
659 | 128 | schema) | 158 | schema) |
660 | 129 | self.assertIn( | 159 | self.assertIn( |
662 | 130 | 'WARNING: Invalid config:\nubuntu-advantage: Additional properties' | 160 | 'WARNING: Invalid config:\nubuntu_advantage: Additional properties' |
663 | 131 | " are not allowed ('invalid-key' was unexpected)", | 161 | " are not allowed ('invalid-key' was unexpected)", |
664 | 132 | self.logs.getvalue()) | 162 | self.logs.getvalue()) |
665 | 133 | 163 | ||
678 | 134 | def test_warn_schema_requires_commands(self): | 164 | @mock.patch('%s.maybe_install_ua_tools' % MPATH) |
679 | 135 | """Warn when ubuntu-advantage configuration lacks commands.""" | 165 | @mock.patch('%s.configure_ua' % MPATH) |
680 | 136 | validate_cloudconfig_schema( | 166 | def test_warn_schema_requires_token(self, _cfg, _): |
681 | 137 | {'ubuntu-advantage': {}}, schema) | 167 | """Warn if ubuntu_advantage configuration lacks token.""" |
670 | 138 | self.assertEqual( | ||
671 | 139 | "WARNING: Invalid config:\nubuntu-advantage: 'commands' is a" | ||
672 | 140 | " required property\n", | ||
673 | 141 | self.logs.getvalue()) | ||
674 | 142 | |||
675 | 143 | @mock.patch('%s.run_commands' % MPATH) | ||
676 | 144 | def test_warn_schema_commands_is_not_list_or_dict(self, _): | ||
677 | 145 | """Warn when ubuntu-advantage:commands config is not a list or dict.""" | ||
682 | 146 | validate_cloudconfig_schema( | 168 | validate_cloudconfig_schema( |
684 | 147 | {'ubuntu-advantage': {'commands': 'broken'}}, schema) | 169 | {'ubuntu_advantage': {'enable': ['esm']}}, schema) |
685 | 148 | self.assertEqual( | 170 | self.assertEqual( |
689 | 149 | "WARNING: Invalid config:\nubuntu-advantage.commands: 'broken' is" | 171 | "WARNING: Invalid config:\nubuntu_advantage:" |
690 | 150 | " not of type 'object', 'array'\n", | 172 | " 'token' is a required property\n", self.logs.getvalue()) |
688 | 151 | self.logs.getvalue()) | ||
691 | 152 | 173 | ||
697 | 153 | @mock.patch('%s.run_commands' % MPATH) | 174 | @mock.patch('%s.maybe_install_ua_tools' % MPATH) |
698 | 154 | def test_warn_schema_when_commands_is_empty(self, _): | 175 | @mock.patch('%s.configure_ua' % MPATH) |
699 | 155 | """Emit warnings when ubuntu-advantage:commands is empty.""" | 176 | def test_warn_schema_services_is_not_list_or_dict(self, _cfg, _): |
700 | 156 | validate_cloudconfig_schema( | 177 | """Warn when ubuntu_advantage:enable config is not a list.""" |
696 | 157 | {'ubuntu-advantage': {'commands': []}}, schema) | ||
701 | 158 | validate_cloudconfig_schema( | 178 | validate_cloudconfig_schema( |
703 | 159 | {'ubuntu-advantage': {'commands': {}}}, schema) | 179 | {'ubuntu_advantage': {'enable': 'needslist'}}, schema) |
704 | 160 | self.assertEqual( | 180 | self.assertEqual( |
708 | 161 | "WARNING: Invalid config:\nubuntu-advantage.commands: [] is too" | 181 | "WARNING: Invalid config:\nubuntu_advantage: 'token' is a" |
709 | 162 | " short\nWARNING: Invalid config:\nubuntu-advantage.commands: {}" | 182 | " required property\nubuntu_advantage.enable: 'needslist'" |
710 | 163 | " does not have enough properties\n", | 183 | " is not of type 'array'\n", |
711 | 164 | self.logs.getvalue()) | 184 | self.logs.getvalue()) |
712 | 165 | 185 | ||
713 | 166 | @mock.patch('%s.run_commands' % MPATH) | ||
714 | 167 | def test_schema_when_commands_are_list_or_dict(self, _): | ||
715 | 168 | """No warnings when ubuntu-advantage:commands are a list or dict.""" | ||
716 | 169 | validate_cloudconfig_schema( | ||
717 | 170 | {'ubuntu-advantage': {'commands': ['valid']}}, schema) | ||
718 | 171 | validate_cloudconfig_schema( | ||
719 | 172 | {'ubuntu-advantage': {'commands': {'01': 'also valid'}}}, schema) | ||
720 | 173 | self.assertEqual('', self.logs.getvalue()) | ||
721 | 174 | |||
722 | 175 | def test_duplicates_are_fine_array_array(self): | ||
723 | 176 | """Duplicated commands array/array entries are allowed.""" | ||
724 | 177 | self.assertSchemaValid( | ||
725 | 178 | {'commands': [["echo", "bye"], ["echo" "bye"]]}, | ||
726 | 179 | "command entries can be duplicate.") | ||
727 | 180 | |||
728 | 181 | def test_duplicates_are_fine_array_string(self): | ||
729 | 182 | """Duplicated commands array/string entries are allowed.""" | ||
730 | 183 | self.assertSchemaValid( | ||
731 | 184 | {'commands': ["echo bye", "echo bye"]}, | ||
732 | 185 | "command entries can be duplicate.") | ||
733 | 186 | |||
734 | 187 | def test_duplicates_are_fine_dict_array(self): | ||
735 | 188 | """Duplicated commands dict/array entries are allowed.""" | ||
736 | 189 | self.assertSchemaValid( | ||
737 | 190 | {'commands': {'00': ["echo", "bye"], '01': ["echo", "bye"]}}, | ||
738 | 191 | "command entries can be duplicate.") | ||
739 | 192 | |||
740 | 193 | def test_duplicates_are_fine_dict_string(self): | ||
741 | 194 | """Duplicated commands dict/string entries are allowed.""" | ||
742 | 195 | self.assertSchemaValid( | ||
743 | 196 | {'commands': {'00': "echo bye", '01': "echo bye"}}, | ||
744 | 197 | "command entries can be duplicate.") | ||
745 | 198 | |||
746 | 199 | 186 | ||
747 | 200 | class TestHandle(CiTestCase): | 187 | class TestHandle(CiTestCase): |
748 | 201 | 188 | ||
749 | @@ -205,41 +192,89 @@ class TestHandle(CiTestCase): | |||
750 | 205 | super(TestHandle, self).setUp() | 192 | super(TestHandle, self).setUp() |
751 | 206 | self.tmp = self.tmp_dir() | 193 | self.tmp = self.tmp_dir() |
752 | 207 | 194 | ||
753 | 208 | @mock.patch('%s.run_commands' % MPATH) | ||
754 | 209 | @mock.patch('%s.validate_cloudconfig_schema' % MPATH) | 195 | @mock.patch('%s.validate_cloudconfig_schema' % MPATH) |
756 | 210 | def test_handle_no_config(self, m_schema, m_run): | 196 | def test_handle_no_config(self, m_schema): |
757 | 211 | """When no ua-related configuration is provided, nothing happens.""" | 197 | """When no ua-related configuration is provided, nothing happens.""" |
758 | 212 | cfg = {} | 198 | cfg = {} |
759 | 213 | handle('ua-test', cfg=cfg, cloud=None, log=self.logger, args=None) | 199 | handle('ua-test', cfg=cfg, cloud=None, log=self.logger, args=None) |
760 | 214 | self.assertIn( | 200 | self.assertIn( |
763 | 215 | "DEBUG: Skipping module named ua-test, no 'ubuntu-advantage' key" | 201 | "DEBUG: Skipping module named ua-test, no 'ubuntu_advantage'" |
764 | 216 | " in config", | 202 | ' configuration found', |
765 | 217 | self.logs.getvalue()) | 203 | self.logs.getvalue()) |
766 | 218 | m_schema.assert_not_called() | 204 | m_schema.assert_not_called() |
767 | 219 | m_run.assert_not_called() | ||
768 | 220 | 205 | ||
769 | 206 | @mock.patch('%s.configure_ua' % MPATH) | ||
770 | 221 | @mock.patch('%s.maybe_install_ua_tools' % MPATH) | 207 | @mock.patch('%s.maybe_install_ua_tools' % MPATH) |
772 | 222 | def test_handle_tries_to_install_ubuntu_advantage_tools(self, m_install): | 208 | def test_handle_tries_to_install_ubuntu_advantage_tools( |
773 | 209 | self, m_install, m_cfg): | ||
774 | 223 | """If ubuntu_advantage is provided, try installing ua-tools package.""" | 210 | """If ubuntu_advantage is provided, try installing ua-tools package.""" |
776 | 224 | cfg = {'ubuntu-advantage': {}} | 211 | cfg = {'ubuntu_advantage': {'token': 'valid'}} |
777 | 225 | mycloud = FakeCloud(None) | 212 | mycloud = FakeCloud(None) |
778 | 226 | handle('nomatter', cfg=cfg, cloud=mycloud, log=self.logger, args=None) | 213 | handle('nomatter', cfg=cfg, cloud=mycloud, log=self.logger, args=None) |
779 | 227 | m_install.assert_called_once_with(mycloud) | 214 | m_install.assert_called_once_with(mycloud) |
780 | 228 | 215 | ||
781 | 216 | @mock.patch('%s.configure_ua' % MPATH) | ||
782 | 229 | @mock.patch('%s.maybe_install_ua_tools' % MPATH) | 217 | @mock.patch('%s.maybe_install_ua_tools' % MPATH) |
786 | 230 | def test_handle_runs_commands_provided(self, m_install): | 218 | def test_handle_passes_credentials_and_services_to_configure_ua( |
787 | 231 | """When commands are specified as a list, run them.""" | 219 | self, m_install, m_configure_ua): |
788 | 232 | outfile = self.tmp_path('output.log', dir=self.tmp) | 220 | """All ubuntu_advantage config keys are passed to configure_ua.""" |
789 | 221 | cfg = {'ubuntu_advantage': {'token': 'token', 'enable': ['esm']}} | ||
790 | 222 | handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) | ||
791 | 223 | m_configure_ua.assert_called_once_with( | ||
792 | 224 | token='token', enable=['esm']) | ||
793 | 225 | |||
794 | 226 | @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock()) | ||
795 | 227 | @mock.patch('%s.configure_ua' % MPATH) | ||
796 | 228 | def test_handle_warns_on_deprecated_ubuntu_advantage_key_w_config( | ||
797 | 229 | self, m_configure_ua): | ||
798 | 230 | """Warning when ubuntu-advantage key is present with new config""" | ||
799 | 231 | cfg = {'ubuntu-advantage': {'token': 'token', 'enable': ['esm']}} | ||
800 | 232 | handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) | ||
801 | 233 | self.assertEqual( | ||
802 | 234 | 'WARNING: Deprecated configuration key "ubuntu-advantage"' | ||
803 | 235 | ' provided. Expected underscore delimited "ubuntu_advantage";' | ||
804 | 236 | ' will attempt to continue.', | ||
805 | 237 | self.logs.getvalue().splitlines()[0]) | ||
806 | 238 | m_configure_ua.assert_called_once_with( | ||
807 | 239 | token='token', enable=['esm']) | ||
808 | 240 | |||
809 | 241 | def test_handle_error_on_deprecated_commands_key_dashed(self): | ||
810 | 242 | """Error when commands is present in ubuntu-advantage key.""" | ||
811 | 243 | cfg = {'ubuntu-advantage': {'commands': 'nogo'}} | ||
812 | 244 | with self.assertRaises(RuntimeError) as context_manager: | ||
813 | 245 | handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) | ||
814 | 246 | self.assertEqual( | ||
815 | 247 | 'Deprecated configuration "ubuntu-advantage: commands" provided.' | ||
816 | 248 | ' Expected "token"', | ||
817 | 249 | str(context_manager.exception)) | ||
818 | 250 | |||
819 | 251 | def test_handle_error_on_deprecated_commands_key_underscored(self): | ||
820 | 252 | """Error when commands is present in ubuntu_advantage key.""" | ||
821 | 253 | cfg = {'ubuntu_advantage': {'commands': 'nogo'}} | ||
822 | 254 | with self.assertRaises(RuntimeError) as context_manager: | ||
823 | 255 | handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) | ||
824 | 256 | self.assertEqual( | ||
825 | 257 | 'Deprecated configuration "ubuntu-advantage: commands" provided.' | ||
826 | 258 | ' Expected "token"', | ||
827 | 259 | str(context_manager.exception)) | ||
828 | 233 | 260 | ||
829 | 261 | @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock()) | ||
830 | 262 | @mock.patch('%s.configure_ua' % MPATH) | ||
831 | 263 | def test_handle_prefers_new_style_config( | ||
832 | 264 | self, m_configure_ua): | ||
833 | 265 | """ubuntu_advantage should be preferred over ubuntu-advantage""" | ||
834 | 234 | cfg = { | 266 | cfg = { |
843 | 235 | 'ubuntu-advantage': {'commands': ['echo "HI" >> %s' % outfile, | 267 | 'ubuntu-advantage': {'token': 'nope', 'enable': ['wrong']}, |
844 | 236 | 'echo "MOM" >> %s' % outfile]}} | 268 | 'ubuntu_advantage': {'token': 'token', 'enable': ['esm']}, |
845 | 237 | mock_path = '%s.sys.stderr' % MPATH | 269 | } |
846 | 238 | with self.allow_subp([CiTestCase.SUBP_SHELL_TRUE]): | 270 | handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) |
847 | 239 | with mock.patch(mock_path, new_callable=StringIO): | 271 | self.assertEqual( |
848 | 240 | handle('nomatter', cfg=cfg, cloud=None, log=self.logger, | 272 | 'WARNING: Deprecated configuration key "ubuntu-advantage"' |
849 | 241 | args=None) | 273 | ' provided. Expected underscore delimited "ubuntu_advantage";' |
850 | 242 | self.assertEqual('HI\nMOM\n', util.load_file(outfile)) | 274 | ' will attempt to continue.', |
851 | 275 | self.logs.getvalue().splitlines()[0]) | ||
852 | 276 | m_configure_ua.assert_called_once_with( | ||
853 | 277 | token='token', enable=['esm']) | ||
854 | 243 | 278 | ||
855 | 244 | 279 | ||
856 | 245 | class TestMaybeInstallUATools(CiTestCase): | 280 | class TestMaybeInstallUATools(CiTestCase): |
857 | @@ -253,7 +288,7 @@ class TestMaybeInstallUATools(CiTestCase): | |||
858 | 253 | @mock.patch('%s.util.which' % MPATH) | 288 | @mock.patch('%s.util.which' % MPATH) |
859 | 254 | def test_maybe_install_ua_tools_noop_when_ua_tools_present(self, m_which): | 289 | def test_maybe_install_ua_tools_noop_when_ua_tools_present(self, m_which): |
860 | 255 | """Do nothing if ubuntu-advantage-tools already exists.""" | 290 | """Do nothing if ubuntu-advantage-tools already exists.""" |
862 | 256 | m_which.return_value = '/usr/bin/ubuntu-advantage' # already installed | 291 | m_which.return_value = '/usr/bin/ua' # already installed |
863 | 257 | distro = mock.MagicMock() | 292 | distro = mock.MagicMock() |
864 | 258 | distro.update_package_sources.side_effect = RuntimeError( | 293 | distro.update_package_sources.side_effect = RuntimeError( |
865 | 259 | 'Some apt error') | 294 | 'Some apt error') |
866 | diff --git a/cloudinit/config/tests/test_ubuntu_drivers.py b/cloudinit/config/tests/test_ubuntu_drivers.py | |||
867 | 260 | new file mode 100644 | 295 | new file mode 100644 |
868 | index 0000000..efba4ce | |||
869 | --- /dev/null | |||
870 | +++ b/cloudinit/config/tests/test_ubuntu_drivers.py | |||
871 | @@ -0,0 +1,174 @@ | |||
872 | 1 | # This file is part of cloud-init. See LICENSE file for license information. | ||
873 | 2 | |||
874 | 3 | import copy | ||
875 | 4 | |||
876 | 5 | from cloudinit.tests.helpers import CiTestCase, skipUnlessJsonSchema, mock | ||
877 | 6 | from cloudinit.config.schema import ( | ||
878 | 7 | SchemaValidationError, validate_cloudconfig_schema) | ||
879 | 8 | from cloudinit.config import cc_ubuntu_drivers as drivers | ||
880 | 9 | from cloudinit.util import ProcessExecutionError | ||
881 | 10 | |||
882 | 11 | MPATH = "cloudinit.config.cc_ubuntu_drivers." | ||
883 | 12 | OLD_UBUNTU_DRIVERS_ERROR_STDERR = ( | ||
884 | 13 | "ubuntu-drivers: error: argument <command>: invalid choice: 'install' " | ||
885 | 14 | "(choose from 'list', 'autoinstall', 'devices', 'debug')\n") | ||
886 | 15 | |||
887 | 16 | |||
888 | 17 | class TestUbuntuDrivers(CiTestCase): | ||
889 | 18 | cfg_accepted = {'drivers': {'nvidia': {'license-accepted': True}}} | ||
890 | 19 | install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia'] | ||
891 | 20 | |||
892 | 21 | with_logs = True | ||
893 | 22 | |||
894 | 23 | @skipUnlessJsonSchema() | ||
895 | 24 | def test_schema_requires_boolean_for_license_accepted(self): | ||
896 | 25 | with self.assertRaisesRegex( | ||
897 | 26 | SchemaValidationError, ".*license-accepted.*TRUE.*boolean"): | ||
898 | 27 | validate_cloudconfig_schema( | ||
899 | 28 | {'drivers': {'nvidia': {'license-accepted': "TRUE"}}}, | ||
900 | 29 | schema=drivers.schema, strict=True) | ||
901 | 30 | |||
902 | 31 | @mock.patch(MPATH + "util.subp", return_value=('', '')) | ||
903 | 32 | @mock.patch(MPATH + "util.which", return_value=False) | ||
904 | 33 | def _assert_happy_path_taken(self, config, m_which, m_subp): | ||
905 | 34 | """Positive path test through handle. Package should be installed.""" | ||
906 | 35 | myCloud = mock.MagicMock() | ||
907 | 36 | drivers.handle('ubuntu_drivers', config, myCloud, None, None) | ||
908 | 37 | self.assertEqual([mock.call(['ubuntu-drivers-common'])], | ||
909 | 38 | myCloud.distro.install_packages.call_args_list) | ||
910 | 39 | self.assertEqual([mock.call(self.install_gpgpu)], | ||
911 | 40 | m_subp.call_args_list) | ||
912 | 41 | |||
913 | 42 | def test_handle_does_package_install(self): | ||
914 | 43 | self._assert_happy_path_taken(self.cfg_accepted) | ||
915 | 44 | |||
916 | 45 | def test_trueish_strings_are_considered_approval(self): | ||
917 | 46 | for true_value in ['yes', 'true', 'on', '1']: | ||
918 | 47 | new_config = copy.deepcopy(self.cfg_accepted) | ||
919 | 48 | new_config['drivers']['nvidia']['license-accepted'] = true_value | ||
920 | 49 | self._assert_happy_path_taken(new_config) | ||
921 | 50 | |||
922 | 51 | @mock.patch(MPATH + "util.subp", side_effect=ProcessExecutionError( | ||
923 | 52 | stdout='No drivers found for installation.\n', exit_code=1)) | ||
924 | 53 | @mock.patch(MPATH + "util.which", return_value=False) | ||
925 | 54 | def test_handle_raises_error_if_no_drivers_found(self, m_which, m_subp): | ||
926 | 55 | """If ubuntu-drivers doesn't install any drivers, raise an error.""" | ||
927 | 56 | myCloud = mock.MagicMock() | ||
928 | 57 | with self.assertRaises(Exception): | ||
929 | 58 | drivers.handle( | ||
930 | 59 | 'ubuntu_drivers', self.cfg_accepted, myCloud, None, None) | ||
931 | 60 | self.assertEqual([mock.call(['ubuntu-drivers-common'])], | ||
932 | 61 | myCloud.distro.install_packages.call_args_list) | ||
933 | 62 | self.assertEqual([mock.call(self.install_gpgpu)], | ||
934 | 63 | m_subp.call_args_list) | ||
935 | 64 | self.assertIn('ubuntu-drivers found no drivers for installation', | ||
936 | 65 | self.logs.getvalue()) | ||
937 | 66 | |||
938 | 67 | @mock.patch(MPATH + "util.subp", return_value=('', '')) | ||
939 | 68 | @mock.patch(MPATH + "util.which", return_value=False) | ||
940 | 69 | def _assert_inert_with_config(self, config, m_which, m_subp): | ||
941 | 70 | """Helper to reduce repetition when testing negative cases""" | ||
942 | 71 | myCloud = mock.MagicMock() | ||
943 | 72 | drivers.handle('ubuntu_drivers', config, myCloud, None, None) | ||
944 | 73 | self.assertEqual(0, myCloud.distro.install_packages.call_count) | ||
945 | 74 | self.assertEqual(0, m_subp.call_count) | ||
946 | 75 | |||
947 | 76 | def test_handle_inert_if_license_not_accepted(self): | ||
948 | 77 | """Ensure we don't do anything if the license is rejected.""" | ||
949 | 78 | self._assert_inert_with_config( | ||
950 | 79 | {'drivers': {'nvidia': {'license-accepted': False}}}) | ||
951 | 80 | |||
952 | 81 | def test_handle_inert_if_garbage_in_license_field(self): | ||
953 | 82 | """Ensure we don't do anything if unknown text is in license field.""" | ||
954 | 83 | self._assert_inert_with_config( | ||
955 | 84 | {'drivers': {'nvidia': {'license-accepted': 'garbage'}}}) | ||
956 | 85 | |||
957 | 86 | def test_handle_inert_if_no_license_key(self): | ||
958 | 87 | """Ensure we don't do anything if no license key.""" | ||
959 | 88 | self._assert_inert_with_config({'drivers': {'nvidia': {}}}) | ||
960 | 89 | |||
961 | 90 | def test_handle_inert_if_no_nvidia_key(self): | ||
962 | 91 | """Ensure we don't do anything if other license accepted.""" | ||
963 | 92 | self._assert_inert_with_config( | ||
964 | 93 | {'drivers': {'acme': {'license-accepted': True}}}) | ||
965 | 94 | |||
966 | 95 | def test_handle_inert_if_string_given(self): | ||
967 | 96 | """Ensure we don't do anything if string refusal given.""" | ||
968 | 97 | for false_value in ['no', 'false', 'off', '0']: | ||
969 | 98 | self._assert_inert_with_config( | ||
970 | 99 | {'drivers': {'nvidia': {'license-accepted': false_value}}}) | ||
971 | 100 | |||
972 | 101 | @mock.patch(MPATH + "install_drivers") | ||
973 | 102 | def test_handle_no_drivers_does_nothing(self, m_install_drivers): | ||
974 | 103 | """If no 'drivers' key in the config, nothing should be done.""" | ||
975 | 104 | myCloud = mock.MagicMock() | ||
976 | 105 | myLog = mock.MagicMock() | ||
977 | 106 | drivers.handle('ubuntu_drivers', {'foo': 'bzr'}, myCloud, myLog, None) | ||
978 | 107 | self.assertIn('Skipping module named', | ||
979 | 108 | myLog.debug.call_args_list[0][0][0]) | ||
980 | 109 | self.assertEqual(0, m_install_drivers.call_count) | ||
981 | 110 | |||
982 | 111 | @mock.patch(MPATH + "util.subp", return_value=('', '')) | ||
983 | 112 | @mock.patch(MPATH + "util.which", return_value=True) | ||
984 | 113 | def test_install_drivers_no_install_if_present(self, m_which, m_subp): | ||
985 | 114 | """If 'ubuntu-drivers' is present, no package install should occur.""" | ||
986 | 115 | pkg_install = mock.MagicMock() | ||
987 | 116 | drivers.install_drivers(self.cfg_accepted['drivers'], | ||
988 | 117 | pkg_install_func=pkg_install) | ||
989 | 118 | self.assertEqual(0, pkg_install.call_count) | ||
990 | 119 | self.assertEqual([mock.call('ubuntu-drivers')], | ||
991 | 120 | m_which.call_args_list) | ||
992 | 121 | self.assertEqual([mock.call(self.install_gpgpu)], | ||
993 | 122 | m_subp.call_args_list) | ||
994 | 123 | |||
995 | 124 | def test_install_drivers_rejects_invalid_config(self): | ||
996 | 125 | """install_drivers should raise TypeError if not given a config dict""" | ||
997 | 126 | pkg_install = mock.MagicMock() | ||
998 | 127 | with self.assertRaisesRegex(TypeError, ".*expected dict.*"): | ||
999 | 128 | drivers.install_drivers("mystring", pkg_install_func=pkg_install) | ||
1000 | 129 | self.assertEqual(0, pkg_install.call_count) | ||
1001 | 130 | |||
1002 | 131 | @mock.patch(MPATH + "util.subp", side_effect=ProcessExecutionError( | ||
1003 | 132 | stderr=OLD_UBUNTU_DRIVERS_ERROR_STDERR, exit_code=2)) | ||
1004 | 133 | @mock.patch(MPATH + "util.which", return_value=False) | ||
1005 | 134 | def test_install_drivers_handles_old_ubuntu_drivers_gracefully( | ||
1006 | 135 | self, m_which, m_subp): | ||
1007 | 136 | """Older ubuntu-drivers versions should emit message and raise error""" | ||
1008 | 137 | myCloud = mock.MagicMock() | ||
1009 | 138 | with self.assertRaises(Exception): | ||
1010 | 139 | drivers.handle( | ||
1011 | 140 | 'ubuntu_drivers', self.cfg_accepted, myCloud, None, None) | ||
1012 | 141 | self.assertEqual([mock.call(['ubuntu-drivers-common'])], | ||
1013 | 142 | myCloud.distro.install_packages.call_args_list) | ||
1014 | 143 | self.assertEqual([mock.call(self.install_gpgpu)], | ||
1015 | 144 | m_subp.call_args_list) | ||
1016 | 145 | self.assertIn('WARNING: the available version of ubuntu-drivers is' | ||
1017 | 146 | ' too old to perform requested driver installation', | ||
1018 | 147 | self.logs.getvalue()) | ||
1019 | 148 | |||
1020 | 149 | |||
1021 | 150 | # Sub-class TestUbuntuDrivers to run the same test cases, but with a version | ||
1022 | 151 | class TestUbuntuDriversWithVersion(TestUbuntuDrivers): | ||
1023 | 152 | cfg_accepted = { | ||
1024 | 153 | 'drivers': {'nvidia': {'license-accepted': True, 'version': '123'}}} | ||
1025 | 154 | install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia:123'] | ||
1026 | 155 | |||
1027 | 156 | @mock.patch(MPATH + "util.subp", return_value=('', '')) | ||
1028 | 157 | @mock.patch(MPATH + "util.which", return_value=False) | ||
1029 | 158 | def test_version_none_uses_latest(self, m_which, m_subp): | ||
1030 | 159 | myCloud = mock.MagicMock() | ||
1031 | 160 | version_none_cfg = { | ||
1032 | 161 | 'drivers': {'nvidia': {'license-accepted': True, 'version': None}}} | ||
1033 | 162 | drivers.handle( | ||
1034 | 163 | 'ubuntu_drivers', version_none_cfg, myCloud, None, None) | ||
1035 | 164 | self.assertEqual( | ||
1036 | 165 | [mock.call(['ubuntu-drivers', 'install', '--gpgpu', 'nvidia'])], | ||
1037 | 166 | m_subp.call_args_list) | ||
1038 | 167 | |||
1039 | 168 | def test_specifying_a_version_doesnt_override_license_acceptance(self): | ||
1040 | 169 | self._assert_inert_with_config({ | ||
1041 | 170 | 'drivers': {'nvidia': {'license-accepted': False, | ||
1042 | 171 | 'version': '123'}} | ||
1043 | 172 | }) | ||
1044 | 173 | |||
1045 | 174 | # vi: ts=4 expandtab | ||
1046 | diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py | |||
1047 | index 6423632..b129bb6 100644 | |||
1048 | --- a/cloudinit/net/eni.py | |||
1049 | +++ b/cloudinit/net/eni.py | |||
1050 | @@ -366,8 +366,6 @@ class Renderer(renderer.Renderer): | |||
1051 | 366 | down = indent + "pre-down route del" | 366 | down = indent + "pre-down route del" |
1052 | 367 | or_true = " || true" | 367 | or_true = " || true" |
1053 | 368 | mapping = { | 368 | mapping = { |
1054 | 369 | 'network': '-net', | ||
1055 | 370 | 'netmask': 'netmask', | ||
1056 | 371 | 'gateway': 'gw', | 369 | 'gateway': 'gw', |
1057 | 372 | 'metric': 'metric', | 370 | 'metric': 'metric', |
1058 | 373 | } | 371 | } |
1059 | @@ -379,13 +377,21 @@ class Renderer(renderer.Renderer): | |||
1060 | 379 | default_gw = ' -A inet6 default' | 377 | default_gw = ' -A inet6 default' |
1061 | 380 | 378 | ||
1062 | 381 | route_line = '' | 379 | route_line = '' |
1065 | 382 | for k in ['network', 'netmask', 'gateway', 'metric']: | 380 | for k in ['network', 'gateway', 'metric']: |
1066 | 383 | if default_gw and k in ['network', 'netmask']: | 381 | if default_gw and k == 'network': |
1067 | 384 | continue | 382 | continue |
1068 | 385 | if k == 'gateway': | 383 | if k == 'gateway': |
1069 | 386 | route_line += '%s %s %s' % (default_gw, mapping[k], route[k]) | 384 | route_line += '%s %s %s' % (default_gw, mapping[k], route[k]) |
1070 | 387 | elif k in route: | 385 | elif k in route: |
1072 | 388 | route_line += ' %s %s' % (mapping[k], route[k]) | 386 | if k == 'network': |
1073 | 387 | if ':' in route[k]: | ||
1074 | 388 | route_line += ' -A inet6' | ||
1075 | 389 | else: | ||
1076 | 390 | route_line += ' -net' | ||
1077 | 391 | if 'prefix' in route: | ||
1078 | 392 | route_line += ' %s/%s' % (route[k], route['prefix']) | ||
1079 | 393 | else: | ||
1080 | 394 | route_line += ' %s %s' % (mapping[k], route[k]) | ||
1081 | 389 | content.append(up + route_line + or_true) | 395 | content.append(up + route_line + or_true) |
1082 | 390 | content.append(down + route_line + or_true) | 396 | content.append(down + route_line + or_true) |
1083 | 391 | return content | 397 | return content |
1084 | diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py | |||
1085 | index 539b76d..4d19f56 100644 | |||
1086 | --- a/cloudinit/net/network_state.py | |||
1087 | +++ b/cloudinit/net/network_state.py | |||
1088 | @@ -148,6 +148,7 @@ class NetworkState(object): | |||
1089 | 148 | self._network_state = copy.deepcopy(network_state) | 148 | self._network_state = copy.deepcopy(network_state) |
1090 | 149 | self._version = version | 149 | self._version = version |
1091 | 150 | self.use_ipv6 = network_state.get('use_ipv6', False) | 150 | self.use_ipv6 = network_state.get('use_ipv6', False) |
1092 | 151 | self._has_default_route = None | ||
1093 | 151 | 152 | ||
1094 | 152 | @property | 153 | @property |
1095 | 153 | def config(self): | 154 | def config(self): |
1096 | @@ -157,14 +158,6 @@ class NetworkState(object): | |||
1097 | 157 | def version(self): | 158 | def version(self): |
1098 | 158 | return self._version | 159 | return self._version |
1099 | 159 | 160 | ||
1100 | 160 | def iter_routes(self, filter_func=None): | ||
1101 | 161 | for route in self._network_state.get('routes', []): | ||
1102 | 162 | if filter_func is not None: | ||
1103 | 163 | if filter_func(route): | ||
1104 | 164 | yield route | ||
1105 | 165 | else: | ||
1106 | 166 | yield route | ||
1107 | 167 | |||
1108 | 168 | @property | 161 | @property |
1109 | 169 | def dns_nameservers(self): | 162 | def dns_nameservers(self): |
1110 | 170 | try: | 163 | try: |
1111 | @@ -179,6 +172,12 @@ class NetworkState(object): | |||
1112 | 179 | except KeyError: | 172 | except KeyError: |
1113 | 180 | return [] | 173 | return [] |
1114 | 181 | 174 | ||
1115 | 175 | @property | ||
1116 | 176 | def has_default_route(self): | ||
1117 | 177 | if self._has_default_route is None: | ||
1118 | 178 | self._has_default_route = self._maybe_has_default_route() | ||
1119 | 179 | return self._has_default_route | ||
1120 | 180 | |||
1121 | 182 | def iter_interfaces(self, filter_func=None): | 181 | def iter_interfaces(self, filter_func=None): |
1122 | 183 | ifaces = self._network_state.get('interfaces', {}) | 182 | ifaces = self._network_state.get('interfaces', {}) |
1123 | 184 | for iface in six.itervalues(ifaces): | 183 | for iface in six.itervalues(ifaces): |
1124 | @@ -188,6 +187,32 @@ class NetworkState(object): | |||
1125 | 188 | if filter_func(iface): | 187 | if filter_func(iface): |
1126 | 189 | yield iface | 188 | yield iface |
1127 | 190 | 189 | ||
1128 | 190 | def iter_routes(self, filter_func=None): | ||
1129 | 191 | for route in self._network_state.get('routes', []): | ||
1130 | 192 | if filter_func is not None: | ||
1131 | 193 | if filter_func(route): | ||
1132 | 194 | yield route | ||
1133 | 195 | else: | ||
1134 | 196 | yield route | ||
1135 | 197 | |||
1136 | 198 | def _maybe_has_default_route(self): | ||
1137 | 199 | for route in self.iter_routes(): | ||
1138 | 200 | if self._is_default_route(route): | ||
1139 | 201 | return True | ||
1140 | 202 | for iface in self.iter_interfaces(): | ||
1141 | 203 | for subnet in iface.get('subnets', []): | ||
1142 | 204 | for route in subnet.get('routes', []): | ||
1143 | 205 | if self._is_default_route(route): | ||
1144 | 206 | return True | ||
1145 | 207 | return False | ||
1146 | 208 | |||
1147 | 209 | def _is_default_route(self, route): | ||
1148 | 210 | default_nets = ('::', '0.0.0.0') | ||
1149 | 211 | return ( | ||
1150 | 212 | route.get('prefix') == 0 | ||
1151 | 213 | and route.get('network') in default_nets | ||
1152 | 214 | ) | ||
1153 | 215 | |||
1154 | 191 | 216 | ||
1155 | 192 | @six.add_metaclass(CommandHandlerMeta) | 217 | @six.add_metaclass(CommandHandlerMeta) |
1156 | 193 | class NetworkStateInterpreter(object): | 218 | class NetworkStateInterpreter(object): |
1157 | diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py | |||
1158 | index 19b3e60..0998392 100644 | |||
1159 | --- a/cloudinit/net/sysconfig.py | |||
1160 | +++ b/cloudinit/net/sysconfig.py | |||
1161 | @@ -322,7 +322,7 @@ class Renderer(renderer.Renderer): | |||
1162 | 322 | iface_cfg[new_key] = old_value | 322 | iface_cfg[new_key] = old_value |
1163 | 323 | 323 | ||
1164 | 324 | @classmethod | 324 | @classmethod |
1166 | 325 | def _render_subnets(cls, iface_cfg, subnets): | 325 | def _render_subnets(cls, iface_cfg, subnets, has_default_route): |
1167 | 326 | # setting base values | 326 | # setting base values |
1168 | 327 | iface_cfg['BOOTPROTO'] = 'none' | 327 | iface_cfg['BOOTPROTO'] = 'none' |
1169 | 328 | 328 | ||
1170 | @@ -331,6 +331,7 @@ class Renderer(renderer.Renderer): | |||
1171 | 331 | mtu_key = 'MTU' | 331 | mtu_key = 'MTU' |
1172 | 332 | subnet_type = subnet.get('type') | 332 | subnet_type = subnet.get('type') |
1173 | 333 | if subnet_type == 'dhcp6': | 333 | if subnet_type == 'dhcp6': |
1174 | 334 | # TODO need to set BOOTPROTO to dhcp6 on SUSE | ||
1175 | 334 | iface_cfg['IPV6INIT'] = True | 335 | iface_cfg['IPV6INIT'] = True |
1176 | 335 | iface_cfg['DHCPV6C'] = True | 336 | iface_cfg['DHCPV6C'] = True |
1177 | 336 | elif subnet_type in ['dhcp4', 'dhcp']: | 337 | elif subnet_type in ['dhcp4', 'dhcp']: |
1178 | @@ -375,9 +376,9 @@ class Renderer(renderer.Renderer): | |||
1179 | 375 | ipv6_index = -1 | 376 | ipv6_index = -1 |
1180 | 376 | for i, subnet in enumerate(subnets, start=len(iface_cfg.children)): | 377 | for i, subnet in enumerate(subnets, start=len(iface_cfg.children)): |
1181 | 377 | subnet_type = subnet.get('type') | 378 | subnet_type = subnet.get('type') |
1185 | 378 | if subnet_type == 'dhcp6': | 379 | if subnet_type in ['dhcp', 'dhcp4', 'dhcp6']: |
1186 | 379 | continue | 380 | if has_default_route and iface_cfg['BOOTPROTO'] != 'none': |
1187 | 380 | elif subnet_type in ['dhcp4', 'dhcp']: | 381 | iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = False |
1188 | 381 | continue | 382 | continue |
1189 | 382 | elif subnet_type == 'static': | 383 | elif subnet_type == 'static': |
1190 | 383 | if subnet_is_ipv6(subnet): | 384 | if subnet_is_ipv6(subnet): |
1191 | @@ -385,10 +386,13 @@ class Renderer(renderer.Renderer): | |||
1192 | 385 | ipv6_cidr = "%s/%s" % (subnet['address'], subnet['prefix']) | 386 | ipv6_cidr = "%s/%s" % (subnet['address'], subnet['prefix']) |
1193 | 386 | if ipv6_index == 0: | 387 | if ipv6_index == 0: |
1194 | 387 | iface_cfg['IPV6ADDR'] = ipv6_cidr | 388 | iface_cfg['IPV6ADDR'] = ipv6_cidr |
1195 | 389 | iface_cfg['IPADDR6'] = ipv6_cidr | ||
1196 | 388 | elif ipv6_index == 1: | 390 | elif ipv6_index == 1: |
1197 | 389 | iface_cfg['IPV6ADDR_SECONDARIES'] = ipv6_cidr | 391 | iface_cfg['IPV6ADDR_SECONDARIES'] = ipv6_cidr |
1198 | 392 | iface_cfg['IPADDR6_0'] = ipv6_cidr | ||
1199 | 390 | else: | 393 | else: |
1200 | 391 | iface_cfg['IPV6ADDR_SECONDARIES'] += " " + ipv6_cidr | 394 | iface_cfg['IPV6ADDR_SECONDARIES'] += " " + ipv6_cidr |
1201 | 395 | iface_cfg['IPADDR6_%d' % ipv6_index] = ipv6_cidr | ||
1202 | 392 | else: | 396 | else: |
1203 | 393 | ipv4_index = ipv4_index + 1 | 397 | ipv4_index = ipv4_index + 1 |
1204 | 394 | suff = "" if ipv4_index == 0 else str(ipv4_index) | 398 | suff = "" if ipv4_index == 0 else str(ipv4_index) |
1205 | @@ -443,6 +447,8 @@ class Renderer(renderer.Renderer): | |||
1206 | 443 | # TODO(harlowja): add validation that no other iface has | 447 | # TODO(harlowja): add validation that no other iface has |
1207 | 444 | # also provided the default route? | 448 | # also provided the default route? |
1208 | 445 | iface_cfg['DEFROUTE'] = True | 449 | iface_cfg['DEFROUTE'] = True |
1209 | 450 | if iface_cfg['BOOTPROTO'] in ('dhcp', 'dhcp4', 'dhcp6'): | ||
1210 | 451 | iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = True | ||
1211 | 446 | if 'gateway' in route: | 452 | if 'gateway' in route: |
1212 | 447 | if is_ipv6 or is_ipv6_addr(route['gateway']): | 453 | if is_ipv6 or is_ipv6_addr(route['gateway']): |
1213 | 448 | iface_cfg['IPV6_DEFAULTGW'] = route['gateway'] | 454 | iface_cfg['IPV6_DEFAULTGW'] = route['gateway'] |
1214 | @@ -493,7 +499,9 @@ class Renderer(renderer.Renderer): | |||
1215 | 493 | iface_cfg = iface_contents[iface_name] | 499 | iface_cfg = iface_contents[iface_name] |
1216 | 494 | route_cfg = iface_cfg.routes | 500 | route_cfg = iface_cfg.routes |
1217 | 495 | 501 | ||
1219 | 496 | cls._render_subnets(iface_cfg, iface_subnets) | 502 | cls._render_subnets( |
1220 | 503 | iface_cfg, iface_subnets, network_state.has_default_route | ||
1221 | 504 | ) | ||
1222 | 497 | cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) | 505 | cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) |
1223 | 498 | 506 | ||
1224 | 499 | @classmethod | 507 | @classmethod |
1225 | @@ -518,7 +526,9 @@ class Renderer(renderer.Renderer): | |||
1226 | 518 | 526 | ||
1227 | 519 | iface_subnets = iface.get("subnets", []) | 527 | iface_subnets = iface.get("subnets", []) |
1228 | 520 | route_cfg = iface_cfg.routes | 528 | route_cfg = iface_cfg.routes |
1230 | 521 | cls._render_subnets(iface_cfg, iface_subnets) | 529 | cls._render_subnets( |
1231 | 530 | iface_cfg, iface_subnets, network_state.has_default_route | ||
1232 | 531 | ) | ||
1233 | 522 | cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) | 532 | cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) |
1234 | 523 | 533 | ||
1235 | 524 | # iter_interfaces on network-state is not sorted to produce | 534 | # iter_interfaces on network-state is not sorted to produce |
1236 | @@ -547,7 +557,9 @@ class Renderer(renderer.Renderer): | |||
1237 | 547 | 557 | ||
1238 | 548 | iface_subnets = iface.get("subnets", []) | 558 | iface_subnets = iface.get("subnets", []) |
1239 | 549 | route_cfg = iface_cfg.routes | 559 | route_cfg = iface_cfg.routes |
1241 | 550 | cls._render_subnets(iface_cfg, iface_subnets) | 560 | cls._render_subnets( |
1242 | 561 | iface_cfg, iface_subnets, network_state.has_default_route | ||
1243 | 562 | ) | ||
1244 | 551 | cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) | 563 | cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) |
1245 | 552 | 564 | ||
1246 | 553 | @staticmethod | 565 | @staticmethod |
1247 | @@ -608,7 +620,9 @@ class Renderer(renderer.Renderer): | |||
1248 | 608 | 620 | ||
1249 | 609 | iface_subnets = iface.get("subnets", []) | 621 | iface_subnets = iface.get("subnets", []) |
1250 | 610 | route_cfg = iface_cfg.routes | 622 | route_cfg = iface_cfg.routes |
1252 | 611 | cls._render_subnets(iface_cfg, iface_subnets) | 623 | cls._render_subnets( |
1253 | 624 | iface_cfg, iface_subnets, network_state.has_default_route | ||
1254 | 625 | ) | ||
1255 | 612 | cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) | 626 | cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) |
1256 | 613 | 627 | ||
1257 | 614 | @classmethod | 628 | @classmethod |
1258 | @@ -620,7 +634,9 @@ class Renderer(renderer.Renderer): | |||
1259 | 620 | iface_cfg.kind = 'infiniband' | 634 | iface_cfg.kind = 'infiniband' |
1260 | 621 | iface_subnets = iface.get("subnets", []) | 635 | iface_subnets = iface.get("subnets", []) |
1261 | 622 | route_cfg = iface_cfg.routes | 636 | route_cfg = iface_cfg.routes |
1263 | 623 | cls._render_subnets(iface_cfg, iface_subnets) | 637 | cls._render_subnets( |
1264 | 638 | iface_cfg, iface_subnets, network_state.has_default_route | ||
1265 | 639 | ) | ||
1266 | 624 | cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) | 640 | cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) |
1267 | 625 | 641 | ||
1268 | 626 | @classmethod | 642 | @classmethod |
1269 | diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py | |||
1270 | 627 | old mode 100644 | 643 | old mode 100644 |
1271 | 628 | new mode 100755 | 644 | new mode 100755 |
1272 | index eccbee5..76b1661 | |||
1273 | --- a/cloudinit/sources/DataSourceAzure.py | |||
1274 | +++ b/cloudinit/sources/DataSourceAzure.py | |||
1275 | @@ -21,10 +21,14 @@ from cloudinit import net | |||
1276 | 21 | from cloudinit.event import EventType | 21 | from cloudinit.event import EventType |
1277 | 22 | from cloudinit.net.dhcp import EphemeralDHCPv4 | 22 | from cloudinit.net.dhcp import EphemeralDHCPv4 |
1278 | 23 | from cloudinit import sources | 23 | from cloudinit import sources |
1279 | 24 | from cloudinit.sources.helpers.azure import get_metadata_from_fabric | ||
1280 | 25 | from cloudinit.sources.helpers import netlink | 24 | from cloudinit.sources.helpers import netlink |
1281 | 26 | from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc | 25 | from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc |
1282 | 27 | from cloudinit import util | 26 | from cloudinit import util |
1283 | 27 | from cloudinit.reporting import events | ||
1284 | 28 | |||
1285 | 29 | from cloudinit.sources.helpers.azure import (azure_ds_reporter, | ||
1286 | 30 | azure_ds_telemetry_reporter, | ||
1287 | 31 | get_metadata_from_fabric) | ||
1288 | 28 | 32 | ||
1289 | 29 | LOG = logging.getLogger(__name__) | 33 | LOG = logging.getLogger(__name__) |
1290 | 30 | 34 | ||
1291 | @@ -54,6 +58,7 @@ REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds" | |||
1292 | 54 | REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready" | 58 | REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready" |
1293 | 55 | AGENT_SEED_DIR = '/var/lib/waagent' | 59 | AGENT_SEED_DIR = '/var/lib/waagent' |
1294 | 56 | IMDS_URL = "http://169.254.169.254/metadata/" | 60 | IMDS_URL = "http://169.254.169.254/metadata/" |
1295 | 61 | PLATFORM_ENTROPY_SOURCE = "/sys/firmware/acpi/tables/OEM0" | ||
1296 | 57 | 62 | ||
1297 | 58 | # List of static scripts and network config artifacts created by | 63 | # List of static scripts and network config artifacts created by |
1298 | 59 | # stock ubuntu suported images. | 64 | # stock ubuntu suported images. |
1299 | @@ -195,6 +200,8 @@ if util.is_FreeBSD(): | |||
1300 | 195 | RESOURCE_DISK_PATH = "/dev/" + res_disk | 200 | RESOURCE_DISK_PATH = "/dev/" + res_disk |
1301 | 196 | else: | 201 | else: |
1302 | 197 | LOG.debug("resource disk is None") | 202 | LOG.debug("resource disk is None") |
1303 | 203 | # TODO Find where platform entropy data is surfaced | ||
1304 | 204 | PLATFORM_ENTROPY_SOURCE = None | ||
1305 | 198 | 205 | ||
1306 | 199 | BUILTIN_DS_CONFIG = { | 206 | BUILTIN_DS_CONFIG = { |
1307 | 200 | 'agent_command': AGENT_START_BUILTIN, | 207 | 'agent_command': AGENT_START_BUILTIN, |
1308 | @@ -241,6 +248,7 @@ def set_hostname(hostname, hostname_command='hostname'): | |||
1309 | 241 | util.subp([hostname_command, hostname]) | 248 | util.subp([hostname_command, hostname]) |
1310 | 242 | 249 | ||
1311 | 243 | 250 | ||
1312 | 251 | @azure_ds_telemetry_reporter | ||
1313 | 244 | @contextlib.contextmanager | 252 | @contextlib.contextmanager |
1314 | 245 | def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'): | 253 | def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'): |
1315 | 246 | """ | 254 | """ |
1316 | @@ -287,6 +295,7 @@ class DataSourceAzure(sources.DataSource): | |||
1317 | 287 | root = sources.DataSource.__str__(self) | 295 | root = sources.DataSource.__str__(self) |
1318 | 288 | return "%s [seed=%s]" % (root, self.seed) | 296 | return "%s [seed=%s]" % (root, self.seed) |
1319 | 289 | 297 | ||
1320 | 298 | @azure_ds_telemetry_reporter | ||
1321 | 290 | def bounce_network_with_azure_hostname(self): | 299 | def bounce_network_with_azure_hostname(self): |
1322 | 291 | # When using cloud-init to provision, we have to set the hostname from | 300 | # When using cloud-init to provision, we have to set the hostname from |
1323 | 292 | # the metadata and "bounce" the network to force DDNS to update via | 301 | # the metadata and "bounce" the network to force DDNS to update via |
1324 | @@ -312,6 +321,7 @@ class DataSourceAzure(sources.DataSource): | |||
1325 | 312 | util.logexc(LOG, "handling set_hostname failed") | 321 | util.logexc(LOG, "handling set_hostname failed") |
1326 | 313 | return False | 322 | return False |
1327 | 314 | 323 | ||
1328 | 324 | @azure_ds_telemetry_reporter | ||
1329 | 315 | def get_metadata_from_agent(self): | 325 | def get_metadata_from_agent(self): |
1330 | 316 | temp_hostname = self.metadata.get('local-hostname') | 326 | temp_hostname = self.metadata.get('local-hostname') |
1331 | 317 | agent_cmd = self.ds_cfg['agent_command'] | 327 | agent_cmd = self.ds_cfg['agent_command'] |
1332 | @@ -341,15 +351,18 @@ class DataSourceAzure(sources.DataSource): | |||
1333 | 341 | LOG.debug("ssh authentication: " | 351 | LOG.debug("ssh authentication: " |
1334 | 342 | "using fingerprint from fabirc") | 352 | "using fingerprint from fabirc") |
1335 | 343 | 353 | ||
1345 | 344 | # wait very long for public SSH keys to arrive | 354 | with events.ReportEventStack( |
1346 | 345 | # https://bugs.launchpad.net/cloud-init/+bug/1717611 | 355 | name="waiting-for-ssh-public-key", |
1347 | 346 | missing = util.log_time(logfunc=LOG.debug, | 356 | description="wait for agents to retrieve ssh keys", |
1348 | 347 | msg="waiting for SSH public key files", | 357 | parent=azure_ds_reporter): |
1349 | 348 | func=util.wait_for_files, | 358 | # wait very long for public SSH keys to arrive |
1350 | 349 | args=(fp_files, 900)) | 359 | # https://bugs.launchpad.net/cloud-init/+bug/1717611 |
1351 | 350 | 360 | missing = util.log_time(logfunc=LOG.debug, | |
1352 | 351 | if len(missing): | 361 | msg="waiting for SSH public key files", |
1353 | 352 | LOG.warning("Did not find files, but going on: %s", missing) | 362 | func=util.wait_for_files, |
1354 | 363 | args=(fp_files, 900)) | ||
1355 | 364 | if len(missing): | ||
1356 | 365 | LOG.warning("Did not find files, but going on: %s", missing) | ||
1357 | 353 | 366 | ||
1358 | 354 | metadata = {} | 367 | metadata = {} |
1359 | 355 | metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files) | 368 | metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files) |
1360 | @@ -363,6 +376,7 @@ class DataSourceAzure(sources.DataSource): | |||
1361 | 363 | subplatform_type = 'seed-dir' | 376 | subplatform_type = 'seed-dir' |
1362 | 364 | return '%s (%s)' % (subplatform_type, self.seed) | 377 | return '%s (%s)' % (subplatform_type, self.seed) |
1363 | 365 | 378 | ||
1364 | 379 | @azure_ds_telemetry_reporter | ||
1365 | 366 | def crawl_metadata(self): | 380 | def crawl_metadata(self): |
1366 | 367 | """Walk all instance metadata sources returning a dict on success. | 381 | """Walk all instance metadata sources returning a dict on success. |
1367 | 368 | 382 | ||
1368 | @@ -464,6 +478,7 @@ class DataSourceAzure(sources.DataSource): | |||
1369 | 464 | super(DataSourceAzure, self).clear_cached_attrs(attr_defaults) | 478 | super(DataSourceAzure, self).clear_cached_attrs(attr_defaults) |
1370 | 465 | self._metadata_imds = sources.UNSET | 479 | self._metadata_imds = sources.UNSET |
1371 | 466 | 480 | ||
1372 | 481 | @azure_ds_telemetry_reporter | ||
1373 | 467 | def _get_data(self): | 482 | def _get_data(self): |
1374 | 468 | """Crawl and process datasource metadata caching metadata as attrs. | 483 | """Crawl and process datasource metadata caching metadata as attrs. |
1375 | 469 | 484 | ||
1376 | @@ -510,6 +525,7 @@ class DataSourceAzure(sources.DataSource): | |||
1377 | 510 | # quickly (local check only) if self.instance_id is still valid | 525 | # quickly (local check only) if self.instance_id is still valid |
1378 | 511 | return sources.instance_id_matches_system_uuid(self.get_instance_id()) | 526 | return sources.instance_id_matches_system_uuid(self.get_instance_id()) |
1379 | 512 | 527 | ||
1380 | 528 | @azure_ds_telemetry_reporter | ||
1381 | 513 | def setup(self, is_new_instance): | 529 | def setup(self, is_new_instance): |
1382 | 514 | if self._negotiated is False: | 530 | if self._negotiated is False: |
1383 | 515 | LOG.debug("negotiating for %s (new_instance=%s)", | 531 | LOG.debug("negotiating for %s (new_instance=%s)", |
1384 | @@ -577,6 +593,7 @@ class DataSourceAzure(sources.DataSource): | |||
1385 | 577 | if nl_sock: | 593 | if nl_sock: |
1386 | 578 | nl_sock.close() | 594 | nl_sock.close() |
1387 | 579 | 595 | ||
1388 | 596 | @azure_ds_telemetry_reporter | ||
1389 | 580 | def _report_ready(self, lease): | 597 | def _report_ready(self, lease): |
1390 | 581 | """Tells the fabric provisioning has completed """ | 598 | """Tells the fabric provisioning has completed """ |
1391 | 582 | try: | 599 | try: |
1392 | @@ -614,9 +631,14 @@ class DataSourceAzure(sources.DataSource): | |||
1393 | 614 | def _reprovision(self): | 631 | def _reprovision(self): |
1394 | 615 | """Initiate the reprovisioning workflow.""" | 632 | """Initiate the reprovisioning workflow.""" |
1395 | 616 | contents = self._poll_imds() | 633 | contents = self._poll_imds() |
1399 | 617 | md, ud, cfg = read_azure_ovf(contents) | 634 | with events.ReportEventStack( |
1400 | 618 | return (md, ud, cfg, {'ovf-env.xml': contents}) | 635 | name="reprovisioning-read-azure-ovf", |
1401 | 619 | 636 | description="read azure ovf during reprovisioning", | |
1402 | 637 | parent=azure_ds_reporter): | ||
1403 | 638 | md, ud, cfg = read_azure_ovf(contents) | ||
1404 | 639 | return (md, ud, cfg, {'ovf-env.xml': contents}) | ||
1405 | 640 | |||
1406 | 641 | @azure_ds_telemetry_reporter | ||
1407 | 620 | def _negotiate(self): | 642 | def _negotiate(self): |
1408 | 621 | """Negotiate with fabric and return data from it. | 643 | """Negotiate with fabric and return data from it. |
1409 | 622 | 644 | ||
1410 | @@ -649,6 +671,7 @@ class DataSourceAzure(sources.DataSource): | |||
1411 | 649 | util.del_file(REPROVISION_MARKER_FILE) | 671 | util.del_file(REPROVISION_MARKER_FILE) |
1412 | 650 | return fabric_data | 672 | return fabric_data |
1413 | 651 | 673 | ||
1414 | 674 | @azure_ds_telemetry_reporter | ||
1415 | 652 | def activate(self, cfg, is_new_instance): | 675 | def activate(self, cfg, is_new_instance): |
1416 | 653 | address_ephemeral_resize(is_new_instance=is_new_instance, | 676 | address_ephemeral_resize(is_new_instance=is_new_instance, |
1417 | 654 | preserve_ntfs=self.ds_cfg.get( | 677 | preserve_ntfs=self.ds_cfg.get( |
1418 | @@ -665,7 +688,7 @@ class DataSourceAzure(sources.DataSource): | |||
1419 | 665 | 2. Generate a fallback network config that does not include any of | 688 | 2. Generate a fallback network config that does not include any of |
1420 | 666 | the blacklisted devices. | 689 | the blacklisted devices. |
1421 | 667 | """ | 690 | """ |
1423 | 668 | if not self._network_config: | 691 | if not self._network_config or self._network_config == sources.UNSET: |
1424 | 669 | if self.ds_cfg.get('apply_network_config'): | 692 | if self.ds_cfg.get('apply_network_config'): |
1425 | 670 | nc_src = self._metadata_imds | 693 | nc_src = self._metadata_imds |
1426 | 671 | else: | 694 | else: |
1427 | @@ -687,12 +710,14 @@ def _partitions_on_device(devpath, maxnum=16): | |||
1428 | 687 | return [] | 710 | return [] |
1429 | 688 | 711 | ||
1430 | 689 | 712 | ||
1431 | 713 | @azure_ds_telemetry_reporter | ||
1432 | 690 | def _has_ntfs_filesystem(devpath): | 714 | def _has_ntfs_filesystem(devpath): |
1433 | 691 | ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True) | 715 | ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True) |
1434 | 692 | LOG.debug('ntfs_devices found = %s', ntfs_devices) | 716 | LOG.debug('ntfs_devices found = %s', ntfs_devices) |
1435 | 693 | return os.path.realpath(devpath) in ntfs_devices | 717 | return os.path.realpath(devpath) in ntfs_devices |
1436 | 694 | 718 | ||
1437 | 695 | 719 | ||
1438 | 720 | @azure_ds_telemetry_reporter | ||
1439 | 696 | def can_dev_be_reformatted(devpath, preserve_ntfs): | 721 | def can_dev_be_reformatted(devpath, preserve_ntfs): |
1440 | 697 | """Determine if the ephemeral drive at devpath should be reformatted. | 722 | """Determine if the ephemeral drive at devpath should be reformatted. |
1441 | 698 | 723 | ||
1442 | @@ -741,43 +766,59 @@ def can_dev_be_reformatted(devpath, preserve_ntfs): | |||
1443 | 741 | (cand_part, cand_path, devpath)) | 766 | (cand_part, cand_path, devpath)) |
1444 | 742 | return False, msg | 767 | return False, msg |
1445 | 743 | 768 | ||
1446 | 769 | @azure_ds_telemetry_reporter | ||
1447 | 744 | def count_files(mp): | 770 | def count_files(mp): |
1448 | 745 | ignored = set(['dataloss_warning_readme.txt']) | 771 | ignored = set(['dataloss_warning_readme.txt']) |
1449 | 746 | return len([f for f in os.listdir(mp) if f.lower() not in ignored]) | 772 | return len([f for f in os.listdir(mp) if f.lower() not in ignored]) |
1450 | 747 | 773 | ||
1451 | 748 | bmsg = ('partition %s (%s) on device %s was ntfs formatted' % | 774 | bmsg = ('partition %s (%s) on device %s was ntfs formatted' % |
1452 | 749 | (cand_part, cand_path, devpath)) | 775 | (cand_part, cand_path, devpath)) |
1469 | 750 | try: | 776 | |
1470 | 751 | file_count = util.mount_cb(cand_path, count_files, mtype="ntfs", | 777 | with events.ReportEventStack( |
1471 | 752 | update_env_for_mount={'LANG': 'C'}) | 778 | name="mount-ntfs-and-count", |
1472 | 753 | except util.MountFailedError as e: | 779 | description="mount-ntfs-and-count", |
1473 | 754 | if "unknown filesystem type 'ntfs'" in str(e): | 780 | parent=azure_ds_reporter) as evt: |
1474 | 755 | return True, (bmsg + ' but this system cannot mount NTFS,' | 781 | try: |
1475 | 756 | ' assuming there are no important files.' | 782 | file_count = util.mount_cb(cand_path, count_files, mtype="ntfs", |
1476 | 757 | ' Formatting allowed.') | 783 | update_env_for_mount={'LANG': 'C'}) |
1477 | 758 | return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e) | 784 | except util.MountFailedError as e: |
1478 | 759 | 785 | evt.description = "cannot mount ntfs" | |
1479 | 760 | if file_count != 0: | 786 | if "unknown filesystem type 'ntfs'" in str(e): |
1480 | 761 | LOG.warning("it looks like you're using NTFS on the ephemeral disk, " | 787 | return True, (bmsg + ' but this system cannot mount NTFS,' |
1481 | 762 | 'to ensure that filesystem does not get wiped, set ' | 788 | ' assuming there are no important files.' |
1482 | 763 | '%s.%s in config', '.'.join(DS_CFG_PATH), | 789 | ' Formatting allowed.') |
1483 | 764 | DS_CFG_KEY_PRESERVE_NTFS) | 790 | return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e) |
1484 | 765 | return False, bmsg + ' but had %d files on it.' % file_count | 791 | |
1485 | 792 | if file_count != 0: | ||
1486 | 793 | evt.description = "mounted and counted %d files" % file_count | ||
1487 | 794 | LOG.warning("it looks like you're using NTFS on the ephemeral" | ||
1488 | 795 | " disk, to ensure that filesystem does not get wiped," | ||
1489 | 796 | " set %s.%s in config", '.'.join(DS_CFG_PATH), | ||
1490 | 797 | DS_CFG_KEY_PRESERVE_NTFS) | ||
1491 | 798 | return False, bmsg + ' but had %d files on it.' % file_count | ||
1492 | 766 | 799 | ||
1493 | 767 | return True, bmsg + ' and had no important files. Safe for reformatting.' | 800 | return True, bmsg + ' and had no important files. Safe for reformatting.' |
1494 | 768 | 801 | ||
1495 | 769 | 802 | ||
1496 | 803 | @azure_ds_telemetry_reporter | ||
1497 | 770 | def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, | 804 | def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, |
1498 | 771 | is_new_instance=False, preserve_ntfs=False): | 805 | is_new_instance=False, preserve_ntfs=False): |
1499 | 772 | # wait for ephemeral disk to come up | 806 | # wait for ephemeral disk to come up |
1500 | 773 | naplen = .2 | 807 | naplen = .2 |
1508 | 774 | missing = util.wait_for_files([devpath], maxwait=maxwait, naplen=naplen, | 808 | with events.ReportEventStack( |
1509 | 775 | log_pre="Azure ephemeral disk: ") | 809 | name="wait-for-ephemeral-disk", |
1510 | 776 | 810 | description="wait for ephemeral disk", | |
1511 | 777 | if missing: | 811 | parent=azure_ds_reporter): |
1512 | 778 | LOG.warning("ephemeral device '%s' did not appear after %d seconds.", | 812 | missing = util.wait_for_files([devpath], |
1513 | 779 | devpath, maxwait) | 813 | maxwait=maxwait, |
1514 | 780 | return | 814 | naplen=naplen, |
1515 | 815 | log_pre="Azure ephemeral disk: ") | ||
1516 | 816 | |||
1517 | 817 | if missing: | ||
1518 | 818 | LOG.warning("ephemeral device '%s' did" | ||
1519 | 819 | " not appear after %d seconds.", | ||
1520 | 820 | devpath, maxwait) | ||
1521 | 821 | return | ||
1522 | 781 | 822 | ||
1523 | 782 | result = False | 823 | result = False |
1524 | 783 | msg = None | 824 | msg = None |
1525 | @@ -805,6 +846,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, | |||
1526 | 805 | return | 846 | return |
1527 | 806 | 847 | ||
1528 | 807 | 848 | ||
1529 | 849 | @azure_ds_telemetry_reporter | ||
1530 | 808 | def perform_hostname_bounce(hostname, cfg, prev_hostname): | 850 | def perform_hostname_bounce(hostname, cfg, prev_hostname): |
1531 | 809 | # set the hostname to 'hostname' if it is not already set to that. | 851 | # set the hostname to 'hostname' if it is not already set to that. |
1532 | 810 | # then, if policy is not off, bounce the interface using command | 852 | # then, if policy is not off, bounce the interface using command |
1533 | @@ -840,6 +882,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname): | |||
1534 | 840 | return True | 882 | return True |
1535 | 841 | 883 | ||
1536 | 842 | 884 | ||
1537 | 885 | @azure_ds_telemetry_reporter | ||
1538 | 843 | def crtfile_to_pubkey(fname, data=None): | 886 | def crtfile_to_pubkey(fname, data=None): |
1539 | 844 | pipeline = ('openssl x509 -noout -pubkey < "$0" |' | 887 | pipeline = ('openssl x509 -noout -pubkey < "$0" |' |
1540 | 845 | 'ssh-keygen -i -m PKCS8 -f /dev/stdin') | 888 | 'ssh-keygen -i -m PKCS8 -f /dev/stdin') |
1541 | @@ -848,6 +891,7 @@ def crtfile_to_pubkey(fname, data=None): | |||
1542 | 848 | return out.rstrip() | 891 | return out.rstrip() |
1543 | 849 | 892 | ||
1544 | 850 | 893 | ||
1545 | 894 | @azure_ds_telemetry_reporter | ||
1546 | 851 | def pubkeys_from_crt_files(flist): | 895 | def pubkeys_from_crt_files(flist): |
1547 | 852 | pubkeys = [] | 896 | pubkeys = [] |
1548 | 853 | errors = [] | 897 | errors = [] |
1549 | @@ -863,6 +907,7 @@ def pubkeys_from_crt_files(flist): | |||
1550 | 863 | return pubkeys | 907 | return pubkeys |
1551 | 864 | 908 | ||
1552 | 865 | 909 | ||
1553 | 910 | @azure_ds_telemetry_reporter | ||
1554 | 866 | def write_files(datadir, files, dirmode=None): | 911 | def write_files(datadir, files, dirmode=None): |
1555 | 867 | 912 | ||
1556 | 868 | def _redact_password(cnt, fname): | 913 | def _redact_password(cnt, fname): |
1557 | @@ -890,6 +935,7 @@ def write_files(datadir, files, dirmode=None): | |||
1558 | 890 | util.write_file(filename=fname, content=content, mode=0o600) | 935 | util.write_file(filename=fname, content=content, mode=0o600) |
1559 | 891 | 936 | ||
1560 | 892 | 937 | ||
1561 | 938 | @azure_ds_telemetry_reporter | ||
1562 | 893 | def invoke_agent(cmd): | 939 | def invoke_agent(cmd): |
1563 | 894 | # this is a function itself to simplify patching it for test | 940 | # this is a function itself to simplify patching it for test |
1564 | 895 | if cmd: | 941 | if cmd: |
1565 | @@ -909,6 +955,7 @@ def find_child(node, filter_func): | |||
1566 | 909 | return ret | 955 | return ret |
1567 | 910 | 956 | ||
1568 | 911 | 957 | ||
1569 | 958 | @azure_ds_telemetry_reporter | ||
1570 | 912 | def load_azure_ovf_pubkeys(sshnode): | 959 | def load_azure_ovf_pubkeys(sshnode): |
1571 | 913 | # This parses a 'SSH' node formatted like below, and returns | 960 | # This parses a 'SSH' node formatted like below, and returns |
1572 | 914 | # an array of dicts. | 961 | # an array of dicts. |
1573 | @@ -961,6 +1008,7 @@ def load_azure_ovf_pubkeys(sshnode): | |||
1574 | 961 | return found | 1008 | return found |
1575 | 962 | 1009 | ||
1576 | 963 | 1010 | ||
1577 | 1011 | @azure_ds_telemetry_reporter | ||
1578 | 964 | def read_azure_ovf(contents): | 1012 | def read_azure_ovf(contents): |
1579 | 965 | try: | 1013 | try: |
1580 | 966 | dom = minidom.parseString(contents) | 1014 | dom = minidom.parseString(contents) |
1581 | @@ -1061,6 +1109,7 @@ def read_azure_ovf(contents): | |||
1582 | 1061 | return (md, ud, cfg) | 1109 | return (md, ud, cfg) |
1583 | 1062 | 1110 | ||
1584 | 1063 | 1111 | ||
1585 | 1112 | @azure_ds_telemetry_reporter | ||
1586 | 1064 | def _extract_preprovisioned_vm_setting(dom): | 1113 | def _extract_preprovisioned_vm_setting(dom): |
1587 | 1065 | """Read the preprovision flag from the ovf. It should not | 1114 | """Read the preprovision flag from the ovf. It should not |
1588 | 1066 | exist unless true.""" | 1115 | exist unless true.""" |
1589 | @@ -1089,6 +1138,7 @@ def encrypt_pass(password, salt_id="$6$"): | |||
1590 | 1089 | return crypt.crypt(password, salt_id + util.rand_str(strlen=16)) | 1138 | return crypt.crypt(password, salt_id + util.rand_str(strlen=16)) |
1591 | 1090 | 1139 | ||
1592 | 1091 | 1140 | ||
1593 | 1141 | @azure_ds_telemetry_reporter | ||
1594 | 1092 | def _check_freebsd_cdrom(cdrom_dev): | 1142 | def _check_freebsd_cdrom(cdrom_dev): |
1595 | 1093 | """Return boolean indicating path to cdrom device has content.""" | 1143 | """Return boolean indicating path to cdrom device has content.""" |
1596 | 1094 | try: | 1144 | try: |
1597 | @@ -1100,18 +1150,31 @@ def _check_freebsd_cdrom(cdrom_dev): | |||
1598 | 1100 | return False | 1150 | return False |
1599 | 1101 | 1151 | ||
1600 | 1102 | 1152 | ||
1602 | 1103 | def _get_random_seed(): | 1153 | @azure_ds_telemetry_reporter |
1603 | 1154 | def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE): | ||
1604 | 1104 | """Return content random seed file if available, otherwise, | 1155 | """Return content random seed file if available, otherwise, |
1605 | 1105 | return None.""" | 1156 | return None.""" |
1606 | 1106 | # azure / hyper-v provides random data here | 1157 | # azure / hyper-v provides random data here |
1607 | 1107 | # TODO. find the seed on FreeBSD platform | ||
1608 | 1108 | # now update ds_cfg to reflect contents pass in config | 1158 | # now update ds_cfg to reflect contents pass in config |
1610 | 1109 | if util.is_FreeBSD(): | 1159 | if source is None: |
1611 | 1110 | return None | 1160 | return None |
1614 | 1111 | return util.load_file("/sys/firmware/acpi/tables/OEM0", | 1161 | seed = util.load_file(source, quiet=True, decode=False) |
1615 | 1112 | quiet=True, decode=False) | 1162 | |
1616 | 1163 | # The seed generally contains non-Unicode characters. load_file puts | ||
1617 | 1164 | # them into a str (in python 2) or bytes (in python 3). In python 2, | ||
1618 | 1165 | # bad octets in a str cause util.json_dumps() to throw an exception. In | ||
1619 | 1166 | # python 3, bytes is a non-serializable type, and the handler load_file | ||
1620 | 1167 | # uses applies b64 encoding *again* to handle it. The simplest solution | ||
1621 | 1168 | # is to just b64encode the data and then decode it to a serializable | ||
1622 | 1169 | # string. Same number of bits of entropy, just with 25% more zeroes. | ||
1623 | 1170 | # There's no need to undo this base64-encoding when the random seed is | ||
1624 | 1171 | # actually used in cc_seed_random.py. | ||
1625 | 1172 | seed = base64.b64encode(seed).decode() | ||
1626 | 1173 | |||
1627 | 1174 | return seed | ||
1628 | 1113 | 1175 | ||
1629 | 1114 | 1176 | ||
1630 | 1177 | @azure_ds_telemetry_reporter | ||
1631 | 1115 | def list_possible_azure_ds_devs(): | 1178 | def list_possible_azure_ds_devs(): |
1632 | 1116 | devlist = [] | 1179 | devlist = [] |
1633 | 1117 | if util.is_FreeBSD(): | 1180 | if util.is_FreeBSD(): |
1634 | @@ -1126,6 +1189,7 @@ def list_possible_azure_ds_devs(): | |||
1635 | 1126 | return devlist | 1189 | return devlist |
1636 | 1127 | 1190 | ||
1637 | 1128 | 1191 | ||
1638 | 1192 | @azure_ds_telemetry_reporter | ||
1639 | 1129 | def load_azure_ds_dir(source_dir): | 1193 | def load_azure_ds_dir(source_dir): |
1640 | 1130 | ovf_file = os.path.join(source_dir, "ovf-env.xml") | 1194 | ovf_file = os.path.join(source_dir, "ovf-env.xml") |
1641 | 1131 | 1195 | ||
1642 | @@ -1148,47 +1212,54 @@ def parse_network_config(imds_metadata): | |||
1643 | 1148 | @param: imds_metadata: Dict of content read from IMDS network service. | 1212 | @param: imds_metadata: Dict of content read from IMDS network service. |
1644 | 1149 | @return: Dictionary containing network version 2 standard configuration. | 1213 | @return: Dictionary containing network version 2 standard configuration. |
1645 | 1150 | """ | 1214 | """ |
1685 | 1151 | if imds_metadata != sources.UNSET and imds_metadata: | 1215 | with events.ReportEventStack( |
1686 | 1152 | netconfig = {'version': 2, 'ethernets': {}} | 1216 | name="parse_network_config", |
1687 | 1153 | LOG.debug('Azure: generating network configuration from IMDS') | 1217 | description="", |
1688 | 1154 | network_metadata = imds_metadata['network'] | 1218 | parent=azure_ds_reporter) as evt: |
1689 | 1155 | for idx, intf in enumerate(network_metadata['interface']): | 1219 | if imds_metadata != sources.UNSET and imds_metadata: |
1690 | 1156 | nicname = 'eth{idx}'.format(idx=idx) | 1220 | netconfig = {'version': 2, 'ethernets': {}} |
1691 | 1157 | dev_config = {} | 1221 | LOG.debug('Azure: generating network configuration from IMDS') |
1692 | 1158 | for addr4 in intf['ipv4']['ipAddress']: | 1222 | network_metadata = imds_metadata['network'] |
1693 | 1159 | privateIpv4 = addr4['privateIpAddress'] | 1223 | for idx, intf in enumerate(network_metadata['interface']): |
1694 | 1160 | if privateIpv4: | 1224 | nicname = 'eth{idx}'.format(idx=idx) |
1695 | 1161 | if dev_config.get('dhcp4', False): | 1225 | dev_config = {} |
1696 | 1162 | # Append static address config for nic > 1 | 1226 | for addr4 in intf['ipv4']['ipAddress']: |
1697 | 1163 | netPrefix = intf['ipv4']['subnet'][0].get( | 1227 | privateIpv4 = addr4['privateIpAddress'] |
1698 | 1164 | 'prefix', '24') | 1228 | if privateIpv4: |
1699 | 1165 | if not dev_config.get('addresses'): | 1229 | if dev_config.get('dhcp4', False): |
1700 | 1166 | dev_config['addresses'] = [] | 1230 | # Append static address config for nic > 1 |
1701 | 1167 | dev_config['addresses'].append( | 1231 | netPrefix = intf['ipv4']['subnet'][0].get( |
1702 | 1168 | '{ip}/{prefix}'.format( | 1232 | 'prefix', '24') |
1703 | 1169 | ip=privateIpv4, prefix=netPrefix)) | 1233 | if not dev_config.get('addresses'): |
1704 | 1170 | else: | 1234 | dev_config['addresses'] = [] |
1705 | 1171 | dev_config['dhcp4'] = True | 1235 | dev_config['addresses'].append( |
1706 | 1172 | for addr6 in intf['ipv6']['ipAddress']: | 1236 | '{ip}/{prefix}'.format( |
1707 | 1173 | privateIpv6 = addr6['privateIpAddress'] | 1237 | ip=privateIpv4, prefix=netPrefix)) |
1708 | 1174 | if privateIpv6: | 1238 | else: |
1709 | 1175 | dev_config['dhcp6'] = True | 1239 | dev_config['dhcp4'] = True |
1710 | 1176 | break | 1240 | for addr6 in intf['ipv6']['ipAddress']: |
1711 | 1177 | if dev_config: | 1241 | privateIpv6 = addr6['privateIpAddress'] |
1712 | 1178 | mac = ':'.join(re.findall(r'..', intf['macAddress'])) | 1242 | if privateIpv6: |
1713 | 1179 | dev_config.update( | 1243 | dev_config['dhcp6'] = True |
1714 | 1180 | {'match': {'macaddress': mac.lower()}, | 1244 | break |
1715 | 1181 | 'set-name': nicname}) | 1245 | if dev_config: |
1716 | 1182 | netconfig['ethernets'][nicname] = dev_config | 1246 | mac = ':'.join(re.findall(r'..', intf['macAddress'])) |
1717 | 1183 | else: | 1247 | dev_config.update( |
1718 | 1184 | blacklist = ['mlx4_core'] | 1248 | {'match': {'macaddress': mac.lower()}, |
1719 | 1185 | LOG.debug('Azure: generating fallback configuration') | 1249 | 'set-name': nicname}) |
1720 | 1186 | # generate a network config, blacklist picking mlx4_core devs | 1250 | netconfig['ethernets'][nicname] = dev_config |
1721 | 1187 | netconfig = net.generate_fallback_config( | 1251 | evt.description = "network config from imds" |
1722 | 1188 | blacklist_drivers=blacklist, config_driver=True) | 1252 | else: |
1723 | 1189 | return netconfig | 1253 | blacklist = ['mlx4_core'] |
1724 | 1254 | LOG.debug('Azure: generating fallback configuration') | ||
1725 | 1255 | # generate a network config, blacklist picking mlx4_core devs | ||
1726 | 1256 | netconfig = net.generate_fallback_config( | ||
1727 | 1257 | blacklist_drivers=blacklist, config_driver=True) | ||
1728 | 1258 | evt.description = "network config from fallback" | ||
1729 | 1259 | return netconfig | ||
1730 | 1190 | 1260 | ||
1731 | 1191 | 1261 | ||
1732 | 1262 | @azure_ds_telemetry_reporter | ||
1733 | 1192 | def get_metadata_from_imds(fallback_nic, retries): | 1263 | def get_metadata_from_imds(fallback_nic, retries): |
1734 | 1193 | """Query Azure's network metadata service, returning a dictionary. | 1264 | """Query Azure's network metadata service, returning a dictionary. |
1735 | 1194 | 1265 | ||
1736 | @@ -1213,6 +1284,7 @@ def get_metadata_from_imds(fallback_nic, retries): | |||
1737 | 1213 | return util.log_time(**kwargs) | 1284 | return util.log_time(**kwargs) |
1738 | 1214 | 1285 | ||
1739 | 1215 | 1286 | ||
1740 | 1287 | @azure_ds_telemetry_reporter | ||
1741 | 1216 | def _get_metadata_from_imds(retries): | 1288 | def _get_metadata_from_imds(retries): |
1742 | 1217 | 1289 | ||
1743 | 1218 | url = IMDS_URL + "instance?api-version=2017-12-01" | 1290 | url = IMDS_URL + "instance?api-version=2017-12-01" |
1744 | @@ -1232,6 +1304,7 @@ def _get_metadata_from_imds(retries): | |||
1745 | 1232 | return {} | 1304 | return {} |
1746 | 1233 | 1305 | ||
1747 | 1234 | 1306 | ||
1748 | 1307 | @azure_ds_telemetry_reporter | ||
1749 | 1235 | def maybe_remove_ubuntu_network_config_scripts(paths=None): | 1308 | def maybe_remove_ubuntu_network_config_scripts(paths=None): |
1750 | 1236 | """Remove Azure-specific ubuntu network config for non-primary nics. | 1309 | """Remove Azure-specific ubuntu network config for non-primary nics. |
1751 | 1237 | 1310 | ||
1752 | @@ -1269,14 +1342,20 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None): | |||
1753 | 1269 | 1342 | ||
1754 | 1270 | 1343 | ||
1755 | 1271 | def _is_platform_viable(seed_dir): | 1344 | def _is_platform_viable(seed_dir): |
1764 | 1272 | """Check platform environment to report if this datasource may run.""" | 1345 | with events.ReportEventStack( |
1765 | 1273 | asset_tag = util.read_dmi_data('chassis-asset-tag') | 1346 | name="check-platform-viability", |
1766 | 1274 | if asset_tag == AZURE_CHASSIS_ASSET_TAG: | 1347 | description="found azure asset tag", |
1767 | 1275 | return True | 1348 | parent=azure_ds_reporter) as evt: |
1768 | 1276 | LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag) | 1349 | |
1769 | 1277 | if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')): | 1350 | """Check platform environment to report if this datasource may run.""" |
1770 | 1278 | return True | 1351 | asset_tag = util.read_dmi_data('chassis-asset-tag') |
1771 | 1279 | return False | 1352 | if asset_tag == AZURE_CHASSIS_ASSET_TAG: |
1772 | 1353 | return True | ||
1773 | 1354 | LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag) | ||
1774 | 1355 | evt.description = "Non-Azure DMI asset tag '%s' discovered.", asset_tag | ||
1775 | 1356 | if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')): | ||
1776 | 1357 | return True | ||
1777 | 1358 | return False | ||
1778 | 1280 | 1359 | ||
1779 | 1281 | 1360 | ||
1780 | 1282 | class BrokenAzureDataSource(Exception): | 1361 | class BrokenAzureDataSource(Exception): |
1781 | diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py | |||
1782 | index 4f2f6cc..ac28f1d 100644 | |||
1783 | --- a/cloudinit/sources/DataSourceEc2.py | |||
1784 | +++ b/cloudinit/sources/DataSourceEc2.py | |||
1785 | @@ -334,8 +334,12 @@ class DataSourceEc2(sources.DataSource): | |||
1786 | 334 | if isinstance(net_md, dict): | 334 | if isinstance(net_md, dict): |
1787 | 335 | result = convert_ec2_metadata_network_config( | 335 | result = convert_ec2_metadata_network_config( |
1788 | 336 | net_md, macs_to_nics=macs_to_nics, fallback_nic=iface) | 336 | net_md, macs_to_nics=macs_to_nics, fallback_nic=iface) |
1791 | 337 | # RELEASE_BLOCKER: Xenial debian/postinst needs to add | 337 | |
1792 | 338 | # EventType.BOOT on upgrade path for classic. | 338 | # RELEASE_BLOCKER: xenial should drop the below if statement, |
1793 | 339 | # because the issue being addressed doesn't exist pre-netplan. | ||
1794 | 340 | # (This datasource doesn't implement check_instance_id() so the | ||
1795 | 341 | # datasource object is recreated every boot; this means we don't | ||
1796 | 342 | # need to modify update_events on cloud-init upgrade.) | ||
1797 | 339 | 343 | ||
1798 | 340 | # Non-VPC (aka Classic) Ec2 instances need to rewrite the | 344 | # Non-VPC (aka Classic) Ec2 instances need to rewrite the |
1799 | 341 | # network config file every boot due to MAC address change. | 345 | # network config file every boot due to MAC address change. |
1800 | diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py | |||
1801 | index 6860f0c..fcf5d58 100644 | |||
1802 | --- a/cloudinit/sources/DataSourceNoCloud.py | |||
1803 | +++ b/cloudinit/sources/DataSourceNoCloud.py | |||
1804 | @@ -106,7 +106,9 @@ class DataSourceNoCloud(sources.DataSource): | |||
1805 | 106 | fslist = util.find_devs_with("TYPE=vfat") | 106 | fslist = util.find_devs_with("TYPE=vfat") |
1806 | 107 | fslist.extend(util.find_devs_with("TYPE=iso9660")) | 107 | fslist.extend(util.find_devs_with("TYPE=iso9660")) |
1807 | 108 | 108 | ||
1809 | 109 | label_list = util.find_devs_with("LABEL=%s" % label) | 109 | label_list = util.find_devs_with("LABEL=%s" % label.upper()) |
1810 | 110 | label_list.extend(util.find_devs_with("LABEL=%s" % label.lower())) | ||
1811 | 111 | |||
1812 | 110 | devlist = list(set(fslist) & set(label_list)) | 112 | devlist = list(set(fslist) & set(label_list)) |
1813 | 111 | devlist.sort(reverse=True) | 113 | devlist.sort(reverse=True) |
1814 | 112 | 114 | ||
1815 | diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py | |||
1816 | index b573b38..54bfc1f 100644 | |||
1817 | --- a/cloudinit/sources/DataSourceScaleway.py | |||
1818 | +++ b/cloudinit/sources/DataSourceScaleway.py | |||
1819 | @@ -171,10 +171,11 @@ def query_data_api(api_type, api_address, retries, timeout): | |||
1820 | 171 | 171 | ||
1821 | 172 | class DataSourceScaleway(sources.DataSource): | 172 | class DataSourceScaleway(sources.DataSource): |
1822 | 173 | dsname = "Scaleway" | 173 | dsname = "Scaleway" |
1823 | 174 | update_events = {'network': [EventType.BOOT_NEW_INSTANCE, EventType.BOOT]} | ||
1824 | 175 | 174 | ||
1825 | 176 | def __init__(self, sys_cfg, distro, paths): | 175 | def __init__(self, sys_cfg, distro, paths): |
1826 | 177 | super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths) | 176 | super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths) |
1827 | 177 | self.update_events = { | ||
1828 | 178 | 'network': {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}} | ||
1829 | 178 | 179 | ||
1830 | 179 | self.ds_cfg = util.mergemanydict([ | 180 | self.ds_cfg = util.mergemanydict([ |
1831 | 180 | util.get_cfg_by_path(sys_cfg, ["datasource", "Scaleway"], {}), | 181 | util.get_cfg_by_path(sys_cfg, ["datasource", "Scaleway"], {}), |
1832 | diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py | |||
1833 | index e6966b3..1604932 100644 | |||
1834 | --- a/cloudinit/sources/__init__.py | |||
1835 | +++ b/cloudinit/sources/__init__.py | |||
1836 | @@ -164,9 +164,6 @@ class DataSource(object): | |||
1837 | 164 | # A datasource which supports writing network config on each system boot | 164 | # A datasource which supports writing network config on each system boot |
1838 | 165 | # would call update_events['network'].add(EventType.BOOT). | 165 | # would call update_events['network'].add(EventType.BOOT). |
1839 | 166 | 166 | ||
1840 | 167 | # Default: generate network config on new instance id (first boot). | ||
1841 | 168 | update_events = {'network': set([EventType.BOOT_NEW_INSTANCE])} | ||
1842 | 169 | |||
1843 | 170 | # N-tuple listing default values for any metadata-related class | 167 | # N-tuple listing default values for any metadata-related class |
1844 | 171 | # attributes cached on an instance by a process_data runs. These attribute | 168 | # attributes cached on an instance by a process_data runs. These attribute |
1845 | 172 | # values are reset via clear_cached_attrs during any update_metadata call. | 169 | # values are reset via clear_cached_attrs during any update_metadata call. |
1846 | @@ -191,6 +188,9 @@ class DataSource(object): | |||
1847 | 191 | self.vendordata = None | 188 | self.vendordata = None |
1848 | 192 | self.vendordata_raw = None | 189 | self.vendordata_raw = None |
1849 | 193 | 190 | ||
1850 | 191 | # Default: generate network config on new instance id (first boot). | ||
1851 | 192 | self.update_events = {'network': {EventType.BOOT_NEW_INSTANCE}} | ||
1852 | 193 | |||
1853 | 194 | self.ds_cfg = util.get_cfg_by_path( | 194 | self.ds_cfg = util.get_cfg_by_path( |
1854 | 195 | self.sys_cfg, ("datasource", self.dsname), {}) | 195 | self.sys_cfg, ("datasource", self.dsname), {}) |
1855 | 196 | if not self.ds_cfg: | 196 | if not self.ds_cfg: |
1856 | diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py | |||
1857 | 197 | old mode 100644 | 197 | old mode 100644 |
1858 | 198 | new mode 100755 | 198 | new mode 100755 |
1859 | index 2829dd2..d3af05e | |||
1860 | --- a/cloudinit/sources/helpers/azure.py | |||
1861 | +++ b/cloudinit/sources/helpers/azure.py | |||
1862 | @@ -16,10 +16,27 @@ from xml.etree import ElementTree | |||
1863 | 16 | 16 | ||
1864 | 17 | from cloudinit import url_helper | 17 | from cloudinit import url_helper |
1865 | 18 | from cloudinit import util | 18 | from cloudinit import util |
1866 | 19 | from cloudinit.reporting import events | ||
1867 | 19 | 20 | ||
1868 | 20 | LOG = logging.getLogger(__name__) | 21 | LOG = logging.getLogger(__name__) |
1869 | 21 | 22 | ||
1870 | 22 | 23 | ||
1871 | 24 | azure_ds_reporter = events.ReportEventStack( | ||
1872 | 25 | name="azure-ds", | ||
1873 | 26 | description="initialize reporter for azure ds", | ||
1874 | 27 | reporting_enabled=True) | ||
1875 | 28 | |||
1876 | 29 | |||
1877 | 30 | def azure_ds_telemetry_reporter(func): | ||
1878 | 31 | def impl(*args, **kwargs): | ||
1879 | 32 | with events.ReportEventStack( | ||
1880 | 33 | name=func.__name__, | ||
1881 | 34 | description=func.__name__, | ||
1882 | 35 | parent=azure_ds_reporter): | ||
1883 | 36 | return func(*args, **kwargs) | ||
1884 | 37 | return impl | ||
1885 | 38 | |||
1886 | 39 | |||
1887 | 23 | @contextmanager | 40 | @contextmanager |
1888 | 24 | def cd(newdir): | 41 | def cd(newdir): |
1889 | 25 | prevdir = os.getcwd() | 42 | prevdir = os.getcwd() |
1890 | @@ -119,6 +136,7 @@ class OpenSSLManager(object): | |||
1891 | 119 | def clean_up(self): | 136 | def clean_up(self): |
1892 | 120 | util.del_dir(self.tmpdir) | 137 | util.del_dir(self.tmpdir) |
1893 | 121 | 138 | ||
1894 | 139 | @azure_ds_telemetry_reporter | ||
1895 | 122 | def generate_certificate(self): | 140 | def generate_certificate(self): |
1896 | 123 | LOG.debug('Generating certificate for communication with fabric...') | 141 | LOG.debug('Generating certificate for communication with fabric...') |
1897 | 124 | if self.certificate is not None: | 142 | if self.certificate is not None: |
1898 | @@ -139,17 +157,20 @@ class OpenSSLManager(object): | |||
1899 | 139 | LOG.debug('New certificate generated.') | 157 | LOG.debug('New certificate generated.') |
1900 | 140 | 158 | ||
1901 | 141 | @staticmethod | 159 | @staticmethod |
1902 | 160 | @azure_ds_telemetry_reporter | ||
1903 | 142 | def _run_x509_action(action, cert): | 161 | def _run_x509_action(action, cert): |
1904 | 143 | cmd = ['openssl', 'x509', '-noout', action] | 162 | cmd = ['openssl', 'x509', '-noout', action] |
1905 | 144 | result, _ = util.subp(cmd, data=cert) | 163 | result, _ = util.subp(cmd, data=cert) |
1906 | 145 | return result | 164 | return result |
1907 | 146 | 165 | ||
1908 | 166 | @azure_ds_telemetry_reporter | ||
1909 | 147 | def _get_ssh_key_from_cert(self, certificate): | 167 | def _get_ssh_key_from_cert(self, certificate): |
1910 | 148 | pub_key = self._run_x509_action('-pubkey', certificate) | 168 | pub_key = self._run_x509_action('-pubkey', certificate) |
1911 | 149 | keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin'] | 169 | keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin'] |
1912 | 150 | ssh_key, _ = util.subp(keygen_cmd, data=pub_key) | 170 | ssh_key, _ = util.subp(keygen_cmd, data=pub_key) |
1913 | 151 | return ssh_key | 171 | return ssh_key |
1914 | 152 | 172 | ||
1915 | 173 | @azure_ds_telemetry_reporter | ||
1916 | 153 | def _get_fingerprint_from_cert(self, certificate): | 174 | def _get_fingerprint_from_cert(self, certificate): |
1917 | 154 | """openssl x509 formats fingerprints as so: | 175 | """openssl x509 formats fingerprints as so: |
1918 | 155 | 'SHA1 Fingerprint=07:3E:19:D1:4D:1C:79:92:24:C6:A0:FD:8D:DA:\ | 176 | 'SHA1 Fingerprint=07:3E:19:D1:4D:1C:79:92:24:C6:A0:FD:8D:DA:\ |
1919 | @@ -163,6 +184,7 @@ class OpenSSLManager(object): | |||
1920 | 163 | octets = raw_fp[eq+1:-1].split(':') | 184 | octets = raw_fp[eq+1:-1].split(':') |
1921 | 164 | return ''.join(octets) | 185 | return ''.join(octets) |
1922 | 165 | 186 | ||
1923 | 187 | @azure_ds_telemetry_reporter | ||
1924 | 166 | def _decrypt_certs_from_xml(self, certificates_xml): | 188 | def _decrypt_certs_from_xml(self, certificates_xml): |
1925 | 167 | """Decrypt the certificates XML document using the our private key; | 189 | """Decrypt the certificates XML document using the our private key; |
1926 | 168 | return the list of certs and private keys contained in the doc. | 190 | return the list of certs and private keys contained in the doc. |
1927 | @@ -185,6 +207,7 @@ class OpenSSLManager(object): | |||
1928 | 185 | shell=True, data=b'\n'.join(lines)) | 207 | shell=True, data=b'\n'.join(lines)) |
1929 | 186 | return out | 208 | return out |
1930 | 187 | 209 | ||
1931 | 210 | @azure_ds_telemetry_reporter | ||
1932 | 188 | def parse_certificates(self, certificates_xml): | 211 | def parse_certificates(self, certificates_xml): |
1933 | 189 | """Given the Certificates XML document, return a dictionary of | 212 | """Given the Certificates XML document, return a dictionary of |
1934 | 190 | fingerprints and associated SSH keys derived from the certs.""" | 213 | fingerprints and associated SSH keys derived from the certs.""" |
1935 | @@ -265,11 +288,13 @@ class WALinuxAgentShim(object): | |||
1936 | 265 | return socket.inet_ntoa(packed_bytes) | 288 | return socket.inet_ntoa(packed_bytes) |
1937 | 266 | 289 | ||
1938 | 267 | @staticmethod | 290 | @staticmethod |
1939 | 291 | @azure_ds_telemetry_reporter | ||
1940 | 268 | def _networkd_get_value_from_leases(leases_d=None): | 292 | def _networkd_get_value_from_leases(leases_d=None): |
1941 | 269 | return dhcp.networkd_get_option_from_leases( | 293 | return dhcp.networkd_get_option_from_leases( |
1942 | 270 | 'OPTION_245', leases_d=leases_d) | 294 | 'OPTION_245', leases_d=leases_d) |
1943 | 271 | 295 | ||
1944 | 272 | @staticmethod | 296 | @staticmethod |
1945 | 297 | @azure_ds_telemetry_reporter | ||
1946 | 273 | def _get_value_from_leases_file(fallback_lease_file): | 298 | def _get_value_from_leases_file(fallback_lease_file): |
1947 | 274 | leases = [] | 299 | leases = [] |
1948 | 275 | content = util.load_file(fallback_lease_file) | 300 | content = util.load_file(fallback_lease_file) |
1949 | @@ -287,6 +312,7 @@ class WALinuxAgentShim(object): | |||
1950 | 287 | return leases[-1] | 312 | return leases[-1] |
1951 | 288 | 313 | ||
1952 | 289 | @staticmethod | 314 | @staticmethod |
1953 | 315 | @azure_ds_telemetry_reporter | ||
1954 | 290 | def _load_dhclient_json(): | 316 | def _load_dhclient_json(): |
1955 | 291 | dhcp_options = {} | 317 | dhcp_options = {} |
1956 | 292 | hooks_dir = WALinuxAgentShim._get_hooks_dir() | 318 | hooks_dir = WALinuxAgentShim._get_hooks_dir() |
1957 | @@ -305,6 +331,7 @@ class WALinuxAgentShim(object): | |||
1958 | 305 | return dhcp_options | 331 | return dhcp_options |
1959 | 306 | 332 | ||
1960 | 307 | @staticmethod | 333 | @staticmethod |
1961 | 334 | @azure_ds_telemetry_reporter | ||
1962 | 308 | def _get_value_from_dhcpoptions(dhcp_options): | 335 | def _get_value_from_dhcpoptions(dhcp_options): |
1963 | 309 | if dhcp_options is None: | 336 | if dhcp_options is None: |
1964 | 310 | return None | 337 | return None |
1965 | @@ -318,6 +345,7 @@ class WALinuxAgentShim(object): | |||
1966 | 318 | return _value | 345 | return _value |
1967 | 319 | 346 | ||
1968 | 320 | @staticmethod | 347 | @staticmethod |
1969 | 348 | @azure_ds_telemetry_reporter | ||
1970 | 321 | def find_endpoint(fallback_lease_file=None, dhcp245=None): | 349 | def find_endpoint(fallback_lease_file=None, dhcp245=None): |
1971 | 322 | value = None | 350 | value = None |
1972 | 323 | if dhcp245 is not None: | 351 | if dhcp245 is not None: |
1973 | @@ -352,6 +380,7 @@ class WALinuxAgentShim(object): | |||
1974 | 352 | LOG.debug('Azure endpoint found at %s', endpoint_ip_address) | 380 | LOG.debug('Azure endpoint found at %s', endpoint_ip_address) |
1975 | 353 | return endpoint_ip_address | 381 | return endpoint_ip_address |
1976 | 354 | 382 | ||
1977 | 383 | @azure_ds_telemetry_reporter | ||
1978 | 355 | def register_with_azure_and_fetch_data(self, pubkey_info=None): | 384 | def register_with_azure_and_fetch_data(self, pubkey_info=None): |
1979 | 356 | if self.openssl_manager is None: | 385 | if self.openssl_manager is None: |
1980 | 357 | self.openssl_manager = OpenSSLManager() | 386 | self.openssl_manager = OpenSSLManager() |
1981 | @@ -404,6 +433,7 @@ class WALinuxAgentShim(object): | |||
1982 | 404 | 433 | ||
1983 | 405 | return keys | 434 | return keys |
1984 | 406 | 435 | ||
1985 | 436 | @azure_ds_telemetry_reporter | ||
1986 | 407 | def _report_ready(self, goal_state, http_client): | 437 | def _report_ready(self, goal_state, http_client): |
1987 | 408 | LOG.debug('Reporting ready to Azure fabric.') | 438 | LOG.debug('Reporting ready to Azure fabric.') |
1988 | 409 | document = self.REPORT_READY_XML_TEMPLATE.format( | 439 | document = self.REPORT_READY_XML_TEMPLATE.format( |
1989 | @@ -419,6 +449,7 @@ class WALinuxAgentShim(object): | |||
1990 | 419 | LOG.info('Reported ready to Azure fabric.') | 449 | LOG.info('Reported ready to Azure fabric.') |
1991 | 420 | 450 | ||
1992 | 421 | 451 | ||
1993 | 452 | @azure_ds_telemetry_reporter | ||
1994 | 422 | def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None, | 453 | def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None, |
1995 | 423 | pubkey_info=None): | 454 | pubkey_info=None): |
1996 | 424 | shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file, | 455 | shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file, |
1997 | diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py | |||
1998 | index 6378e98..cb1912b 100644 | |||
1999 | --- a/cloudinit/sources/tests/test_init.py | |||
2000 | +++ b/cloudinit/sources/tests/test_init.py | |||
2001 | @@ -575,6 +575,21 @@ class TestDataSource(CiTestCase): | |||
2002 | 575 | " events: New instance first boot", | 575 | " events: New instance first boot", |
2003 | 576 | self.logs.getvalue()) | 576 | self.logs.getvalue()) |
2004 | 577 | 577 | ||
2005 | 578 | def test_data_sources_cant_mutate_update_events_for_others(self): | ||
2006 | 579 | """update_events shouldn't be changed for other DSes (LP: #1819913)""" | ||
2007 | 580 | |||
2008 | 581 | class ModifyingDS(DataSource): | ||
2009 | 582 | |||
2010 | 583 | def __init__(self, sys_cfg, distro, paths): | ||
2011 | 584 | # This mirrors what DataSourceAzure does which causes LP: | ||
2012 | 585 | # #1819913 | ||
2013 | 586 | DataSource.__init__(self, sys_cfg, distro, paths) | ||
2014 | 587 | self.update_events['network'].add(EventType.BOOT) | ||
2015 | 588 | |||
2016 | 589 | before_update_events = copy.deepcopy(self.datasource.update_events) | ||
2017 | 590 | ModifyingDS(self.sys_cfg, self.distro, self.paths) | ||
2018 | 591 | self.assertEqual(before_update_events, self.datasource.update_events) | ||
2019 | 592 | |||
2020 | 578 | 593 | ||
2021 | 579 | class TestRedactSensitiveData(CiTestCase): | 594 | class TestRedactSensitiveData(CiTestCase): |
2022 | 580 | 595 | ||
2023 | diff --git a/cloudinit/util.py b/cloudinit/util.py | |||
2024 | index a192091..385f231 100644 | |||
2025 | --- a/cloudinit/util.py | |||
2026 | +++ b/cloudinit/util.py | |||
2027 | @@ -703,6 +703,21 @@ def get_cfg_option_list(yobj, key, default=None): | |||
2028 | 703 | # get a cfg entry by its path array | 703 | # get a cfg entry by its path array |
2029 | 704 | # for f['a']['b']: get_cfg_by_path(mycfg,('a','b')) | 704 | # for f['a']['b']: get_cfg_by_path(mycfg,('a','b')) |
2030 | 705 | def get_cfg_by_path(yobj, keyp, default=None): | 705 | def get_cfg_by_path(yobj, keyp, default=None): |
2031 | 706 | """Return the value of the item at path C{keyp} in C{yobj}. | ||
2032 | 707 | |||
2033 | 708 | example: | ||
2034 | 709 | get_cfg_by_path({'a': {'b': {'num': 4}}}, 'a/b/num') == 4 | ||
2035 | 710 | get_cfg_by_path({'a': {'b': {'num': 4}}}, 'c/d') == None | ||
2036 | 711 | |||
2037 | 712 | @param yobj: A dictionary. | ||
2038 | 713 | @param keyp: A path inside yobj. it can be a '/' delimited string, | ||
2039 | 714 | or an iterable. | ||
2040 | 715 | @param default: The default to return if the path does not exist. | ||
2041 | 716 | @return: The value of the item at keyp." | ||
2042 | 717 | is not found.""" | ||
2043 | 718 | |||
2044 | 719 | if isinstance(keyp, six.string_types): | ||
2045 | 720 | keyp = keyp.split("/") | ||
2046 | 706 | cur = yobj | 721 | cur = yobj |
2047 | 707 | for tok in keyp: | 722 | for tok in keyp: |
2048 | 708 | if tok not in cur: | 723 | if tok not in cur: |
2049 | diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl | |||
2050 | index 7513176..25db43e 100644 | |||
2051 | --- a/config/cloud.cfg.tmpl | |||
2052 | +++ b/config/cloud.cfg.tmpl | |||
2053 | @@ -112,6 +112,9 @@ cloud_final_modules: | |||
2054 | 112 | - landscape | 112 | - landscape |
2055 | 113 | - lxd | 113 | - lxd |
2056 | 114 | {% endif %} | 114 | {% endif %} |
2057 | 115 | {% if variant in ["ubuntu", "unknown"] %} | ||
2058 | 116 | - ubuntu-drivers | ||
2059 | 117 | {% endif %} | ||
2060 | 115 | {% if variant not in ["freebsd"] %} | 118 | {% if variant not in ["freebsd"] %} |
2061 | 116 | - puppet | 119 | - puppet |
2062 | 117 | - chef | 120 | - chef |
2063 | diff --git a/debian/changelog b/debian/changelog | |||
2064 | index ac376ab..f869278 100644 | |||
2065 | --- a/debian/changelog | |||
2066 | +++ b/debian/changelog | |||
2067 | @@ -1,3 +1,32 @@ | |||
2068 | 1 | cloud-init (18.5-61-gb76714c3-0ubuntu1) disco; urgency=medium | ||
2069 | 2 | |||
2070 | 3 | * New upstream snapshot. | ||
2071 | 4 | - Change DataSourceNoCloud to ignore file system label's case. | ||
2072 | 5 | [Risto Oikarinen] | ||
2073 | 6 | - cmd:main.py: Fix missing 'modules-init' key in modes dict | ||
2074 | 7 | [Antonio Romito] (LP: #1815109) | ||
2075 | 8 | - ubuntu_advantage: rewrite cloud-config module | ||
2076 | 9 | - Azure: Treat _unset network configuration as if it were absent | ||
2077 | 10 | [Jason Zions (MSFT)] (LP: #1823084) | ||
2078 | 11 | - DatasourceAzure: add additional logging for azure datasource [Anh Vo] | ||
2079 | 12 | - cloud_tests: fix apt_pipelining test-cases | ||
2080 | 13 | - Azure: Ensure platform random_seed is always serializable as JSON. | ||
2081 | 14 | [Jason Zions (MSFT)] | ||
2082 | 15 | - net/sysconfig: write out SUSE-compatible IPv6 config [Robert Schweikert] | ||
2083 | 16 | - tox: Update testenv for openSUSE Leap to 15.0 [Thomas Bechtold] | ||
2084 | 17 | - net: Fix ipv6 static routes when using eni renderer | ||
2085 | 18 | [Raphael Glon] (LP: #1818669) | ||
2086 | 19 | - Add ubuntu_drivers config module | ||
2087 | 20 | - doc: Refresh Azure walinuxagent docs | ||
2088 | 21 | - tox: bump pylint version to latest (2.3.1) | ||
2089 | 22 | - DataSource: move update_events from a class to an instance attribute | ||
2090 | 23 | (LP: #1819913) | ||
2091 | 24 | - net/sysconfig: Handle default route setup for dhcp configured NICs | ||
2092 | 25 | [Robert Schweikert] (LP: #1812117) | ||
2093 | 26 | - DataSourceEc2: update RELEASE_BLOCKER to be more accurate | ||
2094 | 27 | |||
2095 | 28 | -- Daniel Watkins <oddbloke@ubuntu.com> Wed, 10 Apr 2019 11:49:03 -0400 | ||
2096 | 29 | |||
2097 | 1 | cloud-init (18.5-45-g3554ffe8-0ubuntu1) disco; urgency=medium | 30 | cloud-init (18.5-45-g3554ffe8-0ubuntu1) disco; urgency=medium |
2098 | 2 | 31 | ||
2099 | 3 | * New upstream snapshot. | 32 | * New upstream snapshot. |
2100 | diff --git a/doc/rtd/topics/datasources/azure.rst b/doc/rtd/topics/datasources/azure.rst | |||
2101 | index 720a475..b41cddd 100644 | |||
2102 | --- a/doc/rtd/topics/datasources/azure.rst | |||
2103 | +++ b/doc/rtd/topics/datasources/azure.rst | |||
2104 | @@ -5,9 +5,30 @@ Azure | |||
2105 | 5 | 5 | ||
2106 | 6 | This datasource finds metadata and user-data from the Azure cloud platform. | 6 | This datasource finds metadata and user-data from the Azure cloud platform. |
2107 | 7 | 7 | ||
2111 | 8 | Azure Platform | 8 | walinuxagent |
2112 | 9 | -------------- | 9 | ------------ |
2113 | 10 | The azure cloud-platform provides initial data to an instance via an attached | 10 | walinuxagent has several functions within images. For cloud-init |
2114 | 11 | specifically, the relevant functionality it performs is to register the | ||
2115 | 12 | instance with the Azure cloud platform at boot so networking will be | ||
2116 | 13 | permitted. For more information about the other functionality of | ||
2117 | 14 | walinuxagent, see `Azure's documentation | ||
2118 | 15 | <https://github.com/Azure/WALinuxAgent#introduction>`_ for more details. | ||
2119 | 16 | (Note, however, that only one of walinuxagent's provisioning and cloud-init | ||
2120 | 17 | should be used to perform instance customisation.) | ||
2121 | 18 | |||
2122 | 19 | If you are configuring walinuxagent yourself, you will want to ensure that you | ||
2123 | 20 | have `Provisioning.UseCloudInit | ||
2124 | 21 | <https://github.com/Azure/WALinuxAgent#provisioningusecloudinit>`_ set to | ||
2125 | 22 | ``y``. | ||
2126 | 23 | |||
2127 | 24 | |||
2128 | 25 | Builtin Agent | ||
2129 | 26 | ------------- | ||
2130 | 27 | An alternative to using walinuxagent to register to the Azure cloud platform | ||
2131 | 28 | is to use the ``__builtin__`` agent command. This section contains more | ||
2132 | 29 | background on what that code path does, and how to enable it. | ||
2133 | 30 | |||
2134 | 31 | The Azure cloud platform provides initial data to an instance via an attached | ||
2135 | 11 | CD formatted in UDF. That CD contains a 'ovf-env.xml' file that provides some | 32 | CD formatted in UDF. That CD contains a 'ovf-env.xml' file that provides some |
2136 | 12 | information. Additional information is obtained via interaction with the | 33 | information. Additional information is obtained via interaction with the |
2137 | 13 | "endpoint". | 34 | "endpoint". |
2138 | @@ -36,25 +57,17 @@ for the endpoint server (again option 245). | |||
2139 | 36 | You can define the path to the lease file with the 'dhclient_lease_file' | 57 | You can define the path to the lease file with the 'dhclient_lease_file' |
2140 | 37 | configuration. | 58 | configuration. |
2141 | 38 | 59 | ||
2161 | 39 | walinuxagent | 60 | |
2162 | 40 | ------------ | 61 | IMDS |
2163 | 41 | In order to operate correctly, cloud-init needs walinuxagent to provide much | 62 | ---- |
2164 | 42 | of the interaction with azure. In addition to "provisioning" code, walinux | 63 | Azure provides the `instance metadata service (IMDS) |
2165 | 43 | does the following on the agent is a long running daemon that handles the | 64 | <https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service>`_ |
2166 | 44 | following things: | 65 | which is a REST service on ``169.254.169.254`` providing additional |
2167 | 45 | - generate a x509 certificate and send that to the endpoint | 66 | configuration information to the instance. Cloud-init uses the IMDS for: |
2168 | 46 | 67 | ||
2169 | 47 | waagent.conf config | 68 | - network configuration for the instance which is applied per boot |
2170 | 48 | ^^^^^^^^^^^^^^^^^^^ | 69 | - a preprovisioning gate which blocks instance configuration until Azure fabric |
2171 | 49 | in order to use waagent.conf with cloud-init, the following settings are recommended. Other values can be changed or set to the defaults. | 70 | is ready to provision |
2153 | 50 | |||
2154 | 51 | :: | ||
2155 | 52 | |||
2156 | 53 | # disabling provisioning turns off all 'Provisioning.*' function | ||
2157 | 54 | Provisioning.Enabled=n | ||
2158 | 55 | # this is currently not handled by cloud-init, so let walinuxagent do it. | ||
2159 | 56 | ResourceDisk.Format=y | ||
2160 | 57 | ResourceDisk.MountPoint=/mnt | ||
2172 | 58 | 71 | ||
2173 | 59 | 72 | ||
2174 | 60 | Configuration | 73 | Configuration |
2175 | diff --git a/doc/rtd/topics/datasources/nocloud.rst b/doc/rtd/topics/datasources/nocloud.rst | |||
2176 | index 08578e8..1c5cf96 100644 | |||
2177 | --- a/doc/rtd/topics/datasources/nocloud.rst | |||
2178 | +++ b/doc/rtd/topics/datasources/nocloud.rst | |||
2179 | @@ -9,7 +9,7 @@ network at all). | |||
2180 | 9 | 9 | ||
2181 | 10 | You can provide meta-data and user-data to a local vm boot via files on a | 10 | You can provide meta-data and user-data to a local vm boot via files on a |
2182 | 11 | `vfat`_ or `iso9660`_ filesystem. The filesystem volume label must be | 11 | `vfat`_ or `iso9660`_ filesystem. The filesystem volume label must be |
2184 | 12 | ``cidata``. | 12 | ``cidata`` or ``CIDATA``. |
2185 | 13 | 13 | ||
2186 | 14 | Alternatively, you can provide meta-data via kernel command line or SMBIOS | 14 | Alternatively, you can provide meta-data via kernel command line or SMBIOS |
2187 | 15 | "serial number" option. The data must be passed in the form of a string: | 15 | "serial number" option. The data must be passed in the form of a string: |
2188 | diff --git a/doc/rtd/topics/modules.rst b/doc/rtd/topics/modules.rst | |||
2189 | index d9720f6..3dcdd3b 100644 | |||
2190 | --- a/doc/rtd/topics/modules.rst | |||
2191 | +++ b/doc/rtd/topics/modules.rst | |||
2192 | @@ -54,6 +54,7 @@ Modules | |||
2193 | 54 | .. automodule:: cloudinit.config.cc_ssh_import_id | 54 | .. automodule:: cloudinit.config.cc_ssh_import_id |
2194 | 55 | .. automodule:: cloudinit.config.cc_timezone | 55 | .. automodule:: cloudinit.config.cc_timezone |
2195 | 56 | .. automodule:: cloudinit.config.cc_ubuntu_advantage | 56 | .. automodule:: cloudinit.config.cc_ubuntu_advantage |
2196 | 57 | .. automodule:: cloudinit.config.cc_ubuntu_drivers | ||
2197 | 57 | .. automodule:: cloudinit.config.cc_update_etc_hosts | 58 | .. automodule:: cloudinit.config.cc_update_etc_hosts |
2198 | 58 | .. automodule:: cloudinit.config.cc_update_hostname | 59 | .. automodule:: cloudinit.config.cc_update_hostname |
2199 | 59 | .. automodule:: cloudinit.config.cc_users_groups | 60 | .. automodule:: cloudinit.config.cc_users_groups |
2200 | diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml b/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml | |||
2201 | index bd9b5d0..22a31dc 100644 | |||
2202 | --- a/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml | |||
2203 | +++ b/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml | |||
2204 | @@ -5,8 +5,7 @@ required_features: | |||
2205 | 5 | - apt | 5 | - apt |
2206 | 6 | cloud_config: | | 6 | cloud_config: | |
2207 | 7 | #cloud-config | 7 | #cloud-config |
2210 | 8 | apt: | 8 | apt_pipelining: false |
2209 | 9 | apt_pipelining: false | ||
2211 | 10 | collect_scripts: | 9 | collect_scripts: |
2212 | 11 | 90cloud-init-pipelining: | | 10 | 90cloud-init-pipelining: | |
2213 | 12 | #!/bin/bash | 11 | #!/bin/bash |
2214 | diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_os.py b/tests/cloud_tests/testcases/modules/apt_pipelining_os.py | |||
2215 | index 740dc7c..2b940a6 100644 | |||
2216 | --- a/tests/cloud_tests/testcases/modules/apt_pipelining_os.py | |||
2217 | +++ b/tests/cloud_tests/testcases/modules/apt_pipelining_os.py | |||
2218 | @@ -8,8 +8,8 @@ class TestAptPipeliningOS(base.CloudTestCase): | |||
2219 | 8 | """Test apt-pipelining module.""" | 8 | """Test apt-pipelining module.""" |
2220 | 9 | 9 | ||
2221 | 10 | def test_os_pipelining(self): | 10 | def test_os_pipelining(self): |
2225 | 11 | """Test pipelining set to os.""" | 11 | """test 'os' settings does not write apt config file.""" |
2226 | 12 | out = self.get_data_file('90cloud-init-pipelining') | 12 | out = self.get_data_file('90cloud-init-pipelining_not_written') |
2227 | 13 | self.assertIn('Acquire::http::Pipeline-Depth "0";', out) | 13 | self.assertEqual(0, int(out)) |
2228 | 14 | 14 | ||
2229 | 15 | # vi: ts=4 expandtab | 15 | # vi: ts=4 expandtab |
2230 | diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml b/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml | |||
2231 | index cbed3ba..86d5220 100644 | |||
2232 | --- a/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml | |||
2233 | +++ b/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml | |||
2234 | @@ -1,15 +1,14 @@ | |||
2235 | 1 | # | 1 | # |
2237 | 2 | # Set apt pipelining value to OS | 2 | # Set apt pipelining value to OS, no conf written |
2238 | 3 | # | 3 | # |
2239 | 4 | required_features: | 4 | required_features: |
2240 | 5 | - apt | 5 | - apt |
2241 | 6 | cloud_config: | | 6 | cloud_config: | |
2242 | 7 | #cloud-config | 7 | #cloud-config |
2245 | 8 | apt: | 8 | apt_pipelining: os |
2244 | 9 | apt_pipelining: os | ||
2246 | 10 | collect_scripts: | 9 | collect_scripts: |
2248 | 11 | 90cloud-init-pipelining: | | 10 | 90cloud-init-pipelining_not_written: | |
2249 | 12 | #!/bin/bash | 11 | #!/bin/bash |
2251 | 13 | cat /etc/apt/apt.conf.d/90cloud-init-pipelining | 12 | ls /etc/apt/apt.conf.d/90cloud-init-pipelining | wc -l |
2252 | 14 | 13 | ||
2253 | 15 | # vi: ts=4 expandtab | 14 | # vi: ts=4 expandtab |
2254 | diff --git a/tests/data/azure/non_unicode_random_string b/tests/data/azure/non_unicode_random_string | |||
2255 | 16 | new file mode 100644 | 15 | new file mode 100644 |
2256 | index 0000000..b9ecefb | |||
2257 | --- /dev/null | |||
2258 | +++ b/tests/data/azure/non_unicode_random_string | |||
2259 | @@ -0,0 +1 @@ | |||
2260 | 1 | OEM0d\x00\x00\x00\x01\x80VRTUALMICROSFT\x02\x17\x00\x06MSFT\x97\x00\x00\x00C\xb4{V\xf4X%\x061x\x90\x1c\xfen\x86\xbf~\xf5\x8c\x94&\x88\xed\x84\xf9B\xbd\xd3\xf1\xdb\xee:\xd9\x0fc\x0e\x83(\xbd\xe3'\xfc\x85,\xdf\xf4\x13\x99N\xc5\xf3Y\x1e\xe3\x0b\xa4H\x08J\xb9\xdcdb$ | ||
2261 | 0 | \ No newline at end of file | 2 | \ No newline at end of file |
2262 | diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py | |||
2263 | index 6b05b8f..53c56cd 100644 | |||
2264 | --- a/tests/unittests/test_datasource/test_azure.py | |||
2265 | +++ b/tests/unittests/test_datasource/test_azure.py | |||
2266 | @@ -7,11 +7,11 @@ from cloudinit.sources import ( | |||
2267 | 7 | UNSET, DataSourceAzure as dsaz, InvalidMetaDataException) | 7 | UNSET, DataSourceAzure as dsaz, InvalidMetaDataException) |
2268 | 8 | from cloudinit.util import (b64e, decode_binary, load_file, write_file, | 8 | from cloudinit.util import (b64e, decode_binary, load_file, write_file, |
2269 | 9 | find_freebsd_part, get_path_dev_freebsd, | 9 | find_freebsd_part, get_path_dev_freebsd, |
2271 | 10 | MountFailedError) | 10 | MountFailedError, json_dumps, load_json) |
2272 | 11 | from cloudinit.version import version_string as vs | 11 | from cloudinit.version import version_string as vs |
2273 | 12 | from cloudinit.tests.helpers import ( | 12 | from cloudinit.tests.helpers import ( |
2274 | 13 | HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call, | 13 | HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call, |
2276 | 14 | ExitStack) | 14 | ExitStack, resourceLocation) |
2277 | 15 | 15 | ||
2278 | 16 | import crypt | 16 | import crypt |
2279 | 17 | import httpretty | 17 | import httpretty |
2280 | @@ -1923,4 +1923,24 @@ class TestWBIsPlatformViable(CiTestCase): | |||
2281 | 1923 | self.logs.getvalue()) | 1923 | self.logs.getvalue()) |
2282 | 1924 | 1924 | ||
2283 | 1925 | 1925 | ||
2284 | 1926 | class TestRandomSeed(CiTestCase): | ||
2285 | 1927 | """Test proper handling of random_seed""" | ||
2286 | 1928 | |||
2287 | 1929 | def test_non_ascii_seed_is_serializable(self): | ||
2288 | 1930 | """Pass if a random string from the Azure infrastructure which | ||
2289 | 1931 | contains at least one non-Unicode character can be converted to/from | ||
2290 | 1932 | JSON without alteration and without throwing an exception. | ||
2291 | 1933 | """ | ||
2292 | 1934 | path = resourceLocation("azure/non_unicode_random_string") | ||
2293 | 1935 | result = dsaz._get_random_seed(path) | ||
2294 | 1936 | |||
2295 | 1937 | obj = {'seed': result} | ||
2296 | 1938 | try: | ||
2297 | 1939 | serialized = json_dumps(obj) | ||
2298 | 1940 | deserialized = load_json(serialized) | ||
2299 | 1941 | except UnicodeDecodeError: | ||
2300 | 1942 | self.fail("Non-serializable random seed returned") | ||
2301 | 1943 | |||
2302 | 1944 | self.assertEqual(deserialized['seed'], result) | ||
2303 | 1945 | |||
2304 | 1926 | # vi: ts=4 expandtab | 1946 | # vi: ts=4 expandtab |
2305 | diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py | |||
2306 | index 3429272..b785362 100644 | |||
2307 | --- a/tests/unittests/test_datasource/test_nocloud.py | |||
2308 | +++ b/tests/unittests/test_datasource/test_nocloud.py | |||
2309 | @@ -32,6 +32,36 @@ class TestNoCloudDataSource(CiTestCase): | |||
2310 | 32 | self.mocks.enter_context( | 32 | self.mocks.enter_context( |
2311 | 33 | mock.patch.object(util, 'read_dmi_data', return_value=None)) | 33 | mock.patch.object(util, 'read_dmi_data', return_value=None)) |
2312 | 34 | 34 | ||
2313 | 35 | def _test_fs_config_is_read(self, fs_label, fs_label_to_search): | ||
2314 | 36 | vfat_device = 'device-1' | ||
2315 | 37 | |||
2316 | 38 | def m_mount_cb(device, callback, mtype): | ||
2317 | 39 | if (device == vfat_device): | ||
2318 | 40 | return {'meta-data': yaml.dump({'instance-id': 'IID'})} | ||
2319 | 41 | else: | ||
2320 | 42 | return {} | ||
2321 | 43 | |||
2322 | 44 | def m_find_devs_with(query='', path=''): | ||
2323 | 45 | if 'TYPE=vfat' == query: | ||
2324 | 46 | return [vfat_device] | ||
2325 | 47 | elif 'LABEL={}'.format(fs_label) == query: | ||
2326 | 48 | return [vfat_device] | ||
2327 | 49 | else: | ||
2328 | 50 | return [] | ||
2329 | 51 | |||
2330 | 52 | self.mocks.enter_context( | ||
2331 | 53 | mock.patch.object(util, 'find_devs_with', | ||
2332 | 54 | side_effect=m_find_devs_with)) | ||
2333 | 55 | self.mocks.enter_context( | ||
2334 | 56 | mock.patch.object(util, 'mount_cb', | ||
2335 | 57 | side_effect=m_mount_cb)) | ||
2336 | 58 | sys_cfg = {'datasource': {'NoCloud': {'fs_label': fs_label_to_search}}} | ||
2337 | 59 | dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) | ||
2338 | 60 | ret = dsrc.get_data() | ||
2339 | 61 | |||
2340 | 62 | self.assertEqual(dsrc.metadata.get('instance-id'), 'IID') | ||
2341 | 63 | self.assertTrue(ret) | ||
2342 | 64 | |||
2343 | 35 | def test_nocloud_seed_dir_on_lxd(self, m_is_lxd): | 65 | def test_nocloud_seed_dir_on_lxd(self, m_is_lxd): |
2344 | 36 | md = {'instance-id': 'IID', 'dsmode': 'local'} | 66 | md = {'instance-id': 'IID', 'dsmode': 'local'} |
2345 | 37 | ud = b"USER_DATA_HERE" | 67 | ud = b"USER_DATA_HERE" |
2346 | @@ -90,6 +120,18 @@ class TestNoCloudDataSource(CiTestCase): | |||
2347 | 90 | ret = dsrc.get_data() | 120 | ret = dsrc.get_data() |
2348 | 91 | self.assertFalse(ret) | 121 | self.assertFalse(ret) |
2349 | 92 | 122 | ||
2350 | 123 | def test_fs_config_lowercase_label(self, m_is_lxd): | ||
2351 | 124 | self._test_fs_config_is_read('cidata', 'cidata') | ||
2352 | 125 | |||
2353 | 126 | def test_fs_config_uppercase_label(self, m_is_lxd): | ||
2354 | 127 | self._test_fs_config_is_read('CIDATA', 'cidata') | ||
2355 | 128 | |||
2356 | 129 | def test_fs_config_lowercase_label_search_uppercase(self, m_is_lxd): | ||
2357 | 130 | self._test_fs_config_is_read('cidata', 'CIDATA') | ||
2358 | 131 | |||
2359 | 132 | def test_fs_config_uppercase_label_search_uppercase(self, m_is_lxd): | ||
2360 | 133 | self._test_fs_config_is_read('CIDATA', 'CIDATA') | ||
2361 | 134 | |||
2362 | 93 | def test_no_datasource_expected(self, m_is_lxd): | 135 | def test_no_datasource_expected(self, m_is_lxd): |
2363 | 94 | # no source should be found if no cmdline, config, and fs_label=None | 136 | # no source should be found if no cmdline, config, and fs_label=None |
2364 | 95 | sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} | 137 | sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} |
2365 | diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py | |||
2366 | index f96bf0a..3bfd752 100644 | |||
2367 | --- a/tests/unittests/test_datasource/test_scaleway.py | |||
2368 | +++ b/tests/unittests/test_datasource/test_scaleway.py | |||
2369 | @@ -7,6 +7,7 @@ import requests | |||
2370 | 7 | 7 | ||
2371 | 8 | from cloudinit import helpers | 8 | from cloudinit import helpers |
2372 | 9 | from cloudinit import settings | 9 | from cloudinit import settings |
2373 | 10 | from cloudinit.event import EventType | ||
2374 | 10 | from cloudinit.sources import DataSourceScaleway | 11 | from cloudinit.sources import DataSourceScaleway |
2375 | 11 | 12 | ||
2376 | 12 | from cloudinit.tests.helpers import mock, HttprettyTestCase, CiTestCase | 13 | from cloudinit.tests.helpers import mock, HttprettyTestCase, CiTestCase |
2377 | @@ -403,3 +404,9 @@ class TestDataSourceScaleway(HttprettyTestCase): | |||
2378 | 403 | 404 | ||
2379 | 404 | netcfg = self.datasource.network_config | 405 | netcfg = self.datasource.network_config |
2380 | 405 | self.assertEqual(netcfg, '0xdeadbeef') | 406 | self.assertEqual(netcfg, '0xdeadbeef') |
2381 | 407 | |||
2382 | 408 | def test_update_events_is_correct(self): | ||
2383 | 409 | """ensure update_events contains correct data""" | ||
2384 | 410 | self.assertEqual( | ||
2385 | 411 | {'network': {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}}, | ||
2386 | 412 | self.datasource.update_events) | ||
2387 | diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py | |||
2388 | index e453040..c3c0c8c 100644 | |||
2389 | --- a/tests/unittests/test_distros/test_netconfig.py | |||
2390 | +++ b/tests/unittests/test_distros/test_netconfig.py | |||
2391 | @@ -496,6 +496,7 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase): | |||
2392 | 496 | BOOTPROTO=none | 496 | BOOTPROTO=none |
2393 | 497 | DEFROUTE=yes | 497 | DEFROUTE=yes |
2394 | 498 | DEVICE=eth0 | 498 | DEVICE=eth0 |
2395 | 499 | IPADDR6=2607:f0d0:1002:0011::2/64 | ||
2396 | 499 | IPV6ADDR=2607:f0d0:1002:0011::2/64 | 500 | IPV6ADDR=2607:f0d0:1002:0011::2/64 |
2397 | 500 | IPV6INIT=yes | 501 | IPV6INIT=yes |
2398 | 501 | IPV6_DEFAULTGW=2607:f0d0:1002:0011::1 | 502 | IPV6_DEFAULTGW=2607:f0d0:1002:0011::1 |
2399 | @@ -588,6 +589,7 @@ class TestNetCfgDistroOpensuse(TestNetCfgDistroBase): | |||
2400 | 588 | BOOTPROTO=none | 589 | BOOTPROTO=none |
2401 | 589 | DEFROUTE=yes | 590 | DEFROUTE=yes |
2402 | 590 | DEVICE=eth0 | 591 | DEVICE=eth0 |
2403 | 592 | IPADDR6=2607:f0d0:1002:0011::2/64 | ||
2404 | 591 | IPV6ADDR=2607:f0d0:1002:0011::2/64 | 593 | IPV6ADDR=2607:f0d0:1002:0011::2/64 |
2405 | 592 | IPV6INIT=yes | 594 | IPV6INIT=yes |
2406 | 593 | IPV6_DEFAULTGW=2607:f0d0:1002:0011::1 | 595 | IPV6_DEFAULTGW=2607:f0d0:1002:0011::1 |
2407 | diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py | |||
2408 | index d00c1b4..8c18aa1 100644 | |||
2409 | --- a/tests/unittests/test_ds_identify.py | |||
2410 | +++ b/tests/unittests/test_ds_identify.py | |||
2411 | @@ -520,6 +520,10 @@ class TestDsIdentify(DsIdentifyBase): | |||
2412 | 520 | """NoCloud is found with iso9660 filesystem on non-cdrom disk.""" | 520 | """NoCloud is found with iso9660 filesystem on non-cdrom disk.""" |
2413 | 521 | self._test_ds_found('NoCloud') | 521 | self._test_ds_found('NoCloud') |
2414 | 522 | 522 | ||
2415 | 523 | def test_nocloud_upper(self): | ||
2416 | 524 | """NoCloud is found with uppercase filesystem label.""" | ||
2417 | 525 | self._test_ds_found('NoCloudUpper') | ||
2418 | 526 | |||
2419 | 523 | def test_nocloud_seed(self): | 527 | def test_nocloud_seed(self): |
2420 | 524 | """Nocloud seed directory.""" | 528 | """Nocloud seed directory.""" |
2421 | 525 | self._test_ds_found('NoCloud-seed') | 529 | self._test_ds_found('NoCloud-seed') |
2422 | @@ -713,6 +717,19 @@ VALID_CFG = { | |||
2423 | 713 | 'dev/vdb': 'pretend iso content for cidata\n', | 717 | 'dev/vdb': 'pretend iso content for cidata\n', |
2424 | 714 | } | 718 | } |
2425 | 715 | }, | 719 | }, |
2426 | 720 | 'NoCloudUpper': { | ||
2427 | 721 | 'ds': 'NoCloud', | ||
2428 | 722 | 'mocks': [ | ||
2429 | 723 | MOCK_VIRT_IS_KVM, | ||
2430 | 724 | {'name': 'blkid', 'ret': 0, | ||
2431 | 725 | 'out': blkid_out( | ||
2432 | 726 | BLKID_UEFI_UBUNTU + | ||
2433 | 727 | [{'DEVNAME': 'vdb', 'TYPE': 'iso9660', 'LABEL': 'CIDATA'}])}, | ||
2434 | 728 | ], | ||
2435 | 729 | 'files': { | ||
2436 | 730 | 'dev/vdb': 'pretend iso content for cidata\n', | ||
2437 | 731 | } | ||
2438 | 732 | }, | ||
2439 | 716 | 'NoCloud-seed': { | 733 | 'NoCloud-seed': { |
2440 | 717 | 'ds': 'NoCloud', | 734 | 'ds': 'NoCloud', |
2441 | 718 | 'files': { | 735 | 'files': { |
2442 | diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/test_handler/test_schema.py | |||
2443 | index 1bad07f..e69a47a 100644 | |||
2444 | --- a/tests/unittests/test_handler/test_schema.py | |||
2445 | +++ b/tests/unittests/test_handler/test_schema.py | |||
2446 | @@ -28,6 +28,7 @@ class GetSchemaTest(CiTestCase): | |||
2447 | 28 | 'cc_runcmd', | 28 | 'cc_runcmd', |
2448 | 29 | 'cc_snap', | 29 | 'cc_snap', |
2449 | 30 | 'cc_ubuntu_advantage', | 30 | 'cc_ubuntu_advantage', |
2450 | 31 | 'cc_ubuntu_drivers', | ||
2451 | 31 | 'cc_zypper_add_repo' | 32 | 'cc_zypper_add_repo' |
2452 | 32 | ], | 33 | ], |
2453 | 33 | [subschema['id'] for subschema in schema['allOf']]) | 34 | [subschema['id'] for subschema in schema['allOf']]) |
2454 | diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py | |||
2455 | index e3b9e02..fd03deb 100644 | |||
2456 | --- a/tests/unittests/test_net.py | |||
2457 | +++ b/tests/unittests/test_net.py | |||
2458 | @@ -691,6 +691,9 @@ DEVICE=eth0 | |||
2459 | 691 | GATEWAY=172.19.3.254 | 691 | GATEWAY=172.19.3.254 |
2460 | 692 | HWADDR=fa:16:3e:ed:9a:59 | 692 | HWADDR=fa:16:3e:ed:9a:59 |
2461 | 693 | IPADDR=172.19.1.34 | 693 | IPADDR=172.19.1.34 |
2462 | 694 | IPADDR6=2001:DB8::10/64 | ||
2463 | 695 | IPADDR6_0=2001:DB9::10/64 | ||
2464 | 696 | IPADDR6_2=2001:DB10::10/64 | ||
2465 | 694 | IPV6ADDR=2001:DB8::10/64 | 697 | IPV6ADDR=2001:DB8::10/64 |
2466 | 695 | IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64" | 698 | IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64" |
2467 | 696 | IPV6INIT=yes | 699 | IPV6INIT=yes |
2468 | @@ -729,6 +732,9 @@ DEVICE=eth0 | |||
2469 | 729 | GATEWAY=172.19.3.254 | 732 | GATEWAY=172.19.3.254 |
2470 | 730 | HWADDR=fa:16:3e:ed:9a:59 | 733 | HWADDR=fa:16:3e:ed:9a:59 |
2471 | 731 | IPADDR=172.19.1.34 | 734 | IPADDR=172.19.1.34 |
2472 | 735 | IPADDR6=2001:DB8::10/64 | ||
2473 | 736 | IPADDR6_0=2001:DB9::10/64 | ||
2474 | 737 | IPADDR6_2=2001:DB10::10/64 | ||
2475 | 732 | IPV6ADDR=2001:DB8::10/64 | 738 | IPV6ADDR=2001:DB8::10/64 |
2476 | 733 | IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64" | 739 | IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64" |
2477 | 734 | IPV6INIT=yes | 740 | IPV6INIT=yes |
2478 | @@ -860,6 +866,7 @@ NETWORK_CONFIGS = { | |||
2479 | 860 | BOOTPROTO=dhcp | 866 | BOOTPROTO=dhcp |
2480 | 861 | DEFROUTE=yes | 867 | DEFROUTE=yes |
2481 | 862 | DEVICE=eth99 | 868 | DEVICE=eth99 |
2482 | 869 | DHCLIENT_SET_DEFAULT_ROUTE=yes | ||
2483 | 863 | DNS1=8.8.8.8 | 870 | DNS1=8.8.8.8 |
2484 | 864 | DNS2=8.8.4.4 | 871 | DNS2=8.8.4.4 |
2485 | 865 | DOMAIN="barley.maas sach.maas" | 872 | DOMAIN="barley.maas sach.maas" |
2486 | @@ -979,6 +986,7 @@ NETWORK_CONFIGS = { | |||
2487 | 979 | BOOTPROTO=none | 986 | BOOTPROTO=none |
2488 | 980 | DEVICE=iface0 | 987 | DEVICE=iface0 |
2489 | 981 | IPADDR=192.168.14.2 | 988 | IPADDR=192.168.14.2 |
2490 | 989 | IPADDR6=2001:1::1/64 | ||
2491 | 982 | IPV6ADDR=2001:1::1/64 | 990 | IPV6ADDR=2001:1::1/64 |
2492 | 983 | IPV6INIT=yes | 991 | IPV6INIT=yes |
2493 | 984 | NETMASK=255.255.255.0 | 992 | NETMASK=255.255.255.0 |
2494 | @@ -1113,8 +1121,8 @@ iface eth0.101 inet static | |||
2495 | 1113 | iface eth0.101 inet static | 1121 | iface eth0.101 inet static |
2496 | 1114 | address 192.168.2.10/24 | 1122 | address 192.168.2.10/24 |
2497 | 1115 | 1123 | ||
2500 | 1116 | post-up route add -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true | 1124 | post-up route add -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true |
2501 | 1117 | pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true | 1125 | pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true |
2502 | 1118 | """), | 1126 | """), |
2503 | 1119 | 'expected_netplan': textwrap.dedent(""" | 1127 | 'expected_netplan': textwrap.dedent(""" |
2504 | 1120 | network: | 1128 | network: |
2505 | @@ -1234,6 +1242,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true | |||
2506 | 1234 | 'ifcfg-bond0.200': textwrap.dedent("""\ | 1242 | 'ifcfg-bond0.200': textwrap.dedent("""\ |
2507 | 1235 | BOOTPROTO=dhcp | 1243 | BOOTPROTO=dhcp |
2508 | 1236 | DEVICE=bond0.200 | 1244 | DEVICE=bond0.200 |
2509 | 1245 | DHCLIENT_SET_DEFAULT_ROUTE=no | ||
2510 | 1237 | NM_CONTROLLED=no | 1246 | NM_CONTROLLED=no |
2511 | 1238 | ONBOOT=yes | 1247 | ONBOOT=yes |
2512 | 1239 | PHYSDEV=bond0 | 1248 | PHYSDEV=bond0 |
2513 | @@ -1247,6 +1256,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true | |||
2514 | 1247 | DEFROUTE=yes | 1256 | DEFROUTE=yes |
2515 | 1248 | DEVICE=br0 | 1257 | DEVICE=br0 |
2516 | 1249 | IPADDR=192.168.14.2 | 1258 | IPADDR=192.168.14.2 |
2517 | 1259 | IPADDR6=2001:1::1/64 | ||
2518 | 1250 | IPV6ADDR=2001:1::1/64 | 1260 | IPV6ADDR=2001:1::1/64 |
2519 | 1251 | IPV6INIT=yes | 1261 | IPV6INIT=yes |
2520 | 1252 | IPV6_DEFAULTGW=2001:4800:78ff:1b::1 | 1262 | IPV6_DEFAULTGW=2001:4800:78ff:1b::1 |
2521 | @@ -1333,6 +1343,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true | |||
2522 | 1333 | 'ifcfg-eth5': textwrap.dedent("""\ | 1343 | 'ifcfg-eth5': textwrap.dedent("""\ |
2523 | 1334 | BOOTPROTO=dhcp | 1344 | BOOTPROTO=dhcp |
2524 | 1335 | DEVICE=eth5 | 1345 | DEVICE=eth5 |
2525 | 1346 | DHCLIENT_SET_DEFAULT_ROUTE=no | ||
2526 | 1336 | HWADDR=98:bb:9f:2c:e8:8a | 1347 | HWADDR=98:bb:9f:2c:e8:8a |
2527 | 1337 | NM_CONTROLLED=no | 1348 | NM_CONTROLLED=no |
2528 | 1338 | ONBOOT=no | 1349 | ONBOOT=no |
2529 | @@ -1505,17 +1516,18 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true | |||
2530 | 1505 | - gateway: 192.168.0.3 | 1516 | - gateway: 192.168.0.3 |
2531 | 1506 | netmask: 255.255.255.0 | 1517 | netmask: 255.255.255.0 |
2532 | 1507 | network: 10.1.3.0 | 1518 | network: 10.1.3.0 |
2533 | 1508 | - gateway: 2001:67c:1562:1 | ||
2534 | 1509 | network: 2001:67c:1 | ||
2535 | 1510 | netmask: ffff:ffff:0 | ||
2536 | 1511 | - gateway: 3001:67c:1562:1 | ||
2537 | 1512 | network: 3001:67c:1 | ||
2538 | 1513 | netmask: ffff:ffff:0 | ||
2539 | 1514 | metric: 10000 | ||
2540 | 1515 | - type: static | 1519 | - type: static |
2541 | 1516 | address: 192.168.1.2/24 | 1520 | address: 192.168.1.2/24 |
2542 | 1517 | - type: static | 1521 | - type: static |
2543 | 1518 | address: 2001:1::1/92 | 1522 | address: 2001:1::1/92 |
2544 | 1523 | routes: | ||
2545 | 1524 | - gateway: 2001:67c:1562:1 | ||
2546 | 1525 | network: 2001:67c:1 | ||
2547 | 1526 | netmask: ffff:ffff:0 | ||
2548 | 1527 | - gateway: 3001:67c:1562:1 | ||
2549 | 1528 | network: 3001:67c:1 | ||
2550 | 1529 | netmask: ffff:ffff:0 | ||
2551 | 1530 | metric: 10000 | ||
2552 | 1519 | """), | 1531 | """), |
2553 | 1520 | 'expected_netplan': textwrap.dedent(""" | 1532 | 'expected_netplan': textwrap.dedent(""" |
2554 | 1521 | network: | 1533 | network: |
2555 | @@ -1554,6 +1566,51 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true | |||
2556 | 1554 | to: 3001:67c:1/32 | 1566 | to: 3001:67c:1/32 |
2557 | 1555 | via: 3001:67c:1562:1 | 1567 | via: 3001:67c:1562:1 |
2558 | 1556 | """), | 1568 | """), |
2559 | 1569 | 'expected_eni': textwrap.dedent("""\ | ||
2560 | 1570 | auto lo | ||
2561 | 1571 | iface lo inet loopback | ||
2562 | 1572 | |||
2563 | 1573 | auto bond0s0 | ||
2564 | 1574 | iface bond0s0 inet manual | ||
2565 | 1575 | bond-master bond0 | ||
2566 | 1576 | bond-mode active-backup | ||
2567 | 1577 | bond-xmit-hash-policy layer3+4 | ||
2568 | 1578 | bond_miimon 100 | ||
2569 | 1579 | |||
2570 | 1580 | auto bond0s1 | ||
2571 | 1581 | iface bond0s1 inet manual | ||
2572 | 1582 | bond-master bond0 | ||
2573 | 1583 | bond-mode active-backup | ||
2574 | 1584 | bond-xmit-hash-policy layer3+4 | ||
2575 | 1585 | bond_miimon 100 | ||
2576 | 1586 | |||
2577 | 1587 | auto bond0 | ||
2578 | 1588 | iface bond0 inet static | ||
2579 | 1589 | address 192.168.0.2/24 | ||
2580 | 1590 | gateway 192.168.0.1 | ||
2581 | 1591 | bond-mode active-backup | ||
2582 | 1592 | bond-slaves none | ||
2583 | 1593 | bond-xmit-hash-policy layer3+4 | ||
2584 | 1594 | bond_miimon 100 | ||
2585 | 1595 | hwaddress aa:bb:cc:dd:e8:ff | ||
2586 | 1596 | mtu 9000 | ||
2587 | 1597 | post-up route add -net 10.1.3.0/24 gw 192.168.0.3 || true | ||
2588 | 1598 | pre-down route del -net 10.1.3.0/24 gw 192.168.0.3 || true | ||
2589 | 1599 | |||
2590 | 1600 | # control-alias bond0 | ||
2591 | 1601 | iface bond0 inet static | ||
2592 | 1602 | address 192.168.1.2/24 | ||
2593 | 1603 | |||
2594 | 1604 | # control-alias bond0 | ||
2595 | 1605 | iface bond0 inet6 static | ||
2596 | 1606 | address 2001:1::1/92 | ||
2597 | 1607 | post-up route add -A inet6 2001:67c:1/32 gw 2001:67c:1562:1 || true | ||
2598 | 1608 | pre-down route del -A inet6 2001:67c:1/32 gw 2001:67c:1562:1 || true | ||
2599 | 1609 | post-up route add -A inet6 3001:67c:1/32 gw 3001:67c:1562:1 metric 10000 \ | ||
2600 | 1610 | || true | ||
2601 | 1611 | pre-down route del -A inet6 3001:67c:1/32 gw 3001:67c:1562:1 metric 10000 \ | ||
2602 | 1612 | || true | ||
2603 | 1613 | """), | ||
2604 | 1557 | 'yaml-v2': textwrap.dedent(""" | 1614 | 'yaml-v2': textwrap.dedent(""" |
2605 | 1558 | version: 2 | 1615 | version: 2 |
2606 | 1559 | ethernets: | 1616 | ethernets: |
2607 | @@ -1641,6 +1698,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true | |||
2608 | 1641 | MACADDR=aa:bb:cc:dd:e8:ff | 1698 | MACADDR=aa:bb:cc:dd:e8:ff |
2609 | 1642 | IPADDR=192.168.0.2 | 1699 | IPADDR=192.168.0.2 |
2610 | 1643 | IPADDR1=192.168.1.2 | 1700 | IPADDR1=192.168.1.2 |
2611 | 1701 | IPADDR6=2001:1::1/92 | ||
2612 | 1644 | IPV6ADDR=2001:1::1/92 | 1702 | IPV6ADDR=2001:1::1/92 |
2613 | 1645 | IPV6INIT=yes | 1703 | IPV6INIT=yes |
2614 | 1646 | MTU=9000 | 1704 | MTU=9000 |
2615 | @@ -1696,6 +1754,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true | |||
2616 | 1696 | MACADDR=aa:bb:cc:dd:e8:ff | 1754 | MACADDR=aa:bb:cc:dd:e8:ff |
2617 | 1697 | IPADDR=192.168.0.2 | 1755 | IPADDR=192.168.0.2 |
2618 | 1698 | IPADDR1=192.168.1.2 | 1756 | IPADDR1=192.168.1.2 |
2619 | 1757 | IPADDR6=2001:1::1/92 | ||
2620 | 1699 | IPV6ADDR=2001:1::1/92 | 1758 | IPV6ADDR=2001:1::1/92 |
2621 | 1700 | IPV6INIT=yes | 1759 | IPV6INIT=yes |
2622 | 1701 | MTU=9000 | 1760 | MTU=9000 |
2623 | @@ -1786,6 +1845,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true | |||
2624 | 1786 | GATEWAY=192.168.1.1 | 1845 | GATEWAY=192.168.1.1 |
2625 | 1787 | IPADDR=192.168.2.2 | 1846 | IPADDR=192.168.2.2 |
2626 | 1788 | IPADDR1=192.168.1.2 | 1847 | IPADDR1=192.168.1.2 |
2627 | 1848 | IPADDR6=2001:1::bbbb/96 | ||
2628 | 1789 | IPV6ADDR=2001:1::bbbb/96 | 1849 | IPV6ADDR=2001:1::bbbb/96 |
2629 | 1790 | IPV6INIT=yes | 1850 | IPV6INIT=yes |
2630 | 1791 | IPV6_DEFAULTGW=2001:1::1 | 1851 | IPV6_DEFAULTGW=2001:1::1 |
2631 | @@ -1847,6 +1907,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true | |||
2632 | 1847 | BRIDGE=br0 | 1907 | BRIDGE=br0 |
2633 | 1848 | DEVICE=eth0 | 1908 | DEVICE=eth0 |
2634 | 1849 | HWADDR=52:54:00:12:34:00 | 1909 | HWADDR=52:54:00:12:34:00 |
2635 | 1910 | IPADDR6=2001:1::100/96 | ||
2636 | 1850 | IPV6ADDR=2001:1::100/96 | 1911 | IPV6ADDR=2001:1::100/96 |
2637 | 1851 | IPV6INIT=yes | 1912 | IPV6INIT=yes |
2638 | 1852 | NM_CONTROLLED=no | 1913 | NM_CONTROLLED=no |
2639 | @@ -1860,6 +1921,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true | |||
2640 | 1860 | BRIDGE=br0 | 1921 | BRIDGE=br0 |
2641 | 1861 | DEVICE=eth1 | 1922 | DEVICE=eth1 |
2642 | 1862 | HWADDR=52:54:00:12:34:01 | 1923 | HWADDR=52:54:00:12:34:01 |
2643 | 1924 | IPADDR6=2001:1::101/96 | ||
2644 | 1863 | IPV6ADDR=2001:1::101/96 | 1925 | IPV6ADDR=2001:1::101/96 |
2645 | 1864 | IPV6INIT=yes | 1926 | IPV6INIT=yes |
2646 | 1865 | NM_CONTROLLED=no | 1927 | NM_CONTROLLED=no |
2647 | @@ -1988,6 +2050,23 @@ CONFIG_V1_SIMPLE_SUBNET = { | |||
2648 | 1988 | 'type': 'static'}], | 2050 | 'type': 'static'}], |
2649 | 1989 | 'type': 'physical'}]} | 2051 | 'type': 'physical'}]} |
2650 | 1990 | 2052 | ||
2651 | 2053 | CONFIG_V1_MULTI_IFACE = { | ||
2652 | 2054 | 'version': 1, | ||
2653 | 2055 | 'config': [{'type': 'physical', | ||
2654 | 2056 | 'mtu': 1500, | ||
2655 | 2057 | 'subnets': [{'type': 'static', | ||
2656 | 2058 | 'netmask': '255.255.240.0', | ||
2657 | 2059 | 'routes': [{'netmask': '0.0.0.0', | ||
2658 | 2060 | 'network': '0.0.0.0', | ||
2659 | 2061 | 'gateway': '51.68.80.1'}], | ||
2660 | 2062 | 'address': '51.68.89.122', | ||
2661 | 2063 | 'ipv4': True}], | ||
2662 | 2064 | 'mac_address': 'fa:16:3e:25:b4:59', | ||
2663 | 2065 | 'name': 'eth0'}, | ||
2664 | 2066 | {'type': 'physical', | ||
2665 | 2067 | 'mtu': 9000, | ||
2666 | 2068 | 'subnets': [{'type': 'dhcp4'}], | ||
2667 | 2069 | 'mac_address': 'fa:16:3e:b1:ca:29', 'name': 'eth1'}]} | ||
2668 | 1991 | 2070 | ||
2669 | 1992 | DEFAULT_DEV_ATTRS = { | 2071 | DEFAULT_DEV_ATTRS = { |
2670 | 1993 | 'eth1000': { | 2072 | 'eth1000': { |
2671 | @@ -2460,6 +2539,49 @@ USERCTL=no | |||
2672 | 2460 | respath = '/etc/resolv.conf' | 2539 | respath = '/etc/resolv.conf' |
2673 | 2461 | self.assertNotIn(respath, found.keys()) | 2540 | self.assertNotIn(respath, found.keys()) |
2674 | 2462 | 2541 | ||
2675 | 2542 | def test_network_config_v1_multi_iface_samples(self): | ||
2676 | 2543 | ns = network_state.parse_net_config_data(CONFIG_V1_MULTI_IFACE) | ||
2677 | 2544 | render_dir = self.tmp_path("render") | ||
2678 | 2545 | os.makedirs(render_dir) | ||
2679 | 2546 | renderer = self._get_renderer() | ||
2680 | 2547 | renderer.render_network_state(ns, target=render_dir) | ||
2681 | 2548 | found = dir2dict(render_dir) | ||
2682 | 2549 | nspath = '/etc/sysconfig/network-scripts/' | ||
2683 | 2550 | self.assertNotIn(nspath + 'ifcfg-lo', found.keys()) | ||
2684 | 2551 | expected_i1 = """\ | ||
2685 | 2552 | # Created by cloud-init on instance boot automatically, do not edit. | ||
2686 | 2553 | # | ||
2687 | 2554 | BOOTPROTO=none | ||
2688 | 2555 | DEFROUTE=yes | ||
2689 | 2556 | DEVICE=eth0 | ||
2690 | 2557 | GATEWAY=51.68.80.1 | ||
2691 | 2558 | HWADDR=fa:16:3e:25:b4:59 | ||
2692 | 2559 | IPADDR=51.68.89.122 | ||
2693 | 2560 | MTU=1500 | ||
2694 | 2561 | NETMASK=255.255.240.0 | ||
2695 | 2562 | NM_CONTROLLED=no | ||
2696 | 2563 | ONBOOT=yes | ||
2697 | 2564 | STARTMODE=auto | ||
2698 | 2565 | TYPE=Ethernet | ||
2699 | 2566 | USERCTL=no | ||
2700 | 2567 | """ | ||
2701 | 2568 | self.assertEqual(expected_i1, found[nspath + 'ifcfg-eth0']) | ||
2702 | 2569 | expected_i2 = """\ | ||
2703 | 2570 | # Created by cloud-init on instance boot automatically, do not edit. | ||
2704 | 2571 | # | ||
2705 | 2572 | BOOTPROTO=dhcp | ||
2706 | 2573 | DEVICE=eth1 | ||
2707 | 2574 | DHCLIENT_SET_DEFAULT_ROUTE=no | ||
2708 | 2575 | HWADDR=fa:16:3e:b1:ca:29 | ||
2709 | 2576 | MTU=9000 | ||
2710 | 2577 | NM_CONTROLLED=no | ||
2711 | 2578 | ONBOOT=yes | ||
2712 | 2579 | STARTMODE=auto | ||
2713 | 2580 | TYPE=Ethernet | ||
2714 | 2581 | USERCTL=no | ||
2715 | 2582 | """ | ||
2716 | 2583 | self.assertEqual(expected_i2, found[nspath + 'ifcfg-eth1']) | ||
2717 | 2584 | |||
2718 | 2463 | def test_config_with_explicit_loopback(self): | 2585 | def test_config_with_explicit_loopback(self): |
2719 | 2464 | ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK) | 2586 | ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK) |
2720 | 2465 | render_dir = self.tmp_path("render") | 2587 | render_dir = self.tmp_path("render") |
2721 | @@ -2634,6 +2756,7 @@ USERCTL=no | |||
2722 | 2634 | GATEWAY=192.168.42.1 | 2756 | GATEWAY=192.168.42.1 |
2723 | 2635 | HWADDR=52:54:00:ab:cd:ef | 2757 | HWADDR=52:54:00:ab:cd:ef |
2724 | 2636 | IPADDR=192.168.42.100 | 2758 | IPADDR=192.168.42.100 |
2725 | 2759 | IPADDR6=2001:db8::100/32 | ||
2726 | 2637 | IPV6ADDR=2001:db8::100/32 | 2760 | IPV6ADDR=2001:db8::100/32 |
2727 | 2638 | IPV6INIT=yes | 2761 | IPV6INIT=yes |
2728 | 2639 | IPV6_DEFAULTGW=2001:db8::1 | 2762 | IPV6_DEFAULTGW=2001:db8::1 |
2729 | @@ -3570,17 +3693,17 @@ class TestEniRoundTrip(CiTestCase): | |||
2730 | 3570 | 'iface eth0 inet static', | 3693 | 'iface eth0 inet static', |
2731 | 3571 | ' address 172.23.31.42/26', | 3694 | ' address 172.23.31.42/26', |
2732 | 3572 | ' gateway 172.23.31.2', | 3695 | ' gateway 172.23.31.2', |
2734 | 3573 | ('post-up route add -net 10.0.0.0 netmask 255.240.0.0 gw ' | 3696 | ('post-up route add -net 10.0.0.0/12 gw ' |
2735 | 3574 | '172.23.31.1 metric 0 || true'), | 3697 | '172.23.31.1 metric 0 || true'), |
2737 | 3575 | ('pre-down route del -net 10.0.0.0 netmask 255.240.0.0 gw ' | 3698 | ('pre-down route del -net 10.0.0.0/12 gw ' |
2738 | 3576 | '172.23.31.1 metric 0 || true'), | 3699 | '172.23.31.1 metric 0 || true'), |
2740 | 3577 | ('post-up route add -net 192.168.2.0 netmask 255.255.0.0 gw ' | 3700 | ('post-up route add -net 192.168.2.0/16 gw ' |
2741 | 3578 | '172.23.31.1 metric 0 || true'), | 3701 | '172.23.31.1 metric 0 || true'), |
2743 | 3579 | ('pre-down route del -net 192.168.2.0 netmask 255.255.0.0 gw ' | 3702 | ('pre-down route del -net 192.168.2.0/16 gw ' |
2744 | 3580 | '172.23.31.1 metric 0 || true'), | 3703 | '172.23.31.1 metric 0 || true'), |
2746 | 3581 | ('post-up route add -net 10.0.200.0 netmask 255.255.0.0 gw ' | 3704 | ('post-up route add -net 10.0.200.0/16 gw ' |
2747 | 3582 | '172.23.31.1 metric 1 || true'), | 3705 | '172.23.31.1 metric 1 || true'), |
2749 | 3583 | ('pre-down route del -net 10.0.200.0 netmask 255.255.0.0 gw ' | 3706 | ('pre-down route del -net 10.0.200.0/16 gw ' |
2750 | 3584 | '172.23.31.1 metric 1 || true'), | 3707 | '172.23.31.1 metric 1 || true'), |
2751 | 3585 | ] | 3708 | ] |
2752 | 3586 | found = files['/etc/network/interfaces'].splitlines() | 3709 | found = files['/etc/network/interfaces'].splitlines() |
2753 | @@ -3588,6 +3711,77 @@ class TestEniRoundTrip(CiTestCase): | |||
2754 | 3588 | self.assertEqual( | 3711 | self.assertEqual( |
2755 | 3589 | expected, [line for line in found if line]) | 3712 | expected, [line for line in found if line]) |
2756 | 3590 | 3713 | ||
2757 | 3714 | def test_ipv6_static_routes(self): | ||
2758 | 3715 | # as reported in bug 1818669 | ||
2759 | 3716 | conf = [ | ||
2760 | 3717 | {'name': 'eno3', 'type': 'physical', | ||
2761 | 3718 | 'subnets': [{ | ||
2762 | 3719 | 'address': 'fd00::12/64', | ||
2763 | 3720 | 'dns_nameservers': ['fd00:2::15'], | ||
2764 | 3721 | 'gateway': 'fd00::1', | ||
2765 | 3722 | 'ipv6': True, | ||
2766 | 3723 | 'type': 'static', | ||
2767 | 3724 | 'routes': [{'netmask': '32', | ||
2768 | 3725 | 'network': 'fd00:12::', | ||
2769 | 3726 | 'gateway': 'fd00::2'}, | ||
2770 | 3727 | {'network': 'fd00:14::', | ||
2771 | 3728 | 'gateway': 'fd00::3'}, | ||
2772 | 3729 | {'destination': 'fe00:14::/48', | ||
2773 | 3730 | 'gateway': 'fe00::4', | ||
2774 | 3731 | 'metric': 500}, | ||
2775 | 3732 | {'gateway': '192.168.23.1', | ||
2776 | 3733 | 'metric': 999, | ||
2777 | 3734 | 'netmask': 24, | ||
2778 | 3735 | 'network': '192.168.23.0'}, | ||
2779 | 3736 | {'destination': '10.23.23.0/24', | ||
2780 | 3737 | 'gateway': '10.23.23.2', | ||
2781 | 3738 | 'metric': 300}]}]}, | ||
2782 | 3739 | ] | ||
2783 | 3740 | |||
2784 | 3741 | files = self._render_and_read( | ||
2785 | 3742 | network_config={'config': conf, 'version': 1}) | ||
2786 | 3743 | expected = [ | ||
2787 | 3744 | 'auto lo', | ||
2788 | 3745 | 'iface lo inet loopback', | ||
2789 | 3746 | 'auto eno3', | ||
2790 | 3747 | 'iface eno3 inet6 static', | ||
2791 | 3748 | ' address fd00::12/64', | ||
2792 | 3749 | ' dns-nameservers fd00:2::15', | ||
2793 | 3750 | ' gateway fd00::1', | ||
2794 | 3751 | (' post-up route add -A inet6 fd00:12::/32 gw ' | ||
2795 | 3752 | 'fd00::2 || true'), | ||
2796 | 3753 | (' pre-down route del -A inet6 fd00:12::/32 gw ' | ||
2797 | 3754 | 'fd00::2 || true'), | ||
2798 | 3755 | (' post-up route add -A inet6 fd00:14::/64 gw ' | ||
2799 | 3756 | 'fd00::3 || true'), | ||
2800 | 3757 | (' pre-down route del -A inet6 fd00:14::/64 gw ' | ||
2801 | 3758 | 'fd00::3 || true'), | ||
2802 | 3759 | (' post-up route add -A inet6 fe00:14::/48 gw ' | ||
2803 | 3760 | 'fe00::4 metric 500 || true'), | ||
2804 | 3761 | (' pre-down route del -A inet6 fe00:14::/48 gw ' | ||
2805 | 3762 | 'fe00::4 metric 500 || true'), | ||
2806 | 3763 | (' post-up route add -net 192.168.23.0/24 gw ' | ||
2807 | 3764 | '192.168.23.1 metric 999 || true'), | ||
2808 | 3765 | (' pre-down route del -net 192.168.23.0/24 gw ' | ||
2809 | 3766 | '192.168.23.1 metric 999 || true'), | ||
2810 | 3767 | (' post-up route add -net 10.23.23.0/24 gw ' | ||
2811 | 3768 | '10.23.23.2 metric 300 || true'), | ||
2812 | 3769 | (' pre-down route del -net 10.23.23.0/24 gw ' | ||
2813 | 3770 | '10.23.23.2 metric 300 || true'), | ||
2814 | 3771 | |||
2815 | 3772 | ] | ||
2816 | 3773 | found = files['/etc/network/interfaces'].splitlines() | ||
2817 | 3774 | |||
2818 | 3775 | self.assertEqual( | ||
2819 | 3776 | expected, [line for line in found if line]) | ||
2820 | 3777 | |||
2821 | 3778 | def testsimple_render_bond(self): | ||
2822 | 3779 | entry = NETWORK_CONFIGS['bond'] | ||
2823 | 3780 | files = self._render_and_read(network_config=yaml.load(entry['yaml'])) | ||
2824 | 3781 | self.assertEqual( | ||
2825 | 3782 | entry['expected_eni'].splitlines(), | ||
2826 | 3783 | files['/etc/network/interfaces'].splitlines()) | ||
2827 | 3784 | |||
2828 | 3591 | 3785 | ||
2829 | 3592 | class TestNetRenderers(CiTestCase): | 3786 | class TestNetRenderers(CiTestCase): |
2830 | 3593 | @mock.patch("cloudinit.net.renderers.sysconfig.available") | 3787 | @mock.patch("cloudinit.net.renderers.sysconfig.available") |
2831 | diff --git a/tools/ds-identify b/tools/ds-identify | |||
2832 | index b78b273..6518901 100755 | |||
2833 | --- a/tools/ds-identify | |||
2834 | +++ b/tools/ds-identify | |||
2835 | @@ -620,7 +620,7 @@ dscheck_MAAS() { | |||
2836 | 620 | } | 620 | } |
2837 | 621 | 621 | ||
2838 | 622 | dscheck_NoCloud() { | 622 | dscheck_NoCloud() { |
2840 | 623 | local fslabel="cidata" d="" | 623 | local fslabel="cidata CIDATA" d="" |
2841 | 624 | case " ${DI_KERNEL_CMDLINE} " in | 624 | case " ${DI_KERNEL_CMDLINE} " in |
2842 | 625 | *\ ds=nocloud*) return ${DS_FOUND};; | 625 | *\ ds=nocloud*) return ${DS_FOUND};; |
2843 | 626 | esac | 626 | esac |
2844 | @@ -632,9 +632,10 @@ dscheck_NoCloud() { | |||
2845 | 632 | check_seed_dir "$d" meta-data user-data && return ${DS_FOUND} | 632 | check_seed_dir "$d" meta-data user-data && return ${DS_FOUND} |
2846 | 633 | check_writable_seed_dir "$d" meta-data user-data && return ${DS_FOUND} | 633 | check_writable_seed_dir "$d" meta-data user-data && return ${DS_FOUND} |
2847 | 634 | done | 634 | done |
2849 | 635 | if has_fs_with_label "${fslabel}"; then | 635 | if has_fs_with_label $fslabel; then |
2850 | 636 | return ${DS_FOUND} | 636 | return ${DS_FOUND} |
2851 | 637 | fi | 637 | fi |
2852 | 638 | |||
2853 | 638 | return ${DS_NOT_FOUND} | 639 | return ${DS_NOT_FOUND} |
2854 | 639 | } | 640 | } |
2855 | 640 | 641 | ||
2856 | @@ -762,7 +763,7 @@ is_cdrom_ovf() { | |||
2857 | 762 | 763 | ||
2858 | 763 | # explicitly skip known labels of other types. rd_rdfe is azure. | 764 | # explicitly skip known labels of other types. rd_rdfe is azure. |
2859 | 764 | case "$label" in | 765 | case "$label" in |
2861 | 765 | config-2|CONFIG-2|rd_rdfe_stable*|cidata) return 1;; | 766 | config-2|CONFIG-2|rd_rdfe_stable*|cidata|CIDATA) return 1;; |
2862 | 766 | esac | 767 | esac |
2863 | 767 | 768 | ||
2864 | 768 | local idstr="http://schemas.dmtf.org/ovf/environment/1" | 769 | local idstr="http://schemas.dmtf.org/ovf/environment/1" |
2865 | diff --git a/tox.ini b/tox.ini | |||
2866 | index d371720..1f01eb7 100644 | |||
2867 | --- a/tox.ini | |||
2868 | +++ b/tox.ini | |||
2869 | @@ -21,7 +21,7 @@ setenv = | |||
2870 | 21 | basepython = python3 | 21 | basepython = python3 |
2871 | 22 | deps = | 22 | deps = |
2872 | 23 | # requirements | 23 | # requirements |
2874 | 24 | pylint==2.2.2 | 24 | pylint==2.3.1 |
2875 | 25 | # test-requirements because unit tests are now present in cloudinit tree | 25 | # test-requirements because unit tests are now present in cloudinit tree |
2876 | 26 | -r{toxinidir}/test-requirements.txt | 26 | -r{toxinidir}/test-requirements.txt |
2877 | 27 | commands = {envpython} -m pylint {posargs:cloudinit tests tools} | 27 | commands = {envpython} -m pylint {posargs:cloudinit tests tools} |
2878 | @@ -96,19 +96,18 @@ deps = | |||
2879 | 96 | six==1.9.0 | 96 | six==1.9.0 |
2880 | 97 | -r{toxinidir}/test-requirements.txt | 97 | -r{toxinidir}/test-requirements.txt |
2881 | 98 | 98 | ||
2883 | 99 | [testenv:opensusel42] | 99 | [testenv:opensusel150] |
2884 | 100 | basepython = python2.7 | 100 | basepython = python2.7 |
2885 | 101 | commands = nosetests {posargs:tests/unittests cloudinit} | 101 | commands = nosetests {posargs:tests/unittests cloudinit} |
2886 | 102 | deps = | 102 | deps = |
2887 | 103 | # requirements | 103 | # requirements |
2892 | 104 | argparse==1.3.0 | 104 | jinja2==2.10 |
2893 | 105 | jinja2==2.8 | 105 | PyYAML==3.12 |
2894 | 106 | PyYAML==3.11 | 106 | oauthlib==2.0.6 |
2891 | 107 | oauthlib==0.7.2 | ||
2895 | 108 | configobj==5.0.6 | 107 | configobj==5.0.6 |
2899 | 109 | requests==2.11.1 | 108 | requests==2.18.4 |
2900 | 110 | jsonpatch==1.11 | 109 | jsonpatch==1.16 |
2901 | 111 | six==1.9.0 | 110 | six==1.11.0 |
2902 | 112 | -r{toxinidir}/test-requirements.txt | 111 | -r{toxinidir}/test-requirements.txt |
2903 | 113 | 112 | ||
2904 | 114 | [testenv:tip-pycodestyle] | 113 | [testenv:tip-pycodestyle] |
Thanks, this looks perfect. I'd diffed my version of ubuntu/devel with yours and it's clean.
(neipa) cloud-init % git diff oddbloke/ubuntu/devel ubuntu/devel
diff --git a/debian/changelog b/debian/changelog
index f869278..a8b05a4 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -25,7 +25,7 @@ cloud-init (18.5-61-gb76714c3-0ubuntu1) disco; urgency=medium
     [Robert Schweikert] (LP: #1812117)
   - DataSourceEc2: update RELEASE_BLOCKER to be more accurate
- -- Daniel Watkins <email address hidden>  Wed, 10 Apr 2019 11:49:03 -0400
+ -- Ryan Harper <email address hidden>  Tue, 09 Apr 2019 15:09:59 -0500
 cloud-init (18.5-45-g3554ffe8-0ubuntu1) disco; urgency=medium