Merge ~oddbloke/cloud-init/+git/cloud-init:ubuntu/devel into cloud-init:ubuntu/devel

Proposed by Dan Watkins
Status: Merged
Merged at revision: 6380e13bb69e77f6684e89ff59c467e59a6b8b7f
Proposed branch: ~oddbloke/cloud-init/+git/cloud-init:ubuntu/devel
Merge into: cloud-init:ubuntu/devel
Diff against target: 2904 lines (+1300/-449)
34 files modified
cloudinit/cmd/main.py (+5/-4)
cloudinit/config/cc_ubuntu_advantage.py (+116/-109)
cloudinit/config/cc_ubuntu_drivers.py (+112/-0)
cloudinit/config/tests/test_ubuntu_advantage.py (+191/-156)
cloudinit/config/tests/test_ubuntu_drivers.py (+174/-0)
cloudinit/net/eni.py (+11/-5)
cloudinit/net/network_state.py (+33/-8)
cloudinit/net/sysconfig.py (+25/-9)
cloudinit/sources/DataSourceAzure.py (+168/-89)
cloudinit/sources/DataSourceEc2.py (+6/-2)
cloudinit/sources/DataSourceNoCloud.py (+3/-1)
cloudinit/sources/DataSourceScaleway.py (+2/-1)
cloudinit/sources/__init__.py (+3/-3)
cloudinit/sources/helpers/azure.py (+31/-0)
cloudinit/sources/tests/test_init.py (+15/-0)
cloudinit/util.py (+15/-0)
config/cloud.cfg.tmpl (+3/-0)
debian/changelog (+29/-0)
doc/rtd/topics/datasources/azure.rst (+35/-22)
doc/rtd/topics/datasources/nocloud.rst (+1/-1)
doc/rtd/topics/modules.rst (+1/-0)
tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml (+1/-2)
tests/cloud_tests/testcases/modules/apt_pipelining_os.py (+3/-3)
tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml (+4/-5)
tests/data/azure/non_unicode_random_string (+1/-0)
tests/unittests/test_datasource/test_azure.py (+22/-2)
tests/unittests/test_datasource/test_nocloud.py (+42/-0)
tests/unittests/test_datasource/test_scaleway.py (+7/-0)
tests/unittests/test_distros/test_netconfig.py (+2/-0)
tests/unittests/test_ds_identify.py (+17/-0)
tests/unittests/test_handler/test_schema.py (+1/-0)
tests/unittests/test_net.py (+209/-15)
tools/ds-identify (+4/-3)
tox.ini (+8/-9)
Reviewer            Review Type             Date Requested  Status
Server Team CI bot  continuous-integration                  Needs Fixing
Ryan Harper                                                 Approve
Review via email: mp+365803@code.launchpad.net
Ryan Harper (raharper) wrote:

Thanks, this looks perfect. I diffed my version of ubuntu/devel against yours and it's clean.

(neipa) cloud-init % git diff oddbloke/ubuntu/devel
diff --git a/debian/changelog b/debian/changelog
index f869278..a8b05a4 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -25,7 +25,7 @@ cloud-init (18.5-61-gb76714c3-0ubuntu1) disco; urgency=medium
       [Robert Schweikert] (LP: #1812117)
     - DataSourceEc2: update RELEASE_BLOCKER to be more accurate

- -- Daniel Watkins <email address hidden> Wed, 10 Apr 2019 11:49:03 -0400
+ -- Ryan Harper <email address hidden> Tue, 09 Apr 2019 15:09:59 -0500

 cloud-init (18.5-45-g3554ffe8-0ubuntu1) disco; urgency=medium

review: Approve
Server Team CI bot (server-team-bot) wrote:

FAILED: Continuous integration, rev:6380e13bb69e77f6684e89ff59c467e59a6b8b7f
No commit message was specified in the merge proposal. Click on the following link and set the commit message (if you want a Jenkins rebuild, you need to trigger it yourself):
https://code.launchpad.net/~daniel-thewatkins/cloud-init/+git/cloud-init/+merge/365803/+edit-commit-message

https://jenkins.ubuntu.com/server/job/cloud-init-ci/674/
Executed test runs:
    SUCCESS: Checkout
    SUCCESS: Unit & Style Tests
    SUCCESS: Ubuntu LTS: Build
    SUCCESS: Ubuntu LTS: Integration
    IN_PROGRESS: Declarative: Post Actions

Click here to trigger a rebuild:
https://jenkins.ubuntu.com/server/job/cloud-init-ci/674/rebuild

review: Needs Fixing (continuous-integration)

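For reference while reading the diff below: this proposal reworks cc_ubuntu_advantage around a contract token plus an optional list of services to enable, and adds a new cc_ubuntu_drivers module gated on license acceptance. A minimal cloud-config sketch of the resulting user-facing configuration, assembled from the schema examples in the diff (the token and driver version values are placeholders):

    #cloud-config
    ubuntu_advantage:
      # Contract token obtained from https://ubuntu.com/advantage (placeholder)
      token: <ua_contract_token>
      # Only the listed services are enabled; others stay disabled
      enable:
        - fips
        - esm
    drivers:
      nvidia:
        # Must be true for any driver installation to happen
        license-accepted: true
        # Optional; omit to install the latest available version
        version: "410"

Per the module descriptions in the diff, enabling FIPS also requires scheduling a reboot (see the power_state example in the cc_ubuntu_advantage schema).
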
Preview Diff

1diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
2index 933c019..a5446da 100644
3--- a/cloudinit/cmd/main.py
4+++ b/cloudinit/cmd/main.py
5@@ -632,13 +632,14 @@ def status_wrapper(name, args, data_d=None, link_d=None):
6 'start': None,
7 'finished': None,
8 }
9+
10 if status is None:
11 status = {'v1': {}}
12- for m in modes:
13- status['v1'][m] = nullstatus.copy()
14 status['v1']['datasource'] = None
15- elif mode not in status['v1']:
16- status['v1'][mode] = nullstatus.copy()
17+
18+ for m in modes:
19+ if m not in status['v1']:
20+ status['v1'][m] = nullstatus.copy()
21
22 v1 = status['v1']
23 v1['stage'] = mode
24diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py
25index 5e082bd..f488123 100644
26--- a/cloudinit/config/cc_ubuntu_advantage.py
27+++ b/cloudinit/config/cc_ubuntu_advantage.py
28@@ -1,150 +1,143 @@
29-# Copyright (C) 2018 Canonical Ltd.
30-#
31 # This file is part of cloud-init. See LICENSE file for license information.
32
33-"""Ubuntu advantage: manage ubuntu-advantage offerings from Canonical."""
34+"""ubuntu_advantage: Configure Ubuntu Advantage support services"""
35
36-import sys
37 from textwrap import dedent
38
39-from cloudinit import log as logging
40+import six
41+
42 from cloudinit.config.schema import (
43 get_schema_doc, validate_cloudconfig_schema)
44+from cloudinit import log as logging
45 from cloudinit.settings import PER_INSTANCE
46-from cloudinit.subp import prepend_base_command
47 from cloudinit import util
48
49
50-distros = ['ubuntu']
51-frequency = PER_INSTANCE
52+UA_URL = 'https://ubuntu.com/advantage'
53
54-LOG = logging.getLogger(__name__)
55+distros = ['ubuntu']
56
57 schema = {
58 'id': 'cc_ubuntu_advantage',
59 'name': 'Ubuntu Advantage',
60- 'title': 'Install, configure and manage ubuntu-advantage offerings',
61+ 'title': 'Configure Ubuntu Advantage support services',
62 'description': dedent("""\
63- This module provides configuration options to setup ubuntu-advantage
64- subscriptions.
65-
66- .. note::
67- Both ``commands`` value can be either a dictionary or a list. If
68- the configuration provided is a dictionary, the keys are only used
69- to order the execution of the commands and the dictionary is
70- merged with any vendor-data ubuntu-advantage configuration
71- provided. If a ``commands`` is provided as a list, any vendor-data
72- ubuntu-advantage ``commands`` are ignored.
73-
74- Ubuntu-advantage ``commands`` is a dictionary or list of
75- ubuntu-advantage commands to run on the deployed machine.
76- These commands can be used to enable or disable subscriptions to
77- various ubuntu-advantage products. See 'man ubuntu-advantage' for more
78- information on supported subcommands.
79-
80- .. note::
81- Each command item can be a string or list. If the item is a list,
82- 'ubuntu-advantage' can be omitted and it will automatically be
83- inserted as part of the command.
84+ Attach machine to an existing Ubuntu Advantage support contract and
85+ enable or disable support services such as Livepatch, ESM,
86+ FIPS and FIPS Updates. When attaching a machine to Ubuntu Advantage,
87+ one can also specify services to enable. When the 'enable'
88+ list is present, any named service will be enabled and all absent
89+ services will remain disabled.
90+
91+ Note that when enabling FIPS or FIPS updates you will need to schedule
92+ a reboot to ensure the machine is running the FIPS-compliant kernel.
93+ See :ref:`Power State Change` for information on how to configure
94+ cloud-init to perform this reboot.
95 """),
96 'distros': distros,
97 'examples': [dedent("""\
98- # Enable Extended Security Maintenance using your service auth token
99+ # Attach the machine to a Ubuntu Advantage support contract with a
100+ # UA contract token obtained from %s.
101+ ubuntu_advantage:
102+ token: <ua_contract_token>
103+ """ % UA_URL), dedent("""\
104+ # Attach the machine to an Ubuntu Advantage support contract enabling
105+ # only fips and esm services. Services will only be enabled if
106+ # the environment supports said service. Otherwise warnings will
107+ # be logged for incompatible services specified.
108 ubuntu-advantage:
109- commands:
110- 00: ubuntu-advantage enable-esm <token>
111+ token: <ua_contract_token>
112+ enable:
113+ - fips
114+ - esm
115 """), dedent("""\
116- # Enable livepatch by providing your livepatch token
117+ # Attach the machine to an Ubuntu Advantage support contract and enable
118+ # the FIPS service. Perform a reboot once cloud-init has
119+ # completed.
120+ power_state:
121+ mode: reboot
122 ubuntu-advantage:
123- commands:
124- 00: ubuntu-advantage enable-livepatch <livepatch-token>
125-
126- """), dedent("""\
127- # Convenience: the ubuntu-advantage command can be omitted when
128- # specifying commands as a list and 'ubuntu-advantage' will
129- # automatically be prepended.
130- # The following commands are equivalent
131- ubuntu-advantage:
132- commands:
133- 00: ['enable-livepatch', 'my-token']
134- 01: ['ubuntu-advantage', 'enable-livepatch', 'my-token']
135- 02: ubuntu-advantage enable-livepatch my-token
136- 03: 'ubuntu-advantage enable-livepatch my-token'
137- """)],
138+ token: <ua_contract_token>
139+ enable:
140+ - fips
141+ """)],
142 'frequency': PER_INSTANCE,
143 'type': 'object',
144 'properties': {
145- 'ubuntu-advantage': {
146+ 'ubuntu_advantage': {
147 'type': 'object',
148 'properties': {
149- 'commands': {
150- 'type': ['object', 'array'], # Array of strings or dict
151- 'items': {
152- 'oneOf': [
153- {'type': 'array', 'items': {'type': 'string'}},
154- {'type': 'string'}]
155- },
156- 'additionalItems': False, # Reject non-string & non-list
157- 'minItems': 1,
158- 'minProperties': 1,
159+ 'enable': {
160+ 'type': 'array',
161+ 'items': {'type': 'string'},
162+ },
163+ 'token': {
164+ 'type': 'string',
165+ 'description': (
166+ 'A contract token obtained from %s.' % UA_URL)
167 }
168 },
169- 'additionalProperties': False, # Reject keys not in schema
170- 'required': ['commands']
171+ 'required': ['token'],
172+ 'additionalProperties': False
173 }
174 }
175 }
176
177-# TODO schema for 'assertions' and 'commands' are too permissive at the moment.
178-# Once python-jsonschema supports schema draft 6 add support for arbitrary
179-# object keys with 'patternProperties' constraint to validate string values.
180-
181 __doc__ = get_schema_doc(schema) # Supplement python help()
182
183-UA_CMD = "ubuntu-advantage"
184-
185-
186-def run_commands(commands):
187- """Run the commands provided in ubuntu-advantage:commands config.
188+LOG = logging.getLogger(__name__)
189
190- Commands are run individually. Any errors are collected and reported
191- after attempting all commands.
192
193- @param commands: A list or dict containing commands to run. Keys of a
194- dict will be used to order the commands provided as dict values.
195- """
196- if not commands:
197- return
198- LOG.debug('Running user-provided ubuntu-advantage commands')
199- if isinstance(commands, dict):
200- # Sort commands based on dictionary key
201- commands = [v for _, v in sorted(commands.items())]
202- elif not isinstance(commands, list):
203- raise TypeError(
204- 'commands parameter was not a list or dict: {commands}'.format(
205- commands=commands))
206-
207- fixed_ua_commands = prepend_base_command('ubuntu-advantage', commands)
208-
209- cmd_failures = []
210- for command in fixed_ua_commands:
211- shell = isinstance(command, str)
212- try:
213- util.subp(command, shell=shell, status_cb=sys.stderr.write)
214- except util.ProcessExecutionError as e:
215- cmd_failures.append(str(e))
216- if cmd_failures:
217- msg = (
218- 'Failures running ubuntu-advantage commands:\n'
219- '{cmd_failures}'.format(
220- cmd_failures=cmd_failures))
221+def configure_ua(token=None, enable=None):
222+ """Call ua commandline client to attach or enable services."""
223+ error = None
224+ if not token:
225+ error = ('ubuntu_advantage: token must be provided')
226+ LOG.error(error)
227+ raise RuntimeError(error)
228+
229+ if enable is None:
230+ enable = []
231+ elif isinstance(enable, six.string_types):
232+ LOG.warning('ubuntu_advantage: enable should be a list, not'
233+ ' a string; treating as a single enable')
234+ enable = [enable]
235+ elif not isinstance(enable, list):
236+ LOG.warning('ubuntu_advantage: enable should be a list, not'
237+ ' a %s; skipping enabling services',
238+ type(enable).__name__)
239+ enable = []
240+
241+ attach_cmd = ['ua', 'attach', token]
242+ LOG.debug('Attaching to Ubuntu Advantage. %s', ' '.join(attach_cmd))
243+ try:
244+ util.subp(attach_cmd)
245+ except util.ProcessExecutionError as e:
246+ msg = 'Failure attaching Ubuntu Advantage:\n{error}'.format(
247+ error=str(e))
248 util.logexc(LOG, msg)
249 raise RuntimeError(msg)
250+ enable_errors = []
251+ for service in enable:
252+ try:
253+ cmd = ['ua', 'enable', service]
254+ util.subp(cmd, capture=True)
255+ except util.ProcessExecutionError as e:
256+ enable_errors.append((service, e))
257+ if enable_errors:
258+ for service, error in enable_errors:
259+ msg = 'Failure enabling "{service}":\n{error}'.format(
260+ service=service, error=str(error))
261+ util.logexc(LOG, msg)
262+ raise RuntimeError(
263+ 'Failure enabling Ubuntu Advantage service(s): {}'.format(
264+ ', '.join('"{}"'.format(service)
265+ for service, _ in enable_errors)))
266
267
268 def maybe_install_ua_tools(cloud):
269 """Install ubuntu-advantage-tools if not present."""
270- if util.which('ubuntu-advantage'):
271+ if util.which('ua'):
272 return
273 try:
274 cloud.distro.update_package_sources()
275@@ -159,14 +152,28 @@ def maybe_install_ua_tools(cloud):
276
277
278 def handle(name, cfg, cloud, log, args):
279- cfgin = cfg.get('ubuntu-advantage')
280- if cfgin is None:
281- LOG.debug(("Skipping module named %s,"
282- " no 'ubuntu-advantage' key in configuration"), name)
283+ ua_section = None
284+ if 'ubuntu-advantage' in cfg:
285+ LOG.warning('Deprecated configuration key "ubuntu-advantage" provided.'
286+ ' Expected underscore delimited "ubuntu_advantage"; will'
287+ ' attempt to continue.')
288+ ua_section = cfg['ubuntu-advantage']
289+ if 'ubuntu_advantage' in cfg:
290+ ua_section = cfg['ubuntu_advantage']
291+ if ua_section is None:
292+ LOG.debug("Skipping module named %s,"
293+ " no 'ubuntu_advantage' configuration found", name)
294 return
295-
296 validate_cloudconfig_schema(cfg, schema)
297+ if 'commands' in ua_section:
298+ msg = (
299+ 'Deprecated configuration "ubuntu-advantage: commands" provided.'
300+ ' Expected "token"')
301+ LOG.error(msg)
302+ raise RuntimeError(msg)
303+
304 maybe_install_ua_tools(cloud)
305- run_commands(cfgin.get('commands', []))
306+ configure_ua(token=ua_section.get('token'),
307+ enable=ua_section.get('enable'))
308
309 # vi: ts=4 expandtab
310diff --git a/cloudinit/config/cc_ubuntu_drivers.py b/cloudinit/config/cc_ubuntu_drivers.py
311new file mode 100644
312index 0000000..91feb60
313--- /dev/null
314+++ b/cloudinit/config/cc_ubuntu_drivers.py
315@@ -0,0 +1,112 @@
316+# This file is part of cloud-init. See LICENSE file for license information.
317+
318+"""Ubuntu Drivers: Interact with third party drivers in Ubuntu."""
319+
320+from textwrap import dedent
321+
322+from cloudinit.config.schema import (
323+ get_schema_doc, validate_cloudconfig_schema)
324+from cloudinit import log as logging
325+from cloudinit.settings import PER_INSTANCE
326+from cloudinit import type_utils
327+from cloudinit import util
328+
329+LOG = logging.getLogger(__name__)
330+
331+frequency = PER_INSTANCE
332+distros = ['ubuntu']
333+schema = {
334+ 'id': 'cc_ubuntu_drivers',
335+ 'name': 'Ubuntu Drivers',
336+ 'title': 'Interact with third party drivers in Ubuntu.',
337+ 'description': dedent("""\
338+ This module interacts with the 'ubuntu-drivers' command to install
339+ third party driver packages."""),
340+ 'distros': distros,
341+ 'examples': [dedent("""\
342+ drivers:
343+ nvidia:
344+ license-accepted: true
345+ """)],
346+ 'frequency': frequency,
347+ 'type': 'object',
348+ 'properties': {
349+ 'drivers': {
350+ 'type': 'object',
351+ 'additionalProperties': False,
352+ 'properties': {
353+ 'nvidia': {
354+ 'type': 'object',
355+ 'additionalProperties': False,
356+ 'required': ['license-accepted'],
357+ 'properties': {
358+ 'license-accepted': {
359+ 'type': 'boolean',
360+ 'description': ("Do you accept the NVIDIA driver"
361+ " license?"),
362+ },
363+ 'version': {
364+ 'type': 'string',
365+ 'description': (
366+ 'The version of the driver to install (e.g.'
367+ ' "390", "410"). Defaults to the latest'
368+ ' version.'),
369+ },
370+ },
371+ },
372+ },
373+ },
374+ },
375+}
376+OLD_UBUNTU_DRIVERS_STDERR_NEEDLE = (
377+ "ubuntu-drivers: error: argument <command>: invalid choice: 'install'")
378+
379+__doc__ = get_schema_doc(schema) # Supplement python help()
380+
381+
382+def install_drivers(cfg, pkg_install_func):
383+ if not isinstance(cfg, dict):
384+ raise TypeError(
385+ "'drivers' config expected dict, found '%s': %s" %
386+ (type_utils.obj_name(cfg), cfg))
387+
388+ cfgpath = 'nvidia/license-accepted'
389+ # Call translate_bool to ensure that we treat string values like "yes" as
390+ # acceptance and _don't_ treat string values like "nah" as acceptance
391+ # because they're True-ish
392+ nv_acc = util.translate_bool(util.get_cfg_by_path(cfg, cfgpath))
393+ if not nv_acc:
394+ LOG.debug("Not installing NVIDIA drivers. %s=%s", cfgpath, nv_acc)
395+ return
396+
397+ if not util.which('ubuntu-drivers'):
398+ LOG.debug("'ubuntu-drivers' command not available. "
399+ "Installing ubuntu-drivers-common")
400+ pkg_install_func(['ubuntu-drivers-common'])
401+
402+ driver_arg = 'nvidia'
403+ version_cfg = util.get_cfg_by_path(cfg, 'nvidia/version')
404+ if version_cfg:
405+ driver_arg += ':{}'.format(version_cfg)
406+
407+ LOG.debug("Installing NVIDIA drivers (%s=%s, version=%s)",
408+ cfgpath, nv_acc, version_cfg if version_cfg else 'latest')
409+
410+ try:
411+ util.subp(['ubuntu-drivers', 'install', '--gpgpu', driver_arg])
412+ except util.ProcessExecutionError as exc:
413+ if OLD_UBUNTU_DRIVERS_STDERR_NEEDLE in exc.stderr:
414+ LOG.warning('the available version of ubuntu-drivers is'
415+ ' too old to perform requested driver installation')
416+ elif 'No drivers found for installation.' in exc.stdout:
417+ LOG.warning('ubuntu-drivers found no drivers for installation')
418+ raise
419+
420+
421+def handle(name, cfg, cloud, log, _args):
422+ if "drivers" not in cfg:
423+ log.debug("Skipping module named %s, no 'drivers' key in config", name)
424+ return
425+
426+ validate_cloudconfig_schema(cfg, schema)
427+ install_drivers(cfg['drivers'], cloud.distro.install_packages)
428diff --git a/cloudinit/config/tests/test_ubuntu_advantage.py b/cloudinit/config/tests/test_ubuntu_advantage.py
429index b7cf9be..8c4161e 100644
430--- a/cloudinit/config/tests/test_ubuntu_advantage.py
431+++ b/cloudinit/config/tests/test_ubuntu_advantage.py
432@@ -1,10 +1,7 @@
433 # This file is part of cloud-init. See LICENSE file for license information.
434
435-import re
436-from six import StringIO
437-
438 from cloudinit.config.cc_ubuntu_advantage import (
439- handle, maybe_install_ua_tools, run_commands, schema)
440+ configure_ua, handle, maybe_install_ua_tools, schema)
441 from cloudinit.config.schema import validate_cloudconfig_schema
442 from cloudinit import util
443 from cloudinit.tests.helpers import (
444@@ -20,90 +17,120 @@ class FakeCloud(object):
445 self.distro = distro
446
447
448-class TestRunCommands(CiTestCase):
449+class TestConfigureUA(CiTestCase):
450
451 with_logs = True
452 allowed_subp = [CiTestCase.SUBP_SHELL_TRUE]
453
454 def setUp(self):
455- super(TestRunCommands, self).setUp()
456+ super(TestConfigureUA, self).setUp()
457 self.tmp = self.tmp_dir()
458
459 @mock.patch('%s.util.subp' % MPATH)
460- def test_run_commands_on_empty_list(self, m_subp):
461- """When provided with an empty list, run_commands does nothing."""
462- run_commands([])
463- self.assertEqual('', self.logs.getvalue())
464- m_subp.assert_not_called()
465-
466- def test_run_commands_on_non_list_or_dict(self):
467- """When provided an invalid type, run_commands raises an error."""
468- with self.assertRaises(TypeError) as context_manager:
469- run_commands(commands="I'm Not Valid")
470+ def test_configure_ua_attach_error(self, m_subp):
471+ """Errors from ua attach command are raised."""
472+ m_subp.side_effect = util.ProcessExecutionError(
473+ 'Invalid token SomeToken')
474+ with self.assertRaises(RuntimeError) as context_manager:
475+ configure_ua(token='SomeToken')
476 self.assertEqual(
477- "commands parameter was not a list or dict: I'm Not Valid",
478+ 'Failure attaching Ubuntu Advantage:\nUnexpected error while'
479+ ' running command.\nCommand: -\nExit code: -\nReason: -\n'
480+ 'Stdout: Invalid token SomeToken\nStderr: -',
481 str(context_manager.exception))
482
483- def test_run_command_logs_commands_and_exit_codes_to_stderr(self):
484- """All exit codes are logged to stderr."""
485- outfile = self.tmp_path('output.log', dir=self.tmp)
486-
487- cmd1 = 'echo "HI" >> %s' % outfile
488- cmd2 = 'bogus command'
489- cmd3 = 'echo "MOM" >> %s' % outfile
490- commands = [cmd1, cmd2, cmd3]
491-
492- mock_path = '%s.sys.stderr' % MPATH
493- with mock.patch(mock_path, new_callable=StringIO) as m_stderr:
494- with self.assertRaises(RuntimeError) as context_manager:
495- run_commands(commands=commands)
496-
497- self.assertIsNotNone(
498- re.search(r'bogus: (command )?not found',
499- str(context_manager.exception)),
500- msg='Expected bogus command not found')
501- expected_stderr_log = '\n'.join([
502- 'Begin run command: {cmd}'.format(cmd=cmd1),
503- 'End run command: exit(0)',
504- 'Begin run command: {cmd}'.format(cmd=cmd2),
505- 'ERROR: End run command: exit(127)',
506- 'Begin run command: {cmd}'.format(cmd=cmd3),
507- 'End run command: exit(0)\n'])
508- self.assertEqual(expected_stderr_log, m_stderr.getvalue())
509-
510- def test_run_command_as_lists(self):
511- """When commands are specified as a list, run them in order."""
512- outfile = self.tmp_path('output.log', dir=self.tmp)
513-
514- cmd1 = 'echo "HI" >> %s' % outfile
515- cmd2 = 'echo "MOM" >> %s' % outfile
516- commands = [cmd1, cmd2]
517- with mock.patch('%s.sys.stderr' % MPATH, new_callable=StringIO):
518- run_commands(commands=commands)
519+ @mock.patch('%s.util.subp' % MPATH)
520+ def test_configure_ua_attach_with_token(self, m_subp):
521+ """When token is provided, attach the machine to ua using the token."""
522+ configure_ua(token='SomeToken')
523+ m_subp.assert_called_once_with(['ua', 'attach', 'SomeToken'])
524+ self.assertEqual(
525+ 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
526+ self.logs.getvalue())
527+
528+ @mock.patch('%s.util.subp' % MPATH)
529+ def test_configure_ua_attach_on_service_error(self, m_subp):
530+ """all services should be enabled and then any failures raised"""
531
532+ def fake_subp(cmd, capture=None):
533+ fail_cmds = [['ua', 'enable', svc] for svc in ['esm', 'cc']]
534+ if cmd in fail_cmds and capture:
535+ svc = cmd[-1]
536+ raise util.ProcessExecutionError(
537+ 'Invalid {} credentials'.format(svc.upper()))
538+
539+ m_subp.side_effect = fake_subp
540+
541+ with self.assertRaises(RuntimeError) as context_manager:
542+ configure_ua(token='SomeToken', enable=['esm', 'cc', 'fips'])
543+ self.assertEqual(
544+ m_subp.call_args_list,
545+ [mock.call(['ua', 'attach', 'SomeToken']),
546+ mock.call(['ua', 'enable', 'esm'], capture=True),
547+ mock.call(['ua', 'enable', 'cc'], capture=True),
548+ mock.call(['ua', 'enable', 'fips'], capture=True)])
549 self.assertIn(
550- 'DEBUG: Running user-provided ubuntu-advantage commands',
551+ 'WARNING: Failure enabling "esm":\nUnexpected error'
552+ ' while running command.\nCommand: -\nExit code: -\nReason: -\n'
553+ 'Stdout: Invalid ESM credentials\nStderr: -\n',
554 self.logs.getvalue())
555- self.assertEqual('HI\nMOM\n', util.load_file(outfile))
556 self.assertIn(
557- 'WARNING: Non-ubuntu-advantage commands in ubuntu-advantage'
558- ' config:',
559+ 'WARNING: Failure enabling "cc":\nUnexpected error'
560+ ' while running command.\nCommand: -\nExit code: -\nReason: -\n'
561+ 'Stdout: Invalid CC credentials\nStderr: -\n',
562+ self.logs.getvalue())
563+ self.assertEqual(
564+ 'Failure enabling Ubuntu Advantage service(s): "esm", "cc"',
565+ str(context_manager.exception))
566+
567+ @mock.patch('%s.util.subp' % MPATH)
568+ def test_configure_ua_attach_with_empty_services(self, m_subp):
569+ """When services is an empty list, do not auto-enable attach."""
570+ configure_ua(token='SomeToken', enable=[])
571+ m_subp.assert_called_once_with(['ua', 'attach', 'SomeToken'])
572+ self.assertEqual(
573+ 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
574 self.logs.getvalue())
575
576- def test_run_command_dict_sorted_as_command_script(self):
577- """When commands are a dict, sort them and run."""
578- outfile = self.tmp_path('output.log', dir=self.tmp)
579- cmd1 = 'echo "HI" >> %s' % outfile
580- cmd2 = 'echo "MOM" >> %s' % outfile
581- commands = {'02': cmd1, '01': cmd2}
582- with mock.patch('%s.sys.stderr' % MPATH, new_callable=StringIO):
583- run_commands(commands=commands)
584+ @mock.patch('%s.util.subp' % MPATH)
585+ def test_configure_ua_attach_with_specific_services(self, m_subp):
586+ """When services a list, only enable specific services."""
587+ configure_ua(token='SomeToken', enable=['fips'])
588+ self.assertEqual(
589+ m_subp.call_args_list,
590+ [mock.call(['ua', 'attach', 'SomeToken']),
591+ mock.call(['ua', 'enable', 'fips'], capture=True)])
592+ self.assertEqual(
593+ 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
594+ self.logs.getvalue())
595+
596+ @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock())
597+ @mock.patch('%s.util.subp' % MPATH)
598+ def test_configure_ua_attach_with_string_services(self, m_subp):
599+ """When services a string, treat as singleton list and warn"""
600+ configure_ua(token='SomeToken', enable='fips')
601+ self.assertEqual(
602+ m_subp.call_args_list,
603+ [mock.call(['ua', 'attach', 'SomeToken']),
604+ mock.call(['ua', 'enable', 'fips'], capture=True)])
605+ self.assertEqual(
606+ 'WARNING: ubuntu_advantage: enable should be a list, not a'
607+ ' string; treating as a single enable\n'
608+ 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
609+ self.logs.getvalue())
610
611- expected_messages = [
612- 'DEBUG: Running user-provided ubuntu-advantage commands']
613- for message in expected_messages:
614- self.assertIn(message, self.logs.getvalue())
615- self.assertEqual('MOM\nHI\n', util.load_file(outfile))
616+ @mock.patch('%s.util.subp' % MPATH)
617+ def test_configure_ua_attach_with_weird_services(self, m_subp):
618+ """When services not string or list, warn but still attach"""
619+ configure_ua(token='SomeToken', enable={'deffo': 'wont work'})
620+ self.assertEqual(
621+ m_subp.call_args_list,
622+ [mock.call(['ua', 'attach', 'SomeToken'])])
623+ self.assertEqual(
624+ 'WARNING: ubuntu_advantage: enable should be a list, not a'
625+ ' dict; skipping enabling services\n'
626+ 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
627+ self.logs.getvalue())
628
629
630 @skipUnlessJsonSchema()
631@@ -112,90 +139,50 @@ class TestSchema(CiTestCase, SchemaTestCaseMixin):
632 with_logs = True
633 schema = schema
634
635- def test_schema_warns_on_ubuntu_advantage_not_as_dict(self):
636- """If ubuntu-advantage configuration is not a dict, emit a warning."""
637- validate_cloudconfig_schema({'ubuntu-advantage': 'wrong type'}, schema)
638+ @mock.patch('%s.maybe_install_ua_tools' % MPATH)
639+ @mock.patch('%s.configure_ua' % MPATH)
640+ def test_schema_warns_on_ubuntu_advantage_not_dict(self, _cfg, _):
641+ """If ubuntu_advantage configuration is not a dict, emit a warning."""
642+ validate_cloudconfig_schema({'ubuntu_advantage': 'wrong type'}, schema)
643 self.assertEqual(
644- "WARNING: Invalid config:\nubuntu-advantage: 'wrong type' is not"
645+ "WARNING: Invalid config:\nubuntu_advantage: 'wrong type' is not"
646 " of type 'object'\n",
647 self.logs.getvalue())
648
649- @mock.patch('%s.run_commands' % MPATH)
650- def test_schema_disallows_unknown_keys(self, _):
651- """Unknown keys in ubuntu-advantage configuration emit warnings."""
652+ @mock.patch('%s.maybe_install_ua_tools' % MPATH)
653+ @mock.patch('%s.configure_ua' % MPATH)
654+ def test_schema_disallows_unknown_keys(self, _cfg, _):
655+ """Unknown keys in ubuntu_advantage configuration emit warnings."""
656 validate_cloudconfig_schema(
657- {'ubuntu-advantage': {'commands': ['ls'], 'invalid-key': ''}},
658+ {'ubuntu_advantage': {'token': 'winner', 'invalid-key': ''}},
659 schema)
660 self.assertIn(
661- 'WARNING: Invalid config:\nubuntu-advantage: Additional properties'
662+ 'WARNING: Invalid config:\nubuntu_advantage: Additional properties'
663 " are not allowed ('invalid-key' was unexpected)",
664 self.logs.getvalue())
665
666- def test_warn_schema_requires_commands(self):
667- """Warn when ubuntu-advantage configuration lacks commands."""
668- validate_cloudconfig_schema(
669- {'ubuntu-advantage': {}}, schema)
670- self.assertEqual(
671- "WARNING: Invalid config:\nubuntu-advantage: 'commands' is a"
672- " required property\n",
673- self.logs.getvalue())
674-
675- @mock.patch('%s.run_commands' % MPATH)
676- def test_warn_schema_commands_is_not_list_or_dict(self, _):
677- """Warn when ubuntu-advantage:commands config is not a list or dict."""
678+ @mock.patch('%s.maybe_install_ua_tools' % MPATH)
679+ @mock.patch('%s.configure_ua' % MPATH)
680+ def test_warn_schema_requires_token(self, _cfg, _):
681+ """Warn if ubuntu_advantage configuration lacks token."""
682 validate_cloudconfig_schema(
683- {'ubuntu-advantage': {'commands': 'broken'}}, schema)
684+ {'ubuntu_advantage': {'enable': ['esm']}}, schema)
685 self.assertEqual(
686- "WARNING: Invalid config:\nubuntu-advantage.commands: 'broken' is"
687- " not of type 'object', 'array'\n",
688- self.logs.getvalue())
689+ "WARNING: Invalid config:\nubuntu_advantage:"
690+ " 'token' is a required property\n", self.logs.getvalue())
691
692- @mock.patch('%s.run_commands' % MPATH)
693- def test_warn_schema_when_commands_is_empty(self, _):
694- """Emit warnings when ubuntu-advantage:commands is empty."""
695- validate_cloudconfig_schema(
696- {'ubuntu-advantage': {'commands': []}}, schema)
697+ @mock.patch('%s.maybe_install_ua_tools' % MPATH)
698+ @mock.patch('%s.configure_ua' % MPATH)
699+ def test_warn_schema_services_is_not_list_or_dict(self, _cfg, _):
700+ """Warn when ubuntu_advantage:enable config is not a list."""
701 validate_cloudconfig_schema(
702- {'ubuntu-advantage': {'commands': {}}}, schema)
703+ {'ubuntu_advantage': {'enable': 'needslist'}}, schema)
704 self.assertEqual(
705- "WARNING: Invalid config:\nubuntu-advantage.commands: [] is too"
706- " short\nWARNING: Invalid config:\nubuntu-advantage.commands: {}"
707- " does not have enough properties\n",
708+ "WARNING: Invalid config:\nubuntu_advantage: 'token' is a"
709+ " required property\nubuntu_advantage.enable: 'needslist'"
710+ " is not of type 'array'\n",
711 self.logs.getvalue())
712
713- @mock.patch('%s.run_commands' % MPATH)
714- def test_schema_when_commands_are_list_or_dict(self, _):
715- """No warnings when ubuntu-advantage:commands are a list or dict."""
716- validate_cloudconfig_schema(
717- {'ubuntu-advantage': {'commands': ['valid']}}, schema)
718- validate_cloudconfig_schema(
719- {'ubuntu-advantage': {'commands': {'01': 'also valid'}}}, schema)
720- self.assertEqual('', self.logs.getvalue())
721-
722- def test_duplicates_are_fine_array_array(self):
723- """Duplicated commands array/array entries are allowed."""
724- self.assertSchemaValid(
725- {'commands': [["echo", "bye"], ["echo" "bye"]]},
726- "command entries can be duplicate.")
727-
728- def test_duplicates_are_fine_array_string(self):
729- """Duplicated commands array/string entries are allowed."""
730- self.assertSchemaValid(
731- {'commands': ["echo bye", "echo bye"]},
732- "command entries can be duplicate.")
733-
734- def test_duplicates_are_fine_dict_array(self):
735- """Duplicated commands dict/array entries are allowed."""
736- self.assertSchemaValid(
737- {'commands': {'00': ["echo", "bye"], '01': ["echo", "bye"]}},
738- "command entries can be duplicate.")
739-
740- def test_duplicates_are_fine_dict_string(self):
741- """Duplicated commands dict/string entries are allowed."""
742- self.assertSchemaValid(
743- {'commands': {'00': "echo bye", '01': "echo bye"}},
744- "command entries can be duplicate.")
745-
746
747 class TestHandle(CiTestCase):
748
749@@ -205,41 +192,89 @@ class TestHandle(CiTestCase):
750 super(TestHandle, self).setUp()
751 self.tmp = self.tmp_dir()
752
753- @mock.patch('%s.run_commands' % MPATH)
754 @mock.patch('%s.validate_cloudconfig_schema' % MPATH)
755- def test_handle_no_config(self, m_schema, m_run):
756+ def test_handle_no_config(self, m_schema):
757 """When no ua-related configuration is provided, nothing happens."""
758 cfg = {}
759 handle('ua-test', cfg=cfg, cloud=None, log=self.logger, args=None)
760 self.assertIn(
761- "DEBUG: Skipping module named ua-test, no 'ubuntu-advantage' key"
762- " in config",
763+ "DEBUG: Skipping module named ua-test, no 'ubuntu_advantage'"
764+ ' configuration found',
765 self.logs.getvalue())
766 m_schema.assert_not_called()
767- m_run.assert_not_called()
768
769+ @mock.patch('%s.configure_ua' % MPATH)
770 @mock.patch('%s.maybe_install_ua_tools' % MPATH)
771- def test_handle_tries_to_install_ubuntu_advantage_tools(self, m_install):
772+ def test_handle_tries_to_install_ubuntu_advantage_tools(
773+ self, m_install, m_cfg):
774 """If ubuntu_advantage is provided, try installing ua-tools package."""
775- cfg = {'ubuntu-advantage': {}}
776+ cfg = {'ubuntu_advantage': {'token': 'valid'}}
777 mycloud = FakeCloud(None)
778 handle('nomatter', cfg=cfg, cloud=mycloud, log=self.logger, args=None)
779 m_install.assert_called_once_with(mycloud)
780
781+ @mock.patch('%s.configure_ua' % MPATH)
782 @mock.patch('%s.maybe_install_ua_tools' % MPATH)
783- def test_handle_runs_commands_provided(self, m_install):
784- """When commands are specified as a list, run them."""
785- outfile = self.tmp_path('output.log', dir=self.tmp)
786+ def test_handle_passes_credentials_and_services_to_configure_ua(
787+ self, m_install, m_configure_ua):
788+ """All ubuntu_advantage config keys are passed to configure_ua."""
789+ cfg = {'ubuntu_advantage': {'token': 'token', 'enable': ['esm']}}
790+ handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None)
791+ m_configure_ua.assert_called_once_with(
792+ token='token', enable=['esm'])
793+
794+ @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock())
795+ @mock.patch('%s.configure_ua' % MPATH)
796+ def test_handle_warns_on_deprecated_ubuntu_advantage_key_w_config(
797+ self, m_configure_ua):
798+ """Warning when ubuntu-advantage key is present with new config"""
799+ cfg = {'ubuntu-advantage': {'token': 'token', 'enable': ['esm']}}
800+ handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None)
801+ self.assertEqual(
802+ 'WARNING: Deprecated configuration key "ubuntu-advantage"'
803+ ' provided. Expected underscore delimited "ubuntu_advantage";'
804+ ' will attempt to continue.',
805+ self.logs.getvalue().splitlines()[0])
806+ m_configure_ua.assert_called_once_with(
807+ token='token', enable=['esm'])
808+
809+ def test_handle_error_on_deprecated_commands_key_dashed(self):
810+ """Error when commands is present in ubuntu-advantage key."""
811+ cfg = {'ubuntu-advantage': {'commands': 'nogo'}}
812+ with self.assertRaises(RuntimeError) as context_manager:
813+ handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None)
814+ self.assertEqual(
815+ 'Deprecated configuration "ubuntu-advantage: commands" provided.'
816+ ' Expected "token"',
817+ str(context_manager.exception))
818+
819+ def test_handle_error_on_deprecated_commands_key_underscored(self):
820+ """Error when commands is present in ubuntu_advantage key."""
821+ cfg = {'ubuntu_advantage': {'commands': 'nogo'}}
822+ with self.assertRaises(RuntimeError) as context_manager:
823+ handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None)
824+ self.assertEqual(
825+ 'Deprecated configuration "ubuntu-advantage: commands" provided.'
826+ ' Expected "token"',
827+ str(context_manager.exception))
828
829+ @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock())
830+ @mock.patch('%s.configure_ua' % MPATH)
831+ def test_handle_prefers_new_style_config(
832+ self, m_configure_ua):
833+ """ubuntu_advantage should be preferred over ubuntu-advantage"""
834 cfg = {
835- 'ubuntu-advantage': {'commands': ['echo "HI" >> %s' % outfile,
836- 'echo "MOM" >> %s' % outfile]}}
837- mock_path = '%s.sys.stderr' % MPATH
838- with self.allow_subp([CiTestCase.SUBP_SHELL_TRUE]):
839- with mock.patch(mock_path, new_callable=StringIO):
840- handle('nomatter', cfg=cfg, cloud=None, log=self.logger,
841- args=None)
842- self.assertEqual('HI\nMOM\n', util.load_file(outfile))
843+ 'ubuntu-advantage': {'token': 'nope', 'enable': ['wrong']},
844+ 'ubuntu_advantage': {'token': 'token', 'enable': ['esm']},
845+ }
846+ handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None)
847+ self.assertEqual(
848+ 'WARNING: Deprecated configuration key "ubuntu-advantage"'
849+ ' provided. Expected underscore delimited "ubuntu_advantage";'
850+ ' will attempt to continue.',
851+ self.logs.getvalue().splitlines()[0])
852+ m_configure_ua.assert_called_once_with(
853+ token='token', enable=['esm'])
854
855
856 class TestMaybeInstallUATools(CiTestCase):
857@@ -253,7 +288,7 @@ class TestMaybeInstallUATools(CiTestCase):
858 @mock.patch('%s.util.which' % MPATH)
859 def test_maybe_install_ua_tools_noop_when_ua_tools_present(self, m_which):
860 """Do nothing if ubuntu-advantage-tools already exists."""
861- m_which.return_value = '/usr/bin/ubuntu-advantage' # already installed
862+ m_which.return_value = '/usr/bin/ua' # already installed
863 distro = mock.MagicMock()
864 distro.update_package_sources.side_effect = RuntimeError(
865 'Some apt error')
866diff --git a/cloudinit/config/tests/test_ubuntu_drivers.py b/cloudinit/config/tests/test_ubuntu_drivers.py
867new file mode 100644
868index 0000000..efba4ce
869--- /dev/null
870+++ b/cloudinit/config/tests/test_ubuntu_drivers.py
871@@ -0,0 +1,174 @@
872+# This file is part of cloud-init. See LICENSE file for license information.
873+
874+import copy
875+
876+from cloudinit.tests.helpers import CiTestCase, skipUnlessJsonSchema, mock
877+from cloudinit.config.schema import (
878+ SchemaValidationError, validate_cloudconfig_schema)
879+from cloudinit.config import cc_ubuntu_drivers as drivers
880+from cloudinit.util import ProcessExecutionError
881+
882+MPATH = "cloudinit.config.cc_ubuntu_drivers."
883+OLD_UBUNTU_DRIVERS_ERROR_STDERR = (
884+ "ubuntu-drivers: error: argument <command>: invalid choice: 'install' "
885+ "(choose from 'list', 'autoinstall', 'devices', 'debug')\n")
886+
887+
888+class TestUbuntuDrivers(CiTestCase):
889+ cfg_accepted = {'drivers': {'nvidia': {'license-accepted': True}}}
890+ install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia']
891+
892+ with_logs = True
893+
894+ @skipUnlessJsonSchema()
895+ def test_schema_requires_boolean_for_license_accepted(self):
896+ with self.assertRaisesRegex(
897+ SchemaValidationError, ".*license-accepted.*TRUE.*boolean"):
898+ validate_cloudconfig_schema(
899+ {'drivers': {'nvidia': {'license-accepted': "TRUE"}}},
900+ schema=drivers.schema, strict=True)
901+
902+ @mock.patch(MPATH + "util.subp", return_value=('', ''))
903+ @mock.patch(MPATH + "util.which", return_value=False)
904+ def _assert_happy_path_taken(self, config, m_which, m_subp):
905+ """Positive path test through handle. Package should be installed."""
906+ myCloud = mock.MagicMock()
907+ drivers.handle('ubuntu_drivers', config, myCloud, None, None)
908+ self.assertEqual([mock.call(['ubuntu-drivers-common'])],
909+ myCloud.distro.install_packages.call_args_list)
910+ self.assertEqual([mock.call(self.install_gpgpu)],
911+ m_subp.call_args_list)
912+
913+ def test_handle_does_package_install(self):
914+ self._assert_happy_path_taken(self.cfg_accepted)
915+
916+ def test_trueish_strings_are_considered_approval(self):
917+ for true_value in ['yes', 'true', 'on', '1']:
918+ new_config = copy.deepcopy(self.cfg_accepted)
919+ new_config['drivers']['nvidia']['license-accepted'] = true_value
920+ self._assert_happy_path_taken(new_config)
921+
922+ @mock.patch(MPATH + "util.subp", side_effect=ProcessExecutionError(
923+ stdout='No drivers found for installation.\n', exit_code=1))
924+ @mock.patch(MPATH + "util.which", return_value=False)
925+ def test_handle_raises_error_if_no_drivers_found(self, m_which, m_subp):
926+ """If ubuntu-drivers doesn't install any drivers, raise an error."""
927+ myCloud = mock.MagicMock()
928+ with self.assertRaises(Exception):
929+ drivers.handle(
930+ 'ubuntu_drivers', self.cfg_accepted, myCloud, None, None)
931+ self.assertEqual([mock.call(['ubuntu-drivers-common'])],
932+ myCloud.distro.install_packages.call_args_list)
933+ self.assertEqual([mock.call(self.install_gpgpu)],
934+ m_subp.call_args_list)
935+ self.assertIn('ubuntu-drivers found no drivers for installation',
936+ self.logs.getvalue())
937+
938+ @mock.patch(MPATH + "util.subp", return_value=('', ''))
939+ @mock.patch(MPATH + "util.which", return_value=False)
940+ def _assert_inert_with_config(self, config, m_which, m_subp):
941+ """Helper to reduce repetition when testing negative cases"""
942+ myCloud = mock.MagicMock()
943+ drivers.handle('ubuntu_drivers', config, myCloud, None, None)
944+ self.assertEqual(0, myCloud.distro.install_packages.call_count)
945+ self.assertEqual(0, m_subp.call_count)
946+
947+ def test_handle_inert_if_license_not_accepted(self):
948+ """Ensure we don't do anything if the license is rejected."""
949+ self._assert_inert_with_config(
950+ {'drivers': {'nvidia': {'license-accepted': False}}})
951+
952+ def test_handle_inert_if_garbage_in_license_field(self):
953+ """Ensure we don't do anything if unknown text is in license field."""
954+ self._assert_inert_with_config(
955+ {'drivers': {'nvidia': {'license-accepted': 'garbage'}}})
956+
957+ def test_handle_inert_if_no_license_key(self):
958+ """Ensure we don't do anything if no license key."""
959+ self._assert_inert_with_config({'drivers': {'nvidia': {}}})
960+
961+ def test_handle_inert_if_no_nvidia_key(self):
962+ """Ensure we don't do anything if other license accepted."""
963+ self._assert_inert_with_config(
964+ {'drivers': {'acme': {'license-accepted': True}}})
965+
966+ def test_handle_inert_if_string_given(self):
967+ """Ensure we don't do anything if string refusal given."""
968+ for false_value in ['no', 'false', 'off', '0']:
969+ self._assert_inert_with_config(
970+ {'drivers': {'nvidia': {'license-accepted': false_value}}})
971+
972+ @mock.patch(MPATH + "install_drivers")
973+ def test_handle_no_drivers_does_nothing(self, m_install_drivers):
974+ """If no 'drivers' key in the config, nothing should be done."""
975+ myCloud = mock.MagicMock()
976+ myLog = mock.MagicMock()
977+ drivers.handle('ubuntu_drivers', {'foo': 'bzr'}, myCloud, myLog, None)
978+ self.assertIn('Skipping module named',
979+ myLog.debug.call_args_list[0][0][0])
980+ self.assertEqual(0, m_install_drivers.call_count)
981+
982+ @mock.patch(MPATH + "util.subp", return_value=('', ''))
983+ @mock.patch(MPATH + "util.which", return_value=True)
984+ def test_install_drivers_no_install_if_present(self, m_which, m_subp):
985+ """If 'ubuntu-drivers' is present, no package install should occur."""
986+ pkg_install = mock.MagicMock()
987+ drivers.install_drivers(self.cfg_accepted['drivers'],
988+ pkg_install_func=pkg_install)
989+ self.assertEqual(0, pkg_install.call_count)
990+ self.assertEqual([mock.call('ubuntu-drivers')],
991+ m_which.call_args_list)
992+ self.assertEqual([mock.call(self.install_gpgpu)],
993+ m_subp.call_args_list)
994+
995+ def test_install_drivers_rejects_invalid_config(self):
996+ """install_drivers should raise TypeError if not given a config dict"""
997+ pkg_install = mock.MagicMock()
998+ with self.assertRaisesRegex(TypeError, ".*expected dict.*"):
999+ drivers.install_drivers("mystring", pkg_install_func=pkg_install)
1000+ self.assertEqual(0, pkg_install.call_count)
1001+
1002+ @mock.patch(MPATH + "util.subp", side_effect=ProcessExecutionError(
1003+ stderr=OLD_UBUNTU_DRIVERS_ERROR_STDERR, exit_code=2))
1004+ @mock.patch(MPATH + "util.which", return_value=False)
1005+ def test_install_drivers_handles_old_ubuntu_drivers_gracefully(
1006+ self, m_which, m_subp):
1007+ """Older ubuntu-drivers versions should emit message and raise error"""
1008+ myCloud = mock.MagicMock()
1009+ with self.assertRaises(Exception):
1010+ drivers.handle(
1011+ 'ubuntu_drivers', self.cfg_accepted, myCloud, None, None)
1012+ self.assertEqual([mock.call(['ubuntu-drivers-common'])],
1013+ myCloud.distro.install_packages.call_args_list)
1014+ self.assertEqual([mock.call(self.install_gpgpu)],
1015+ m_subp.call_args_list)
1016+ self.assertIn('WARNING: the available version of ubuntu-drivers is'
1017+ ' too old to perform requested driver installation',
1018+ self.logs.getvalue())
1019+
1020+
1021+# Sub-class TestUbuntuDrivers to run the same test cases, but with a version
1022+class TestUbuntuDriversWithVersion(TestUbuntuDrivers):
1023+ cfg_accepted = {
1024+ 'drivers': {'nvidia': {'license-accepted': True, 'version': '123'}}}
1025+ install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia:123']
1026+
1027+ @mock.patch(MPATH + "util.subp", return_value=('', ''))
1028+ @mock.patch(MPATH + "util.which", return_value=False)
1029+ def test_version_none_uses_latest(self, m_which, m_subp):
1030+ myCloud = mock.MagicMock()
1031+ version_none_cfg = {
1032+ 'drivers': {'nvidia': {'license-accepted': True, 'version': None}}}
1033+ drivers.handle(
1034+ 'ubuntu_drivers', version_none_cfg, myCloud, None, None)
1035+ self.assertEqual(
1036+ [mock.call(['ubuntu-drivers', 'install', '--gpgpu', 'nvidia'])],
1037+ m_subp.call_args_list)
1038+
1039+ def test_specifying_a_version_doesnt_override_license_acceptance(self):
1040+ self._assert_inert_with_config({
1041+ 'drivers': {'nvidia': {'license-accepted': False,
1042+ 'version': '123'}}
1043+ })
1044+
1045+# vi: ts=4 expandtab
1046diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
1047index 6423632..b129bb6 100644
1048--- a/cloudinit/net/eni.py
1049+++ b/cloudinit/net/eni.py
1050@@ -366,8 +366,6 @@ class Renderer(renderer.Renderer):
1051 down = indent + "pre-down route del"
1052 or_true = " || true"
1053 mapping = {
1054- 'network': '-net',
1055- 'netmask': 'netmask',
1056 'gateway': 'gw',
1057 'metric': 'metric',
1058 }
1059@@ -379,13 +377,21 @@ class Renderer(renderer.Renderer):
1060 default_gw = ' -A inet6 default'
1061
1062 route_line = ''
1063- for k in ['network', 'netmask', 'gateway', 'metric']:
1064- if default_gw and k in ['network', 'netmask']:
1065+ for k in ['network', 'gateway', 'metric']:
1066+ if default_gw and k == 'network':
1067 continue
1068 if k == 'gateway':
1069 route_line += '%s %s %s' % (default_gw, mapping[k], route[k])
1070 elif k in route:
1071- route_line += ' %s %s' % (mapping[k], route[k])
1072+ if k == 'network':
1073+ if ':' in route[k]:
1074+ route_line += ' -A inet6'
1075+ else:
1076+ route_line += ' -net'
1077+ if 'prefix' in route:
1078+ route_line += ' %s/%s' % (route[k], route['prefix'])
1079+ else:
1080+ route_line += ' %s %s' % (mapping[k], route[k])
1081 content.append(up + route_line + or_true)
1082 content.append(down + route_line + or_true)
1083 return content
1084diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
1085index 539b76d..4d19f56 100644
1086--- a/cloudinit/net/network_state.py
1087+++ b/cloudinit/net/network_state.py
1088@@ -148,6 +148,7 @@ class NetworkState(object):
1089 self._network_state = copy.deepcopy(network_state)
1090 self._version = version
1091 self.use_ipv6 = network_state.get('use_ipv6', False)
1092+ self._has_default_route = None
1093
1094 @property
1095 def config(self):
1096@@ -157,14 +158,6 @@ class NetworkState(object):
1097 def version(self):
1098 return self._version
1099
1100- def iter_routes(self, filter_func=None):
1101- for route in self._network_state.get('routes', []):
1102- if filter_func is not None:
1103- if filter_func(route):
1104- yield route
1105- else:
1106- yield route
1107-
1108 @property
1109 def dns_nameservers(self):
1110 try:
1111@@ -179,6 +172,12 @@ class NetworkState(object):
1112 except KeyError:
1113 return []
1114
1115+ @property
1116+ def has_default_route(self):
1117+ if self._has_default_route is None:
1118+ self._has_default_route = self._maybe_has_default_route()
1119+ return self._has_default_route
1120+
1121 def iter_interfaces(self, filter_func=None):
1122 ifaces = self._network_state.get('interfaces', {})
1123 for iface in six.itervalues(ifaces):
1124@@ -188,6 +187,32 @@ class NetworkState(object):
1125 if filter_func(iface):
1126 yield iface
1127
1128+ def iter_routes(self, filter_func=None):
1129+ for route in self._network_state.get('routes', []):
1130+ if filter_func is not None:
1131+ if filter_func(route):
1132+ yield route
1133+ else:
1134+ yield route
1135+
1136+ def _maybe_has_default_route(self):
1137+ for route in self.iter_routes():
1138+ if self._is_default_route(route):
1139+ return True
1140+ for iface in self.iter_interfaces():
1141+ for subnet in iface.get('subnets', []):
1142+ for route in subnet.get('routes', []):
1143+ if self._is_default_route(route):
1144+ return True
1145+ return False
1146+
1147+ def _is_default_route(self, route):
1148+ default_nets = ('::', '0.0.0.0')
1149+ return (
1150+ route.get('prefix') == 0
1151+ and route.get('network') in default_nets
1152+ )
1153+
1154
1155 @six.add_metaclass(CommandHandlerMeta)
1156 class NetworkStateInterpreter(object):
1157diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
1158index 19b3e60..0998392 100644
1159--- a/cloudinit/net/sysconfig.py
1160+++ b/cloudinit/net/sysconfig.py
1161@@ -322,7 +322,7 @@ class Renderer(renderer.Renderer):
1162 iface_cfg[new_key] = old_value
1163
1164 @classmethod
1165- def _render_subnets(cls, iface_cfg, subnets):
1166+ def _render_subnets(cls, iface_cfg, subnets, has_default_route):
1167 # setting base values
1168 iface_cfg['BOOTPROTO'] = 'none'
1169
1170@@ -331,6 +331,7 @@ class Renderer(renderer.Renderer):
1171 mtu_key = 'MTU'
1172 subnet_type = subnet.get('type')
1173 if subnet_type == 'dhcp6':
1174+ # TODO need to set BOOTPROTO to dhcp6 on SUSE
1175 iface_cfg['IPV6INIT'] = True
1176 iface_cfg['DHCPV6C'] = True
1177 elif subnet_type in ['dhcp4', 'dhcp']:
1178@@ -375,9 +376,9 @@ class Renderer(renderer.Renderer):
1179 ipv6_index = -1
1180 for i, subnet in enumerate(subnets, start=len(iface_cfg.children)):
1181 subnet_type = subnet.get('type')
1182- if subnet_type == 'dhcp6':
1183- continue
1184- elif subnet_type in ['dhcp4', 'dhcp']:
1185+ if subnet_type in ['dhcp', 'dhcp4', 'dhcp6']:
1186+ if has_default_route and iface_cfg['BOOTPROTO'] != 'none':
1187+ iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = False
1188 continue
1189 elif subnet_type == 'static':
1190 if subnet_is_ipv6(subnet):
1191@@ -385,10 +386,13 @@ class Renderer(renderer.Renderer):
1192 ipv6_cidr = "%s/%s" % (subnet['address'], subnet['prefix'])
1193 if ipv6_index == 0:
1194 iface_cfg['IPV6ADDR'] = ipv6_cidr
1195+ iface_cfg['IPADDR6'] = ipv6_cidr
1196 elif ipv6_index == 1:
1197 iface_cfg['IPV6ADDR_SECONDARIES'] = ipv6_cidr
1198+ iface_cfg['IPADDR6_0'] = ipv6_cidr
1199 else:
1200 iface_cfg['IPV6ADDR_SECONDARIES'] += " " + ipv6_cidr
1201+ iface_cfg['IPADDR6_%d' % ipv6_index] = ipv6_cidr
1202 else:
1203 ipv4_index = ipv4_index + 1
1204 suff = "" if ipv4_index == 0 else str(ipv4_index)
1205@@ -443,6 +447,8 @@ class Renderer(renderer.Renderer):
1206 # TODO(harlowja): add validation that no other iface has
1207 # also provided the default route?
1208 iface_cfg['DEFROUTE'] = True
1209+ if iface_cfg['BOOTPROTO'] in ('dhcp', 'dhcp4', 'dhcp6'):
1210+ iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = True
1211 if 'gateway' in route:
1212 if is_ipv6 or is_ipv6_addr(route['gateway']):
1213 iface_cfg['IPV6_DEFAULTGW'] = route['gateway']
1214@@ -493,7 +499,9 @@ class Renderer(renderer.Renderer):
1215 iface_cfg = iface_contents[iface_name]
1216 route_cfg = iface_cfg.routes
1217
1218- cls._render_subnets(iface_cfg, iface_subnets)
1219+ cls._render_subnets(
1220+ iface_cfg, iface_subnets, network_state.has_default_route
1221+ )
1222 cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
1223
1224 @classmethod
1225@@ -518,7 +526,9 @@ class Renderer(renderer.Renderer):
1226
1227 iface_subnets = iface.get("subnets", [])
1228 route_cfg = iface_cfg.routes
1229- cls._render_subnets(iface_cfg, iface_subnets)
1230+ cls._render_subnets(
1231+ iface_cfg, iface_subnets, network_state.has_default_route
1232+ )
1233 cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
1234
1235 # iter_interfaces on network-state is not sorted to produce
1236@@ -547,7 +557,9 @@ class Renderer(renderer.Renderer):
1237
1238 iface_subnets = iface.get("subnets", [])
1239 route_cfg = iface_cfg.routes
1240- cls._render_subnets(iface_cfg, iface_subnets)
1241+ cls._render_subnets(
1242+ iface_cfg, iface_subnets, network_state.has_default_route
1243+ )
1244 cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
1245
1246 @staticmethod
1247@@ -608,7 +620,9 @@ class Renderer(renderer.Renderer):
1248
1249 iface_subnets = iface.get("subnets", [])
1250 route_cfg = iface_cfg.routes
1251- cls._render_subnets(iface_cfg, iface_subnets)
1252+ cls._render_subnets(
1253+ iface_cfg, iface_subnets, network_state.has_default_route
1254+ )
1255 cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
1256
1257 @classmethod
1258@@ -620,7 +634,9 @@ class Renderer(renderer.Renderer):
1259 iface_cfg.kind = 'infiniband'
1260 iface_subnets = iface.get("subnets", [])
1261 route_cfg = iface_cfg.routes
1262- cls._render_subnets(iface_cfg, iface_subnets)
1263+ cls._render_subnets(
1264+ iface_cfg, iface_subnets, network_state.has_default_route
1265+ )
1266 cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
1267
1268 @classmethod
1269diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
1270old mode 100644
1271new mode 100755
1272index eccbee5..76b1661
1273--- a/cloudinit/sources/DataSourceAzure.py
1274+++ b/cloudinit/sources/DataSourceAzure.py
1275@@ -21,10 +21,14 @@ from cloudinit import net
1276 from cloudinit.event import EventType
1277 from cloudinit.net.dhcp import EphemeralDHCPv4
1278 from cloudinit import sources
1279-from cloudinit.sources.helpers.azure import get_metadata_from_fabric
1280 from cloudinit.sources.helpers import netlink
1281 from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc
1282 from cloudinit import util
1283+from cloudinit.reporting import events
1284+
1285+from cloudinit.sources.helpers.azure import (azure_ds_reporter,
1286+ azure_ds_telemetry_reporter,
1287+ get_metadata_from_fabric)
1288
1289 LOG = logging.getLogger(__name__)
1290
1291@@ -54,6 +58,7 @@ REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds"
1292 REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready"
1293 AGENT_SEED_DIR = '/var/lib/waagent'
1294 IMDS_URL = "http://169.254.169.254/metadata/"
1295+PLATFORM_ENTROPY_SOURCE = "/sys/firmware/acpi/tables/OEM0"
1296
1297 # List of static scripts and network config artifacts created by
1298 # stock ubuntu suported images.
1299@@ -195,6 +200,8 @@ if util.is_FreeBSD():
1300 RESOURCE_DISK_PATH = "/dev/" + res_disk
1301 else:
1302 LOG.debug("resource disk is None")
1303+ # TODO Find where platform entropy data is surfaced
1304+ PLATFORM_ENTROPY_SOURCE = None
1305
1306 BUILTIN_DS_CONFIG = {
1307 'agent_command': AGENT_START_BUILTIN,
1308@@ -241,6 +248,7 @@ def set_hostname(hostname, hostname_command='hostname'):
1309 util.subp([hostname_command, hostname])
1310
1311
1312+@azure_ds_telemetry_reporter
1313 @contextlib.contextmanager
1314 def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):
1315 """
1316@@ -287,6 +295,7 @@ class DataSourceAzure(sources.DataSource):
1317 root = sources.DataSource.__str__(self)
1318 return "%s [seed=%s]" % (root, self.seed)
1319
1320+ @azure_ds_telemetry_reporter
1321 def bounce_network_with_azure_hostname(self):
1322 # When using cloud-init to provision, we have to set the hostname from
1323 # the metadata and "bounce" the network to force DDNS to update via
1324@@ -312,6 +321,7 @@ class DataSourceAzure(sources.DataSource):
1325 util.logexc(LOG, "handling set_hostname failed")
1326 return False
1327
1328+ @azure_ds_telemetry_reporter
1329 def get_metadata_from_agent(self):
1330 temp_hostname = self.metadata.get('local-hostname')
1331 agent_cmd = self.ds_cfg['agent_command']
1332@@ -341,15 +351,18 @@ class DataSourceAzure(sources.DataSource):
1333 LOG.debug("ssh authentication: "
1334 "using fingerprint from fabirc")
1335
1336- # wait very long for public SSH keys to arrive
1337- # https://bugs.launchpad.net/cloud-init/+bug/1717611
1338- missing = util.log_time(logfunc=LOG.debug,
1339- msg="waiting for SSH public key files",
1340- func=util.wait_for_files,
1341- args=(fp_files, 900))
1342-
1343- if len(missing):
1344- LOG.warning("Did not find files, but going on: %s", missing)
1345+ with events.ReportEventStack(
1346+ name="waiting-for-ssh-public-key",
1347+ description="wait for agents to retrieve ssh keys",
1348+ parent=azure_ds_reporter):
1349+ # wait very long for public SSH keys to arrive
1350+ # https://bugs.launchpad.net/cloud-init/+bug/1717611
1351+ missing = util.log_time(logfunc=LOG.debug,
1352+ msg="waiting for SSH public key files",
1353+ func=util.wait_for_files,
1354+ args=(fp_files, 900))
1355+ if len(missing):
1356+ LOG.warning("Did not find files, but going on: %s", missing)
1357
1358 metadata = {}
1359 metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)
1360@@ -363,6 +376,7 @@ class DataSourceAzure(sources.DataSource):
1361 subplatform_type = 'seed-dir'
1362 return '%s (%s)' % (subplatform_type, self.seed)
1363
1364+ @azure_ds_telemetry_reporter
1365 def crawl_metadata(self):
1366 """Walk all instance metadata sources returning a dict on success.
1367
1368@@ -464,6 +478,7 @@ class DataSourceAzure(sources.DataSource):
1369 super(DataSourceAzure, self).clear_cached_attrs(attr_defaults)
1370 self._metadata_imds = sources.UNSET
1371
1372+ @azure_ds_telemetry_reporter
1373 def _get_data(self):
1374 """Crawl and process datasource metadata caching metadata as attrs.
1375
1376@@ -510,6 +525,7 @@ class DataSourceAzure(sources.DataSource):
1377 # quickly (local check only) if self.instance_id is still valid
1378 return sources.instance_id_matches_system_uuid(self.get_instance_id())
1379
1380+ @azure_ds_telemetry_reporter
1381 def setup(self, is_new_instance):
1382 if self._negotiated is False:
1383 LOG.debug("negotiating for %s (new_instance=%s)",
1384@@ -577,6 +593,7 @@ class DataSourceAzure(sources.DataSource):
1385 if nl_sock:
1386 nl_sock.close()
1387
1388+ @azure_ds_telemetry_reporter
1389 def _report_ready(self, lease):
1390 """Tells the fabric provisioning has completed """
1391 try:
1392@@ -614,9 +631,14 @@ class DataSourceAzure(sources.DataSource):
1393 def _reprovision(self):
1394 """Initiate the reprovisioning workflow."""
1395 contents = self._poll_imds()
1396- md, ud, cfg = read_azure_ovf(contents)
1397- return (md, ud, cfg, {'ovf-env.xml': contents})
1398-
1399+ with events.ReportEventStack(
1400+ name="reprovisioning-read-azure-ovf",
1401+ description="read azure ovf during reprovisioning",
1402+ parent=azure_ds_reporter):
1403+ md, ud, cfg = read_azure_ovf(contents)
1404+ return (md, ud, cfg, {'ovf-env.xml': contents})
1405+
1406+ @azure_ds_telemetry_reporter
1407 def _negotiate(self):
1408 """Negotiate with fabric and return data from it.
1409
1410@@ -649,6 +671,7 @@ class DataSourceAzure(sources.DataSource):
1411 util.del_file(REPROVISION_MARKER_FILE)
1412 return fabric_data
1413
1414+ @azure_ds_telemetry_reporter
1415 def activate(self, cfg, is_new_instance):
1416 address_ephemeral_resize(is_new_instance=is_new_instance,
1417 preserve_ntfs=self.ds_cfg.get(
1418@@ -665,7 +688,7 @@ class DataSourceAzure(sources.DataSource):
1419 2. Generate a fallback network config that does not include any of
1420 the blacklisted devices.
1421 """
1422- if not self._network_config:
1423+ if not self._network_config or self._network_config == sources.UNSET:
1424 if self.ds_cfg.get('apply_network_config'):
1425 nc_src = self._metadata_imds
1426 else:
1427@@ -687,12 +710,14 @@ def _partitions_on_device(devpath, maxnum=16):
1428 return []
1429
1430
1431+@azure_ds_telemetry_reporter
1432 def _has_ntfs_filesystem(devpath):
1433 ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True)
1434 LOG.debug('ntfs_devices found = %s', ntfs_devices)
1435 return os.path.realpath(devpath) in ntfs_devices
1436
1437
1438+@azure_ds_telemetry_reporter
1439 def can_dev_be_reformatted(devpath, preserve_ntfs):
1440 """Determine if the ephemeral drive at devpath should be reformatted.
1441
1442@@ -741,43 +766,59 @@ def can_dev_be_reformatted(devpath, preserve_ntfs):
1443 (cand_part, cand_path, devpath))
1444 return False, msg
1445
1446+ @azure_ds_telemetry_reporter
1447 def count_files(mp):
1448 ignored = set(['dataloss_warning_readme.txt'])
1449 return len([f for f in os.listdir(mp) if f.lower() not in ignored])
1450
1451 bmsg = ('partition %s (%s) on device %s was ntfs formatted' %
1452 (cand_part, cand_path, devpath))
1453- try:
1454- file_count = util.mount_cb(cand_path, count_files, mtype="ntfs",
1455- update_env_for_mount={'LANG': 'C'})
1456- except util.MountFailedError as e:
1457- if "unknown filesystem type 'ntfs'" in str(e):
1458- return True, (bmsg + ' but this system cannot mount NTFS,'
1459- ' assuming there are no important files.'
1460- ' Formatting allowed.')
1461- return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e)
1462-
1463- if file_count != 0:
1464- LOG.warning("it looks like you're using NTFS on the ephemeral disk, "
1465- 'to ensure that filesystem does not get wiped, set '
1466- '%s.%s in config', '.'.join(DS_CFG_PATH),
1467- DS_CFG_KEY_PRESERVE_NTFS)
1468- return False, bmsg + ' but had %d files on it.' % file_count
1469+
1470+ with events.ReportEventStack(
1471+ name="mount-ntfs-and-count",
1472+ description="mount-ntfs-and-count",
1473+ parent=azure_ds_reporter) as evt:
1474+ try:
1475+ file_count = util.mount_cb(cand_path, count_files, mtype="ntfs",
1476+ update_env_for_mount={'LANG': 'C'})
1477+ except util.MountFailedError as e:
1478+ evt.description = "cannot mount ntfs"
1479+ if "unknown filesystem type 'ntfs'" in str(e):
1480+ return True, (bmsg + ' but this system cannot mount NTFS,'
1481+ ' assuming there are no important files.'
1482+ ' Formatting allowed.')
1483+ return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e)
1484+
1485+ if file_count != 0:
1486+ evt.description = "mounted and counted %d files" % file_count
1487+ LOG.warning("it looks like you're using NTFS on the ephemeral"
1488+ " disk, to ensure that filesystem does not get wiped,"
1489+ " set %s.%s in config", '.'.join(DS_CFG_PATH),
1490+ DS_CFG_KEY_PRESERVE_NTFS)
1491+ return False, bmsg + ' but had %d files on it.' % file_count
1492
1493 return True, bmsg + ' and had no important files. Safe for reformatting.'
1494
1495
1496+@azure_ds_telemetry_reporter
1497 def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
1498 is_new_instance=False, preserve_ntfs=False):
1499 # wait for ephemeral disk to come up
1500 naplen = .2
1501- missing = util.wait_for_files([devpath], maxwait=maxwait, naplen=naplen,
1502- log_pre="Azure ephemeral disk: ")
1503-
1504- if missing:
1505- LOG.warning("ephemeral device '%s' did not appear after %d seconds.",
1506- devpath, maxwait)
1507- return
1508+ with events.ReportEventStack(
1509+ name="wait-for-ephemeral-disk",
1510+ description="wait for ephemeral disk",
1511+ parent=azure_ds_reporter):
1512+ missing = util.wait_for_files([devpath],
1513+ maxwait=maxwait,
1514+ naplen=naplen,
1515+ log_pre="Azure ephemeral disk: ")
1516+
1517+ if missing:
1518+ LOG.warning("ephemeral device '%s' did"
1519+ " not appear after %d seconds.",
1520+ devpath, maxwait)
1521+ return
1522
1523 result = False
1524 msg = None
1525@@ -805,6 +846,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
1526 return
1527
1528
1529+@azure_ds_telemetry_reporter
1530 def perform_hostname_bounce(hostname, cfg, prev_hostname):
1531 # set the hostname to 'hostname' if it is not already set to that.
1532 # then, if policy is not off, bounce the interface using command
1533@@ -840,6 +882,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname):
1534 return True
1535
1536
1537+@azure_ds_telemetry_reporter
1538 def crtfile_to_pubkey(fname, data=None):
1539 pipeline = ('openssl x509 -noout -pubkey < "$0" |'
1540 'ssh-keygen -i -m PKCS8 -f /dev/stdin')
1541@@ -848,6 +891,7 @@ def crtfile_to_pubkey(fname, data=None):
1542 return out.rstrip()
1543
1544
1545+@azure_ds_telemetry_reporter
1546 def pubkeys_from_crt_files(flist):
1547 pubkeys = []
1548 errors = []
1549@@ -863,6 +907,7 @@ def pubkeys_from_crt_files(flist):
1550 return pubkeys
1551
1552
1553+@azure_ds_telemetry_reporter
1554 def write_files(datadir, files, dirmode=None):
1555
1556 def _redact_password(cnt, fname):
1557@@ -890,6 +935,7 @@ def write_files(datadir, files, dirmode=None):
1558 util.write_file(filename=fname, content=content, mode=0o600)
1559
1560
1561+@azure_ds_telemetry_reporter
1562 def invoke_agent(cmd):
1563 # this is a function itself to simplify patching it for test
1564 if cmd:
1565@@ -909,6 +955,7 @@ def find_child(node, filter_func):
1566 return ret
1567
1568
1569+@azure_ds_telemetry_reporter
1570 def load_azure_ovf_pubkeys(sshnode):
1571 # This parses a 'SSH' node formatted like below, and returns
1572 # an array of dicts.
1573@@ -961,6 +1008,7 @@ def load_azure_ovf_pubkeys(sshnode):
1574 return found
1575
1576
1577+@azure_ds_telemetry_reporter
1578 def read_azure_ovf(contents):
1579 try:
1580 dom = minidom.parseString(contents)
1581@@ -1061,6 +1109,7 @@ def read_azure_ovf(contents):
1582 return (md, ud, cfg)
1583
1584
1585+@azure_ds_telemetry_reporter
1586 def _extract_preprovisioned_vm_setting(dom):
1587 """Read the preprovision flag from the ovf. It should not
1588 exist unless true."""
1589@@ -1089,6 +1138,7 @@ def encrypt_pass(password, salt_id="$6$"):
1590 return crypt.crypt(password, salt_id + util.rand_str(strlen=16))
1591
1592
1593+@azure_ds_telemetry_reporter
1594 def _check_freebsd_cdrom(cdrom_dev):
1595 """Return boolean indicating path to cdrom device has content."""
1596 try:
1597@@ -1100,18 +1150,31 @@ def _check_freebsd_cdrom(cdrom_dev):
1598 return False
1599
1600
1601-def _get_random_seed():
1602+@azure_ds_telemetry_reporter
1603+def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE):
1604 """Return content random seed file if available, otherwise,
1605 return None."""
1606 # azure / hyper-v provides random data here
1607- # TODO. find the seed on FreeBSD platform
1608 # now update ds_cfg to reflect contents pass in config
1609- if util.is_FreeBSD():
1610+ if source is None:
1611 return None
1612- return util.load_file("/sys/firmware/acpi/tables/OEM0",
1613- quiet=True, decode=False)
1614+ seed = util.load_file(source, quiet=True, decode=False)
1615+
1616+ # The seed generally contains non-Unicode characters. load_file puts
1617+ # them into a str (in python 2) or bytes (in python 3). In python 2,
1618+ # bad octets in a str cause util.json_dumps() to throw an exception. In
1619+ # python 3, bytes is a non-serializable type, and the handler load_file
1620+ # uses applies b64 encoding *again* to handle it. The simplest solution
1621+ # is to just b64encode the data and then decode it to a serializable
1622+ # string. Same number of bits of entropy, just with 25% more zeroes.
1623+ # There's no need to undo this base64-encoding when the random seed is
1624+ # actually used in cc_seed_random.py.
1625+ seed = base64.b64encode(seed).decode()
1626+
1627+ return seed
1628
1629
1630+@azure_ds_telemetry_reporter
1631 def list_possible_azure_ds_devs():
1632 devlist = []
1633 if util.is_FreeBSD():
1634@@ -1126,6 +1189,7 @@ def list_possible_azure_ds_devs():
1635 return devlist
1636
1637
1638+@azure_ds_telemetry_reporter
1639 def load_azure_ds_dir(source_dir):
1640 ovf_file = os.path.join(source_dir, "ovf-env.xml")
1641
1642@@ -1148,47 +1212,54 @@ def parse_network_config(imds_metadata):
1643 @param: imds_metadata: Dict of content read from IMDS network service.
1644 @return: Dictionary containing network version 2 standard configuration.
1645 """
1646- if imds_metadata != sources.UNSET and imds_metadata:
1647- netconfig = {'version': 2, 'ethernets': {}}
1648- LOG.debug('Azure: generating network configuration from IMDS')
1649- network_metadata = imds_metadata['network']
1650- for idx, intf in enumerate(network_metadata['interface']):
1651- nicname = 'eth{idx}'.format(idx=idx)
1652- dev_config = {}
1653- for addr4 in intf['ipv4']['ipAddress']:
1654- privateIpv4 = addr4['privateIpAddress']
1655- if privateIpv4:
1656- if dev_config.get('dhcp4', False):
1657- # Append static address config for nic > 1
1658- netPrefix = intf['ipv4']['subnet'][0].get(
1659- 'prefix', '24')
1660- if not dev_config.get('addresses'):
1661- dev_config['addresses'] = []
1662- dev_config['addresses'].append(
1663- '{ip}/{prefix}'.format(
1664- ip=privateIpv4, prefix=netPrefix))
1665- else:
1666- dev_config['dhcp4'] = True
1667- for addr6 in intf['ipv6']['ipAddress']:
1668- privateIpv6 = addr6['privateIpAddress']
1669- if privateIpv6:
1670- dev_config['dhcp6'] = True
1671- break
1672- if dev_config:
1673- mac = ':'.join(re.findall(r'..', intf['macAddress']))
1674- dev_config.update(
1675- {'match': {'macaddress': mac.lower()},
1676- 'set-name': nicname})
1677- netconfig['ethernets'][nicname] = dev_config
1678- else:
1679- blacklist = ['mlx4_core']
1680- LOG.debug('Azure: generating fallback configuration')
1681- # generate a network config, blacklist picking mlx4_core devs
1682- netconfig = net.generate_fallback_config(
1683- blacklist_drivers=blacklist, config_driver=True)
1684- return netconfig
1685+ with events.ReportEventStack(
1686+ name="parse_network_config",
1687+ description="",
1688+ parent=azure_ds_reporter) as evt:
1689+ if imds_metadata != sources.UNSET and imds_metadata:
1690+ netconfig = {'version': 2, 'ethernets': {}}
1691+ LOG.debug('Azure: generating network configuration from IMDS')
1692+ network_metadata = imds_metadata['network']
1693+ for idx, intf in enumerate(network_metadata['interface']):
1694+ nicname = 'eth{idx}'.format(idx=idx)
1695+ dev_config = {}
1696+ for addr4 in intf['ipv4']['ipAddress']:
1697+ privateIpv4 = addr4['privateIpAddress']
1698+ if privateIpv4:
1699+ if dev_config.get('dhcp4', False):
1700+ # Append static address config for nic > 1
1701+ netPrefix = intf['ipv4']['subnet'][0].get(
1702+ 'prefix', '24')
1703+ if not dev_config.get('addresses'):
1704+ dev_config['addresses'] = []
1705+ dev_config['addresses'].append(
1706+ '{ip}/{prefix}'.format(
1707+ ip=privateIpv4, prefix=netPrefix))
1708+ else:
1709+ dev_config['dhcp4'] = True
1710+ for addr6 in intf['ipv6']['ipAddress']:
1711+ privateIpv6 = addr6['privateIpAddress']
1712+ if privateIpv6:
1713+ dev_config['dhcp6'] = True
1714+ break
1715+ if dev_config:
1716+ mac = ':'.join(re.findall(r'..', intf['macAddress']))
1717+ dev_config.update(
1718+ {'match': {'macaddress': mac.lower()},
1719+ 'set-name': nicname})
1720+ netconfig['ethernets'][nicname] = dev_config
1721+ evt.description = "network config from imds"
1722+ else:
1723+ blacklist = ['mlx4_core']
1724+ LOG.debug('Azure: generating fallback configuration')
1725+ # generate a network config, blacklist picking mlx4_core devs
1726+ netconfig = net.generate_fallback_config(
1727+ blacklist_drivers=blacklist, config_driver=True)
1728+ evt.description = "network config from fallback"
1729+ return netconfig
1730
1731
1732+@azure_ds_telemetry_reporter
1733 def get_metadata_from_imds(fallback_nic, retries):
1734 """Query Azure's network metadata service, returning a dictionary.
1735
1736@@ -1213,6 +1284,7 @@ def get_metadata_from_imds(fallback_nic, retries):
1737 return util.log_time(**kwargs)
1738
1739
1740+@azure_ds_telemetry_reporter
1741 def _get_metadata_from_imds(retries):
1742
1743 url = IMDS_URL + "instance?api-version=2017-12-01"
1744@@ -1232,6 +1304,7 @@ def _get_metadata_from_imds(retries):
1745 return {}
1746
1747
1748+@azure_ds_telemetry_reporter
1749 def maybe_remove_ubuntu_network_config_scripts(paths=None):
1750 """Remove Azure-specific ubuntu network config for non-primary nics.
1751
1752@@ -1269,14 +1342,20 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None):
1753
1754
1755 def _is_platform_viable(seed_dir):
1756- """Check platform environment to report if this datasource may run."""
1757- asset_tag = util.read_dmi_data('chassis-asset-tag')
1758- if asset_tag == AZURE_CHASSIS_ASSET_TAG:
1759- return True
1760- LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag)
1761- if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')):
1762- return True
1763- return False
1764+ with events.ReportEventStack(
1765+ name="check-platform-viability",
1766+ description="found azure asset tag",
1767+ parent=azure_ds_reporter) as evt:
1768+
1769+ """Check platform environment to report if this datasource may run."""
1770+ asset_tag = util.read_dmi_data('chassis-asset-tag')
1771+ if asset_tag == AZURE_CHASSIS_ASSET_TAG:
1772+ return True
1773+ LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag)
1774+ evt.description = "Non-Azure DMI asset tag '%s' discovered." % asset_tag
1775+ if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')):
1776+ return True
1777+ return False
1778
1779
1780 class BrokenAzureDataSource(Exception):
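
A note on the ``_get_random_seed`` change above: the new comment explains why the raw seed bytes are base64-encoded before being returned. A minimal sketch of that round trip (illustrative only; ``raw`` below is a made-up stand-in for the OEM0 table contents, not real platform data):

    import base64
    import json

    # Stand-in for the non-Unicode bytes read from PLATFORM_ENTROPY_SOURCE.
    raw = b"OEM0\x80VRTUALMICROSFT\x97\xf4"

    seed = base64.b64encode(raw).decode()   # plain ASCII str
    json.dumps({"seed": seed})              # serializes without UnicodeDecodeError

    # Per the comment in the diff, cc_seed_random consumes the b64 string
    # as-is, so no matching decode step is needed later.
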
1781diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
1782index 4f2f6cc..ac28f1d 100644
1783--- a/cloudinit/sources/DataSourceEc2.py
1784+++ b/cloudinit/sources/DataSourceEc2.py
1785@@ -334,8 +334,12 @@ class DataSourceEc2(sources.DataSource):
1786 if isinstance(net_md, dict):
1787 result = convert_ec2_metadata_network_config(
1788 net_md, macs_to_nics=macs_to_nics, fallback_nic=iface)
1789- # RELEASE_BLOCKER: Xenial debian/postinst needs to add
1790- # EventType.BOOT on upgrade path for classic.
1791+
1792+ # RELEASE_BLOCKER: xenial should drop the below if statement,
1793+ # because the issue being addressed doesn't exist pre-netplan.
1794+ # (This datasource doesn't implement check_instance_id() so the
1795+ # datasource object is recreated every boot; this means we don't
1796+ # need to modify update_events on cloud-init upgrade.)
1797
1798 # Non-VPC (aka Classic) Ec2 instances need to rewrite the
1799 # network config file every boot due to MAC address change.
1800diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
1801index 6860f0c..fcf5d58 100644
1802--- a/cloudinit/sources/DataSourceNoCloud.py
1803+++ b/cloudinit/sources/DataSourceNoCloud.py
1804@@ -106,7 +106,9 @@ class DataSourceNoCloud(sources.DataSource):
1805 fslist = util.find_devs_with("TYPE=vfat")
1806 fslist.extend(util.find_devs_with("TYPE=iso9660"))
1807
1808- label_list = util.find_devs_with("LABEL=%s" % label)
1809+ label_list = util.find_devs_with("LABEL=%s" % label.upper())
1810+ label_list.extend(util.find_devs_with("LABEL=%s" % label.lower()))
1811+
1812 devlist = list(set(fslist) & set(label_list))
1813 devlist.sort(reverse=True)
1814
1815diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py
1816index b573b38..54bfc1f 100644
1817--- a/cloudinit/sources/DataSourceScaleway.py
1818+++ b/cloudinit/sources/DataSourceScaleway.py
1819@@ -171,10 +171,11 @@ def query_data_api(api_type, api_address, retries, timeout):
1820
1821 class DataSourceScaleway(sources.DataSource):
1822 dsname = "Scaleway"
1823- update_events = {'network': [EventType.BOOT_NEW_INSTANCE, EventType.BOOT]}
1824
1825 def __init__(self, sys_cfg, distro, paths):
1826 super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths)
1827+ self.update_events = {
1828+ 'network': {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}}
1829
1830 self.ds_cfg = util.mergemanydict([
1831 util.get_cfg_by_path(sys_cfg, ["datasource", "Scaleway"], {}),
1832diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
1833index e6966b3..1604932 100644
1834--- a/cloudinit/sources/__init__.py
1835+++ b/cloudinit/sources/__init__.py
1836@@ -164,9 +164,6 @@ class DataSource(object):
1837 # A datasource which supports writing network config on each system boot
1838 # would call update_events['network'].add(EventType.BOOT).
1839
1840- # Default: generate network config on new instance id (first boot).
1841- update_events = {'network': set([EventType.BOOT_NEW_INSTANCE])}
1842-
1843 # N-tuple listing default values for any metadata-related class
1844 # attributes cached on an instance by a process_data runs. These attribute
1845 # values are reset via clear_cached_attrs during any update_metadata call.
1846@@ -191,6 +188,9 @@ class DataSource(object):
1847 self.vendordata = None
1848 self.vendordata_raw = None
1849
1850+ # Default: generate network config on new instance id (first boot).
1851+ self.update_events = {'network': {EventType.BOOT_NEW_INSTANCE}}
1852+
1853 self.ds_cfg = util.get_cfg_by_path(
1854 self.sys_cfg, ("datasource", self.dsname), {})
1855 if not self.ds_cfg:
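
For context on the ``update_events`` move above (LP: #1819913): a mutable class attribute is shared by every instance and every subclass, so a datasource calling ``self.update_events['network'].add(EventType.BOOT)`` was silently changing the default for all other datasources. A small generic sketch of the pitfall and of the per-instance fix now used in ``DataSource.__init__`` (class names below are illustrative, not the cloud-init classes):

    class Base(object):
        # class attribute: a single dict shared by all instances and subclasses
        events = {'network': {'boot-new-instance'}}

    class Modifying(Base):
        def __init__(self):
            self.events['network'].add('boot')   # mutates Base.events too!

    Modifying()
    print(Base.events)   # the shared default now contains 'boot' -- leaked

    class Fixed(object):
        def __init__(self):
            # per-instance copy, as DataSource.__init__ now does
            self.events = {'network': {'boot-new-instance'}}
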
1856diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
1857old mode 100644
1858new mode 100755
1859index 2829dd2..d3af05e
1860--- a/cloudinit/sources/helpers/azure.py
1861+++ b/cloudinit/sources/helpers/azure.py
1862@@ -16,10 +16,27 @@ from xml.etree import ElementTree
1863
1864 from cloudinit import url_helper
1865 from cloudinit import util
1866+from cloudinit.reporting import events
1867
1868 LOG = logging.getLogger(__name__)
1869
1870
1871+azure_ds_reporter = events.ReportEventStack(
1872+ name="azure-ds",
1873+ description="initialize reporter for azure ds",
1874+ reporting_enabled=True)
1875+
1876+
1877+def azure_ds_telemetry_reporter(func):
1878+ def impl(*args, **kwargs):
1879+ with events.ReportEventStack(
1880+ name=func.__name__,
1881+ description=func.__name__,
1882+ parent=azure_ds_reporter):
1883+ return func(*args, **kwargs)
1884+ return impl
1885+
1886+
1887 @contextmanager
1888 def cd(newdir):
1889 prevdir = os.getcwd()
1890@@ -119,6 +136,7 @@ class OpenSSLManager(object):
1891 def clean_up(self):
1892 util.del_dir(self.tmpdir)
1893
1894+ @azure_ds_telemetry_reporter
1895 def generate_certificate(self):
1896 LOG.debug('Generating certificate for communication with fabric...')
1897 if self.certificate is not None:
1898@@ -139,17 +157,20 @@ class OpenSSLManager(object):
1899 LOG.debug('New certificate generated.')
1900
1901 @staticmethod
1902+ @azure_ds_telemetry_reporter
1903 def _run_x509_action(action, cert):
1904 cmd = ['openssl', 'x509', '-noout', action]
1905 result, _ = util.subp(cmd, data=cert)
1906 return result
1907
1908+ @azure_ds_telemetry_reporter
1909 def _get_ssh_key_from_cert(self, certificate):
1910 pub_key = self._run_x509_action('-pubkey', certificate)
1911 keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin']
1912 ssh_key, _ = util.subp(keygen_cmd, data=pub_key)
1913 return ssh_key
1914
1915+ @azure_ds_telemetry_reporter
1916 def _get_fingerprint_from_cert(self, certificate):
1917 """openssl x509 formats fingerprints as so:
1918 'SHA1 Fingerprint=07:3E:19:D1:4D:1C:79:92:24:C6:A0:FD:8D:DA:\
1919@@ -163,6 +184,7 @@ class OpenSSLManager(object):
1920 octets = raw_fp[eq+1:-1].split(':')
1921 return ''.join(octets)
1922
1923+ @azure_ds_telemetry_reporter
1924 def _decrypt_certs_from_xml(self, certificates_xml):
1925 """Decrypt the certificates XML document using the our private key;
1926 return the list of certs and private keys contained in the doc.
1927@@ -185,6 +207,7 @@ class OpenSSLManager(object):
1928 shell=True, data=b'\n'.join(lines))
1929 return out
1930
1931+ @azure_ds_telemetry_reporter
1932 def parse_certificates(self, certificates_xml):
1933 """Given the Certificates XML document, return a dictionary of
1934 fingerprints and associated SSH keys derived from the certs."""
1935@@ -265,11 +288,13 @@ class WALinuxAgentShim(object):
1936 return socket.inet_ntoa(packed_bytes)
1937
1938 @staticmethod
1939+ @azure_ds_telemetry_reporter
1940 def _networkd_get_value_from_leases(leases_d=None):
1941 return dhcp.networkd_get_option_from_leases(
1942 'OPTION_245', leases_d=leases_d)
1943
1944 @staticmethod
1945+ @azure_ds_telemetry_reporter
1946 def _get_value_from_leases_file(fallback_lease_file):
1947 leases = []
1948 content = util.load_file(fallback_lease_file)
1949@@ -287,6 +312,7 @@ class WALinuxAgentShim(object):
1950 return leases[-1]
1951
1952 @staticmethod
1953+ @azure_ds_telemetry_reporter
1954 def _load_dhclient_json():
1955 dhcp_options = {}
1956 hooks_dir = WALinuxAgentShim._get_hooks_dir()
1957@@ -305,6 +331,7 @@ class WALinuxAgentShim(object):
1958 return dhcp_options
1959
1960 @staticmethod
1961+ @azure_ds_telemetry_reporter
1962 def _get_value_from_dhcpoptions(dhcp_options):
1963 if dhcp_options is None:
1964 return None
1965@@ -318,6 +345,7 @@ class WALinuxAgentShim(object):
1966 return _value
1967
1968 @staticmethod
1969+ @azure_ds_telemetry_reporter
1970 def find_endpoint(fallback_lease_file=None, dhcp245=None):
1971 value = None
1972 if dhcp245 is not None:
1973@@ -352,6 +380,7 @@ class WALinuxAgentShim(object):
1974 LOG.debug('Azure endpoint found at %s', endpoint_ip_address)
1975 return endpoint_ip_address
1976
1977+ @azure_ds_telemetry_reporter
1978 def register_with_azure_and_fetch_data(self, pubkey_info=None):
1979 if self.openssl_manager is None:
1980 self.openssl_manager = OpenSSLManager()
1981@@ -404,6 +433,7 @@ class WALinuxAgentShim(object):
1982
1983 return keys
1984
1985+ @azure_ds_telemetry_reporter
1986 def _report_ready(self, goal_state, http_client):
1987 LOG.debug('Reporting ready to Azure fabric.')
1988 document = self.REPORT_READY_XML_TEMPLATE.format(
1989@@ -419,6 +449,7 @@ class WALinuxAgentShim(object):
1990 LOG.info('Reported ready to Azure fabric.')
1991
1992
1993+@azure_ds_telemetry_reporter
1994 def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None,
1995 pubkey_info=None):
1996 shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file,
1997diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py
1998index 6378e98..cb1912b 100644
1999--- a/cloudinit/sources/tests/test_init.py
2000+++ b/cloudinit/sources/tests/test_init.py
2001@@ -575,6 +575,21 @@ class TestDataSource(CiTestCase):
2002 " events: New instance first boot",
2003 self.logs.getvalue())
2004
2005+ def test_data_sources_cant_mutate_update_events_for_others(self):
2006+ """update_events shouldn't be changed for other DSes (LP: #1819913)"""
2007+
2008+ class ModifyingDS(DataSource):
2009+
2010+ def __init__(self, sys_cfg, distro, paths):
2011+ # This mirrors what DataSourceAzure does which causes LP:
2012+ # #1819913
2013+ DataSource.__init__(self, sys_cfg, distro, paths)
2014+ self.update_events['network'].add(EventType.BOOT)
2015+
2016+ before_update_events = copy.deepcopy(self.datasource.update_events)
2017+ ModifyingDS(self.sys_cfg, self.distro, self.paths)
2018+ self.assertEqual(before_update_events, self.datasource.update_events)
2019+
2020
2021 class TestRedactSensitiveData(CiTestCase):
2022
2023diff --git a/cloudinit/util.py b/cloudinit/util.py
2024index a192091..385f231 100644
2025--- a/cloudinit/util.py
2026+++ b/cloudinit/util.py
2027@@ -703,6 +703,21 @@ def get_cfg_option_list(yobj, key, default=None):
2028 # get a cfg entry by its path array
2029 # for f['a']['b']: get_cfg_by_path(mycfg,('a','b'))
2030 def get_cfg_by_path(yobj, keyp, default=None):
2031+ """Return the value of the item at path C{keyp} in C{yobj}.
2032+
2033+ example:
2034+ get_cfg_by_path({'a': {'b': {'num': 4}}}, 'a/b/num') == 4
2035+ get_cfg_by_path({'a': {'b': {'num': 4}}}, 'c/d') == None
2036+
2037+ @param yobj: A dictionary.
2038+ @param keyp: A path inside yobj. It can be a '/' delimited string,
2039+ or an iterable.
2040+ @param default: The default to return if the path does not exist.
2041+ @return: The value of the item at keyp, or C{default} if the path
2042+ is not found."""
2043+
2044+ if isinstance(keyp, six.string_types):
2045+ keyp = keyp.split("/")
2046 cur = yobj
2047 for tok in keyp:
2048 if tok not in cur:
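
A brief usage sketch of the behaviour documented above (the string form is split on '/'; passing an iterable of keys still works as before):

    from cloudinit.util import get_cfg_by_path

    cfg = {'a': {'b': {'num': 4}}}

    get_cfg_by_path(cfg, 'a/b/num')           # -> 4
    get_cfg_by_path(cfg, ('a', 'b'))          # -> {'num': 4}
    get_cfg_by_path(cfg, 'c/d', default=-1)   # -> -1 (path not present)
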
2049diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl
2050index 7513176..25db43e 100644
2051--- a/config/cloud.cfg.tmpl
2052+++ b/config/cloud.cfg.tmpl
2053@@ -112,6 +112,9 @@ cloud_final_modules:
2054 - landscape
2055 - lxd
2056 {% endif %}
2057+{% if variant in ["ubuntu", "unknown"] %}
2058+ - ubuntu-drivers
2059+{% endif %}
2060 {% if variant not in ["freebsd"] %}
2061 - puppet
2062 - chef
2063diff --git a/debian/changelog b/debian/changelog
2064index ac376ab..f869278 100644
2065--- a/debian/changelog
2066+++ b/debian/changelog
2067@@ -1,3 +1,32 @@
2068+cloud-init (18.5-61-gb76714c3-0ubuntu1) disco; urgency=medium
2069+
2070+ * New upstream snapshot.
2071+ - Change DataSourceNoCloud to ignore file system label's case.
2072+ [Risto Oikarinen]
2073+ - cmd:main.py: Fix missing 'modules-init' key in modes dict
2074+ [Antonio Romito] (LP: #1815109)
2075+ - ubuntu_advantage: rewrite cloud-config module
2076+ - Azure: Treat _unset network configuration as if it were absent
2077+ [Jason Zions (MSFT)] (LP: #1823084)
2078+ - DatasourceAzure: add additional logging for azure datasource [Anh Vo]
2079+ - cloud_tests: fix apt_pipelining test-cases
2080+ - Azure: Ensure platform random_seed is always serializable as JSON.
2081+ [Jason Zions (MSFT)]
2082+ - net/sysconfig: write out SUSE-compatible IPv6 config [Robert Schweikert]
2083+ - tox: Update testenv for openSUSE Leap to 15.0 [Thomas Bechtold]
2084+ - net: Fix ipv6 static routes when using eni renderer
2085+ [Raphael Glon] (LP: #1818669)
2086+ - Add ubuntu_drivers config module
2087+ - doc: Refresh Azure walinuxagent docs
2088+ - tox: bump pylint version to latest (2.3.1)
2089+ - DataSource: move update_events from a class to an instance attribute
2090+ (LP: #1819913)
2091+ - net/sysconfig: Handle default route setup for dhcp configured NICs
2092+ [Robert Schweikert] (LP: #1812117)
2093+ - DataSourceEc2: update RELEASE_BLOCKER to be more accurate
2094+
2095+ -- Daniel Watkins <oddbloke@ubuntu.com> Wed, 10 Apr 2019 11:49:03 -0400
2096+
2097 cloud-init (18.5-45-g3554ffe8-0ubuntu1) disco; urgency=medium
2098
2099 * New upstream snapshot.
2100diff --git a/doc/rtd/topics/datasources/azure.rst b/doc/rtd/topics/datasources/azure.rst
2101index 720a475..b41cddd 100644
2102--- a/doc/rtd/topics/datasources/azure.rst
2103+++ b/doc/rtd/topics/datasources/azure.rst
2104@@ -5,9 +5,30 @@ Azure
2105
2106 This datasource finds metadata and user-data from the Azure cloud platform.
2107
2108-Azure Platform
2109---------------
2110-The azure cloud-platform provides initial data to an instance via an attached
2111+walinuxagent
2112+------------
2113+walinuxagent has several functions within images. For cloud-init
2114+specifically, the relevant functionality it performs is to register the
2115+instance with the Azure cloud platform at boot so networking will be
2116+permitted. For more information about the other functionality of
2117+walinuxagent, see `Azure's documentation
2118+<https://github.com/Azure/WALinuxAgent#introduction>`_ for more details.
2119+(Note, however, that only one of walinuxagent's provisioning and cloud-init
2120+should be used to perform instance customisation.)
2121+
2122+If you are configuring walinuxagent yourself, you will want to ensure that you
2123+have `Provisioning.UseCloudInit
2124+<https://github.com/Azure/WALinuxAgent#provisioningusecloudinit>`_ set to
2125+``y``.
2126+
2127+
2128+Builtin Agent
2129+-------------
2130+An alternative to using walinuxagent to register with the Azure cloud platform
2131+is to use the ``__builtin__`` agent command. This section contains more
2132+background on what that code path does, and how to enable it.
2133+
2134+The Azure cloud platform provides initial data to an instance via an attached
2135 CD formatted in UDF. That CD contains a 'ovf-env.xml' file that provides some
2136 information. Additional information is obtained via interaction with the
2137 "endpoint".
2138@@ -36,25 +57,17 @@ for the endpoint server (again option 245).
2139 You can define the path to the lease file with the 'dhclient_lease_file'
2140 configuration.
2141
2142-walinuxagent
2143-------------
2144-In order to operate correctly, cloud-init needs walinuxagent to provide much
2145-of the interaction with azure. In addition to "provisioning" code, walinux
2146-does the following on the agent is a long running daemon that handles the
2147-following things:
2148-- generate a x509 certificate and send that to the endpoint
2149-
2150-waagent.conf config
2151-^^^^^^^^^^^^^^^^^^^
2152-in order to use waagent.conf with cloud-init, the following settings are recommended. Other values can be changed or set to the defaults.
2153-
2154- ::
2155-
2156- # disabling provisioning turns off all 'Provisioning.*' function
2157- Provisioning.Enabled=n
2158- # this is currently not handled by cloud-init, so let walinuxagent do it.
2159- ResourceDisk.Format=y
2160- ResourceDisk.MountPoint=/mnt
2161+
2162+IMDS
2163+----
2164+Azure provides the `instance metadata service (IMDS)
2165+<https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service>`_
2166+which is a REST service on ``169.254.169.254`` providing additional
2167+configuration information to the instance. Cloud-init uses the IMDS for:
2168+
2169+- network configuration for the instance which is applied per boot
2170+- a preprovisioning gate which blocks instance configuration until Azure fabric
2171+ is ready to provision
2172
2173
2174 Configuration
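
To make the IMDS description above concrete, a minimal sketch of the kind of request cloud-init issues (illustrative only; the ``Metadata: true`` header and the two-second timeout are assumptions, not taken from this diff -- the diff itself only shows the ``instance?api-version=2017-12-01`` endpoint):

    import json
    import urllib.request

    url = "http://169.254.169.254/metadata/instance?api-version=2017-12-01"
    req = urllib.request.Request(url, headers={"Metadata": "true"})

    # Only reachable from inside an Azure instance.
    with urllib.request.urlopen(req, timeout=2) as response:
        instance_md = json.loads(response.read().decode())

    print(instance_md.get("network", {}))
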
2175diff --git a/doc/rtd/topics/datasources/nocloud.rst b/doc/rtd/topics/datasources/nocloud.rst
2176index 08578e8..1c5cf96 100644
2177--- a/doc/rtd/topics/datasources/nocloud.rst
2178+++ b/doc/rtd/topics/datasources/nocloud.rst
2179@@ -9,7 +9,7 @@ network at all).
2180
2181 You can provide meta-data and user-data to a local vm boot via files on a
2182 `vfat`_ or `iso9660`_ filesystem. The filesystem volume label must be
2183-``cidata``.
2184+``cidata`` or ``CIDATA``.
2185
2186 Alternatively, you can provide meta-data via kernel command line or SMBIOS
2187 "serial number" option. The data must be passed in the form of a string:
2188diff --git a/doc/rtd/topics/modules.rst b/doc/rtd/topics/modules.rst
2189index d9720f6..3dcdd3b 100644
2190--- a/doc/rtd/topics/modules.rst
2191+++ b/doc/rtd/topics/modules.rst
2192@@ -54,6 +54,7 @@ Modules
2193 .. automodule:: cloudinit.config.cc_ssh_import_id
2194 .. automodule:: cloudinit.config.cc_timezone
2195 .. automodule:: cloudinit.config.cc_ubuntu_advantage
2196+.. automodule:: cloudinit.config.cc_ubuntu_drivers
2197 .. automodule:: cloudinit.config.cc_update_etc_hosts
2198 .. automodule:: cloudinit.config.cc_update_hostname
2199 .. automodule:: cloudinit.config.cc_users_groups
2200diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml b/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml
2201index bd9b5d0..22a31dc 100644
2202--- a/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml
2203+++ b/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml
2204@@ -5,8 +5,7 @@ required_features:
2205 - apt
2206 cloud_config: |
2207 #cloud-config
2208- apt:
2209- apt_pipelining: false
2210+ apt_pipelining: false
2211 collect_scripts:
2212 90cloud-init-pipelining: |
2213 #!/bin/bash
2214diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_os.py b/tests/cloud_tests/testcases/modules/apt_pipelining_os.py
2215index 740dc7c..2b940a6 100644
2216--- a/tests/cloud_tests/testcases/modules/apt_pipelining_os.py
2217+++ b/tests/cloud_tests/testcases/modules/apt_pipelining_os.py
2218@@ -8,8 +8,8 @@ class TestAptPipeliningOS(base.CloudTestCase):
2219 """Test apt-pipelining module."""
2220
2221 def test_os_pipelining(self):
2222- """Test pipelining set to os."""
2223- out = self.get_data_file('90cloud-init-pipelining')
2224- self.assertIn('Acquire::http::Pipeline-Depth "0";', out)
2225+ """test 'os' settings does not write apt config file."""
2226+ out = self.get_data_file('90cloud-init-pipelining_not_written')
2227+ self.assertEqual(0, int(out))
2228
2229 # vi: ts=4 expandtab
2230diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml b/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml
2231index cbed3ba..86d5220 100644
2232--- a/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml
2233+++ b/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml
2234@@ -1,15 +1,14 @@
2235 #
2236-# Set apt pipelining value to OS
2237+# Set apt pipelining value to OS, no conf written
2238 #
2239 required_features:
2240 - apt
2241 cloud_config: |
2242 #cloud-config
2243- apt:
2244- apt_pipelining: os
2245+ apt_pipelining: os
2246 collect_scripts:
2247- 90cloud-init-pipelining: |
2248+ 90cloud-init-pipelining_not_written: |
2249 #!/bin/bash
2250- cat /etc/apt/apt.conf.d/90cloud-init-pipelining
2251+ ls /etc/apt/apt.conf.d/90cloud-init-pipelining | wc -l
2252
2253 # vi: ts=4 expandtab
2254diff --git a/tests/data/azure/non_unicode_random_string b/tests/data/azure/non_unicode_random_string
2255new file mode 100644
2256index 0000000..b9ecefb
2257--- /dev/null
2258+++ b/tests/data/azure/non_unicode_random_string
2259@@ -0,0 +1 @@
2260+OEM0d\x00\x00\x00\x01\x80VRTUALMICROSFT\x02\x17\x00\x06MSFT\x97\x00\x00\x00C\xb4{V\xf4X%\x061x\x90\x1c\xfen\x86\xbf~\xf5\x8c\x94&\x88\xed\x84\xf9B\xbd\xd3\xf1\xdb\xee:\xd9\x0fc\x0e\x83(\xbd\xe3'\xfc\x85,\xdf\xf4\x13\x99N\xc5\xf3Y\x1e\xe3\x0b\xa4H\x08J\xb9\xdcdb$
2261\ No newline at end of file
2262diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
2263index 6b05b8f..53c56cd 100644
2264--- a/tests/unittests/test_datasource/test_azure.py
2265+++ b/tests/unittests/test_datasource/test_azure.py
2266@@ -7,11 +7,11 @@ from cloudinit.sources import (
2267 UNSET, DataSourceAzure as dsaz, InvalidMetaDataException)
2268 from cloudinit.util import (b64e, decode_binary, load_file, write_file,
2269 find_freebsd_part, get_path_dev_freebsd,
2270- MountFailedError)
2271+ MountFailedError, json_dumps, load_json)
2272 from cloudinit.version import version_string as vs
2273 from cloudinit.tests.helpers import (
2274 HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call,
2275- ExitStack)
2276+ ExitStack, resourceLocation)
2277
2278 import crypt
2279 import httpretty
2280@@ -1923,4 +1923,24 @@ class TestWBIsPlatformViable(CiTestCase):
2281 self.logs.getvalue())
2282
2283
2284+class TestRandomSeed(CiTestCase):
2285+ """Test proper handling of random_seed"""
2286+
2287+ def test_non_ascii_seed_is_serializable(self):
2288+ """Pass if a random string from the Azure infrastructure which
2289+ contains at least one non-Unicode character can be converted to/from
2290+ JSON without alteration and without throwing an exception.
2291+ """
2292+ path = resourceLocation("azure/non_unicode_random_string")
2293+ result = dsaz._get_random_seed(path)
2294+
2295+ obj = {'seed': result}
2296+ try:
2297+ serialized = json_dumps(obj)
2298+ deserialized = load_json(serialized)
2299+ except UnicodeDecodeError:
2300+ self.fail("Non-serializable random seed returned")
2301+
2302+ self.assertEqual(deserialized['seed'], result)
2303+
2304 # vi: ts=4 expandtab
2305diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py
2306index 3429272..b785362 100644
2307--- a/tests/unittests/test_datasource/test_nocloud.py
2308+++ b/tests/unittests/test_datasource/test_nocloud.py
2309@@ -32,6 +32,36 @@ class TestNoCloudDataSource(CiTestCase):
2310 self.mocks.enter_context(
2311 mock.patch.object(util, 'read_dmi_data', return_value=None))
2312
2313+ def _test_fs_config_is_read(self, fs_label, fs_label_to_search):
2314+ vfat_device = 'device-1'
2315+
2316+ def m_mount_cb(device, callback, mtype):
2317+ if (device == vfat_device):
2318+ return {'meta-data': yaml.dump({'instance-id': 'IID'})}
2319+ else:
2320+ return {}
2321+
2322+ def m_find_devs_with(query='', path=''):
2323+ if 'TYPE=vfat' == query:
2324+ return [vfat_device]
2325+ elif 'LABEL={}'.format(fs_label) == query:
2326+ return [vfat_device]
2327+ else:
2328+ return []
2329+
2330+ self.mocks.enter_context(
2331+ mock.patch.object(util, 'find_devs_with',
2332+ side_effect=m_find_devs_with))
2333+ self.mocks.enter_context(
2334+ mock.patch.object(util, 'mount_cb',
2335+ side_effect=m_mount_cb))
2336+ sys_cfg = {'datasource': {'NoCloud': {'fs_label': fs_label_to_search}}}
2337+ dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
2338+ ret = dsrc.get_data()
2339+
2340+ self.assertEqual(dsrc.metadata.get('instance-id'), 'IID')
2341+ self.assertTrue(ret)
2342+
2343 def test_nocloud_seed_dir_on_lxd(self, m_is_lxd):
2344 md = {'instance-id': 'IID', 'dsmode': 'local'}
2345 ud = b"USER_DATA_HERE"
2346@@ -90,6 +120,18 @@ class TestNoCloudDataSource(CiTestCase):
2347 ret = dsrc.get_data()
2348 self.assertFalse(ret)
2349
2350+ def test_fs_config_lowercase_label(self, m_is_lxd):
2351+ self._test_fs_config_is_read('cidata', 'cidata')
2352+
2353+ def test_fs_config_uppercase_label(self, m_is_lxd):
2354+ self._test_fs_config_is_read('CIDATA', 'cidata')
2355+
2356+ def test_fs_config_lowercase_label_search_uppercase(self, m_is_lxd):
2357+ self._test_fs_config_is_read('cidata', 'CIDATA')
2358+
2359+ def test_fs_config_uppercase_label_search_uppercase(self, m_is_lxd):
2360+ self._test_fs_config_is_read('CIDATA', 'CIDATA')
2361+
2362 def test_no_datasource_expected(self, m_is_lxd):
2363 # no source should be found if no cmdline, config, and fs_label=None
2364 sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
2365diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py
2366index f96bf0a..3bfd752 100644
2367--- a/tests/unittests/test_datasource/test_scaleway.py
2368+++ b/tests/unittests/test_datasource/test_scaleway.py
2369@@ -7,6 +7,7 @@ import requests
2370
2371 from cloudinit import helpers
2372 from cloudinit import settings
2373+from cloudinit.event import EventType
2374 from cloudinit.sources import DataSourceScaleway
2375
2376 from cloudinit.tests.helpers import mock, HttprettyTestCase, CiTestCase
2377@@ -403,3 +404,9 @@ class TestDataSourceScaleway(HttprettyTestCase):
2378
2379 netcfg = self.datasource.network_config
2380 self.assertEqual(netcfg, '0xdeadbeef')
2381+
2382+ def test_update_events_is_correct(self):
2383+ """ensure update_events contains correct data"""
2384+ self.assertEqual(
2385+ {'network': {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}},
2386+ self.datasource.update_events)
2387diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py
2388index e453040..c3c0c8c 100644
2389--- a/tests/unittests/test_distros/test_netconfig.py
2390+++ b/tests/unittests/test_distros/test_netconfig.py
2391@@ -496,6 +496,7 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase):
2392 BOOTPROTO=none
2393 DEFROUTE=yes
2394 DEVICE=eth0
2395+ IPADDR6=2607:f0d0:1002:0011::2/64
2396 IPV6ADDR=2607:f0d0:1002:0011::2/64
2397 IPV6INIT=yes
2398 IPV6_DEFAULTGW=2607:f0d0:1002:0011::1
2399@@ -588,6 +589,7 @@ class TestNetCfgDistroOpensuse(TestNetCfgDistroBase):
2400 BOOTPROTO=none
2401 DEFROUTE=yes
2402 DEVICE=eth0
2403+ IPADDR6=2607:f0d0:1002:0011::2/64
2404 IPV6ADDR=2607:f0d0:1002:0011::2/64
2405 IPV6INIT=yes
2406 IPV6_DEFAULTGW=2607:f0d0:1002:0011::1
2407diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py
2408index d00c1b4..8c18aa1 100644
2409--- a/tests/unittests/test_ds_identify.py
2410+++ b/tests/unittests/test_ds_identify.py
2411@@ -520,6 +520,10 @@ class TestDsIdentify(DsIdentifyBase):
2412 """NoCloud is found with iso9660 filesystem on non-cdrom disk."""
2413 self._test_ds_found('NoCloud')
2414
2415+ def test_nocloud_upper(self):
2416+ """NoCloud is found with uppercase filesystem label."""
2417+ self._test_ds_found('NoCloudUpper')
2418+
2419 def test_nocloud_seed(self):
2420 """Nocloud seed directory."""
2421 self._test_ds_found('NoCloud-seed')
2422@@ -713,6 +717,19 @@ VALID_CFG = {
2423 'dev/vdb': 'pretend iso content for cidata\n',
2424 }
2425 },
2426+ 'NoCloudUpper': {
2427+ 'ds': 'NoCloud',
2428+ 'mocks': [
2429+ MOCK_VIRT_IS_KVM,
2430+ {'name': 'blkid', 'ret': 0,
2431+ 'out': blkid_out(
2432+ BLKID_UEFI_UBUNTU +
2433+ [{'DEVNAME': 'vdb', 'TYPE': 'iso9660', 'LABEL': 'CIDATA'}])},
2434+ ],
2435+ 'files': {
2436+ 'dev/vdb': 'pretend iso content for cidata\n',
2437+ }
2438+ },
2439 'NoCloud-seed': {
2440 'ds': 'NoCloud',
2441 'files': {
2442diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/test_handler/test_schema.py
2443index 1bad07f..e69a47a 100644
2444--- a/tests/unittests/test_handler/test_schema.py
2445+++ b/tests/unittests/test_handler/test_schema.py
2446@@ -28,6 +28,7 @@ class GetSchemaTest(CiTestCase):
2447 'cc_runcmd',
2448 'cc_snap',
2449 'cc_ubuntu_advantage',
2450+ 'cc_ubuntu_drivers',
2451 'cc_zypper_add_repo'
2452 ],
2453 [subschema['id'] for subschema in schema['allOf']])
2454diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
2455index e3b9e02..fd03deb 100644
2456--- a/tests/unittests/test_net.py
2457+++ b/tests/unittests/test_net.py
2458@@ -691,6 +691,9 @@ DEVICE=eth0
2459 GATEWAY=172.19.3.254
2460 HWADDR=fa:16:3e:ed:9a:59
2461 IPADDR=172.19.1.34
2462+IPADDR6=2001:DB8::10/64
2463+IPADDR6_0=2001:DB9::10/64
2464+IPADDR6_2=2001:DB10::10/64
2465 IPV6ADDR=2001:DB8::10/64
2466 IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64"
2467 IPV6INIT=yes
2468@@ -729,6 +732,9 @@ DEVICE=eth0
2469 GATEWAY=172.19.3.254
2470 HWADDR=fa:16:3e:ed:9a:59
2471 IPADDR=172.19.1.34
2472+IPADDR6=2001:DB8::10/64
2473+IPADDR6_0=2001:DB9::10/64
2474+IPADDR6_2=2001:DB10::10/64
2475 IPV6ADDR=2001:DB8::10/64
2476 IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64"
2477 IPV6INIT=yes
2478@@ -860,6 +866,7 @@ NETWORK_CONFIGS = {
2479 BOOTPROTO=dhcp
2480 DEFROUTE=yes
2481 DEVICE=eth99
2482+ DHCLIENT_SET_DEFAULT_ROUTE=yes
2483 DNS1=8.8.8.8
2484 DNS2=8.8.4.4
2485 DOMAIN="barley.maas sach.maas"
2486@@ -979,6 +986,7 @@ NETWORK_CONFIGS = {
2487 BOOTPROTO=none
2488 DEVICE=iface0
2489 IPADDR=192.168.14.2
2490+ IPADDR6=2001:1::1/64
2491 IPV6ADDR=2001:1::1/64
2492 IPV6INIT=yes
2493 NETMASK=255.255.255.0
2494@@ -1113,8 +1121,8 @@ iface eth0.101 inet static
2495 iface eth0.101 inet static
2496 address 192.168.2.10/24
2497
2498-post-up route add -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
2499-pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
2500+post-up route add -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
2501+pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
2502 """),
2503 'expected_netplan': textwrap.dedent("""
2504 network:
2505@@ -1234,6 +1242,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
2506 'ifcfg-bond0.200': textwrap.dedent("""\
2507 BOOTPROTO=dhcp
2508 DEVICE=bond0.200
2509+ DHCLIENT_SET_DEFAULT_ROUTE=no
2510 NM_CONTROLLED=no
2511 ONBOOT=yes
2512 PHYSDEV=bond0
2513@@ -1247,6 +1256,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
2514 DEFROUTE=yes
2515 DEVICE=br0
2516 IPADDR=192.168.14.2
2517+ IPADDR6=2001:1::1/64
2518 IPV6ADDR=2001:1::1/64
2519 IPV6INIT=yes
2520 IPV6_DEFAULTGW=2001:4800:78ff:1b::1
2521@@ -1333,6 +1343,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
2522 'ifcfg-eth5': textwrap.dedent("""\
2523 BOOTPROTO=dhcp
2524 DEVICE=eth5
2525+ DHCLIENT_SET_DEFAULT_ROUTE=no
2526 HWADDR=98:bb:9f:2c:e8:8a
2527 NM_CONTROLLED=no
2528 ONBOOT=no
2529@@ -1505,17 +1516,18 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
2530 - gateway: 192.168.0.3
2531 netmask: 255.255.255.0
2532 network: 10.1.3.0
2533- - gateway: 2001:67c:1562:1
2534- network: 2001:67c:1
2535- netmask: ffff:ffff:0
2536- - gateway: 3001:67c:1562:1
2537- network: 3001:67c:1
2538- netmask: ffff:ffff:0
2539- metric: 10000
2540 - type: static
2541 address: 192.168.1.2/24
2542 - type: static
2543 address: 2001:1::1/92
2544+ routes:
2545+ - gateway: 2001:67c:1562:1
2546+ network: 2001:67c:1
2547+ netmask: ffff:ffff:0
2548+ - gateway: 3001:67c:1562:1
2549+ network: 3001:67c:1
2550+ netmask: ffff:ffff:0
2551+ metric: 10000
2552 """),
2553 'expected_netplan': textwrap.dedent("""
2554 network:
2555@@ -1554,6 +1566,51 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
2556 to: 3001:67c:1/32
2557 via: 3001:67c:1562:1
2558 """),
2559+ 'expected_eni': textwrap.dedent("""\
2560+auto lo
2561+iface lo inet loopback
2562+
2563+auto bond0s0
2564+iface bond0s0 inet manual
2565+ bond-master bond0
2566+ bond-mode active-backup
2567+ bond-xmit-hash-policy layer3+4
2568+ bond_miimon 100
2569+
2570+auto bond0s1
2571+iface bond0s1 inet manual
2572+ bond-master bond0
2573+ bond-mode active-backup
2574+ bond-xmit-hash-policy layer3+4
2575+ bond_miimon 100
2576+
2577+auto bond0
2578+iface bond0 inet static
2579+ address 192.168.0.2/24
2580+ gateway 192.168.0.1
2581+ bond-mode active-backup
2582+ bond-slaves none
2583+ bond-xmit-hash-policy layer3+4
2584+ bond_miimon 100
2585+ hwaddress aa:bb:cc:dd:e8:ff
2586+ mtu 9000
2587+ post-up route add -net 10.1.3.0/24 gw 192.168.0.3 || true
2588+ pre-down route del -net 10.1.3.0/24 gw 192.168.0.3 || true
2589+
2590+# control-alias bond0
2591+iface bond0 inet static
2592+ address 192.168.1.2/24
2593+
2594+# control-alias bond0
2595+iface bond0 inet6 static
2596+ address 2001:1::1/92
2597+ post-up route add -A inet6 2001:67c:1/32 gw 2001:67c:1562:1 || true
2598+ pre-down route del -A inet6 2001:67c:1/32 gw 2001:67c:1562:1 || true
2599+ post-up route add -A inet6 3001:67c:1/32 gw 3001:67c:1562:1 metric 10000 \
2600+|| true
2601+ pre-down route del -A inet6 3001:67c:1/32 gw 3001:67c:1562:1 metric 10000 \
2602+|| true
2603+ """),
2604 'yaml-v2': textwrap.dedent("""
2605 version: 2
2606 ethernets:
2607@@ -1641,6 +1698,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
2608 MACADDR=aa:bb:cc:dd:e8:ff
2609 IPADDR=192.168.0.2
2610 IPADDR1=192.168.1.2
2611+ IPADDR6=2001:1::1/92
2612 IPV6ADDR=2001:1::1/92
2613 IPV6INIT=yes
2614 MTU=9000
2615@@ -1696,6 +1754,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
2616 MACADDR=aa:bb:cc:dd:e8:ff
2617 IPADDR=192.168.0.2
2618 IPADDR1=192.168.1.2
2619+ IPADDR6=2001:1::1/92
2620 IPV6ADDR=2001:1::1/92
2621 IPV6INIT=yes
2622 MTU=9000
2623@@ -1786,6 +1845,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
2624 GATEWAY=192.168.1.1
2625 IPADDR=192.168.2.2
2626 IPADDR1=192.168.1.2
2627+ IPADDR6=2001:1::bbbb/96
2628 IPV6ADDR=2001:1::bbbb/96
2629 IPV6INIT=yes
2630 IPV6_DEFAULTGW=2001:1::1
2631@@ -1847,6 +1907,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
2632 BRIDGE=br0
2633 DEVICE=eth0
2634 HWADDR=52:54:00:12:34:00
2635+ IPADDR6=2001:1::100/96
2636 IPV6ADDR=2001:1::100/96
2637 IPV6INIT=yes
2638 NM_CONTROLLED=no
2639@@ -1860,6 +1921,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
2640 BRIDGE=br0
2641 DEVICE=eth1
2642 HWADDR=52:54:00:12:34:01
2643+ IPADDR6=2001:1::101/96
2644 IPV6ADDR=2001:1::101/96
2645 IPV6INIT=yes
2646 NM_CONTROLLED=no
2647@@ -1988,6 +2050,23 @@ CONFIG_V1_SIMPLE_SUBNET = {
2648 'type': 'static'}],
2649 'type': 'physical'}]}
2650
2651+CONFIG_V1_MULTI_IFACE = {
2652+ 'version': 1,
2653+ 'config': [{'type': 'physical',
2654+ 'mtu': 1500,
2655+ 'subnets': [{'type': 'static',
2656+ 'netmask': '255.255.240.0',
2657+ 'routes': [{'netmask': '0.0.0.0',
2658+ 'network': '0.0.0.0',
2659+ 'gateway': '51.68.80.1'}],
2660+ 'address': '51.68.89.122',
2661+ 'ipv4': True}],
2662+ 'mac_address': 'fa:16:3e:25:b4:59',
2663+ 'name': 'eth0'},
2664+ {'type': 'physical',
2665+ 'mtu': 9000,
2666+ 'subnets': [{'type': 'dhcp4'}],
2667+ 'mac_address': 'fa:16:3e:b1:ca:29', 'name': 'eth1'}]}
2668
2669 DEFAULT_DEV_ATTRS = {
2670 'eth1000': {
2671@@ -2460,6 +2539,49 @@ USERCTL=no
2672 respath = '/etc/resolv.conf'
2673 self.assertNotIn(respath, found.keys())
2674
2675+ def test_network_config_v1_multi_iface_samples(self):
2676+ ns = network_state.parse_net_config_data(CONFIG_V1_MULTI_IFACE)
2677+ render_dir = self.tmp_path("render")
2678+ os.makedirs(render_dir)
2679+ renderer = self._get_renderer()
2680+ renderer.render_network_state(ns, target=render_dir)
2681+ found = dir2dict(render_dir)
2682+ nspath = '/etc/sysconfig/network-scripts/'
2683+ self.assertNotIn(nspath + 'ifcfg-lo', found.keys())
2684+ expected_i1 = """\
2685+# Created by cloud-init on instance boot automatically, do not edit.
2686+#
2687+BOOTPROTO=none
2688+DEFROUTE=yes
2689+DEVICE=eth0
2690+GATEWAY=51.68.80.1
2691+HWADDR=fa:16:3e:25:b4:59
2692+IPADDR=51.68.89.122
2693+MTU=1500
2694+NETMASK=255.255.240.0
2695+NM_CONTROLLED=no
2696+ONBOOT=yes
2697+STARTMODE=auto
2698+TYPE=Ethernet
2699+USERCTL=no
2700+"""
2701+ self.assertEqual(expected_i1, found[nspath + 'ifcfg-eth0'])
2702+ expected_i2 = """\
2703+# Created by cloud-init on instance boot automatically, do not edit.
2704+#
2705+BOOTPROTO=dhcp
2706+DEVICE=eth1
2707+DHCLIENT_SET_DEFAULT_ROUTE=no
2708+HWADDR=fa:16:3e:b1:ca:29
2709+MTU=9000
2710+NM_CONTROLLED=no
2711+ONBOOT=yes
2712+STARTMODE=auto
2713+TYPE=Ethernet
2714+USERCTL=no
2715+"""
2716+ self.assertEqual(expected_i2, found[nspath + 'ifcfg-eth1'])
2717+
2718 def test_config_with_explicit_loopback(self):
2719 ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK)
2720 render_dir = self.tmp_path("render")
2721@@ -2634,6 +2756,7 @@ USERCTL=no
2722 GATEWAY=192.168.42.1
2723 HWADDR=52:54:00:ab:cd:ef
2724 IPADDR=192.168.42.100
2725+ IPADDR6=2001:db8::100/32
2726 IPV6ADDR=2001:db8::100/32
2727 IPV6INIT=yes
2728 IPV6_DEFAULTGW=2001:db8::1
2729@@ -3570,17 +3693,17 @@ class TestEniRoundTrip(CiTestCase):
2730 'iface eth0 inet static',
2731 ' address 172.23.31.42/26',
2732 ' gateway 172.23.31.2',
2733- ('post-up route add -net 10.0.0.0 netmask 255.240.0.0 gw '
2734+ ('post-up route add -net 10.0.0.0/12 gw '
2735 '172.23.31.1 metric 0 || true'),
2736- ('pre-down route del -net 10.0.0.0 netmask 255.240.0.0 gw '
2737+ ('pre-down route del -net 10.0.0.0/12 gw '
2738 '172.23.31.1 metric 0 || true'),
2739- ('post-up route add -net 192.168.2.0 netmask 255.255.0.0 gw '
2740+ ('post-up route add -net 192.168.2.0/16 gw '
2741 '172.23.31.1 metric 0 || true'),
2742- ('pre-down route del -net 192.168.2.0 netmask 255.255.0.0 gw '
2743+ ('pre-down route del -net 192.168.2.0/16 gw '
2744 '172.23.31.1 metric 0 || true'),
2745- ('post-up route add -net 10.0.200.0 netmask 255.255.0.0 gw '
2746+ ('post-up route add -net 10.0.200.0/16 gw '
2747 '172.23.31.1 metric 1 || true'),
2748- ('pre-down route del -net 10.0.200.0 netmask 255.255.0.0 gw '
2749+ ('pre-down route del -net 10.0.200.0/16 gw '
2750 '172.23.31.1 metric 1 || true'),
2751 ]
2752 found = files['/etc/network/interfaces'].splitlines()
2753@@ -3588,6 +3711,77 @@ class TestEniRoundTrip(CiTestCase):
2754 self.assertEqual(
2755 expected, [line for line in found if line])
2756
2757+ def test_ipv6_static_routes(self):
2758+ # as reported in bug 1818669
2759+ conf = [
2760+ {'name': 'eno3', 'type': 'physical',
2761+ 'subnets': [{
2762+ 'address': 'fd00::12/64',
2763+ 'dns_nameservers': ['fd00:2::15'],
2764+ 'gateway': 'fd00::1',
2765+ 'ipv6': True,
2766+ 'type': 'static',
2767+ 'routes': [{'netmask': '32',
2768+ 'network': 'fd00:12::',
2769+ 'gateway': 'fd00::2'},
2770+ {'network': 'fd00:14::',
2771+ 'gateway': 'fd00::3'},
2772+ {'destination': 'fe00:14::/48',
2773+ 'gateway': 'fe00::4',
2774+ 'metric': 500},
2775+ {'gateway': '192.168.23.1',
2776+ 'metric': 999,
2777+ 'netmask': 24,
2778+ 'network': '192.168.23.0'},
2779+ {'destination': '10.23.23.0/24',
2780+ 'gateway': '10.23.23.2',
2781+ 'metric': 300}]}]},
2782+ ]
2783+
2784+ files = self._render_and_read(
2785+ network_config={'config': conf, 'version': 1})
2786+ expected = [
2787+ 'auto lo',
2788+ 'iface lo inet loopback',
2789+ 'auto eno3',
2790+ 'iface eno3 inet6 static',
2791+ ' address fd00::12/64',
2792+ ' dns-nameservers fd00:2::15',
2793+ ' gateway fd00::1',
2794+ (' post-up route add -A inet6 fd00:12::/32 gw '
2795+ 'fd00::2 || true'),
2796+ (' pre-down route del -A inet6 fd00:12::/32 gw '
2797+ 'fd00::2 || true'),
2798+ (' post-up route add -A inet6 fd00:14::/64 gw '
2799+ 'fd00::3 || true'),
2800+ (' pre-down route del -A inet6 fd00:14::/64 gw '
2801+ 'fd00::3 || true'),
2802+ (' post-up route add -A inet6 fe00:14::/48 gw '
2803+ 'fe00::4 metric 500 || true'),
2804+ (' pre-down route del -A inet6 fe00:14::/48 gw '
2805+ 'fe00::4 metric 500 || true'),
2806+ (' post-up route add -net 192.168.23.0/24 gw '
2807+ '192.168.23.1 metric 999 || true'),
2808+ (' pre-down route del -net 192.168.23.0/24 gw '
2809+ '192.168.23.1 metric 999 || true'),
2810+ (' post-up route add -net 10.23.23.0/24 gw '
2811+ '10.23.23.2 metric 300 || true'),
2812+ (' pre-down route del -net 10.23.23.0/24 gw '
2813+ '10.23.23.2 metric 300 || true'),
2814+
2815+ ]
2816+ found = files['/etc/network/interfaces'].splitlines()
2817+
2818+ self.assertEqual(
2819+ expected, [line for line in found if line])
2820+
2821+ def testsimple_render_bond(self):
2822+ entry = NETWORK_CONFIGS['bond']
2823+ files = self._render_and_read(network_config=yaml.load(entry['yaml']))
2824+ self.assertEqual(
2825+ entry['expected_eni'].splitlines(),
2826+ files['/etc/network/interfaces'].splitlines())
2827+
2828
2829 class TestNetRenderers(CiTestCase):
2830 @mock.patch("cloudinit.net.renderers.sysconfig.available")
2831diff --git a/tools/ds-identify b/tools/ds-identify
2832index b78b273..6518901 100755
2833--- a/tools/ds-identify
2834+++ b/tools/ds-identify
2835@@ -620,7 +620,7 @@ dscheck_MAAS() {
2836 }
2837
2838 dscheck_NoCloud() {
2839- local fslabel="cidata" d=""
2840+ local fslabel="cidata CIDATA" d=""
2841 case " ${DI_KERNEL_CMDLINE} " in
2842 *\ ds=nocloud*) return ${DS_FOUND};;
2843 esac
2844@@ -632,9 +632,10 @@ dscheck_NoCloud() {
2845 check_seed_dir "$d" meta-data user-data && return ${DS_FOUND}
2846 check_writable_seed_dir "$d" meta-data user-data && return ${DS_FOUND}
2847 done
2848- if has_fs_with_label "${fslabel}"; then
2849+ if has_fs_with_label $fslabel; then
2850 return ${DS_FOUND}
2851 fi
2852+
2853 return ${DS_NOT_FOUND}
2854 }
2855
2856@@ -762,7 +763,7 @@ is_cdrom_ovf() {
2857
2858 # explicitly skip known labels of other types. rd_rdfe is azure.
2859 case "$label" in
2860- config-2|CONFIG-2|rd_rdfe_stable*|cidata) return 1;;
2861+ config-2|CONFIG-2|rd_rdfe_stable*|cidata|CIDATA) return 1;;
2862 esac
2863
2864 local idstr="http://schemas.dmtf.org/ovf/environment/1"
2865diff --git a/tox.ini b/tox.ini
2866index d371720..1f01eb7 100644
2867--- a/tox.ini
2868+++ b/tox.ini
2869@@ -21,7 +21,7 @@ setenv =
2870 basepython = python3
2871 deps =
2872 # requirements
2873- pylint==2.2.2
2874+ pylint==2.3.1
2875 # test-requirements because unit tests are now present in cloudinit tree
2876 -r{toxinidir}/test-requirements.txt
2877 commands = {envpython} -m pylint {posargs:cloudinit tests tools}
2878@@ -96,19 +96,18 @@ deps =
2879 six==1.9.0
2880 -r{toxinidir}/test-requirements.txt
2881
2882-[testenv:opensusel42]
2883+[testenv:opensusel150]
2884 basepython = python2.7
2885 commands = nosetests {posargs:tests/unittests cloudinit}
2886 deps =
2887 # requirements
2888- argparse==1.3.0
2889- jinja2==2.8
2890- PyYAML==3.11
2891- oauthlib==0.7.2
2892+ jinja2==2.10
2893+ PyYAML==3.12
2894+ oauthlib==2.0.6
2895 configobj==5.0.6
2896- requests==2.11.1
2897- jsonpatch==1.11
2898- six==1.9.0
2899+ requests==2.18.4
2900+ jsonpatch==1.16
2901+ six==1.11.0
2902 -r{toxinidir}/test-requirements.txt
2903
2904 [testenv:tip-pycodestyle]
