Merge ~oddbloke/cloud-init/+git/cloud-init:ubuntu/devel into cloud-init:ubuntu/devel

Proposed by Dan Watkins
Status: Merged
Merged at revision: 6380e13bb69e77f6684e89ff59c467e59a6b8b7f
Proposed branch: ~oddbloke/cloud-init/+git/cloud-init:ubuntu/devel
Merge into: cloud-init:ubuntu/devel
Diff against target: 2904 lines (+1300/-449)
34 files modified
cloudinit/cmd/main.py (+5/-4)
cloudinit/config/cc_ubuntu_advantage.py (+116/-109)
cloudinit/config/cc_ubuntu_drivers.py (+112/-0)
cloudinit/config/tests/test_ubuntu_advantage.py (+191/-156)
cloudinit/config/tests/test_ubuntu_drivers.py (+174/-0)
cloudinit/net/eni.py (+11/-5)
cloudinit/net/network_state.py (+33/-8)
cloudinit/net/sysconfig.py (+25/-9)
cloudinit/sources/DataSourceAzure.py (+168/-89)
cloudinit/sources/DataSourceEc2.py (+6/-2)
cloudinit/sources/DataSourceNoCloud.py (+3/-1)
cloudinit/sources/DataSourceScaleway.py (+2/-1)
cloudinit/sources/__init__.py (+3/-3)
cloudinit/sources/helpers/azure.py (+31/-0)
cloudinit/sources/tests/test_init.py (+15/-0)
cloudinit/util.py (+15/-0)
config/cloud.cfg.tmpl (+3/-0)
debian/changelog (+29/-0)
doc/rtd/topics/datasources/azure.rst (+35/-22)
doc/rtd/topics/datasources/nocloud.rst (+1/-1)
doc/rtd/topics/modules.rst (+1/-0)
tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml (+1/-2)
tests/cloud_tests/testcases/modules/apt_pipelining_os.py (+3/-3)
tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml (+4/-5)
tests/data/azure/non_unicode_random_string (+1/-0)
tests/unittests/test_datasource/test_azure.py (+22/-2)
tests/unittests/test_datasource/test_nocloud.py (+42/-0)
tests/unittests/test_datasource/test_scaleway.py (+7/-0)
tests/unittests/test_distros/test_netconfig.py (+2/-0)
tests/unittests/test_ds_identify.py (+17/-0)
tests/unittests/test_handler/test_schema.py (+1/-0)
tests/unittests/test_net.py (+209/-15)
tools/ds-identify (+4/-3)
tox.ini (+8/-9)
Reviewer Review Type Date Requested Status
Server Team CI bot continuous-integration Needs Fixing
Ryan Harper Approve
Review via email: mp+365803@code.launchpad.net
To post a comment you must log in.
Revision history for this message
Ryan Harper (raharper) wrote :

Thanks, this looks perfect. I diffed my version of ubuntu/devel against yours and it's clean.

(neipa) cloud-init % git diff oddbloke/ubuntu/devel
diff --git a/debian/changelog b/debian/changelog
index f869278..a8b05a4 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -25,7 +25,7 @@ cloud-init (18.5-61-gb76714c3-0ubuntu1) disco; urgency=medium
       [Robert Schweikert] (LP: #1812117)
     - DataSourceEc2: update RELEASE_BLOCKER to be more accurate

- -- Daniel Watkins <email address hidden> Wed, 10 Apr 2019 11:49:03 -0400
+ -- Ryan Harper <email address hidden> Tue, 09 Apr 2019 15:09:59 -0500

 cloud-init (18.5-45-g3554ffe8-0ubuntu1) disco; urgency=medium

review: Approve
Revision history for this message
Server Team CI bot (server-team-bot) wrote :

FAILED: Continuous integration, rev:6380e13bb69e77f6684e89ff59c467e59a6b8b7f
No commit message was specified in the merge proposal. Click on the following link and set the commit message. (If you want a Jenkins rebuild, you need to trigger it yourself.)
https://code.launchpad.net/~daniel-thewatkins/cloud-init/+git/cloud-init/+merge/365803/+edit-commit-message

https://jenkins.ubuntu.com/server/job/cloud-init-ci/674/
Executed test runs:
    SUCCESS: Checkout
    SUCCESS: Unit & Style Tests
    SUCCESS: Ubuntu LTS: Build
    SUCCESS: Ubuntu LTS: Integration
    IN_PROGRESS: Declarative: Post Actions

Click here to trigger a rebuild:
https://jenkins.ubuntu.com/server/job/cloud-init-ci/674/rebuild

review: Needs Fixing (continuous-integration)

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index 933c019..a5446da 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -632,13 +632,14 @@ def status_wrapper(name, args, data_d=None, link_d=None):
632 'start': None,632 'start': None,
633 'finished': None,633 'finished': None,
634 }634 }
635
635 if status is None:636 if status is None:
636 status = {'v1': {}}637 status = {'v1': {}}
637 for m in modes:
638 status['v1'][m] = nullstatus.copy()
639 status['v1']['datasource'] = None638 status['v1']['datasource'] = None
640 elif mode not in status['v1']:639
641 status['v1'][mode] = nullstatus.copy()640 for m in modes:
641 if m not in status['v1']:
642 status['v1'][m] = nullstatus.copy()
642643
643 v1 = status['v1']644 v1 = status['v1']
644 v1['stage'] = mode645 v1['stage'] = mode
diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py
index 5e082bd..f488123 100644
--- a/cloudinit/config/cc_ubuntu_advantage.py
+++ b/cloudinit/config/cc_ubuntu_advantage.py
@@ -1,150 +1,143 @@
1# Copyright (C) 2018 Canonical Ltd.
2#
3# This file is part of cloud-init. See LICENSE file for license information.1# This file is part of cloud-init. See LICENSE file for license information.
42
5"""Ubuntu advantage: manage ubuntu-advantage offerings from Canonical."""3"""ubuntu_advantage: Configure Ubuntu Advantage support services"""
64
7import sys
8from textwrap import dedent5from textwrap import dedent
96
10from cloudinit import log as logging7import six
8
11from cloudinit.config.schema import (9from cloudinit.config.schema import (
12 get_schema_doc, validate_cloudconfig_schema)10 get_schema_doc, validate_cloudconfig_schema)
11from cloudinit import log as logging
13from cloudinit.settings import PER_INSTANCE12from cloudinit.settings import PER_INSTANCE
14from cloudinit.subp import prepend_base_command
15from cloudinit import util13from cloudinit import util
1614
1715
18distros = ['ubuntu']16UA_URL = 'https://ubuntu.com/advantage'
19frequency = PER_INSTANCE
2017
21LOG = logging.getLogger(__name__)18distros = ['ubuntu']
2219
23schema = {20schema = {
24 'id': 'cc_ubuntu_advantage',21 'id': 'cc_ubuntu_advantage',
25 'name': 'Ubuntu Advantage',22 'name': 'Ubuntu Advantage',
26 'title': 'Install, configure and manage ubuntu-advantage offerings',23 'title': 'Configure Ubuntu Advantage support services',
27 'description': dedent("""\24 'description': dedent("""\
28 This module provides configuration options to setup ubuntu-advantage25 Attach machine to an existing Ubuntu Advantage support contract and
29 subscriptions.26 enable or disable support services such as Livepatch, ESM,
3027 FIPS and FIPS Updates. When attaching a machine to Ubuntu Advantage,
31 .. note::28 one can also specify services to enable. When the 'enable'
32 Both ``commands`` value can be either a dictionary or a list. If29 list is present, any named service will be enabled and all absent
33 the configuration provided is a dictionary, the keys are only used30 services will remain disabled.
34 to order the execution of the commands and the dictionary is31
35 merged with any vendor-data ubuntu-advantage configuration32 Note that when enabling FIPS or FIPS updates you will need to schedule
36 provided. If a ``commands`` is provided as a list, any vendor-data33 a reboot to ensure the machine is running the FIPS-compliant kernel.
37 ubuntu-advantage ``commands`` are ignored.34 See :ref:`Power State Change` for information on how to configure
3835 cloud-init to perform this reboot.
39 Ubuntu-advantage ``commands`` is a dictionary or list of
40 ubuntu-advantage commands to run on the deployed machine.
41 These commands can be used to enable or disable subscriptions to
42 various ubuntu-advantage products. See 'man ubuntu-advantage' for more
43 information on supported subcommands.
44
45 .. note::
46 Each command item can be a string or list. If the item is a list,
47 'ubuntu-advantage' can be omitted and it will automatically be
48 inserted as part of the command.
49 """),36 """),
50 'distros': distros,37 'distros': distros,
51 'examples': [dedent("""\38 'examples': [dedent("""\
52 # Enable Extended Security Maintenance using your service auth token39 # Attach the machine to a Ubuntu Advantage support contract with a
40 # UA contract token obtained from %s.
41 ubuntu_advantage:
42 token: <ua_contract_token>
43 """ % UA_URL), dedent("""\
44 # Attach the machine to an Ubuntu Advantage support contract enabling
45 # only fips and esm services. Services will only be enabled if
46 # the environment supports said service. Otherwise warnings will
47 # be logged for incompatible services specified.
53 ubuntu-advantage:48 ubuntu-advantage:
54 commands:49 token: <ua_contract_token>
55 00: ubuntu-advantage enable-esm <token>50 enable:
51 - fips
52 - esm
56 """), dedent("""\53 """), dedent("""\
57 # Enable livepatch by providing your livepatch token54 # Attach the machine to an Ubuntu Advantage support contract and enable
55 # the FIPS service. Perform a reboot once cloud-init has
56 # completed.
57 power_state:
58 mode: reboot
58 ubuntu-advantage:59 ubuntu-advantage:
59 commands:60 token: <ua_contract_token>
60 00: ubuntu-advantage enable-livepatch <livepatch-token>61 enable:
6162 - fips
62 """), dedent("""\63 """)],
63 # Convenience: the ubuntu-advantage command can be omitted when
64 # specifying commands as a list and 'ubuntu-advantage' will
65 # automatically be prepended.
66 # The following commands are equivalent
67 ubuntu-advantage:
68 commands:
69 00: ['enable-livepatch', 'my-token']
70 01: ['ubuntu-advantage', 'enable-livepatch', 'my-token']
71 02: ubuntu-advantage enable-livepatch my-token
72 03: 'ubuntu-advantage enable-livepatch my-token'
73 """)],
74 'frequency': PER_INSTANCE,64 'frequency': PER_INSTANCE,
75 'type': 'object',65 'type': 'object',
76 'properties': {66 'properties': {
77 'ubuntu-advantage': {67 'ubuntu_advantage': {
78 'type': 'object',68 'type': 'object',
79 'properties': {69 'properties': {
80 'commands': {70 'enable': {
81 'type': ['object', 'array'], # Array of strings or dict71 'type': 'array',
82 'items': {72 'items': {'type': 'string'},
83 'oneOf': [73 },
84 {'type': 'array', 'items': {'type': 'string'}},74 'token': {
85 {'type': 'string'}]75 'type': 'string',
86 },76 'description': (
87 'additionalItems': False, # Reject non-string & non-list77 'A contract token obtained from %s.' % UA_URL)
88 'minItems': 1,
89 'minProperties': 1,
90 }78 }
91 },79 },
92 'additionalProperties': False, # Reject keys not in schema80 'required': ['token'],
93 'required': ['commands']81 'additionalProperties': False
94 }82 }
95 }83 }
96}84}
9785
98# TODO schema for 'assertions' and 'commands' are too permissive at the moment.
99# Once python-jsonschema supports schema draft 6 add support for arbitrary
100# object keys with 'patternProperties' constraint to validate string values.
101
102__doc__ = get_schema_doc(schema) # Supplement python help()86__doc__ = get_schema_doc(schema) # Supplement python help()
10387
104UA_CMD = "ubuntu-advantage"88LOG = logging.getLogger(__name__)
105
106
107def run_commands(commands):
108 """Run the commands provided in ubuntu-advantage:commands config.
10989
110 Commands are run individually. Any errors are collected and reported
111 after attempting all commands.
11290
113 @param commands: A list or dict containing commands to run. Keys of a91def configure_ua(token=None, enable=None):
114 dict will be used to order the commands provided as dict values.92 """Call ua commandline client to attach or enable services."""
115 """93 error = None
116 if not commands:94 if not token:
117 return95 error = ('ubuntu_advantage: token must be provided')
118 LOG.debug('Running user-provided ubuntu-advantage commands')96 LOG.error(error)
119 if isinstance(commands, dict):97 raise RuntimeError(error)
120 # Sort commands based on dictionary key98
121 commands = [v for _, v in sorted(commands.items())]99 if enable is None:
122 elif not isinstance(commands, list):100 enable = []
123 raise TypeError(101 elif isinstance(enable, six.string_types):
124 'commands parameter was not a list or dict: {commands}'.format(102 LOG.warning('ubuntu_advantage: enable should be a list, not'
125 commands=commands))103 ' a string; treating as a single enable')
126104 enable = [enable]
127 fixed_ua_commands = prepend_base_command('ubuntu-advantage', commands)105 elif not isinstance(enable, list):
128106 LOG.warning('ubuntu_advantage: enable should be a list, not'
129 cmd_failures = []107 ' a %s; skipping enabling services',
130 for command in fixed_ua_commands:108 type(enable).__name__)
131 shell = isinstance(command, str)109 enable = []
132 try:110
133 util.subp(command, shell=shell, status_cb=sys.stderr.write)111 attach_cmd = ['ua', 'attach', token]
134 except util.ProcessExecutionError as e:112 LOG.debug('Attaching to Ubuntu Advantage. %s', ' '.join(attach_cmd))
135 cmd_failures.append(str(e))113 try:
136 if cmd_failures:114 util.subp(attach_cmd)
137 msg = (115 except util.ProcessExecutionError as e:
138 'Failures running ubuntu-advantage commands:\n'116 msg = 'Failure attaching Ubuntu Advantage:\n{error}'.format(
139 '{cmd_failures}'.format(117 error=str(e))
140 cmd_failures=cmd_failures))
141 util.logexc(LOG, msg)118 util.logexc(LOG, msg)
142 raise RuntimeError(msg)119 raise RuntimeError(msg)
120 enable_errors = []
121 for service in enable:
122 try:
123 cmd = ['ua', 'enable', service]
124 util.subp(cmd, capture=True)
125 except util.ProcessExecutionError as e:
126 enable_errors.append((service, e))
127 if enable_errors:
128 for service, error in enable_errors:
129 msg = 'Failure enabling "{service}":\n{error}'.format(
130 service=service, error=str(error))
131 util.logexc(LOG, msg)
132 raise RuntimeError(
133 'Failure enabling Ubuntu Advantage service(s): {}'.format(
134 ', '.join('"{}"'.format(service)
135 for service, _ in enable_errors)))
143136
144137
145def maybe_install_ua_tools(cloud):138def maybe_install_ua_tools(cloud):
146 """Install ubuntu-advantage-tools if not present."""139 """Install ubuntu-advantage-tools if not present."""
147 if util.which('ubuntu-advantage'):140 if util.which('ua'):
148 return141 return
149 try:142 try:
150 cloud.distro.update_package_sources()143 cloud.distro.update_package_sources()
@@ -159,14 +152,28 @@ def maybe_install_ua_tools(cloud):
159152
160153
161def handle(name, cfg, cloud, log, args):154def handle(name, cfg, cloud, log, args):
162 cfgin = cfg.get('ubuntu-advantage')155 ua_section = None
163 if cfgin is None:156 if 'ubuntu-advantage' in cfg:
164 LOG.debug(("Skipping module named %s,"157 LOG.warning('Deprecated configuration key "ubuntu-advantage" provided.'
165 " no 'ubuntu-advantage' key in configuration"), name)158 ' Expected underscore delimited "ubuntu_advantage"; will'
159 ' attempt to continue.')
160 ua_section = cfg['ubuntu-advantage']
161 if 'ubuntu_advantage' in cfg:
162 ua_section = cfg['ubuntu_advantage']
163 if ua_section is None:
164 LOG.debug("Skipping module named %s,"
165 " no 'ubuntu_advantage' configuration found", name)
166 return166 return
167
168 validate_cloudconfig_schema(cfg, schema)167 validate_cloudconfig_schema(cfg, schema)
168 if 'commands' in ua_section:
169 msg = (
170 'Deprecated configuration "ubuntu-advantage: commands" provided.'
171 ' Expected "token"')
172 LOG.error(msg)
173 raise RuntimeError(msg)
174
169 maybe_install_ua_tools(cloud)175 maybe_install_ua_tools(cloud)
170 run_commands(cfgin.get('commands', []))176 configure_ua(token=ua_section.get('token'),
177 enable=ua_section.get('enable'))
171178
172# vi: ts=4 expandtab179# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_ubuntu_drivers.py b/cloudinit/config/cc_ubuntu_drivers.py
173new file mode 100644180new file mode 100644
index 0000000..91feb60
--- /dev/null
+++ b/cloudinit/config/cc_ubuntu_drivers.py
@@ -0,0 +1,112 @@
1# This file is part of cloud-init. See LICENSE file for license information.
2
3"""Ubuntu Drivers: Interact with third party drivers in Ubuntu."""
4
5from textwrap import dedent
6
7from cloudinit.config.schema import (
8 get_schema_doc, validate_cloudconfig_schema)
9from cloudinit import log as logging
10from cloudinit.settings import PER_INSTANCE
11from cloudinit import type_utils
12from cloudinit import util
13
14LOG = logging.getLogger(__name__)
15
16frequency = PER_INSTANCE
17distros = ['ubuntu']
18schema = {
19 'id': 'cc_ubuntu_drivers',
20 'name': 'Ubuntu Drivers',
21 'title': 'Interact with third party drivers in Ubuntu.',
22 'description': dedent("""\
23 This module interacts with the 'ubuntu-drivers' command to install
24 third party driver packages."""),
25 'distros': distros,
26 'examples': [dedent("""\
27 drivers:
28 nvidia:
29 license-accepted: true
30 """)],
31 'frequency': frequency,
32 'type': 'object',
33 'properties': {
34 'drivers': {
35 'type': 'object',
36 'additionalProperties': False,
37 'properties': {
38 'nvidia': {
39 'type': 'object',
40 'additionalProperties': False,
41 'required': ['license-accepted'],
42 'properties': {
43 'license-accepted': {
44 'type': 'boolean',
45 'description': ("Do you accept the NVIDIA driver"
46 " license?"),
47 },
48 'version': {
49 'type': 'string',
50 'description': (
51 'The version of the driver to install (e.g.'
52 ' "390", "410"). Defaults to the latest'
53 ' version.'),
54 },
55 },
56 },
57 },
58 },
59 },
60}
61OLD_UBUNTU_DRIVERS_STDERR_NEEDLE = (
62 "ubuntu-drivers: error: argument <command>: invalid choice: 'install'")
63
64__doc__ = get_schema_doc(schema) # Supplement python help()
65
66
67def install_drivers(cfg, pkg_install_func):
68 if not isinstance(cfg, dict):
69 raise TypeError(
70 "'drivers' config expected dict, found '%s': %s" %
71 (type_utils.obj_name(cfg), cfg))
72
73 cfgpath = 'nvidia/license-accepted'
74 # Call translate_bool to ensure that we treat string values like "yes" as
75 # acceptance and _don't_ treat string values like "nah" as acceptance
76 # because they're True-ish
77 nv_acc = util.translate_bool(util.get_cfg_by_path(cfg, cfgpath))
78 if not nv_acc:
79 LOG.debug("Not installing NVIDIA drivers. %s=%s", cfgpath, nv_acc)
80 return
81
82 if not util.which('ubuntu-drivers'):
83 LOG.debug("'ubuntu-drivers' command not available. "
84 "Installing ubuntu-drivers-common")
85 pkg_install_func(['ubuntu-drivers-common'])
86
87 driver_arg = 'nvidia'
88 version_cfg = util.get_cfg_by_path(cfg, 'nvidia/version')
89 if version_cfg:
90 driver_arg += ':{}'.format(version_cfg)
91
92 LOG.debug("Installing NVIDIA drivers (%s=%s, version=%s)",
93 cfgpath, nv_acc, version_cfg if version_cfg else 'latest')
94
95 try:
96 util.subp(['ubuntu-drivers', 'install', '--gpgpu', driver_arg])
97 except util.ProcessExecutionError as exc:
98 if OLD_UBUNTU_DRIVERS_STDERR_NEEDLE in exc.stderr:
99 LOG.warning('the available version of ubuntu-drivers is'
100 ' too old to perform requested driver installation')
101 elif 'No drivers found for installation.' in exc.stdout:
102 LOG.warning('ubuntu-drivers found no drivers for installation')
103 raise
104
105
106def handle(name, cfg, cloud, log, _args):
107 if "drivers" not in cfg:
108 log.debug("Skipping module named %s, no 'drivers' key in config", name)
109 return
110
111 validate_cloudconfig_schema(cfg, schema)
112 install_drivers(cfg['drivers'], cloud.distro.install_packages)
diff --git a/cloudinit/config/tests/test_ubuntu_advantage.py b/cloudinit/config/tests/test_ubuntu_advantage.py
index b7cf9be..8c4161e 100644
--- a/cloudinit/config/tests/test_ubuntu_advantage.py
+++ b/cloudinit/config/tests/test_ubuntu_advantage.py
@@ -1,10 +1,7 @@
1# This file is part of cloud-init. See LICENSE file for license information.1# This file is part of cloud-init. See LICENSE file for license information.
22
3import re
4from six import StringIO
5
6from cloudinit.config.cc_ubuntu_advantage import (3from cloudinit.config.cc_ubuntu_advantage import (
7 handle, maybe_install_ua_tools, run_commands, schema)4 configure_ua, handle, maybe_install_ua_tools, schema)
8from cloudinit.config.schema import validate_cloudconfig_schema5from cloudinit.config.schema import validate_cloudconfig_schema
9from cloudinit import util6from cloudinit import util
10from cloudinit.tests.helpers import (7from cloudinit.tests.helpers import (
@@ -20,90 +17,120 @@ class FakeCloud(object):
20 self.distro = distro17 self.distro = distro
2118
2219
23class TestRunCommands(CiTestCase):20class TestConfigureUA(CiTestCase):
2421
25 with_logs = True22 with_logs = True
26 allowed_subp = [CiTestCase.SUBP_SHELL_TRUE]23 allowed_subp = [CiTestCase.SUBP_SHELL_TRUE]
2724
28 def setUp(self):25 def setUp(self):
29 super(TestRunCommands, self).setUp()26 super(TestConfigureUA, self).setUp()
30 self.tmp = self.tmp_dir()27 self.tmp = self.tmp_dir()
3128
32 @mock.patch('%s.util.subp' % MPATH)29 @mock.patch('%s.util.subp' % MPATH)
33 def test_run_commands_on_empty_list(self, m_subp):30 def test_configure_ua_attach_error(self, m_subp):
34 """When provided with an empty list, run_commands does nothing."""31 """Errors from ua attach command are raised."""
35 run_commands([])32 m_subp.side_effect = util.ProcessExecutionError(
36 self.assertEqual('', self.logs.getvalue())33 'Invalid token SomeToken')
37 m_subp.assert_not_called()34 with self.assertRaises(RuntimeError) as context_manager:
3835 configure_ua(token='SomeToken')
39 def test_run_commands_on_non_list_or_dict(self):
40 """When provided an invalid type, run_commands raises an error."""
41 with self.assertRaises(TypeError) as context_manager:
42 run_commands(commands="I'm Not Valid")
43 self.assertEqual(36 self.assertEqual(
44 "commands parameter was not a list or dict: I'm Not Valid",37 'Failure attaching Ubuntu Advantage:\nUnexpected error while'
38 ' running command.\nCommand: -\nExit code: -\nReason: -\n'
39 'Stdout: Invalid token SomeToken\nStderr: -',
45 str(context_manager.exception))40 str(context_manager.exception))
4641
47 def test_run_command_logs_commands_and_exit_codes_to_stderr(self):42 @mock.patch('%s.util.subp' % MPATH)
48 """All exit codes are logged to stderr."""43 def test_configure_ua_attach_with_token(self, m_subp):
49 outfile = self.tmp_path('output.log', dir=self.tmp)44 """When token is provided, attach the machine to ua using the token."""
5045 configure_ua(token='SomeToken')
51 cmd1 = 'echo "HI" >> %s' % outfile46 m_subp.assert_called_once_with(['ua', 'attach', 'SomeToken'])
52 cmd2 = 'bogus command'47 self.assertEqual(
53 cmd3 = 'echo "MOM" >> %s' % outfile48 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
54 commands = [cmd1, cmd2, cmd3]49 self.logs.getvalue())
5550
56 mock_path = '%s.sys.stderr' % MPATH51 @mock.patch('%s.util.subp' % MPATH)
57 with mock.patch(mock_path, new_callable=StringIO) as m_stderr:52 def test_configure_ua_attach_on_service_error(self, m_subp):
58 with self.assertRaises(RuntimeError) as context_manager:53 """all services should be enabled and then any failures raised"""
59 run_commands(commands=commands)
60
61 self.assertIsNotNone(
62 re.search(r'bogus: (command )?not found',
63 str(context_manager.exception)),
64 msg='Expected bogus command not found')
65 expected_stderr_log = '\n'.join([
66 'Begin run command: {cmd}'.format(cmd=cmd1),
67 'End run command: exit(0)',
68 'Begin run command: {cmd}'.format(cmd=cmd2),
69 'ERROR: End run command: exit(127)',
70 'Begin run command: {cmd}'.format(cmd=cmd3),
71 'End run command: exit(0)\n'])
72 self.assertEqual(expected_stderr_log, m_stderr.getvalue())
73
74 def test_run_command_as_lists(self):
75 """When commands are specified as a list, run them in order."""
76 outfile = self.tmp_path('output.log', dir=self.tmp)
77
78 cmd1 = 'echo "HI" >> %s' % outfile
79 cmd2 = 'echo "MOM" >> %s' % outfile
80 commands = [cmd1, cmd2]
81 with mock.patch('%s.sys.stderr' % MPATH, new_callable=StringIO):
82 run_commands(commands=commands)
8354
55 def fake_subp(cmd, capture=None):
56 fail_cmds = [['ua', 'enable', svc] for svc in ['esm', 'cc']]
57 if cmd in fail_cmds and capture:
58 svc = cmd[-1]
59 raise util.ProcessExecutionError(
60 'Invalid {} credentials'.format(svc.upper()))
61
62 m_subp.side_effect = fake_subp
63
64 with self.assertRaises(RuntimeError) as context_manager:
65 configure_ua(token='SomeToken', enable=['esm', 'cc', 'fips'])
66 self.assertEqual(
67 m_subp.call_args_list,
68 [mock.call(['ua', 'attach', 'SomeToken']),
69 mock.call(['ua', 'enable', 'esm'], capture=True),
70 mock.call(['ua', 'enable', 'cc'], capture=True),
71 mock.call(['ua', 'enable', 'fips'], capture=True)])
84 self.assertIn(72 self.assertIn(
85 'DEBUG: Running user-provided ubuntu-advantage commands',73 'WARNING: Failure enabling "esm":\nUnexpected error'
74 ' while running command.\nCommand: -\nExit code: -\nReason: -\n'
75 'Stdout: Invalid ESM credentials\nStderr: -\n',
86 self.logs.getvalue())76 self.logs.getvalue())
87 self.assertEqual('HI\nMOM\n', util.load_file(outfile))
88 self.assertIn(77 self.assertIn(
89 'WARNING: Non-ubuntu-advantage commands in ubuntu-advantage'78 'WARNING: Failure enabling "cc":\nUnexpected error'
90 ' config:',79 ' while running command.\nCommand: -\nExit code: -\nReason: -\n'
80 'Stdout: Invalid CC credentials\nStderr: -\n',
81 self.logs.getvalue())
82 self.assertEqual(
83 'Failure enabling Ubuntu Advantage service(s): "esm", "cc"',
84 str(context_manager.exception))
85
86 @mock.patch('%s.util.subp' % MPATH)
87 def test_configure_ua_attach_with_empty_services(self, m_subp):
88 """When services is an empty list, do not auto-enable attach."""
89 configure_ua(token='SomeToken', enable=[])
90 m_subp.assert_called_once_with(['ua', 'attach', 'SomeToken'])
91 self.assertEqual(
92 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
91 self.logs.getvalue())93 self.logs.getvalue())
9294
93 def test_run_command_dict_sorted_as_command_script(self):95 @mock.patch('%s.util.subp' % MPATH)
94 """When commands are a dict, sort them and run."""96 def test_configure_ua_attach_with_specific_services(self, m_subp):
95 outfile = self.tmp_path('output.log', dir=self.tmp)97 """When services a list, only enable specific services."""
96 cmd1 = 'echo "HI" >> %s' % outfile98 configure_ua(token='SomeToken', enable=['fips'])
97 cmd2 = 'echo "MOM" >> %s' % outfile99 self.assertEqual(
98 commands = {'02': cmd1, '01': cmd2}100 m_subp.call_args_list,
99 with mock.patch('%s.sys.stderr' % MPATH, new_callable=StringIO):101 [mock.call(['ua', 'attach', 'SomeToken']),
100 run_commands(commands=commands)102 mock.call(['ua', 'enable', 'fips'], capture=True)])
103 self.assertEqual(
104 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
105 self.logs.getvalue())
106
107 @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock())
108 @mock.patch('%s.util.subp' % MPATH)
109 def test_configure_ua_attach_with_string_services(self, m_subp):
110 """When services a string, treat as singleton list and warn"""
111 configure_ua(token='SomeToken', enable='fips')
112 self.assertEqual(
113 m_subp.call_args_list,
114 [mock.call(['ua', 'attach', 'SomeToken']),
115 mock.call(['ua', 'enable', 'fips'], capture=True)])
116 self.assertEqual(
117 'WARNING: ubuntu_advantage: enable should be a list, not a'
118 ' string; treating as a single enable\n'
119 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
120 self.logs.getvalue())
101121
102 expected_messages = [122 @mock.patch('%s.util.subp' % MPATH)
103 'DEBUG: Running user-provided ubuntu-advantage commands']123 def test_configure_ua_attach_with_weird_services(self, m_subp):
104 for message in expected_messages:124 """When services not string or list, warn but still attach"""
105 self.assertIn(message, self.logs.getvalue())125 configure_ua(token='SomeToken', enable={'deffo': 'wont work'})
106 self.assertEqual('MOM\nHI\n', util.load_file(outfile))126 self.assertEqual(
127 m_subp.call_args_list,
128 [mock.call(['ua', 'attach', 'SomeToken'])])
129 self.assertEqual(
130 'WARNING: ubuntu_advantage: enable should be a list, not a'
131 ' dict; skipping enabling services\n'
132 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
133 self.logs.getvalue())
107134
108135
109@skipUnlessJsonSchema()136@skipUnlessJsonSchema()
@@ -112,90 +139,50 @@ class TestSchema(CiTestCase, SchemaTestCaseMixin):
112 with_logs = True139 with_logs = True
113 schema = schema140 schema = schema
114141
115 def test_schema_warns_on_ubuntu_advantage_not_as_dict(self):142 @mock.patch('%s.maybe_install_ua_tools' % MPATH)
116 """If ubuntu-advantage configuration is not a dict, emit a warning."""143 @mock.patch('%s.configure_ua' % MPATH)
117 validate_cloudconfig_schema({'ubuntu-advantage': 'wrong type'}, schema)144 def test_schema_warns_on_ubuntu_advantage_not_dict(self, _cfg, _):
145 """If ubuntu_advantage configuration is not a dict, emit a warning."""
146 validate_cloudconfig_schema({'ubuntu_advantage': 'wrong type'}, schema)
118 self.assertEqual(147 self.assertEqual(
119 "WARNING: Invalid config:\nubuntu-advantage: 'wrong type' is not"148 "WARNING: Invalid config:\nubuntu_advantage: 'wrong type' is not"
120 " of type 'object'\n",149 " of type 'object'\n",
121 self.logs.getvalue())150 self.logs.getvalue())
122151
123 @mock.patch('%s.run_commands' % MPATH)152 @mock.patch('%s.maybe_install_ua_tools' % MPATH)
124 def test_schema_disallows_unknown_keys(self, _):153 @mock.patch('%s.configure_ua' % MPATH)
125 """Unknown keys in ubuntu-advantage configuration emit warnings."""154 def test_schema_disallows_unknown_keys(self, _cfg, _):
155 """Unknown keys in ubuntu_advantage configuration emit warnings."""
126 validate_cloudconfig_schema(156 validate_cloudconfig_schema(
127 {'ubuntu-advantage': {'commands': ['ls'], 'invalid-key': ''}},157 {'ubuntu_advantage': {'token': 'winner', 'invalid-key': ''}},
128 schema)158 schema)
129 self.assertIn(159 self.assertIn(
130 'WARNING: Invalid config:\nubuntu-advantage: Additional properties'160 'WARNING: Invalid config:\nubuntu_advantage: Additional properties'
131 " are not allowed ('invalid-key' was unexpected)",161 " are not allowed ('invalid-key' was unexpected)",
132 self.logs.getvalue())162 self.logs.getvalue())
133163
134 def test_warn_schema_requires_commands(self):164 @mock.patch('%s.maybe_install_ua_tools' % MPATH)
135 """Warn when ubuntu-advantage configuration lacks commands."""165 @mock.patch('%s.configure_ua' % MPATH)
136 validate_cloudconfig_schema(166 def test_warn_schema_requires_token(self, _cfg, _):
137 {'ubuntu-advantage': {}}, schema)167 """Warn if ubuntu_advantage configuration lacks token."""
138 self.assertEqual(
139 "WARNING: Invalid config:\nubuntu-advantage: 'commands' is a"
140 " required property\n",
141 self.logs.getvalue())
142
143 @mock.patch('%s.run_commands' % MPATH)
144 def test_warn_schema_commands_is_not_list_or_dict(self, _):
145 """Warn when ubuntu-advantage:commands config is not a list or dict."""
146 validate_cloudconfig_schema(168 validate_cloudconfig_schema(
147 {'ubuntu-advantage': {'commands': 'broken'}}, schema)169 {'ubuntu_advantage': {'enable': ['esm']}}, schema)
148 self.assertEqual(170 self.assertEqual(
149 "WARNING: Invalid config:\nubuntu-advantage.commands: 'broken' is"171 "WARNING: Invalid config:\nubuntu_advantage:"
150 " not of type 'object', 'array'\n",172 " 'token' is a required property\n", self.logs.getvalue())
151 self.logs.getvalue())
152173
153 @mock.patch('%s.run_commands' % MPATH)174 @mock.patch('%s.maybe_install_ua_tools' % MPATH)
154 def test_warn_schema_when_commands_is_empty(self, _):175 @mock.patch('%s.configure_ua' % MPATH)
155 """Emit warnings when ubuntu-advantage:commands is empty."""176 def test_warn_schema_services_is_not_list_or_dict(self, _cfg, _):
156 validate_cloudconfig_schema(177 """Warn when ubuntu_advantage:enable config is not a list."""
157 {'ubuntu-advantage': {'commands': []}}, schema)
158 validate_cloudconfig_schema(178 validate_cloudconfig_schema(
159 {'ubuntu-advantage': {'commands': {}}}, schema)179 {'ubuntu_advantage': {'enable': 'needslist'}}, schema)
160 self.assertEqual(180 self.assertEqual(
161 "WARNING: Invalid config:\nubuntu-advantage.commands: [] is too"181 "WARNING: Invalid config:\nubuntu_advantage: 'token' is a"
162 " short\nWARNING: Invalid config:\nubuntu-advantage.commands: {}"182 " required property\nubuntu_advantage.enable: 'needslist'"
163 " does not have enough properties\n",183 " is not of type 'array'\n",
164 self.logs.getvalue())184 self.logs.getvalue())
165185
166 @mock.patch('%s.run_commands' % MPATH)
167 def test_schema_when_commands_are_list_or_dict(self, _):
168 """No warnings when ubuntu-advantage:commands are a list or dict."""
169 validate_cloudconfig_schema(
170 {'ubuntu-advantage': {'commands': ['valid']}}, schema)
171 validate_cloudconfig_schema(
172 {'ubuntu-advantage': {'commands': {'01': 'also valid'}}}, schema)
173 self.assertEqual('', self.logs.getvalue())
174
175 def test_duplicates_are_fine_array_array(self):
176 """Duplicated commands array/array entries are allowed."""
177 self.assertSchemaValid(
178 {'commands': [["echo", "bye"], ["echo" "bye"]]},
179 "command entries can be duplicate.")
180
181 def test_duplicates_are_fine_array_string(self):
182 """Duplicated commands array/string entries are allowed."""
183 self.assertSchemaValid(
184 {'commands': ["echo bye", "echo bye"]},
185 "command entries can be duplicate.")
186
187 def test_duplicates_are_fine_dict_array(self):
188 """Duplicated commands dict/array entries are allowed."""
189 self.assertSchemaValid(
190 {'commands': {'00': ["echo", "bye"], '01': ["echo", "bye"]}},
191 "command entries can be duplicate.")
192
193 def test_duplicates_are_fine_dict_string(self):
194 """Duplicated commands dict/string entries are allowed."""
195 self.assertSchemaValid(
196 {'commands': {'00': "echo bye", '01': "echo bye"}},
197 "command entries can be duplicate.")
198
199186
200class TestHandle(CiTestCase):187class TestHandle(CiTestCase):
201188
@@ -205,41 +192,89 @@ class TestHandle(CiTestCase):
205 super(TestHandle, self).setUp()192 super(TestHandle, self).setUp()
206 self.tmp = self.tmp_dir()193 self.tmp = self.tmp_dir()
207194
208 @mock.patch('%s.run_commands' % MPATH)
209 @mock.patch('%s.validate_cloudconfig_schema' % MPATH)195 @mock.patch('%s.validate_cloudconfig_schema' % MPATH)
210 def test_handle_no_config(self, m_schema, m_run):196 def test_handle_no_config(self, m_schema):
211 """When no ua-related configuration is provided, nothing happens."""197 """When no ua-related configuration is provided, nothing happens."""
212 cfg = {}198 cfg = {}
213 handle('ua-test', cfg=cfg, cloud=None, log=self.logger, args=None)199 handle('ua-test', cfg=cfg, cloud=None, log=self.logger, args=None)
214 self.assertIn(200 self.assertIn(
215 "DEBUG: Skipping module named ua-test, no 'ubuntu-advantage' key"201 "DEBUG: Skipping module named ua-test, no 'ubuntu_advantage'"
216 " in config",202 ' configuration found',
217 self.logs.getvalue())203 self.logs.getvalue())
218 m_schema.assert_not_called()204 m_schema.assert_not_called()
219 m_run.assert_not_called()
220205
206 @mock.patch('%s.configure_ua' % MPATH)
221 @mock.patch('%s.maybe_install_ua_tools' % MPATH)207 @mock.patch('%s.maybe_install_ua_tools' % MPATH)
222 def test_handle_tries_to_install_ubuntu_advantage_tools(self, m_install):208 def test_handle_tries_to_install_ubuntu_advantage_tools(
209 self, m_install, m_cfg):
223 """If ubuntu_advantage is provided, try installing ua-tools package."""210 """If ubuntu_advantage is provided, try installing ua-tools package."""
224 cfg = {'ubuntu-advantage': {}}211 cfg = {'ubuntu_advantage': {'token': 'valid'}}
225 mycloud = FakeCloud(None)212 mycloud = FakeCloud(None)
226 handle('nomatter', cfg=cfg, cloud=mycloud, log=self.logger, args=None)213 handle('nomatter', cfg=cfg, cloud=mycloud, log=self.logger, args=None)
227 m_install.assert_called_once_with(mycloud)214 m_install.assert_called_once_with(mycloud)
228215
216 @mock.patch('%s.configure_ua' % MPATH)
229 @mock.patch('%s.maybe_install_ua_tools' % MPATH)217 @mock.patch('%s.maybe_install_ua_tools' % MPATH)
230 def test_handle_runs_commands_provided(self, m_install):218 def test_handle_passes_credentials_and_services_to_configure_ua(
231 """When commands are specified as a list, run them."""219 self, m_install, m_configure_ua):
232 outfile = self.tmp_path('output.log', dir=self.tmp)220 """All ubuntu_advantage config keys are passed to configure_ua."""
221 cfg = {'ubuntu_advantage': {'token': 'token', 'enable': ['esm']}}
222 handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None)
223 m_configure_ua.assert_called_once_with(
224 token='token', enable=['esm'])
225
226 @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock())
227 @mock.patch('%s.configure_ua' % MPATH)
228 def test_handle_warns_on_deprecated_ubuntu_advantage_key_w_config(
229 self, m_configure_ua):
230 """Warning when ubuntu-advantage key is present with new config"""
231 cfg = {'ubuntu-advantage': {'token': 'token', 'enable': ['esm']}}
232 handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None)
233 self.assertEqual(
234 'WARNING: Deprecated configuration key "ubuntu-advantage"'
235 ' provided. Expected underscore delimited "ubuntu_advantage";'
236 ' will attempt to continue.',
237 self.logs.getvalue().splitlines()[0])
238 m_configure_ua.assert_called_once_with(
239 token='token', enable=['esm'])
240
241 def test_handle_error_on_deprecated_commands_key_dashed(self):
242 """Error when commands is present in ubuntu-advantage key."""
243 cfg = {'ubuntu-advantage': {'commands': 'nogo'}}
244 with self.assertRaises(RuntimeError) as context_manager:
245 handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None)
246 self.assertEqual(
247 'Deprecated configuration "ubuntu-advantage: commands" provided.'
248 ' Expected "token"',
249 str(context_manager.exception))
250
251 def test_handle_error_on_deprecated_commands_key_underscored(self):
252 """Error when commands is present in ubuntu_advantage key."""
253 cfg = {'ubuntu_advantage': {'commands': 'nogo'}}
254 with self.assertRaises(RuntimeError) as context_manager:
255 handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None)
256 self.assertEqual(
257 'Deprecated configuration "ubuntu-advantage: commands" provided.'
258 ' Expected "token"',
259 str(context_manager.exception))
233260
261 @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock())
262 @mock.patch('%s.configure_ua' % MPATH)
263 def test_handle_prefers_new_style_config(
264 self, m_configure_ua):
265 """ubuntu_advantage should be preferred over ubuntu-advantage"""
234 cfg = {266 cfg = {
235 'ubuntu-advantage': {'commands': ['echo "HI" >> %s' % outfile,267 'ubuntu-advantage': {'token': 'nope', 'enable': ['wrong']},
236 'echo "MOM" >> %s' % outfile]}}268 'ubuntu_advantage': {'token': 'token', 'enable': ['esm']},
237 mock_path = '%s.sys.stderr' % MPATH269 }
238 with self.allow_subp([CiTestCase.SUBP_SHELL_TRUE]):270 handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None)
239 with mock.patch(mock_path, new_callable=StringIO):271 self.assertEqual(
240 handle('nomatter', cfg=cfg, cloud=None, log=self.logger,272 'WARNING: Deprecated configuration key "ubuntu-advantage"'
241 args=None)273 ' provided. Expected underscore delimited "ubuntu_advantage";'
242 self.assertEqual('HI\nMOM\n', util.load_file(outfile))274 ' will attempt to continue.',
275 self.logs.getvalue().splitlines()[0])
276 m_configure_ua.assert_called_once_with(
277 token='token', enable=['esm'])
243278
244279
245class TestMaybeInstallUATools(CiTestCase):280class TestMaybeInstallUATools(CiTestCase):
@@ -253,7 +288,7 @@ class TestMaybeInstallUATools(CiTestCase):
253 @mock.patch('%s.util.which' % MPATH)288 @mock.patch('%s.util.which' % MPATH)
254 def test_maybe_install_ua_tools_noop_when_ua_tools_present(self, m_which):289 def test_maybe_install_ua_tools_noop_when_ua_tools_present(self, m_which):
255 """Do nothing if ubuntu-advantage-tools already exists."""290 """Do nothing if ubuntu-advantage-tools already exists."""
256 m_which.return_value = '/usr/bin/ubuntu-advantage' # already installed291 m_which.return_value = '/usr/bin/ua' # already installed
257 distro = mock.MagicMock()292 distro = mock.MagicMock()
258 distro.update_package_sources.side_effect = RuntimeError(293 distro.update_package_sources.side_effect = RuntimeError(
259 'Some apt error')294 'Some apt error')
diff --git a/cloudinit/config/tests/test_ubuntu_drivers.py b/cloudinit/config/tests/test_ubuntu_drivers.py
260new file mode 100644295new file mode 100644
index 0000000..efba4ce
--- /dev/null
+++ b/cloudinit/config/tests/test_ubuntu_drivers.py
@@ -0,0 +1,174 @@
1# This file is part of cloud-init. See LICENSE file for license information.
2
3import copy
4
5from cloudinit.tests.helpers import CiTestCase, skipUnlessJsonSchema, mock
6from cloudinit.config.schema import (
7 SchemaValidationError, validate_cloudconfig_schema)
8from cloudinit.config import cc_ubuntu_drivers as drivers
9from cloudinit.util import ProcessExecutionError
10
11MPATH = "cloudinit.config.cc_ubuntu_drivers."
12OLD_UBUNTU_DRIVERS_ERROR_STDERR = (
13 "ubuntu-drivers: error: argument <command>: invalid choice: 'install' "
14 "(choose from 'list', 'autoinstall', 'devices', 'debug')\n")
15
16
17class TestUbuntuDrivers(CiTestCase):
18 cfg_accepted = {'drivers': {'nvidia': {'license-accepted': True}}}
19 install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia']
20
21 with_logs = True
22
23 @skipUnlessJsonSchema()
24 def test_schema_requires_boolean_for_license_accepted(self):
25 with self.assertRaisesRegex(
26 SchemaValidationError, ".*license-accepted.*TRUE.*boolean"):
27 validate_cloudconfig_schema(
28 {'drivers': {'nvidia': {'license-accepted': "TRUE"}}},
29 schema=drivers.schema, strict=True)
30
31 @mock.patch(MPATH + "util.subp", return_value=('', ''))
32 @mock.patch(MPATH + "util.which", return_value=False)
33 def _assert_happy_path_taken(self, config, m_which, m_subp):
34 """Positive path test through handle. Package should be installed."""
35 myCloud = mock.MagicMock()
36 drivers.handle('ubuntu_drivers', config, myCloud, None, None)
37 self.assertEqual([mock.call(['ubuntu-drivers-common'])],
38 myCloud.distro.install_packages.call_args_list)
39 self.assertEqual([mock.call(self.install_gpgpu)],
40 m_subp.call_args_list)
41
42 def test_handle_does_package_install(self):
43 self._assert_happy_path_taken(self.cfg_accepted)
44
45 def test_trueish_strings_are_considered_approval(self):
46 for true_value in ['yes', 'true', 'on', '1']:
47 new_config = copy.deepcopy(self.cfg_accepted)
48 new_config['drivers']['nvidia']['license-accepted'] = true_value
49 self._assert_happy_path_taken(new_config)
50
51 @mock.patch(MPATH + "util.subp", side_effect=ProcessExecutionError(
52 stdout='No drivers found for installation.\n', exit_code=1))
53 @mock.patch(MPATH + "util.which", return_value=False)
54 def test_handle_raises_error_if_no_drivers_found(self, m_which, m_subp):
55 """If ubuntu-drivers doesn't install any drivers, raise an error."""
56 myCloud = mock.MagicMock()
57 with self.assertRaises(Exception):
58 drivers.handle(
59 'ubuntu_drivers', self.cfg_accepted, myCloud, None, None)
60 self.assertEqual([mock.call(['ubuntu-drivers-common'])],
61 myCloud.distro.install_packages.call_args_list)
62 self.assertEqual([mock.call(self.install_gpgpu)],
63 m_subp.call_args_list)
64 self.assertIn('ubuntu-drivers found no drivers for installation',
65 self.logs.getvalue())
66
67 @mock.patch(MPATH + "util.subp", return_value=('', ''))
68 @mock.patch(MPATH + "util.which", return_value=False)
69 def _assert_inert_with_config(self, config, m_which, m_subp):
70 """Helper to reduce repetition when testing negative cases"""
71 myCloud = mock.MagicMock()
72 drivers.handle('ubuntu_drivers', config, myCloud, None, None)
73 self.assertEqual(0, myCloud.distro.install_packages.call_count)
74 self.assertEqual(0, m_subp.call_count)
75
76 def test_handle_inert_if_license_not_accepted(self):
77 """Ensure we don't do anything if the license is rejected."""
78 self._assert_inert_with_config(
79 {'drivers': {'nvidia': {'license-accepted': False}}})
80
81 def test_handle_inert_if_garbage_in_license_field(self):
82 """Ensure we don't do anything if unknown text is in license field."""
83 self._assert_inert_with_config(
84 {'drivers': {'nvidia': {'license-accepted': 'garbage'}}})
85
86 def test_handle_inert_if_no_license_key(self):
87 """Ensure we don't do anything if no license key."""
88 self._assert_inert_with_config({'drivers': {'nvidia': {}}})
89
90 def test_handle_inert_if_no_nvidia_key(self):
91 """Ensure we don't do anything if other license accepted."""
92 self._assert_inert_with_config(
93 {'drivers': {'acme': {'license-accepted': True}}})
94
95 def test_handle_inert_if_string_given(self):
96 """Ensure we don't do anything if string refusal given."""
97 for false_value in ['no', 'false', 'off', '0']:
98 self._assert_inert_with_config(
99 {'drivers': {'nvidia': {'license-accepted': false_value}}})
100
101 @mock.patch(MPATH + "install_drivers")
102 def test_handle_no_drivers_does_nothing(self, m_install_drivers):
103 """If no 'drivers' key in the config, nothing should be done."""
104 myCloud = mock.MagicMock()
105 myLog = mock.MagicMock()
106 drivers.handle('ubuntu_drivers', {'foo': 'bzr'}, myCloud, myLog, None)
107 self.assertIn('Skipping module named',
108 myLog.debug.call_args_list[0][0][0])
109 self.assertEqual(0, m_install_drivers.call_count)
110
111 @mock.patch(MPATH + "util.subp", return_value=('', ''))
112 @mock.patch(MPATH + "util.which", return_value=True)
113 def test_install_drivers_no_install_if_present(self, m_which, m_subp):
114 """If 'ubuntu-drivers' is present, no package install should occur."""
115 pkg_install = mock.MagicMock()
116 drivers.install_drivers(self.cfg_accepted['drivers'],
117 pkg_install_func=pkg_install)
118 self.assertEqual(0, pkg_install.call_count)
119 self.assertEqual([mock.call('ubuntu-drivers')],
120 m_which.call_args_list)
121 self.assertEqual([mock.call(self.install_gpgpu)],
122 m_subp.call_args_list)
123
124 def test_install_drivers_rejects_invalid_config(self):
125 """install_drivers should raise TypeError if not given a config dict"""
126 pkg_install = mock.MagicMock()
127 with self.assertRaisesRegex(TypeError, ".*expected dict.*"):
128 drivers.install_drivers("mystring", pkg_install_func=pkg_install)
129 self.assertEqual(0, pkg_install.call_count)
130
131 @mock.patch(MPATH + "util.subp", side_effect=ProcessExecutionError(
132 stderr=OLD_UBUNTU_DRIVERS_ERROR_STDERR, exit_code=2))
133 @mock.patch(MPATH + "util.which", return_value=False)
134 def test_install_drivers_handles_old_ubuntu_drivers_gracefully(
135 self, m_which, m_subp):
136 """Older ubuntu-drivers versions should emit message and raise error"""
137 myCloud = mock.MagicMock()
138 with self.assertRaises(Exception):
139 drivers.handle(
140 'ubuntu_drivers', self.cfg_accepted, myCloud, None, None)
141 self.assertEqual([mock.call(['ubuntu-drivers-common'])],
142 myCloud.distro.install_packages.call_args_list)
143 self.assertEqual([mock.call(self.install_gpgpu)],
144 m_subp.call_args_list)
145 self.assertIn('WARNING: the available version of ubuntu-drivers is'
146 ' too old to perform requested driver installation',
147 self.logs.getvalue())
148
149
150# Sub-class TestUbuntuDrivers to run the same test cases, but with a version
151class TestUbuntuDriversWithVersion(TestUbuntuDrivers):
152 cfg_accepted = {
153 'drivers': {'nvidia': {'license-accepted': True, 'version': '123'}}}
154 install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia:123']
155
156 @mock.patch(MPATH + "util.subp", return_value=('', ''))
157 @mock.patch(MPATH + "util.which", return_value=False)
158 def test_version_none_uses_latest(self, m_which, m_subp):
159 myCloud = mock.MagicMock()
160 version_none_cfg = {
161 'drivers': {'nvidia': {'license-accepted': True, 'version': None}}}
162 drivers.handle(
163 'ubuntu_drivers', version_none_cfg, myCloud, None, None)
164 self.assertEqual(
165 [mock.call(['ubuntu-drivers', 'install', '--gpgpu', 'nvidia'])],
166 m_subp.call_args_list)
167
168 def test_specifying_a_version_doesnt_override_license_acceptance(self):
169 self._assert_inert_with_config({
170 'drivers': {'nvidia': {'license-accepted': False,
171 'version': '123'}}
172 })
173
174# vi: ts=4 expandtab
diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
index 6423632..b129bb6 100644
--- a/cloudinit/net/eni.py
+++ b/cloudinit/net/eni.py
@@ -366,8 +366,6 @@ class Renderer(renderer.Renderer):
366 down = indent + "pre-down route del"366 down = indent + "pre-down route del"
367 or_true = " || true"367 or_true = " || true"
368 mapping = {368 mapping = {
369 'network': '-net',
370 'netmask': 'netmask',
371 'gateway': 'gw',369 'gateway': 'gw',
372 'metric': 'metric',370 'metric': 'metric',
373 }371 }
@@ -379,13 +377,21 @@ class Renderer(renderer.Renderer):
379 default_gw = ' -A inet6 default'377 default_gw = ' -A inet6 default'
380378
381 route_line = ''379 route_line = ''
382 for k in ['network', 'netmask', 'gateway', 'metric']:380 for k in ['network', 'gateway', 'metric']:
383 if default_gw and k in ['network', 'netmask']:381 if default_gw and k == 'network':
384 continue382 continue
385 if k == 'gateway':383 if k == 'gateway':
386 route_line += '%s %s %s' % (default_gw, mapping[k], route[k])384 route_line += '%s %s %s' % (default_gw, mapping[k], route[k])
387 elif k in route:385 elif k in route:
388 route_line += ' %s %s' % (mapping[k], route[k])386 if k == 'network':
387 if ':' in route[k]:
388 route_line += ' -A inet6'
389 else:
390 route_line += ' -net'
391 if 'prefix' in route:
392 route_line += ' %s/%s' % (route[k], route['prefix'])
393 else:
394 route_line += ' %s %s' % (mapping[k], route[k])
389 content.append(up + route_line + or_true)395 content.append(up + route_line + or_true)
390 content.append(down + route_line + or_true)396 content.append(down + route_line + or_true)
391 return content397 return content
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index 539b76d..4d19f56 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -148,6 +148,7 @@ class NetworkState(object):
148 self._network_state = copy.deepcopy(network_state)148 self._network_state = copy.deepcopy(network_state)
149 self._version = version149 self._version = version
150 self.use_ipv6 = network_state.get('use_ipv6', False)150 self.use_ipv6 = network_state.get('use_ipv6', False)
151 self._has_default_route = None
151152
152 @property153 @property
153 def config(self):154 def config(self):
@@ -157,14 +158,6 @@ class NetworkState(object):
157 def version(self):158 def version(self):
158 return self._version159 return self._version
159160
160 def iter_routes(self, filter_func=None):
161 for route in self._network_state.get('routes', []):
162 if filter_func is not None:
163 if filter_func(route):
164 yield route
165 else:
166 yield route
167
168 @property161 @property
169 def dns_nameservers(self):162 def dns_nameservers(self):
170 try:163 try:
@@ -179,6 +172,12 @@ class NetworkState(object):
179 except KeyError:172 except KeyError:
180 return []173 return []
181174
175 @property
176 def has_default_route(self):
177 if self._has_default_route is None:
178 self._has_default_route = self._maybe_has_default_route()
179 return self._has_default_route
180
182 def iter_interfaces(self, filter_func=None):181 def iter_interfaces(self, filter_func=None):
183 ifaces = self._network_state.get('interfaces', {})182 ifaces = self._network_state.get('interfaces', {})
184 for iface in six.itervalues(ifaces):183 for iface in six.itervalues(ifaces):
@@ -188,6 +187,32 @@ class NetworkState(object):
188 if filter_func(iface):187 if filter_func(iface):
189 yield iface188 yield iface
190189
190 def iter_routes(self, filter_func=None):
191 for route in self._network_state.get('routes', []):
192 if filter_func is not None:
193 if filter_func(route):
194 yield route
195 else:
196 yield route
197
198 def _maybe_has_default_route(self):
199 for route in self.iter_routes():
200 if self._is_default_route(route):
201 return True
202 for iface in self.iter_interfaces():
203 for subnet in iface.get('subnets', []):
204 for route in subnet.get('routes', []):
205 if self._is_default_route(route):
206 return True
207 return False
208
209 def _is_default_route(self, route):
210 default_nets = ('::', '0.0.0.0')
211 return (
212 route.get('prefix') == 0
213 and route.get('network') in default_nets
214 )
215
191216
192@six.add_metaclass(CommandHandlerMeta)217@six.add_metaclass(CommandHandlerMeta)
193class NetworkStateInterpreter(object):218class NetworkStateInterpreter(object):
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index 19b3e60..0998392 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -322,7 +322,7 @@ class Renderer(renderer.Renderer):
322 iface_cfg[new_key] = old_value322 iface_cfg[new_key] = old_value
323323
324 @classmethod324 @classmethod
325 def _render_subnets(cls, iface_cfg, subnets):325 def _render_subnets(cls, iface_cfg, subnets, has_default_route):
326 # setting base values326 # setting base values
327 iface_cfg['BOOTPROTO'] = 'none'327 iface_cfg['BOOTPROTO'] = 'none'
328328
@@ -331,6 +331,7 @@ class Renderer(renderer.Renderer):
331 mtu_key = 'MTU'331 mtu_key = 'MTU'
332 subnet_type = subnet.get('type')332 subnet_type = subnet.get('type')
333 if subnet_type == 'dhcp6':333 if subnet_type == 'dhcp6':
334 # TODO need to set BOOTPROTO to dhcp6 on SUSE
334 iface_cfg['IPV6INIT'] = True335 iface_cfg['IPV6INIT'] = True
335 iface_cfg['DHCPV6C'] = True336 iface_cfg['DHCPV6C'] = True
336 elif subnet_type in ['dhcp4', 'dhcp']:337 elif subnet_type in ['dhcp4', 'dhcp']:
@@ -375,9 +376,9 @@ class Renderer(renderer.Renderer):
375 ipv6_index = -1376 ipv6_index = -1
376 for i, subnet in enumerate(subnets, start=len(iface_cfg.children)):377 for i, subnet in enumerate(subnets, start=len(iface_cfg.children)):
377 subnet_type = subnet.get('type')378 subnet_type = subnet.get('type')
378 if subnet_type == 'dhcp6':379 if subnet_type in ['dhcp', 'dhcp4', 'dhcp6']:
379 continue380 if has_default_route and iface_cfg['BOOTPROTO'] != 'none':
380 elif subnet_type in ['dhcp4', 'dhcp']:381 iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = False
381 continue382 continue
382 elif subnet_type == 'static':383 elif subnet_type == 'static':
383 if subnet_is_ipv6(subnet):384 if subnet_is_ipv6(subnet):
@@ -385,10 +386,13 @@ class Renderer(renderer.Renderer):
385 ipv6_cidr = "%s/%s" % (subnet['address'], subnet['prefix'])386 ipv6_cidr = "%s/%s" % (subnet['address'], subnet['prefix'])
386 if ipv6_index == 0:387 if ipv6_index == 0:
387 iface_cfg['IPV6ADDR'] = ipv6_cidr388 iface_cfg['IPV6ADDR'] = ipv6_cidr
389 iface_cfg['IPADDR6'] = ipv6_cidr
388 elif ipv6_index == 1:390 elif ipv6_index == 1:
389 iface_cfg['IPV6ADDR_SECONDARIES'] = ipv6_cidr391 iface_cfg['IPV6ADDR_SECONDARIES'] = ipv6_cidr
392 iface_cfg['IPADDR6_0'] = ipv6_cidr
390 else:393 else:
391 iface_cfg['IPV6ADDR_SECONDARIES'] += " " + ipv6_cidr394 iface_cfg['IPV6ADDR_SECONDARIES'] += " " + ipv6_cidr
395 iface_cfg['IPADDR6_%d' % ipv6_index] = ipv6_cidr
392 else:396 else:
393 ipv4_index = ipv4_index + 1397 ipv4_index = ipv4_index + 1
394 suff = "" if ipv4_index == 0 else str(ipv4_index)398 suff = "" if ipv4_index == 0 else str(ipv4_index)
@@ -443,6 +447,8 @@ class Renderer(renderer.Renderer):
443 # TODO(harlowja): add validation that no other iface has447 # TODO(harlowja): add validation that no other iface has
444 # also provided the default route?448 # also provided the default route?
445 iface_cfg['DEFROUTE'] = True449 iface_cfg['DEFROUTE'] = True
450 if iface_cfg['BOOTPROTO'] in ('dhcp', 'dhcp4', 'dhcp6'):
451 iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = True
446 if 'gateway' in route:452 if 'gateway' in route:
447 if is_ipv6 or is_ipv6_addr(route['gateway']):453 if is_ipv6 or is_ipv6_addr(route['gateway']):
448 iface_cfg['IPV6_DEFAULTGW'] = route['gateway']454 iface_cfg['IPV6_DEFAULTGW'] = route['gateway']
@@ -493,7 +499,9 @@ class Renderer(renderer.Renderer):
493 iface_cfg = iface_contents[iface_name]499 iface_cfg = iface_contents[iface_name]
494 route_cfg = iface_cfg.routes500 route_cfg = iface_cfg.routes
495501
496 cls._render_subnets(iface_cfg, iface_subnets)502 cls._render_subnets(
503 iface_cfg, iface_subnets, network_state.has_default_route
504 )
497 cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)505 cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
498506
499 @classmethod507 @classmethod
@@ -518,7 +526,9 @@ class Renderer(renderer.Renderer):
518526
519 iface_subnets = iface.get("subnets", [])527 iface_subnets = iface.get("subnets", [])
520 route_cfg = iface_cfg.routes528 route_cfg = iface_cfg.routes
521 cls._render_subnets(iface_cfg, iface_subnets)529 cls._render_subnets(
530 iface_cfg, iface_subnets, network_state.has_default_route
531 )
522 cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)532 cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
523533
524 # iter_interfaces on network-state is not sorted to produce534 # iter_interfaces on network-state is not sorted to produce
@@ -547,7 +557,9 @@ class Renderer(renderer.Renderer):
547557
548 iface_subnets = iface.get("subnets", [])558 iface_subnets = iface.get("subnets", [])
549 route_cfg = iface_cfg.routes559 route_cfg = iface_cfg.routes
550 cls._render_subnets(iface_cfg, iface_subnets)560 cls._render_subnets(
561 iface_cfg, iface_subnets, network_state.has_default_route
562 )
551 cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)563 cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
552564
553 @staticmethod565 @staticmethod
@@ -608,7 +620,9 @@ class Renderer(renderer.Renderer):
608620
609 iface_subnets = iface.get("subnets", [])621 iface_subnets = iface.get("subnets", [])
610 route_cfg = iface_cfg.routes622 route_cfg = iface_cfg.routes
611 cls._render_subnets(iface_cfg, iface_subnets)623 cls._render_subnets(
624 iface_cfg, iface_subnets, network_state.has_default_route
625 )
612 cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)626 cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
613627
614 @classmethod628 @classmethod
@@ -620,7 +634,9 @@ class Renderer(renderer.Renderer):
620 iface_cfg.kind = 'infiniband'634 iface_cfg.kind = 'infiniband'
621 iface_subnets = iface.get("subnets", [])635 iface_subnets = iface.get("subnets", [])
622 route_cfg = iface_cfg.routes636 route_cfg = iface_cfg.routes
623 cls._render_subnets(iface_cfg, iface_subnets)637 cls._render_subnets(
638 iface_cfg, iface_subnets, network_state.has_default_route
639 )
624 cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)640 cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
625641
626 @classmethod642 @classmethod
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
627old mode 100644643old mode 100644
628new mode 100755644new mode 100755
index eccbee5..76b1661
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -21,10 +21,14 @@ from cloudinit import net
21from cloudinit.event import EventType21from cloudinit.event import EventType
22from cloudinit.net.dhcp import EphemeralDHCPv422from cloudinit.net.dhcp import EphemeralDHCPv4
23from cloudinit import sources23from cloudinit import sources
24from cloudinit.sources.helpers.azure import get_metadata_from_fabric
25from cloudinit.sources.helpers import netlink24from cloudinit.sources.helpers import netlink
26from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc25from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc
27from cloudinit import util26from cloudinit import util
27from cloudinit.reporting import events
28
29from cloudinit.sources.helpers.azure import (azure_ds_reporter,
30 azure_ds_telemetry_reporter,
31 get_metadata_from_fabric)
2832
29LOG = logging.getLogger(__name__)33LOG = logging.getLogger(__name__)
3034
@@ -54,6 +58,7 @@ REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds"
54REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready"58REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready"
55AGENT_SEED_DIR = '/var/lib/waagent'59AGENT_SEED_DIR = '/var/lib/waagent'
56IMDS_URL = "http://169.254.169.254/metadata/"60IMDS_URL = "http://169.254.169.254/metadata/"
61PLATFORM_ENTROPY_SOURCE = "/sys/firmware/acpi/tables/OEM0"
5762
58# List of static scripts and network config artifacts created by63# List of static scripts and network config artifacts created by
59# stock ubuntu suported images.64# stock ubuntu suported images.
@@ -195,6 +200,8 @@ if util.is_FreeBSD():
195 RESOURCE_DISK_PATH = "/dev/" + res_disk200 RESOURCE_DISK_PATH = "/dev/" + res_disk
196 else:201 else:
197 LOG.debug("resource disk is None")202 LOG.debug("resource disk is None")
203 # TODO Find where platform entropy data is surfaced
204 PLATFORM_ENTROPY_SOURCE = None
198205
199BUILTIN_DS_CONFIG = {206BUILTIN_DS_CONFIG = {
200 'agent_command': AGENT_START_BUILTIN,207 'agent_command': AGENT_START_BUILTIN,
@@ -241,6 +248,7 @@ def set_hostname(hostname, hostname_command='hostname'):
241 util.subp([hostname_command, hostname])248 util.subp([hostname_command, hostname])
242249
243250
251@azure_ds_telemetry_reporter
244@contextlib.contextmanager252@contextlib.contextmanager
245def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):253def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):
246 """254 """
@@ -287,6 +295,7 @@ class DataSourceAzure(sources.DataSource):
287 root = sources.DataSource.__str__(self)295 root = sources.DataSource.__str__(self)
288 return "%s [seed=%s]" % (root, self.seed)296 return "%s [seed=%s]" % (root, self.seed)
289297
298 @azure_ds_telemetry_reporter
290 def bounce_network_with_azure_hostname(self):299 def bounce_network_with_azure_hostname(self):
291 # When using cloud-init to provision, we have to set the hostname from300 # When using cloud-init to provision, we have to set the hostname from
292 # the metadata and "bounce" the network to force DDNS to update via301 # the metadata and "bounce" the network to force DDNS to update via
@@ -312,6 +321,7 @@ class DataSourceAzure(sources.DataSource):
312 util.logexc(LOG, "handling set_hostname failed")321 util.logexc(LOG, "handling set_hostname failed")
313 return False322 return False
314323
324 @azure_ds_telemetry_reporter
315 def get_metadata_from_agent(self):325 def get_metadata_from_agent(self):
316 temp_hostname = self.metadata.get('local-hostname')326 temp_hostname = self.metadata.get('local-hostname')
317 agent_cmd = self.ds_cfg['agent_command']327 agent_cmd = self.ds_cfg['agent_command']
@@ -341,15 +351,18 @@ class DataSourceAzure(sources.DataSource):
341 LOG.debug("ssh authentication: "351 LOG.debug("ssh authentication: "
342 "using fingerprint from fabirc")352 "using fingerprint from fabirc")
343353
344 # wait very long for public SSH keys to arrive354 with events.ReportEventStack(
345 # https://bugs.launchpad.net/cloud-init/+bug/1717611355 name="waiting-for-ssh-public-key",
346 missing = util.log_time(logfunc=LOG.debug,356 description="wait for agents to retrieve ssh keys",
347 msg="waiting for SSH public key files",357 parent=azure_ds_reporter):
348 func=util.wait_for_files,358 # wait very long for public SSH keys to arrive
349 args=(fp_files, 900))359 # https://bugs.launchpad.net/cloud-init/+bug/1717611
350360 missing = util.log_time(logfunc=LOG.debug,
351 if len(missing):361 msg="waiting for SSH public key files",
352 LOG.warning("Did not find files, but going on: %s", missing)362 func=util.wait_for_files,
363 args=(fp_files, 900))
364 if len(missing):
365 LOG.warning("Did not find files, but going on: %s", missing)
353366
354 metadata = {}367 metadata = {}
355 metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)368 metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)
@@ -363,6 +376,7 @@ class DataSourceAzure(sources.DataSource):
363 subplatform_type = 'seed-dir'376 subplatform_type = 'seed-dir'
364 return '%s (%s)' % (subplatform_type, self.seed)377 return '%s (%s)' % (subplatform_type, self.seed)
365378
379 @azure_ds_telemetry_reporter
366 def crawl_metadata(self):380 def crawl_metadata(self):
367 """Walk all instance metadata sources returning a dict on success.381 """Walk all instance metadata sources returning a dict on success.
368382
@@ -464,6 +478,7 @@ class DataSourceAzure(sources.DataSource):
464 super(DataSourceAzure, self).clear_cached_attrs(attr_defaults)478 super(DataSourceAzure, self).clear_cached_attrs(attr_defaults)
465 self._metadata_imds = sources.UNSET479 self._metadata_imds = sources.UNSET
466480
481 @azure_ds_telemetry_reporter
467 def _get_data(self):482 def _get_data(self):
468 """Crawl and process datasource metadata caching metadata as attrs.483 """Crawl and process datasource metadata caching metadata as attrs.
469484
@@ -510,6 +525,7 @@ class DataSourceAzure(sources.DataSource):
510 # quickly (local check only) if self.instance_id is still valid525 # quickly (local check only) if self.instance_id is still valid
511 return sources.instance_id_matches_system_uuid(self.get_instance_id())526 return sources.instance_id_matches_system_uuid(self.get_instance_id())
512527
528 @azure_ds_telemetry_reporter
513 def setup(self, is_new_instance):529 def setup(self, is_new_instance):
514 if self._negotiated is False:530 if self._negotiated is False:
515 LOG.debug("negotiating for %s (new_instance=%s)",531 LOG.debug("negotiating for %s (new_instance=%s)",
@@ -577,6 +593,7 @@ class DataSourceAzure(sources.DataSource):
577 if nl_sock:593 if nl_sock:
578 nl_sock.close()594 nl_sock.close()
579595
596 @azure_ds_telemetry_reporter
580 def _report_ready(self, lease):597 def _report_ready(self, lease):
581 """Tells the fabric provisioning has completed """598 """Tells the fabric provisioning has completed """
582 try:599 try:
@@ -614,9 +631,14 @@ class DataSourceAzure(sources.DataSource):
614 def _reprovision(self):631 def _reprovision(self):
615 """Initiate the reprovisioning workflow."""632 """Initiate the reprovisioning workflow."""
616 contents = self._poll_imds()633 contents = self._poll_imds()
617 md, ud, cfg = read_azure_ovf(contents)634 with events.ReportEventStack(
618 return (md, ud, cfg, {'ovf-env.xml': contents})635 name="reprovisioning-read-azure-ovf",
619636 description="read azure ovf during reprovisioning",
637 parent=azure_ds_reporter):
638 md, ud, cfg = read_azure_ovf(contents)
639 return (md, ud, cfg, {'ovf-env.xml': contents})
640
641 @azure_ds_telemetry_reporter
620 def _negotiate(self):642 def _negotiate(self):
621 """Negotiate with fabric and return data from it.643 """Negotiate with fabric and return data from it.
622644
@@ -649,6 +671,7 @@ class DataSourceAzure(sources.DataSource):
649 util.del_file(REPROVISION_MARKER_FILE)671 util.del_file(REPROVISION_MARKER_FILE)
650 return fabric_data672 return fabric_data
651673
674 @azure_ds_telemetry_reporter
652 def activate(self, cfg, is_new_instance):675 def activate(self, cfg, is_new_instance):
653 address_ephemeral_resize(is_new_instance=is_new_instance,676 address_ephemeral_resize(is_new_instance=is_new_instance,
654 preserve_ntfs=self.ds_cfg.get(677 preserve_ntfs=self.ds_cfg.get(
@@ -665,7 +688,7 @@ class DataSourceAzure(sources.DataSource):
665 2. Generate a fallback network config that does not include any of688 2. Generate a fallback network config that does not include any of
666 the blacklisted devices.689 the blacklisted devices.
667 """690 """
668 if not self._network_config:691 if not self._network_config or self._network_config == sources.UNSET:
669 if self.ds_cfg.get('apply_network_config'):692 if self.ds_cfg.get('apply_network_config'):
670 nc_src = self._metadata_imds693 nc_src = self._metadata_imds
671 else:694 else:
@@ -687,12 +710,14 @@ def _partitions_on_device(devpath, maxnum=16):
687 return []710 return []
688711
689712
713@azure_ds_telemetry_reporter
690def _has_ntfs_filesystem(devpath):714def _has_ntfs_filesystem(devpath):
691 ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True)715 ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True)
692 LOG.debug('ntfs_devices found = %s', ntfs_devices)716 LOG.debug('ntfs_devices found = %s', ntfs_devices)
693 return os.path.realpath(devpath) in ntfs_devices717 return os.path.realpath(devpath) in ntfs_devices
694718
695719
720@azure_ds_telemetry_reporter
696def can_dev_be_reformatted(devpath, preserve_ntfs):721def can_dev_be_reformatted(devpath, preserve_ntfs):
697 """Determine if the ephemeral drive at devpath should be reformatted.722 """Determine if the ephemeral drive at devpath should be reformatted.
698723
@@ -741,43 +766,59 @@ def can_dev_be_reformatted(devpath, preserve_ntfs):
741 (cand_part, cand_path, devpath))766 (cand_part, cand_path, devpath))
742 return False, msg767 return False, msg
743768
769 @azure_ds_telemetry_reporter
744 def count_files(mp):770 def count_files(mp):
745 ignored = set(['dataloss_warning_readme.txt'])771 ignored = set(['dataloss_warning_readme.txt'])
746 return len([f for f in os.listdir(mp) if f.lower() not in ignored])772 return len([f for f in os.listdir(mp) if f.lower() not in ignored])
747773
748 bmsg = ('partition %s (%s) on device %s was ntfs formatted' %774 bmsg = ('partition %s (%s) on device %s was ntfs formatted' %
749 (cand_part, cand_path, devpath))775 (cand_part, cand_path, devpath))
750 try:776
751 file_count = util.mount_cb(cand_path, count_files, mtype="ntfs",777 with events.ReportEventStack(
752 update_env_for_mount={'LANG': 'C'})778 name="mount-ntfs-and-count",
753 except util.MountFailedError as e:779 description="mount-ntfs-and-count",
754 if "unknown filesystem type 'ntfs'" in str(e):780 parent=azure_ds_reporter) as evt:
755 return True, (bmsg + ' but this system cannot mount NTFS,'781 try:
756 ' assuming there are no important files.'782 file_count = util.mount_cb(cand_path, count_files, mtype="ntfs",
757 ' Formatting allowed.')783 update_env_for_mount={'LANG': 'C'})
758 return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e)784 except util.MountFailedError as e:
759785 evt.description = "cannot mount ntfs"
760 if file_count != 0:786 if "unknown filesystem type 'ntfs'" in str(e):
761 LOG.warning("it looks like you're using NTFS on the ephemeral disk, "787 return True, (bmsg + ' but this system cannot mount NTFS,'
762 'to ensure that filesystem does not get wiped, set '788 ' assuming there are no important files.'
763 '%s.%s in config', '.'.join(DS_CFG_PATH),789 ' Formatting allowed.')
764 DS_CFG_KEY_PRESERVE_NTFS)790 return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e)
765 return False, bmsg + ' but had %d files on it.' % file_count791
792 if file_count != 0:
793 evt.description = "mounted and counted %d files" % file_count
794 LOG.warning("it looks like you're using NTFS on the ephemeral"
795 " disk, to ensure that filesystem does not get wiped,"
796 " set %s.%s in config", '.'.join(DS_CFG_PATH),
797 DS_CFG_KEY_PRESERVE_NTFS)
798 return False, bmsg + ' but had %d files on it.' % file_count
766799
767 return True, bmsg + ' and had no important files. Safe for reformatting.'800 return True, bmsg + ' and had no important files. Safe for reformatting.'
768801
769802
803@azure_ds_telemetry_reporter
770def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,804def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
771 is_new_instance=False, preserve_ntfs=False):805 is_new_instance=False, preserve_ntfs=False):
772 # wait for ephemeral disk to come up806 # wait for ephemeral disk to come up
773 naplen = .2807 naplen = .2
774 missing = util.wait_for_files([devpath], maxwait=maxwait, naplen=naplen,808 with events.ReportEventStack(
775 log_pre="Azure ephemeral disk: ")809 name="wait-for-ephemeral-disk",
776810 description="wait for ephemeral disk",
777 if missing:811 parent=azure_ds_reporter):
778 LOG.warning("ephemeral device '%s' did not appear after %d seconds.",812 missing = util.wait_for_files([devpath],
779 devpath, maxwait)813 maxwait=maxwait,
780 return814 naplen=naplen,
815 log_pre="Azure ephemeral disk: ")
816
817 if missing:
818 LOG.warning("ephemeral device '%s' did"
819 " not appear after %d seconds.",
820 devpath, maxwait)
821 return
781822
782 result = False823 result = False
783 msg = None824 msg = None
@@ -805,6 +846,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
805 return846 return
806847
807848
849@azure_ds_telemetry_reporter
808def perform_hostname_bounce(hostname, cfg, prev_hostname):850def perform_hostname_bounce(hostname, cfg, prev_hostname):
809 # set the hostname to 'hostname' if it is not already set to that.851 # set the hostname to 'hostname' if it is not already set to that.
810 # then, if policy is not off, bounce the interface using command852 # then, if policy is not off, bounce the interface using command
@@ -840,6 +882,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname):
840 return True882 return True
841883
842884
885@azure_ds_telemetry_reporter
843def crtfile_to_pubkey(fname, data=None):886def crtfile_to_pubkey(fname, data=None):
844 pipeline = ('openssl x509 -noout -pubkey < "$0" |'887 pipeline = ('openssl x509 -noout -pubkey < "$0" |'
845 'ssh-keygen -i -m PKCS8 -f /dev/stdin')888 'ssh-keygen -i -m PKCS8 -f /dev/stdin')
@@ -848,6 +891,7 @@ def crtfile_to_pubkey(fname, data=None):
848 return out.rstrip()891 return out.rstrip()
849892
850893
894@azure_ds_telemetry_reporter
851def pubkeys_from_crt_files(flist):895def pubkeys_from_crt_files(flist):
852 pubkeys = []896 pubkeys = []
853 errors = []897 errors = []
@@ -863,6 +907,7 @@ def pubkeys_from_crt_files(flist):
863 return pubkeys907 return pubkeys
864908
865909
910@azure_ds_telemetry_reporter
866def write_files(datadir, files, dirmode=None):911def write_files(datadir, files, dirmode=None):
867912
868 def _redact_password(cnt, fname):913 def _redact_password(cnt, fname):
@@ -890,6 +935,7 @@ def write_files(datadir, files, dirmode=None):
890 util.write_file(filename=fname, content=content, mode=0o600)935 util.write_file(filename=fname, content=content, mode=0o600)
891936
892937
938@azure_ds_telemetry_reporter
893def invoke_agent(cmd):939def invoke_agent(cmd):
894 # this is a function itself to simplify patching it for test940 # this is a function itself to simplify patching it for test
895 if cmd:941 if cmd:
@@ -909,6 +955,7 @@ def find_child(node, filter_func):
909 return ret955 return ret
910956
911957
958@azure_ds_telemetry_reporter
912def load_azure_ovf_pubkeys(sshnode):959def load_azure_ovf_pubkeys(sshnode):
913 # This parses a 'SSH' node formatted like below, and returns960 # This parses a 'SSH' node formatted like below, and returns
914 # an array of dicts.961 # an array of dicts.
@@ -961,6 +1008,7 @@ def load_azure_ovf_pubkeys(sshnode):
961 return found1008 return found
9621009
9631010
1011@azure_ds_telemetry_reporter
964def read_azure_ovf(contents):1012def read_azure_ovf(contents):
965 try:1013 try:
966 dom = minidom.parseString(contents)1014 dom = minidom.parseString(contents)
@@ -1061,6 +1109,7 @@ def read_azure_ovf(contents):
1061 return (md, ud, cfg)1109 return (md, ud, cfg)
10621110
10631111
1112@azure_ds_telemetry_reporter
1064def _extract_preprovisioned_vm_setting(dom):1113def _extract_preprovisioned_vm_setting(dom):
1065 """Read the preprovision flag from the ovf. It should not1114 """Read the preprovision flag from the ovf. It should not
1066 exist unless true."""1115 exist unless true."""
@@ -1089,6 +1138,7 @@ def encrypt_pass(password, salt_id="$6$"):
1089 return crypt.crypt(password, salt_id + util.rand_str(strlen=16))1138 return crypt.crypt(password, salt_id + util.rand_str(strlen=16))
10901139
10911140
1141@azure_ds_telemetry_reporter
1092def _check_freebsd_cdrom(cdrom_dev):1142def _check_freebsd_cdrom(cdrom_dev):
1093 """Return boolean indicating path to cdrom device has content."""1143 """Return boolean indicating path to cdrom device has content."""
1094 try:1144 try:
@@ -1100,18 +1150,31 @@ def _check_freebsd_cdrom(cdrom_dev):
1100 return False1150 return False
11011151
11021152
1103def _get_random_seed():1153@azure_ds_telemetry_reporter
1154def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE):
1104 """Return content random seed file if available, otherwise,1155 """Return content random seed file if available, otherwise,
1105 return None."""1156 return None."""
1106 # azure / hyper-v provides random data here1157 # azure / hyper-v provides random data here
1107 # TODO. find the seed on FreeBSD platform
1108 # now update ds_cfg to reflect contents pass in config1158 # now update ds_cfg to reflect contents pass in config
1109 if util.is_FreeBSD():1159 if source is None:
1110 return None1160 return None
1111 return util.load_file("/sys/firmware/acpi/tables/OEM0",1161 seed = util.load_file(source, quiet=True, decode=False)
1112 quiet=True, decode=False)1162
1163 # The seed generally contains non-Unicode characters. load_file puts
1164 # them into a str (in python 2) or bytes (in python 3). In python 2,
1165 # bad octets in a str cause util.json_dumps() to throw an exception. In
1166 # python 3, bytes is a non-serializable type, and the handler load_file
1167 # uses applies b64 encoding *again* to handle it. The simplest solution
1168 # is to just b64encode the data and then decode it to a serializable
1169 # string. Same number of bits of entropy, just with 25% more zeroes.
1170 # There's no need to undo this base64-encoding when the random seed is
1171 # actually used in cc_seed_random.py.
1172 seed = base64.b64encode(seed).decode()
1173
1174 return seed
11131175
11141176
1177@azure_ds_telemetry_reporter
1115def list_possible_azure_ds_devs():1178def list_possible_azure_ds_devs():
1116 devlist = []1179 devlist = []
1117 if util.is_FreeBSD():1180 if util.is_FreeBSD():
@@ -1126,6 +1189,7 @@ def list_possible_azure_ds_devs():
1126 return devlist1189 return devlist
11271190
11281191
1192@azure_ds_telemetry_reporter
1129def load_azure_ds_dir(source_dir):1193def load_azure_ds_dir(source_dir):
1130 ovf_file = os.path.join(source_dir, "ovf-env.xml")1194 ovf_file = os.path.join(source_dir, "ovf-env.xml")
11311195
@@ -1148,47 +1212,54 @@ def parse_network_config(imds_metadata):
1148 @param: imds_metadata: Dict of content read from IMDS network service.1212 @param: imds_metadata: Dict of content read from IMDS network service.
1149 @return: Dictionary containing network version 2 standard configuration.1213 @return: Dictionary containing network version 2 standard configuration.
1150 """1214 """
1151 if imds_metadata != sources.UNSET and imds_metadata:1215 with events.ReportEventStack(
1152 netconfig = {'version': 2, 'ethernets': {}}1216 name="parse_network_config",
1153 LOG.debug('Azure: generating network configuration from IMDS')1217 description="",
1154 network_metadata = imds_metadata['network']1218 parent=azure_ds_reporter) as evt:
1155 for idx, intf in enumerate(network_metadata['interface']):1219 if imds_metadata != sources.UNSET and imds_metadata:
1156 nicname = 'eth{idx}'.format(idx=idx)1220 netconfig = {'version': 2, 'ethernets': {}}
1157 dev_config = {}1221 LOG.debug('Azure: generating network configuration from IMDS')
1158 for addr4 in intf['ipv4']['ipAddress']:1222 network_metadata = imds_metadata['network']
1159 privateIpv4 = addr4['privateIpAddress']1223 for idx, intf in enumerate(network_metadata['interface']):
1160 if privateIpv4:1224 nicname = 'eth{idx}'.format(idx=idx)
1161 if dev_config.get('dhcp4', False):1225 dev_config = {}
1162 # Append static address config for nic > 11226 for addr4 in intf['ipv4']['ipAddress']:
1163 netPrefix = intf['ipv4']['subnet'][0].get(1227 privateIpv4 = addr4['privateIpAddress']
1164 'prefix', '24')1228 if privateIpv4:
1165 if not dev_config.get('addresses'):1229 if dev_config.get('dhcp4', False):
1166 dev_config['addresses'] = []1230 # Append static address config for nic > 1
1167 dev_config['addresses'].append(1231 netPrefix = intf['ipv4']['subnet'][0].get(
1168 '{ip}/{prefix}'.format(1232 'prefix', '24')
1169 ip=privateIpv4, prefix=netPrefix))1233 if not dev_config.get('addresses'):
1170 else:1234 dev_config['addresses'] = []
1171 dev_config['dhcp4'] = True1235 dev_config['addresses'].append(
1172 for addr6 in intf['ipv6']['ipAddress']:1236 '{ip}/{prefix}'.format(
1173 privateIpv6 = addr6['privateIpAddress']1237 ip=privateIpv4, prefix=netPrefix))
1174 if privateIpv6:1238 else:
1175 dev_config['dhcp6'] = True1239 dev_config['dhcp4'] = True
1176 break1240 for addr6 in intf['ipv6']['ipAddress']:
1177 if dev_config:1241 privateIpv6 = addr6['privateIpAddress']
1178 mac = ':'.join(re.findall(r'..', intf['macAddress']))1242 if privateIpv6:
1179 dev_config.update(1243 dev_config['dhcp6'] = True
1180 {'match': {'macaddress': mac.lower()},1244 break
1181 'set-name': nicname})1245 if dev_config:
1182 netconfig['ethernets'][nicname] = dev_config1246 mac = ':'.join(re.findall(r'..', intf['macAddress']))
1183 else:1247 dev_config.update(
1184 blacklist = ['mlx4_core']1248 {'match': {'macaddress': mac.lower()},
1185 LOG.debug('Azure: generating fallback configuration')1249 'set-name': nicname})
1186 # generate a network config, blacklist picking mlx4_core devs1250 netconfig['ethernets'][nicname] = dev_config
1187 netconfig = net.generate_fallback_config(1251 evt.description = "network config from imds"
1188 blacklist_drivers=blacklist, config_driver=True)1252 else:
1189 return netconfig1253 blacklist = ['mlx4_core']
1254 LOG.debug('Azure: generating fallback configuration')
1255 # generate a network config, blacklist picking mlx4_core devs
1256 netconfig = net.generate_fallback_config(
1257 blacklist_drivers=blacklist, config_driver=True)
1258 evt.description = "network config from fallback"
1259 return netconfig
11901260
11911261
1262@azure_ds_telemetry_reporter
1192def get_metadata_from_imds(fallback_nic, retries):1263def get_metadata_from_imds(fallback_nic, retries):
1193 """Query Azure's network metadata service, returning a dictionary.1264 """Query Azure's network metadata service, returning a dictionary.
11941265
@@ -1213,6 +1284,7 @@ def get_metadata_from_imds(fallback_nic, retries):
1213 return util.log_time(**kwargs)1284 return util.log_time(**kwargs)
12141285
12151286
1287@azure_ds_telemetry_reporter
1216def _get_metadata_from_imds(retries):1288def _get_metadata_from_imds(retries):
12171289
1218 url = IMDS_URL + "instance?api-version=2017-12-01"1290 url = IMDS_URL + "instance?api-version=2017-12-01"
@@ -1232,6 +1304,7 @@ def _get_metadata_from_imds(retries):
1232 return {}1304 return {}
12331305
12341306
1307@azure_ds_telemetry_reporter
1235def maybe_remove_ubuntu_network_config_scripts(paths=None):1308def maybe_remove_ubuntu_network_config_scripts(paths=None):
1236 """Remove Azure-specific ubuntu network config for non-primary nics.1309 """Remove Azure-specific ubuntu network config for non-primary nics.
12371310
@@ -1269,14 +1342,20 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None):
12691342
12701343
1271def _is_platform_viable(seed_dir):1344def _is_platform_viable(seed_dir):
1272 """Check platform environment to report if this datasource may run."""1345 with events.ReportEventStack(
1273 asset_tag = util.read_dmi_data('chassis-asset-tag')1346 name="check-platform-viability",
1274 if asset_tag == AZURE_CHASSIS_ASSET_TAG:1347 description="found azure asset tag",
1275 return True1348 parent=azure_ds_reporter) as evt:
1276 LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag)1349
1277 if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')):1350 """Check platform environment to report if this datasource may run."""
1278 return True1351 asset_tag = util.read_dmi_data('chassis-asset-tag')
1279 return False1352 if asset_tag == AZURE_CHASSIS_ASSET_TAG:
1353 return True
1354 LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag)
1355 evt.description = "Non-Azure DMI asset tag '%s' discovered.", asset_tag
1356 if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')):
1357 return True
1358 return False
12801359
12811360
1282class BrokenAzureDataSource(Exception):1361class BrokenAzureDataSource(Exception):
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 4f2f6cc..ac28f1d 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -334,8 +334,12 @@ class DataSourceEc2(sources.DataSource):
334 if isinstance(net_md, dict):334 if isinstance(net_md, dict):
335 result = convert_ec2_metadata_network_config(335 result = convert_ec2_metadata_network_config(
336 net_md, macs_to_nics=macs_to_nics, fallback_nic=iface)336 net_md, macs_to_nics=macs_to_nics, fallback_nic=iface)
337 # RELEASE_BLOCKER: Xenial debian/postinst needs to add337
338 # EventType.BOOT on upgrade path for classic.338 # RELEASE_BLOCKER: xenial should drop the below if statement,
339 # because the issue being addressed doesn't exist pre-netplan.
340 # (This datasource doesn't implement check_instance_id() so the
341 # datasource object is recreated every boot; this means we don't
342 # need to modify update_events on cloud-init upgrade.)
339343
340 # Non-VPC (aka Classic) Ec2 instances need to rewrite the344 # Non-VPC (aka Classic) Ec2 instances need to rewrite the
341 # network config file every boot due to MAC address change.345 # network config file every boot due to MAC address change.
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index 6860f0c..fcf5d58 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -106,7 +106,9 @@ class DataSourceNoCloud(sources.DataSource):
106 fslist = util.find_devs_with("TYPE=vfat")106 fslist = util.find_devs_with("TYPE=vfat")
107 fslist.extend(util.find_devs_with("TYPE=iso9660"))107 fslist.extend(util.find_devs_with("TYPE=iso9660"))
108108
109 label_list = util.find_devs_with("LABEL=%s" % label)109 label_list = util.find_devs_with("LABEL=%s" % label.upper())
110 label_list.extend(util.find_devs_with("LABEL=%s" % label.lower()))
111
110 devlist = list(set(fslist) & set(label_list))112 devlist = list(set(fslist) & set(label_list))
111 devlist.sort(reverse=True)113 devlist.sort(reverse=True)
112114
diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py
index b573b38..54bfc1f 100644
--- a/cloudinit/sources/DataSourceScaleway.py
+++ b/cloudinit/sources/DataSourceScaleway.py
@@ -171,10 +171,11 @@ def query_data_api(api_type, api_address, retries, timeout):
171171
172class DataSourceScaleway(sources.DataSource):172class DataSourceScaleway(sources.DataSource):
173 dsname = "Scaleway"173 dsname = "Scaleway"
174 update_events = {'network': [EventType.BOOT_NEW_INSTANCE, EventType.BOOT]}
175174
176 def __init__(self, sys_cfg, distro, paths):175 def __init__(self, sys_cfg, distro, paths):
177 super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths)176 super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths)
177 self.update_events = {
178 'network': {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}}
178179
179 self.ds_cfg = util.mergemanydict([180 self.ds_cfg = util.mergemanydict([
180 util.get_cfg_by_path(sys_cfg, ["datasource", "Scaleway"], {}),181 util.get_cfg_by_path(sys_cfg, ["datasource", "Scaleway"], {}),
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index e6966b3..1604932 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -164,9 +164,6 @@ class DataSource(object):
164 # A datasource which supports writing network config on each system boot164 # A datasource which supports writing network config on each system boot
165 # would call update_events['network'].add(EventType.BOOT).165 # would call update_events['network'].add(EventType.BOOT).
166166
167 # Default: generate network config on new instance id (first boot).
168 update_events = {'network': set([EventType.BOOT_NEW_INSTANCE])}
169
170 # N-tuple listing default values for any metadata-related class167 # N-tuple listing default values for any metadata-related class
171 # attributes cached on an instance by a process_data runs. These attribute168 # attributes cached on an instance by a process_data runs. These attribute
172 # values are reset via clear_cached_attrs during any update_metadata call.169 # values are reset via clear_cached_attrs during any update_metadata call.
@@ -191,6 +188,9 @@ class DataSource(object):
191 self.vendordata = None188 self.vendordata = None
192 self.vendordata_raw = None189 self.vendordata_raw = None
193190
191 # Default: generate network config on new instance id (first boot).
192 self.update_events = {'network': {EventType.BOOT_NEW_INSTANCE}}
193
194 self.ds_cfg = util.get_cfg_by_path(194 self.ds_cfg = util.get_cfg_by_path(
195 self.sys_cfg, ("datasource", self.dsname), {})195 self.sys_cfg, ("datasource", self.dsname), {})
196 if not self.ds_cfg:196 if not self.ds_cfg:
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
197old mode 100644197old mode 100644
198new mode 100755198new mode 100755
index 2829dd2..d3af05e
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -16,10 +16,27 @@ from xml.etree import ElementTree
1616
17from cloudinit import url_helper17from cloudinit import url_helper
18from cloudinit import util18from cloudinit import util
19from cloudinit.reporting import events
1920
20LOG = logging.getLogger(__name__)21LOG = logging.getLogger(__name__)
2122
2223
24azure_ds_reporter = events.ReportEventStack(
25 name="azure-ds",
26 description="initialize reporter for azure ds",
27 reporting_enabled=True)
28
29
30def azure_ds_telemetry_reporter(func):
31 def impl(*args, **kwargs):
32 with events.ReportEventStack(
33 name=func.__name__,
34 description=func.__name__,
35 parent=azure_ds_reporter):
36 return func(*args, **kwargs)
37 return impl
38
39
23@contextmanager40@contextmanager
24def cd(newdir):41def cd(newdir):
25 prevdir = os.getcwd()42 prevdir = os.getcwd()
@@ -119,6 +136,7 @@ class OpenSSLManager(object):
119 def clean_up(self):136 def clean_up(self):
120 util.del_dir(self.tmpdir)137 util.del_dir(self.tmpdir)
121138
139 @azure_ds_telemetry_reporter
122 def generate_certificate(self):140 def generate_certificate(self):
123 LOG.debug('Generating certificate for communication with fabric...')141 LOG.debug('Generating certificate for communication with fabric...')
124 if self.certificate is not None:142 if self.certificate is not None:
@@ -139,17 +157,20 @@ class OpenSSLManager(object):
139 LOG.debug('New certificate generated.')157 LOG.debug('New certificate generated.')
140158
141 @staticmethod159 @staticmethod
160 @azure_ds_telemetry_reporter
142 def _run_x509_action(action, cert):161 def _run_x509_action(action, cert):
143 cmd = ['openssl', 'x509', '-noout', action]162 cmd = ['openssl', 'x509', '-noout', action]
144 result, _ = util.subp(cmd, data=cert)163 result, _ = util.subp(cmd, data=cert)
145 return result164 return result
146165
166 @azure_ds_telemetry_reporter
147 def _get_ssh_key_from_cert(self, certificate):167 def _get_ssh_key_from_cert(self, certificate):
148 pub_key = self._run_x509_action('-pubkey', certificate)168 pub_key = self._run_x509_action('-pubkey', certificate)
149 keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin']169 keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin']
150 ssh_key, _ = util.subp(keygen_cmd, data=pub_key)170 ssh_key, _ = util.subp(keygen_cmd, data=pub_key)
151 return ssh_key171 return ssh_key
152172
173 @azure_ds_telemetry_reporter
153 def _get_fingerprint_from_cert(self, certificate):174 def _get_fingerprint_from_cert(self, certificate):
154 """openssl x509 formats fingerprints as so:175 """openssl x509 formats fingerprints as so:
155 'SHA1 Fingerprint=07:3E:19:D1:4D:1C:79:92:24:C6:A0:FD:8D:DA:\176 'SHA1 Fingerprint=07:3E:19:D1:4D:1C:79:92:24:C6:A0:FD:8D:DA:\
@@ -163,6 +184,7 @@ class OpenSSLManager(object):
163 octets = raw_fp[eq+1:-1].split(':')184 octets = raw_fp[eq+1:-1].split(':')
164 return ''.join(octets)185 return ''.join(octets)
165186
187 @azure_ds_telemetry_reporter
166 def _decrypt_certs_from_xml(self, certificates_xml):188 def _decrypt_certs_from_xml(self, certificates_xml):
167 """Decrypt the certificates XML document using the our private key;189 """Decrypt the certificates XML document using the our private key;
168 return the list of certs and private keys contained in the doc.190 return the list of certs and private keys contained in the doc.
@@ -185,6 +207,7 @@ class OpenSSLManager(object):
185 shell=True, data=b'\n'.join(lines))207 shell=True, data=b'\n'.join(lines))
186 return out208 return out
187209
210 @azure_ds_telemetry_reporter
188 def parse_certificates(self, certificates_xml):211 def parse_certificates(self, certificates_xml):
189 """Given the Certificates XML document, return a dictionary of212 """Given the Certificates XML document, return a dictionary of
190 fingerprints and associated SSH keys derived from the certs."""213 fingerprints and associated SSH keys derived from the certs."""
@@ -265,11 +288,13 @@ class WALinuxAgentShim(object):
265 return socket.inet_ntoa(packed_bytes)288 return socket.inet_ntoa(packed_bytes)
266289
267 @staticmethod290 @staticmethod
291 @azure_ds_telemetry_reporter
268 def _networkd_get_value_from_leases(leases_d=None):292 def _networkd_get_value_from_leases(leases_d=None):
269 return dhcp.networkd_get_option_from_leases(293 return dhcp.networkd_get_option_from_leases(
270 'OPTION_245', leases_d=leases_d)294 'OPTION_245', leases_d=leases_d)
271295
272 @staticmethod296 @staticmethod
297 @azure_ds_telemetry_reporter
273 def _get_value_from_leases_file(fallback_lease_file):298 def _get_value_from_leases_file(fallback_lease_file):
274 leases = []299 leases = []
275 content = util.load_file(fallback_lease_file)300 content = util.load_file(fallback_lease_file)
@@ -287,6 +312,7 @@ class WALinuxAgentShim(object):
287 return leases[-1]312 return leases[-1]
288313
289 @staticmethod314 @staticmethod
315 @azure_ds_telemetry_reporter
290 def _load_dhclient_json():316 def _load_dhclient_json():
291 dhcp_options = {}317 dhcp_options = {}
292 hooks_dir = WALinuxAgentShim._get_hooks_dir()318 hooks_dir = WALinuxAgentShim._get_hooks_dir()
@@ -305,6 +331,7 @@ class WALinuxAgentShim(object):
305 return dhcp_options331 return dhcp_options
306332
307 @staticmethod333 @staticmethod
334 @azure_ds_telemetry_reporter
308 def _get_value_from_dhcpoptions(dhcp_options):335 def _get_value_from_dhcpoptions(dhcp_options):
309 if dhcp_options is None:336 if dhcp_options is None:
310 return None337 return None
@@ -318,6 +345,7 @@ class WALinuxAgentShim(object):
318 return _value345 return _value
319346
320 @staticmethod347 @staticmethod
348 @azure_ds_telemetry_reporter
321 def find_endpoint(fallback_lease_file=None, dhcp245=None):349 def find_endpoint(fallback_lease_file=None, dhcp245=None):
322 value = None350 value = None
323 if dhcp245 is not None:351 if dhcp245 is not None:
@@ -352,6 +380,7 @@ class WALinuxAgentShim(object):
352 LOG.debug('Azure endpoint found at %s', endpoint_ip_address)380 LOG.debug('Azure endpoint found at %s', endpoint_ip_address)
353 return endpoint_ip_address381 return endpoint_ip_address
354382
383 @azure_ds_telemetry_reporter
355 def register_with_azure_and_fetch_data(self, pubkey_info=None):384 def register_with_azure_and_fetch_data(self, pubkey_info=None):
356 if self.openssl_manager is None:385 if self.openssl_manager is None:
357 self.openssl_manager = OpenSSLManager()386 self.openssl_manager = OpenSSLManager()
@@ -404,6 +433,7 @@ class WALinuxAgentShim(object):
404433
405 return keys434 return keys
406435
436 @azure_ds_telemetry_reporter
407 def _report_ready(self, goal_state, http_client):437 def _report_ready(self, goal_state, http_client):
408 LOG.debug('Reporting ready to Azure fabric.')438 LOG.debug('Reporting ready to Azure fabric.')
409 document = self.REPORT_READY_XML_TEMPLATE.format(439 document = self.REPORT_READY_XML_TEMPLATE.format(
@@ -419,6 +449,7 @@ class WALinuxAgentShim(object):
419 LOG.info('Reported ready to Azure fabric.')449 LOG.info('Reported ready to Azure fabric.')
420450
421451
452@azure_ds_telemetry_reporter
422def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None,453def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None,
423 pubkey_info=None):454 pubkey_info=None):
424 shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file,455 shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file,
diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py
index 6378e98..cb1912b 100644
--- a/cloudinit/sources/tests/test_init.py
+++ b/cloudinit/sources/tests/test_init.py
@@ -575,6 +575,21 @@ class TestDataSource(CiTestCase):
575 " events: New instance first boot",575 " events: New instance first boot",
576 self.logs.getvalue())576 self.logs.getvalue())
577577
578 def test_data_sources_cant_mutate_update_events_for_others(self):
579 """update_events shouldn't be changed for other DSes (LP: #1819913)"""
580
581 class ModifyingDS(DataSource):
582
583 def __init__(self, sys_cfg, distro, paths):
584 # This mirrors what DataSourceAzure does which causes LP:
585 # #1819913
586 DataSource.__init__(self, sys_cfg, distro, paths)
587 self.update_events['network'].add(EventType.BOOT)
588
589 before_update_events = copy.deepcopy(self.datasource.update_events)
590 ModifyingDS(self.sys_cfg, self.distro, self.paths)
591 self.assertEqual(before_update_events, self.datasource.update_events)
592
578593
579class TestRedactSensitiveData(CiTestCase):594class TestRedactSensitiveData(CiTestCase):
580595
diff --git a/cloudinit/util.py b/cloudinit/util.py
index a192091..385f231 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -703,6 +703,21 @@ def get_cfg_option_list(yobj, key, default=None):
703# get a cfg entry by its path array703# get a cfg entry by its path array
704# for f['a']['b']: get_cfg_by_path(mycfg,('a','b'))704# for f['a']['b']: get_cfg_by_path(mycfg,('a','b'))
705def get_cfg_by_path(yobj, keyp, default=None):705def get_cfg_by_path(yobj, keyp, default=None):
706 """Return the value of the item at path C{keyp} in C{yobj}.
707
708 example:
709 get_cfg_by_path({'a': {'b': {'num': 4}}}, 'a/b/num') == 4
710 get_cfg_by_path({'a': {'b': {'num': 4}}}, 'c/d') == None
711
712 @param yobj: A dictionary.
713 @param keyp: A path inside yobj. it can be a '/' delimited string,
714 or an iterable.
715 @param default: The default to return if the path does not exist.
716 @return: The value of the item at keyp."
717 is not found."""
718
719 if isinstance(keyp, six.string_types):
720 keyp = keyp.split("/")
706 cur = yobj721 cur = yobj
707 for tok in keyp:722 for tok in keyp:
708 if tok not in cur:723 if tok not in cur:
diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl
index 7513176..25db43e 100644
--- a/config/cloud.cfg.tmpl
+++ b/config/cloud.cfg.tmpl
@@ -112,6 +112,9 @@ cloud_final_modules:
112 - landscape112 - landscape
113 - lxd113 - lxd
114{% endif %}114{% endif %}
115{% if variant in ["ubuntu", "unknown"] %}
116 - ubuntu-drivers
117{% endif %}
115{% if variant not in ["freebsd"] %}118{% if variant not in ["freebsd"] %}
116 - puppet119 - puppet
117 - chef120 - chef
diff --git a/debian/changelog b/debian/changelog
index ac376ab..f869278 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,32 @@
1cloud-init (18.5-61-gb76714c3-0ubuntu1) disco; urgency=medium
2
3 * New upstream snapshot.
4 - Change DataSourceNoCloud to ignore file system label's case.
5 [Risto Oikarinen]
6 - cmd:main.py: Fix missing 'modules-init' key in modes dict
7 [Antonio Romito] (LP: #1815109)
8 - ubuntu_advantage: rewrite cloud-config module
9 - Azure: Treat _unset network configuration as if it were absent
10 [Jason Zions (MSFT)] (LP: #1823084)
11 - DatasourceAzure: add additional logging for azure datasource [Anh Vo]
12 - cloud_tests: fix apt_pipelining test-cases
13 - Azure: Ensure platform random_seed is always serializable as JSON.
14 [Jason Zions (MSFT)]
15 - net/sysconfig: write out SUSE-compatible IPv6 config [Robert Schweikert]
16 - tox: Update testenv for openSUSE Leap to 15.0 [Thomas Bechtold]
17 - net: Fix ipv6 static routes when using eni renderer
18 [Raphael Glon] (LP: #1818669)
19 - Add ubuntu_drivers config module
20 - doc: Refresh Azure walinuxagent docs
21 - tox: bump pylint version to latest (2.3.1)
22 - DataSource: move update_events from a class to an instance attribute
23 (LP: #1819913)
24 - net/sysconfig: Handle default route setup for dhcp configured NICs
25 [Robert Schweikert] (LP: #1812117)
26 - DataSourceEc2: update RELEASE_BLOCKER to be more accurate
27
28 -- Daniel Watkins <oddbloke@ubuntu.com> Wed, 10 Apr 2019 11:49:03 -0400
29
1cloud-init (18.5-45-g3554ffe8-0ubuntu1) disco; urgency=medium30cloud-init (18.5-45-g3554ffe8-0ubuntu1) disco; urgency=medium
231
3 * New upstream snapshot.32 * New upstream snapshot.
diff --git a/doc/rtd/topics/datasources/azure.rst b/doc/rtd/topics/datasources/azure.rst
index 720a475..b41cddd 100644
--- a/doc/rtd/topics/datasources/azure.rst
+++ b/doc/rtd/topics/datasources/azure.rst
@@ -5,9 +5,30 @@ Azure
55
6This datasource finds metadata and user-data from the Azure cloud platform.6This datasource finds metadata and user-data from the Azure cloud platform.
77
8Azure Platform8walinuxagent
9--------------9------------
10The azure cloud-platform provides initial data to an instance via an attached10walinuxagent has several functions within images. For cloud-init
11specifically, the relevant functionality it performs is to register the
12instance with the Azure cloud platform at boot so networking will be
13permitted. For more information about the other functionality of
14walinuxagent, see `Azure's documentation
15<https://github.com/Azure/WALinuxAgent#introduction>`_ for more details.
16(Note, however, that only one of walinuxagent's provisioning and cloud-init
17should be used to perform instance customisation.)
18
19If you are configuring walinuxagent yourself, you will want to ensure that you
20have `Provisioning.UseCloudInit
21<https://github.com/Azure/WALinuxAgent#provisioningusecloudinit>`_ set to
22``y``.
23
24
25Builtin Agent
26-------------
27An alternative to using walinuxagent to register to the Azure cloud platform
28is to use the ``__builtin__`` agent command. This section contains more
29background on what that code path does, and how to enable it.
30
31The Azure cloud platform provides initial data to an instance via an attached
11CD formatted in UDF. That CD contains a 'ovf-env.xml' file that provides some32CD formatted in UDF. That CD contains a 'ovf-env.xml' file that provides some
12information. Additional information is obtained via interaction with the33information. Additional information is obtained via interaction with the
13"endpoint".34"endpoint".
@@ -36,25 +57,17 @@ for the endpoint server (again option 245).
36You can define the path to the lease file with the 'dhclient_lease_file'57You can define the path to the lease file with the 'dhclient_lease_file'
37configuration.58configuration.
3859
39walinuxagent60
40------------61IMDS
41In order to operate correctly, cloud-init needs walinuxagent to provide much62----
42of the interaction with azure. In addition to "provisioning" code, walinux63Azure provides the `instance metadata service (IMDS)
43does the following on the agent is a long running daemon that handles the64<https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service>`_
44following things:65which is a REST service on ``196.254.196.254`` providing additional
45- generate a x509 certificate and send that to the endpoint66configuration information to the instance. Cloud-init uses the IMDS for:
4667
47waagent.conf config68- network configuration for the instance which is applied per boot
48^^^^^^^^^^^^^^^^^^^69- a preprovisioing gate which blocks instance configuration until Azure fabric
49in order to use waagent.conf with cloud-init, the following settings are recommended. Other values can be changed or set to the defaults.70 is ready to provision
50
51 ::
52
53 # disabling provisioning turns off all 'Provisioning.*' function
54 Provisioning.Enabled=n
55 # this is currently not handled by cloud-init, so let walinuxagent do it.
56 ResourceDisk.Format=y
57 ResourceDisk.MountPoint=/mnt
5871
5972
60Configuration73Configuration
diff --git a/doc/rtd/topics/datasources/nocloud.rst b/doc/rtd/topics/datasources/nocloud.rst
index 08578e8..1c5cf96 100644
--- a/doc/rtd/topics/datasources/nocloud.rst
+++ b/doc/rtd/topics/datasources/nocloud.rst
@@ -9,7 +9,7 @@ network at all).
99
10You can provide meta-data and user-data to a local vm boot via files on a10You can provide meta-data and user-data to a local vm boot via files on a
11`vfat`_ or `iso9660`_ filesystem. The filesystem volume label must be11`vfat`_ or `iso9660`_ filesystem. The filesystem volume label must be
12``cidata``.12``cidata`` or ``CIDATA``.
1313
14Alternatively, you can provide meta-data via kernel command line or SMBIOS14Alternatively, you can provide meta-data via kernel command line or SMBIOS
15"serial number" option. The data must be passed in the form of a string:15"serial number" option. The data must be passed in the form of a string:
diff --git a/doc/rtd/topics/modules.rst b/doc/rtd/topics/modules.rst
index d9720f6..3dcdd3b 100644
--- a/doc/rtd/topics/modules.rst
+++ b/doc/rtd/topics/modules.rst
@@ -54,6 +54,7 @@ Modules
54.. automodule:: cloudinit.config.cc_ssh_import_id54.. automodule:: cloudinit.config.cc_ssh_import_id
55.. automodule:: cloudinit.config.cc_timezone55.. automodule:: cloudinit.config.cc_timezone
56.. automodule:: cloudinit.config.cc_ubuntu_advantage56.. automodule:: cloudinit.config.cc_ubuntu_advantage
57.. automodule:: cloudinit.config.cc_ubuntu_drivers
57.. automodule:: cloudinit.config.cc_update_etc_hosts58.. automodule:: cloudinit.config.cc_update_etc_hosts
58.. automodule:: cloudinit.config.cc_update_hostname59.. automodule:: cloudinit.config.cc_update_hostname
59.. automodule:: cloudinit.config.cc_users_groups60.. automodule:: cloudinit.config.cc_users_groups
diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml b/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml
index bd9b5d0..22a31dc 100644
--- a/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml
+++ b/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml
@@ -5,8 +5,7 @@ required_features:
5 - apt5 - apt
6cloud_config: |6cloud_config: |
7 #cloud-config7 #cloud-config
8 apt:8 apt_pipelining: false
9 apt_pipelining: false
10collect_scripts:9collect_scripts:
11 90cloud-init-pipelining: |10 90cloud-init-pipelining: |
12 #!/bin/bash11 #!/bin/bash
diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_os.py b/tests/cloud_tests/testcases/modules/apt_pipelining_os.py
index 740dc7c..2b940a6 100644
--- a/tests/cloud_tests/testcases/modules/apt_pipelining_os.py
+++ b/tests/cloud_tests/testcases/modules/apt_pipelining_os.py
@@ -8,8 +8,8 @@ class TestAptPipeliningOS(base.CloudTestCase):
8 """Test apt-pipelining module."""8 """Test apt-pipelining module."""
99
10 def test_os_pipelining(self):10 def test_os_pipelining(self):
11 """Test pipelining set to os."""11 """test 'os' settings does not write apt config file."""
12 out = self.get_data_file('90cloud-init-pipelining')12 out = self.get_data_file('90cloud-init-pipelining_not_written')
13 self.assertIn('Acquire::http::Pipeline-Depth "0";', out)13 self.assertEqual(0, int(out))
1414
15# vi: ts=4 expandtab15# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml b/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml
index cbed3ba..86d5220 100644
--- a/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml
+++ b/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml
@@ -1,15 +1,14 @@
1#1#
2# Set apt pipelining value to OS2# Set apt pipelining value to OS, no conf written
3#3#
4required_features:4required_features:
5 - apt5 - apt
6cloud_config: |6cloud_config: |
7 #cloud-config7 #cloud-config
8 apt:8 apt_pipelining: os
9 apt_pipelining: os
10collect_scripts:9collect_scripts:
11 90cloud-init-pipelining: |10 90cloud-init-pipelining_not_written: |
12 #!/bin/bash11 #!/bin/bash
13 cat /etc/apt/apt.conf.d/90cloud-init-pipelining12 ls /etc/apt/apt.conf.d/90cloud-init-pipelining | wc -l
1413
15# vi: ts=4 expandtab14# vi: ts=4 expandtab
diff --git a/tests/data/azure/non_unicode_random_string b/tests/data/azure/non_unicode_random_string
16new file mode 10064415new file mode 100644
index 0000000..b9ecefb
--- /dev/null
+++ b/tests/data/azure/non_unicode_random_string
@@ -0,0 +1 @@
1OEM0d\x00\x00\x00\x01\x80VRTUALMICROSFT\x02\x17\x00\x06MSFT\x97\x00\x00\x00C\xb4{V\xf4X%\x061x\x90\x1c\xfen\x86\xbf~\xf5\x8c\x94&\x88\xed\x84\xf9B\xbd\xd3\xf1\xdb\xee:\xd9\x0fc\x0e\x83(\xbd\xe3'\xfc\x85,\xdf\xf4\x13\x99N\xc5\xf3Y\x1e\xe3\x0b\xa4H\x08J\xb9\xdcdb$
0\ No newline at end of file2\ No newline at end of file
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 6b05b8f..53c56cd 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -7,11 +7,11 @@ from cloudinit.sources import (
7 UNSET, DataSourceAzure as dsaz, InvalidMetaDataException)7 UNSET, DataSourceAzure as dsaz, InvalidMetaDataException)
8from cloudinit.util import (b64e, decode_binary, load_file, write_file,8from cloudinit.util import (b64e, decode_binary, load_file, write_file,
9 find_freebsd_part, get_path_dev_freebsd,9 find_freebsd_part, get_path_dev_freebsd,
10 MountFailedError)10 MountFailedError, json_dumps, load_json)
11from cloudinit.version import version_string as vs11from cloudinit.version import version_string as vs
12from cloudinit.tests.helpers import (12from cloudinit.tests.helpers import (
13 HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call,13 HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call,
14 ExitStack)14 ExitStack, resourceLocation)
1515
16import crypt16import crypt
17import httpretty17import httpretty
@@ -1923,4 +1923,24 @@ class TestWBIsPlatformViable(CiTestCase):
1923 self.logs.getvalue())1923 self.logs.getvalue())
19241924
19251925
1926class TestRandomSeed(CiTestCase):
1927 """Test proper handling of random_seed"""
1928
1929 def test_non_ascii_seed_is_serializable(self):
1930 """Pass if a random string from the Azure infrastructure which
1931 contains at least one non-Unicode character can be converted to/from
1932 JSON without alteration and without throwing an exception.
1933 """
1934 path = resourceLocation("azure/non_unicode_random_string")
1935 result = dsaz._get_random_seed(path)
1936
1937 obj = {'seed': result}
1938 try:
1939 serialized = json_dumps(obj)
1940 deserialized = load_json(serialized)
1941 except UnicodeDecodeError:
1942 self.fail("Non-serializable random seed returned")
1943
1944 self.assertEqual(deserialized['seed'], result)
1945
1926# vi: ts=4 expandtab1946# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py
index 3429272..b785362 100644
--- a/tests/unittests/test_datasource/test_nocloud.py
+++ b/tests/unittests/test_datasource/test_nocloud.py
@@ -32,6 +32,36 @@ class TestNoCloudDataSource(CiTestCase):
32 self.mocks.enter_context(32 self.mocks.enter_context(
33 mock.patch.object(util, 'read_dmi_data', return_value=None))33 mock.patch.object(util, 'read_dmi_data', return_value=None))
3434
35 def _test_fs_config_is_read(self, fs_label, fs_label_to_search):
36 vfat_device = 'device-1'
37
38 def m_mount_cb(device, callback, mtype):
39 if (device == vfat_device):
40 return {'meta-data': yaml.dump({'instance-id': 'IID'})}
41 else:
42 return {}
43
44 def m_find_devs_with(query='', path=''):
45 if 'TYPE=vfat' == query:
46 return [vfat_device]
47 elif 'LABEL={}'.format(fs_label) == query:
48 return [vfat_device]
49 else:
50 return []
51
52 self.mocks.enter_context(
53 mock.patch.object(util, 'find_devs_with',
54 side_effect=m_find_devs_with))
55 self.mocks.enter_context(
56 mock.patch.object(util, 'mount_cb',
57 side_effect=m_mount_cb))
58 sys_cfg = {'datasource': {'NoCloud': {'fs_label': fs_label_to_search}}}
59 dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
60 ret = dsrc.get_data()
61
62 self.assertEqual(dsrc.metadata.get('instance-id'), 'IID')
63 self.assertTrue(ret)
64
35 def test_nocloud_seed_dir_on_lxd(self, m_is_lxd):65 def test_nocloud_seed_dir_on_lxd(self, m_is_lxd):
36 md = {'instance-id': 'IID', 'dsmode': 'local'}66 md = {'instance-id': 'IID', 'dsmode': 'local'}
37 ud = b"USER_DATA_HERE"67 ud = b"USER_DATA_HERE"
@@ -90,6 +120,18 @@ class TestNoCloudDataSource(CiTestCase):
90 ret = dsrc.get_data()120 ret = dsrc.get_data()
91 self.assertFalse(ret)121 self.assertFalse(ret)
92122
123 def test_fs_config_lowercase_label(self, m_is_lxd):
124 self._test_fs_config_is_read('cidata', 'cidata')
125
126 def test_fs_config_uppercase_label(self, m_is_lxd):
127 self._test_fs_config_is_read('CIDATA', 'cidata')
128
129 def test_fs_config_lowercase_label_search_uppercase(self, m_is_lxd):
130 self._test_fs_config_is_read('cidata', 'CIDATA')
131
132 def test_fs_config_uppercase_label_search_uppercase(self, m_is_lxd):
133 self._test_fs_config_is_read('CIDATA', 'CIDATA')
134
93 def test_no_datasource_expected(self, m_is_lxd):135 def test_no_datasource_expected(self, m_is_lxd):
94 # no source should be found if no cmdline, config, and fs_label=None136 # no source should be found if no cmdline, config, and fs_label=None
95 sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}137 sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py
index f96bf0a..3bfd752 100644
--- a/tests/unittests/test_datasource/test_scaleway.py
+++ b/tests/unittests/test_datasource/test_scaleway.py
@@ -7,6 +7,7 @@ import requests
77
8from cloudinit import helpers8from cloudinit import helpers
9from cloudinit import settings9from cloudinit import settings
10from cloudinit.event import EventType
10from cloudinit.sources import DataSourceScaleway11from cloudinit.sources import DataSourceScaleway
1112
12from cloudinit.tests.helpers import mock, HttprettyTestCase, CiTestCase13from cloudinit.tests.helpers import mock, HttprettyTestCase, CiTestCase
@@ -403,3 +404,9 @@ class TestDataSourceScaleway(HttprettyTestCase):
403404
404 netcfg = self.datasource.network_config405 netcfg = self.datasource.network_config
405 self.assertEqual(netcfg, '0xdeadbeef')406 self.assertEqual(netcfg, '0xdeadbeef')
407
408 def test_update_events_is_correct(self):
409 """ensure update_events contains correct data"""
410 self.assertEqual(
411 {'network': {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}},
412 self.datasource.update_events)
diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py
index e453040..c3c0c8c 100644
--- a/tests/unittests/test_distros/test_netconfig.py
+++ b/tests/unittests/test_distros/test_netconfig.py
@@ -496,6 +496,7 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase):
496 BOOTPROTO=none496 BOOTPROTO=none
497 DEFROUTE=yes497 DEFROUTE=yes
498 DEVICE=eth0498 DEVICE=eth0
499 IPADDR6=2607:f0d0:1002:0011::2/64
499 IPV6ADDR=2607:f0d0:1002:0011::2/64500 IPV6ADDR=2607:f0d0:1002:0011::2/64
500 IPV6INIT=yes501 IPV6INIT=yes
501 IPV6_DEFAULTGW=2607:f0d0:1002:0011::1502 IPV6_DEFAULTGW=2607:f0d0:1002:0011::1
@@ -588,6 +589,7 @@ class TestNetCfgDistroOpensuse(TestNetCfgDistroBase):
588 BOOTPROTO=none589 BOOTPROTO=none
589 DEFROUTE=yes590 DEFROUTE=yes
590 DEVICE=eth0591 DEVICE=eth0
592 IPADDR6=2607:f0d0:1002:0011::2/64
591 IPV6ADDR=2607:f0d0:1002:0011::2/64593 IPV6ADDR=2607:f0d0:1002:0011::2/64
592 IPV6INIT=yes594 IPV6INIT=yes
593 IPV6_DEFAULTGW=2607:f0d0:1002:0011::1595 IPV6_DEFAULTGW=2607:f0d0:1002:0011::1
diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py
index d00c1b4..8c18aa1 100644
--- a/tests/unittests/test_ds_identify.py
+++ b/tests/unittests/test_ds_identify.py
@@ -520,6 +520,10 @@ class TestDsIdentify(DsIdentifyBase):
520 """NoCloud is found with iso9660 filesystem on non-cdrom disk."""520 """NoCloud is found with iso9660 filesystem on non-cdrom disk."""
521 self._test_ds_found('NoCloud')521 self._test_ds_found('NoCloud')
522522
523 def test_nocloud_upper(self):
524 """NoCloud is found with uppercase filesystem label."""
525 self._test_ds_found('NoCloudUpper')
526
523 def test_nocloud_seed(self):527 def test_nocloud_seed(self):
524 """Nocloud seed directory."""528 """Nocloud seed directory."""
525 self._test_ds_found('NoCloud-seed')529 self._test_ds_found('NoCloud-seed')
@@ -713,6 +717,19 @@ VALID_CFG = {
713 'dev/vdb': 'pretend iso content for cidata\n',717 'dev/vdb': 'pretend iso content for cidata\n',
714 }718 }
715 },719 },
720 'NoCloudUpper': {
721 'ds': 'NoCloud',
722 'mocks': [
723 MOCK_VIRT_IS_KVM,
724 {'name': 'blkid', 'ret': 0,
725 'out': blkid_out(
726 BLKID_UEFI_UBUNTU +
727 [{'DEVNAME': 'vdb', 'TYPE': 'iso9660', 'LABEL': 'CIDATA'}])},
728 ],
729 'files': {
730 'dev/vdb': 'pretend iso content for cidata\n',
731 }
732 },
716 'NoCloud-seed': {733 'NoCloud-seed': {
717 'ds': 'NoCloud',734 'ds': 'NoCloud',
718 'files': {735 'files': {
diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/test_handler/test_schema.py
index 1bad07f..e69a47a 100644
--- a/tests/unittests/test_handler/test_schema.py
+++ b/tests/unittests/test_handler/test_schema.py
@@ -28,6 +28,7 @@ class GetSchemaTest(CiTestCase):
28 'cc_runcmd',28 'cc_runcmd',
29 'cc_snap',29 'cc_snap',
30 'cc_ubuntu_advantage',30 'cc_ubuntu_advantage',
31 'cc_ubuntu_drivers',
31 'cc_zypper_add_repo'32 'cc_zypper_add_repo'
32 ],33 ],
33 [subschema['id'] for subschema in schema['allOf']])34 [subschema['id'] for subschema in schema['allOf']])
diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
index e3b9e02..fd03deb 100644
--- a/tests/unittests/test_net.py
+++ b/tests/unittests/test_net.py
@@ -691,6 +691,9 @@ DEVICE=eth0
691GATEWAY=172.19.3.254691GATEWAY=172.19.3.254
692HWADDR=fa:16:3e:ed:9a:59692HWADDR=fa:16:3e:ed:9a:59
693IPADDR=172.19.1.34693IPADDR=172.19.1.34
694IPADDR6=2001:DB8::10/64
695IPADDR6_0=2001:DB9::10/64
696IPADDR6_2=2001:DB10::10/64
694IPV6ADDR=2001:DB8::10/64697IPV6ADDR=2001:DB8::10/64
695IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64"698IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64"
696IPV6INIT=yes699IPV6INIT=yes
@@ -729,6 +732,9 @@ DEVICE=eth0
729GATEWAY=172.19.3.254732GATEWAY=172.19.3.254
730HWADDR=fa:16:3e:ed:9a:59733HWADDR=fa:16:3e:ed:9a:59
731IPADDR=172.19.1.34734IPADDR=172.19.1.34
735IPADDR6=2001:DB8::10/64
736IPADDR6_0=2001:DB9::10/64
737IPADDR6_2=2001:DB10::10/64
732IPV6ADDR=2001:DB8::10/64738IPV6ADDR=2001:DB8::10/64
733IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64"739IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64"
734IPV6INIT=yes740IPV6INIT=yes
@@ -860,6 +866,7 @@ NETWORK_CONFIGS = {
860 BOOTPROTO=dhcp866 BOOTPROTO=dhcp
861 DEFROUTE=yes867 DEFROUTE=yes
862 DEVICE=eth99868 DEVICE=eth99
869 DHCLIENT_SET_DEFAULT_ROUTE=yes
863 DNS1=8.8.8.8870 DNS1=8.8.8.8
864 DNS2=8.8.4.4871 DNS2=8.8.4.4
865 DOMAIN="barley.maas sach.maas"872 DOMAIN="barley.maas sach.maas"
@@ -979,6 +986,7 @@ NETWORK_CONFIGS = {
979 BOOTPROTO=none986 BOOTPROTO=none
980 DEVICE=iface0987 DEVICE=iface0
981 IPADDR=192.168.14.2988 IPADDR=192.168.14.2
989 IPADDR6=2001:1::1/64
982 IPV6ADDR=2001:1::1/64990 IPV6ADDR=2001:1::1/64
983 IPV6INIT=yes991 IPV6INIT=yes
984 NETMASK=255.255.255.0992 NETMASK=255.255.255.0
@@ -1113,8 +1121,8 @@ iface eth0.101 inet static
1113iface eth0.101 inet static1121iface eth0.101 inet static
1114 address 192.168.2.10/241122 address 192.168.2.10/24
11151123
1116post-up route add -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true1124post-up route add -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
1117pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true1125pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
1118"""),1126"""),
1119 'expected_netplan': textwrap.dedent("""1127 'expected_netplan': textwrap.dedent("""
1120 network:1128 network:
@@ -1234,6 +1242,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
1234 'ifcfg-bond0.200': textwrap.dedent("""\1242 'ifcfg-bond0.200': textwrap.dedent("""\
1235 BOOTPROTO=dhcp1243 BOOTPROTO=dhcp
1236 DEVICE=bond0.2001244 DEVICE=bond0.200
1245 DHCLIENT_SET_DEFAULT_ROUTE=no
1237 NM_CONTROLLED=no1246 NM_CONTROLLED=no
1238 ONBOOT=yes1247 ONBOOT=yes
1239 PHYSDEV=bond01248 PHYSDEV=bond0
@@ -1247,6 +1256,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
1247 DEFROUTE=yes1256 DEFROUTE=yes
1248 DEVICE=br01257 DEVICE=br0
1249 IPADDR=192.168.14.21258 IPADDR=192.168.14.2
1259 IPADDR6=2001:1::1/64
1250 IPV6ADDR=2001:1::1/641260 IPV6ADDR=2001:1::1/64
1251 IPV6INIT=yes1261 IPV6INIT=yes
1252 IPV6_DEFAULTGW=2001:4800:78ff:1b::11262 IPV6_DEFAULTGW=2001:4800:78ff:1b::1
@@ -1333,6 +1343,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
1333 'ifcfg-eth5': textwrap.dedent("""\1343 'ifcfg-eth5': textwrap.dedent("""\
1334 BOOTPROTO=dhcp1344 BOOTPROTO=dhcp
1335 DEVICE=eth51345 DEVICE=eth5
1346 DHCLIENT_SET_DEFAULT_ROUTE=no
1336 HWADDR=98:bb:9f:2c:e8:8a1347 HWADDR=98:bb:9f:2c:e8:8a
1337 NM_CONTROLLED=no1348 NM_CONTROLLED=no
1338 ONBOOT=no1349 ONBOOT=no
@@ -1505,17 +1516,18 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
1505 - gateway: 192.168.0.31516 - gateway: 192.168.0.3
1506 netmask: 255.255.255.01517 netmask: 255.255.255.0
1507 network: 10.1.3.01518 network: 10.1.3.0
1508 - gateway: 2001:67c:1562:1
1509 network: 2001:67c:1
1510 netmask: ffff:ffff:0
1511 - gateway: 3001:67c:1562:1
1512 network: 3001:67c:1
1513 netmask: ffff:ffff:0
1514 metric: 10000
1515 - type: static1519 - type: static
1516 address: 192.168.1.2/241520 address: 192.168.1.2/24
1517 - type: static1521 - type: static
1518 address: 2001:1::1/921522 address: 2001:1::1/92
1523 routes:
1524 - gateway: 2001:67c:1562:1
1525 network: 2001:67c:1
1526 netmask: ffff:ffff:0
1527 - gateway: 3001:67c:1562:1
1528 network: 3001:67c:1
1529 netmask: ffff:ffff:0
1530 metric: 10000
1519 """),1531 """),
1520 'expected_netplan': textwrap.dedent("""1532 'expected_netplan': textwrap.dedent("""
1521 network:1533 network:
@@ -1554,6 +1566,51 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
1554 to: 3001:67c:1/321566 to: 3001:67c:1/32
1555 via: 3001:67c:1562:11567 via: 3001:67c:1562:1
1556 """),1568 """),
1569 'expected_eni': textwrap.dedent("""\
1570auto lo
1571iface lo inet loopback
1572
1573auto bond0s0
1574iface bond0s0 inet manual
1575 bond-master bond0
1576 bond-mode active-backup
1577 bond-xmit-hash-policy layer3+4
1578 bond_miimon 100
1579
1580auto bond0s1
1581iface bond0s1 inet manual
1582 bond-master bond0
1583 bond-mode active-backup
1584 bond-xmit-hash-policy layer3+4
1585 bond_miimon 100
1586
1587auto bond0
1588iface bond0 inet static
1589 address 192.168.0.2/24
1590 gateway 192.168.0.1
1591 bond-mode active-backup
1592 bond-slaves none
1593 bond-xmit-hash-policy layer3+4
1594 bond_miimon 100
1595 hwaddress aa:bb:cc:dd:e8:ff
1596 mtu 9000
1597 post-up route add -net 10.1.3.0/24 gw 192.168.0.3 || true
1598 pre-down route del -net 10.1.3.0/24 gw 192.168.0.3 || true
1599
1600# control-alias bond0
1601iface bond0 inet static
1602 address 192.168.1.2/24
1603
1604# control-alias bond0
1605iface bond0 inet6 static
1606 address 2001:1::1/92
1607 post-up route add -A inet6 2001:67c:1/32 gw 2001:67c:1562:1 || true
1608 pre-down route del -A inet6 2001:67c:1/32 gw 2001:67c:1562:1 || true
1609 post-up route add -A inet6 3001:67c:1/32 gw 3001:67c:1562:1 metric 10000 \
1610|| true
1611 pre-down route del -A inet6 3001:67c:1/32 gw 3001:67c:1562:1 metric 10000 \
1612|| true
1613 """),
1557 'yaml-v2': textwrap.dedent("""1614 'yaml-v2': textwrap.dedent("""
1558 version: 21615 version: 2
1559 ethernets:1616 ethernets:
@@ -1641,6 +1698,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
1641 MACADDR=aa:bb:cc:dd:e8:ff1698 MACADDR=aa:bb:cc:dd:e8:ff
1642 IPADDR=192.168.0.21699 IPADDR=192.168.0.2
1643 IPADDR1=192.168.1.21700 IPADDR1=192.168.1.2
1701 IPADDR6=2001:1::1/92
1644 IPV6ADDR=2001:1::1/921702 IPV6ADDR=2001:1::1/92
1645 IPV6INIT=yes1703 IPV6INIT=yes
1646 MTU=90001704 MTU=9000
@@ -1696,6 +1754,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
1696 MACADDR=aa:bb:cc:dd:e8:ff1754 MACADDR=aa:bb:cc:dd:e8:ff
1697 IPADDR=192.168.0.21755 IPADDR=192.168.0.2
1698 IPADDR1=192.168.1.21756 IPADDR1=192.168.1.2
1757 IPADDR6=2001:1::1/92
1699 IPV6ADDR=2001:1::1/921758 IPV6ADDR=2001:1::1/92
1700 IPV6INIT=yes1759 IPV6INIT=yes
1701 MTU=90001760 MTU=9000
@@ -1786,6 +1845,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
1786 GATEWAY=192.168.1.11845 GATEWAY=192.168.1.1
1787 IPADDR=192.168.2.21846 IPADDR=192.168.2.2
1788 IPADDR1=192.168.1.21847 IPADDR1=192.168.1.2
1848 IPADDR6=2001:1::bbbb/96
1789 IPV6ADDR=2001:1::bbbb/961849 IPV6ADDR=2001:1::bbbb/96
1790 IPV6INIT=yes1850 IPV6INIT=yes
1791 IPV6_DEFAULTGW=2001:1::11851 IPV6_DEFAULTGW=2001:1::1
@@ -1847,6 +1907,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
1847 BRIDGE=br01907 BRIDGE=br0
1848 DEVICE=eth01908 DEVICE=eth0
1849 HWADDR=52:54:00:12:34:001909 HWADDR=52:54:00:12:34:00
1910 IPADDR6=2001:1::100/96
1850 IPV6ADDR=2001:1::100/961911 IPV6ADDR=2001:1::100/96
1851 IPV6INIT=yes1912 IPV6INIT=yes
1852 NM_CONTROLLED=no1913 NM_CONTROLLED=no
@@ -1860,6 +1921,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
1860 BRIDGE=br01921 BRIDGE=br0
1861 DEVICE=eth11922 DEVICE=eth1
1862 HWADDR=52:54:00:12:34:011923 HWADDR=52:54:00:12:34:01
1924 IPADDR6=2001:1::101/96
1863 IPV6ADDR=2001:1::101/961925 IPV6ADDR=2001:1::101/96
1864 IPV6INIT=yes1926 IPV6INIT=yes
1865 NM_CONTROLLED=no1927 NM_CONTROLLED=no
@@ -1988,6 +2050,23 @@ CONFIG_V1_SIMPLE_SUBNET = {
1988 'type': 'static'}],2050 'type': 'static'}],
1989 'type': 'physical'}]}2051 'type': 'physical'}]}
19902052
2053CONFIG_V1_MULTI_IFACE = {
2054 'version': 1,
2055 'config': [{'type': 'physical',
2056 'mtu': 1500,
2057 'subnets': [{'type': 'static',
2058 'netmask': '255.255.240.0',
2059 'routes': [{'netmask': '0.0.0.0',
2060 'network': '0.0.0.0',
2061 'gateway': '51.68.80.1'}],
2062 'address': '51.68.89.122',
2063 'ipv4': True}],
2064 'mac_address': 'fa:16:3e:25:b4:59',
2065 'name': 'eth0'},
2066 {'type': 'physical',
2067 'mtu': 9000,
2068 'subnets': [{'type': 'dhcp4'}],
2069 'mac_address': 'fa:16:3e:b1:ca:29', 'name': 'eth1'}]}
19912070
1992DEFAULT_DEV_ATTRS = {2071DEFAULT_DEV_ATTRS = {
1993 'eth1000': {2072 'eth1000': {
@@ -2460,6 +2539,49 @@ USERCTL=no
2460 respath = '/etc/resolv.conf'2539 respath = '/etc/resolv.conf'
2461 self.assertNotIn(respath, found.keys())2540 self.assertNotIn(respath, found.keys())
24622541
2542 def test_network_config_v1_multi_iface_samples(self):
2543 ns = network_state.parse_net_config_data(CONFIG_V1_MULTI_IFACE)
2544 render_dir = self.tmp_path("render")
2545 os.makedirs(render_dir)
2546 renderer = self._get_renderer()
2547 renderer.render_network_state(ns, target=render_dir)
2548 found = dir2dict(render_dir)
2549 nspath = '/etc/sysconfig/network-scripts/'
2550 self.assertNotIn(nspath + 'ifcfg-lo', found.keys())
2551 expected_i1 = """\
2552# Created by cloud-init on instance boot automatically, do not edit.
2553#
2554BOOTPROTO=none
2555DEFROUTE=yes
2556DEVICE=eth0
2557GATEWAY=51.68.80.1
2558HWADDR=fa:16:3e:25:b4:59
2559IPADDR=51.68.89.122
2560MTU=1500
2561NETMASK=255.255.240.0
2562NM_CONTROLLED=no
2563ONBOOT=yes
2564STARTMODE=auto
2565TYPE=Ethernet
2566USERCTL=no
2567"""
2568 self.assertEqual(expected_i1, found[nspath + 'ifcfg-eth0'])
2569 expected_i2 = """\
2570# Created by cloud-init on instance boot automatically, do not edit.
2571#
2572BOOTPROTO=dhcp
2573DEVICE=eth1
2574DHCLIENT_SET_DEFAULT_ROUTE=no
2575HWADDR=fa:16:3e:b1:ca:29
2576MTU=9000
2577NM_CONTROLLED=no
2578ONBOOT=yes
2579STARTMODE=auto
2580TYPE=Ethernet
2581USERCTL=no
2582"""
2583 self.assertEqual(expected_i2, found[nspath + 'ifcfg-eth1'])
2584
2463 def test_config_with_explicit_loopback(self):2585 def test_config_with_explicit_loopback(self):
2464 ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK)2586 ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK)
2465 render_dir = self.tmp_path("render")2587 render_dir = self.tmp_path("render")
@@ -2634,6 +2756,7 @@ USERCTL=no
2634 GATEWAY=192.168.42.12756 GATEWAY=192.168.42.1
2635 HWADDR=52:54:00:ab:cd:ef2757 HWADDR=52:54:00:ab:cd:ef
2636 IPADDR=192.168.42.1002758 IPADDR=192.168.42.100
2759 IPADDR6=2001:db8::100/32
2637 IPV6ADDR=2001:db8::100/322760 IPV6ADDR=2001:db8::100/32
2638 IPV6INIT=yes2761 IPV6INIT=yes
2639 IPV6_DEFAULTGW=2001:db8::12762 IPV6_DEFAULTGW=2001:db8::1
@@ -3570,17 +3693,17 @@ class TestEniRoundTrip(CiTestCase):
3570 'iface eth0 inet static',3693 'iface eth0 inet static',
3571 ' address 172.23.31.42/26',3694 ' address 172.23.31.42/26',
3572 ' gateway 172.23.31.2',3695 ' gateway 172.23.31.2',
3573 ('post-up route add -net 10.0.0.0 netmask 255.240.0.0 gw '3696 ('post-up route add -net 10.0.0.0/12 gw '
3574 '172.23.31.1 metric 0 || true'),3697 '172.23.31.1 metric 0 || true'),
3575 ('pre-down route del -net 10.0.0.0 netmask 255.240.0.0 gw '3698 ('pre-down route del -net 10.0.0.0/12 gw '
3576 '172.23.31.1 metric 0 || true'),3699 '172.23.31.1 metric 0 || true'),
3577 ('post-up route add -net 192.168.2.0 netmask 255.255.0.0 gw '3700 ('post-up route add -net 192.168.2.0/16 gw '
3578 '172.23.31.1 metric 0 || true'),3701 '172.23.31.1 metric 0 || true'),
3579 ('pre-down route del -net 192.168.2.0 netmask 255.255.0.0 gw '3702 ('pre-down route del -net 192.168.2.0/16 gw '
3580 '172.23.31.1 metric 0 || true'),3703 '172.23.31.1 metric 0 || true'),
3581 ('post-up route add -net 10.0.200.0 netmask 255.255.0.0 gw '3704 ('post-up route add -net 10.0.200.0/16 gw '
3582 '172.23.31.1 metric 1 || true'),3705 '172.23.31.1 metric 1 || true'),
3583 ('pre-down route del -net 10.0.200.0 netmask 255.255.0.0 gw '3706 ('pre-down route del -net 10.0.200.0/16 gw '
3584 '172.23.31.1 metric 1 || true'),3707 '172.23.31.1 metric 1 || true'),
3585 ]3708 ]
3586 found = files['/etc/network/interfaces'].splitlines()3709 found = files['/etc/network/interfaces'].splitlines()
@@ -3588,6 +3711,77 @@ class TestEniRoundTrip(CiTestCase):
3588 self.assertEqual(3711 self.assertEqual(
3589 expected, [line for line in found if line])3712 expected, [line for line in found if line])
35903713
3714 def test_ipv6_static_routes(self):
3715 # as reported in bug 1818669
3716 conf = [
3717 {'name': 'eno3', 'type': 'physical',
3718 'subnets': [{
3719 'address': 'fd00::12/64',
3720 'dns_nameservers': ['fd00:2::15'],
3721 'gateway': 'fd00::1',
3722 'ipv6': True,
3723 'type': 'static',
3724 'routes': [{'netmask': '32',
3725 'network': 'fd00:12::',
3726 'gateway': 'fd00::2'},
3727 {'network': 'fd00:14::',
3728 'gateway': 'fd00::3'},
3729 {'destination': 'fe00:14::/48',
3730 'gateway': 'fe00::4',
3731 'metric': 500},
3732 {'gateway': '192.168.23.1',
3733 'metric': 999,
3734 'netmask': 24,
3735 'network': '192.168.23.0'},
3736 {'destination': '10.23.23.0/24',
3737 'gateway': '10.23.23.2',
3738 'metric': 300}]}]},
3739 ]
3740
3741 files = self._render_and_read(
3742 network_config={'config': conf, 'version': 1})
3743 expected = [
3744 'auto lo',
3745 'iface lo inet loopback',
3746 'auto eno3',
3747 'iface eno3 inet6 static',
3748 ' address fd00::12/64',
3749 ' dns-nameservers fd00:2::15',
3750 ' gateway fd00::1',
3751 (' post-up route add -A inet6 fd00:12::/32 gw '
3752 'fd00::2 || true'),
3753 (' pre-down route del -A inet6 fd00:12::/32 gw '
3754 'fd00::2 || true'),
3755 (' post-up route add -A inet6 fd00:14::/64 gw '
3756 'fd00::3 || true'),
3757 (' pre-down route del -A inet6 fd00:14::/64 gw '
3758 'fd00::3 || true'),
3759 (' post-up route add -A inet6 fe00:14::/48 gw '
3760 'fe00::4 metric 500 || true'),
3761 (' pre-down route del -A inet6 fe00:14::/48 gw '
3762 'fe00::4 metric 500 || true'),
3763 (' post-up route add -net 192.168.23.0/24 gw '
3764 '192.168.23.1 metric 999 || true'),
3765 (' pre-down route del -net 192.168.23.0/24 gw '
3766 '192.168.23.1 metric 999 || true'),
3767 (' post-up route add -net 10.23.23.0/24 gw '
3768 '10.23.23.2 metric 300 || true'),
3769 (' pre-down route del -net 10.23.23.0/24 gw '
3770 '10.23.23.2 metric 300 || true'),
3771
3772 ]
3773 found = files['/etc/network/interfaces'].splitlines()
3774
3775 self.assertEqual(
3776 expected, [line for line in found if line])
3777
3778 def testsimple_render_bond(self):
3779 entry = NETWORK_CONFIGS['bond']
3780 files = self._render_and_read(network_config=yaml.load(entry['yaml']))
3781 self.assertEqual(
3782 entry['expected_eni'].splitlines(),
3783 files['/etc/network/interfaces'].splitlines())
3784
35913785
3592class TestNetRenderers(CiTestCase):3786class TestNetRenderers(CiTestCase):
3593 @mock.patch("cloudinit.net.renderers.sysconfig.available")3787 @mock.patch("cloudinit.net.renderers.sysconfig.available")
diff --git a/tools/ds-identify b/tools/ds-identify
index b78b273..6518901 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -620,7 +620,7 @@ dscheck_MAAS() {
620}620}
621621
622dscheck_NoCloud() {622dscheck_NoCloud() {
623 local fslabel="cidata" d=""623 local fslabel="cidata CIDATA" d=""
624 case " ${DI_KERNEL_CMDLINE} " in624 case " ${DI_KERNEL_CMDLINE} " in
625 *\ ds=nocloud*) return ${DS_FOUND};;625 *\ ds=nocloud*) return ${DS_FOUND};;
626 esac626 esac
@@ -632,9 +632,10 @@ dscheck_NoCloud() {
632 check_seed_dir "$d" meta-data user-data && return ${DS_FOUND}632 check_seed_dir "$d" meta-data user-data && return ${DS_FOUND}
633 check_writable_seed_dir "$d" meta-data user-data && return ${DS_FOUND}633 check_writable_seed_dir "$d" meta-data user-data && return ${DS_FOUND}
634 done634 done
635 if has_fs_with_label "${fslabel}"; then635 if has_fs_with_label $fslabel; then
636 return ${DS_FOUND}636 return ${DS_FOUND}
637 fi637 fi
638
638 return ${DS_NOT_FOUND}639 return ${DS_NOT_FOUND}
639}640}
640641
@@ -762,7 +763,7 @@ is_cdrom_ovf() {
762763
763 # explicitly skip known labels of other types. rd_rdfe is azure.764 # explicitly skip known labels of other types. rd_rdfe is azure.
764 case "$label" in765 case "$label" in
765 config-2|CONFIG-2|rd_rdfe_stable*|cidata) return 1;;766 config-2|CONFIG-2|rd_rdfe_stable*|cidata|CIDATA) return 1;;
766 esac767 esac
767768
768 local idstr="http://schemas.dmtf.org/ovf/environment/1"769 local idstr="http://schemas.dmtf.org/ovf/environment/1"
diff --git a/tox.ini b/tox.ini
index d371720..1f01eb7 100644
--- a/tox.ini
+++ b/tox.ini
@@ -21,7 +21,7 @@ setenv =
21basepython = python321basepython = python3
22deps =22deps =
23 # requirements23 # requirements
24 pylint==2.2.224 pylint==2.3.1
25 # test-requirements because unit tests are now present in cloudinit tree25 # test-requirements because unit tests are now present in cloudinit tree
26 -r{toxinidir}/test-requirements.txt26 -r{toxinidir}/test-requirements.txt
27commands = {envpython} -m pylint {posargs:cloudinit tests tools}27commands = {envpython} -m pylint {posargs:cloudinit tests tools}
@@ -96,19 +96,18 @@ deps =
96 six==1.9.096 six==1.9.0
97 -r{toxinidir}/test-requirements.txt97 -r{toxinidir}/test-requirements.txt
9898
99[testenv:opensusel42]99[testenv:opensusel150]
100basepython = python2.7100basepython = python2.7
101commands = nosetests {posargs:tests/unittests cloudinit}101commands = nosetests {posargs:tests/unittests cloudinit}
102deps =102deps =
103 # requirements103 # requirements
104 argparse==1.3.0104 jinja2==2.10
105 jinja2==2.8105 PyYAML==3.12
106 PyYAML==3.11106 oauthlib==2.0.6
107 oauthlib==0.7.2
108 configobj==5.0.6107 configobj==5.0.6
109 requests==2.11.1108 requests==2.18.4
110 jsonpatch==1.11109 jsonpatch==1.16
111 six==1.9.0110 six==1.11.0
112 -r{toxinidir}/test-requirements.txt111 -r{toxinidir}/test-requirements.txt
113112
114[testenv:tip-pycodestyle]113[testenv:tip-pycodestyle]

Subscribers

People subscribed via source and target branches