Merge ~chad.smith/cloud-init:ubuntu/devel into cloud-init:ubuntu/devel

Proposed by Chad Smith
Status: Merged
Merged at revision: e0b108e961515e19f720b77b15b56daa63150f1a
Proposed branch: ~chad.smith/cloud-init:ubuntu/devel
Merge into: cloud-init:ubuntu/devel
Diff against target: 1562 lines (+711/-94)
26 files modified
cloudinit/config/cc_lxd.py (+56/-8)
cloudinit/net/eni.py (+17/-3)
cloudinit/net/netplan.py (+14/-8)
cloudinit/net/sysconfig.py (+7/-0)
cloudinit/sources/DataSourceOpenStack.py (+23/-0)
cloudinit/stages.py (+3/-1)
cloudinit/user_data.py (+13/-9)
cloudinit/util.py (+90/-21)
debian/changelog (+25/-0)
doc/rtd/topics/datasources/openstack.rst (+15/-0)
doc/rtd/topics/network-config-format-v1.rst (+27/-0)
doc/rtd/topics/network-config-format-v2.rst (+6/-0)
doc/rtd/topics/tests.rst (+6/-1)
integration-requirements.txt (+1/-1)
tests/cloud_tests/args.py (+3/-0)
tests/cloud_tests/collect.py (+2/-1)
tests/cloud_tests/stage.py (+12/-3)
tests/cloud_tests/testcases/modules/ntp_chrony.py (+12/-1)
tests/cloud_tests/testcases/modules/salt_minion.yaml (+12/-2)
tests/cloud_tests/verify.py (+46/-1)
tests/unittests/test_data.py (+10/-1)
tests/unittests/test_datasource/test_openstack.py (+110/-14)
tests/unittests/test_handler/test_handler_lxd.py (+64/-16)
tests/unittests/test_net.py (+20/-1)
tests/unittests/test_runs/test_simple_run.py (+30/-2)
tests/unittests/test_util.py (+87/-0)
Reviewer Review Type Date Requested Status
Server Team CI bot continuous-integration Approve
Scott Moser Pending
Review via email: mp+348113@code.launchpad.net

Commit message

Sync upstream snapshot for cosmic release 18.2-77-g4ce67201-0ubuntu1

Includes:
    - lxd: Delete default network and detach device if lxd-init created them.
      (LP: #1776958)
    - openstack: avoid unneeded metadata probe on non-openstack platforms
      (LP: #1776701)
    - stages: fix tracebacks if a module stage is undefined or empty
      [Robert Schweikert] (LP: #1770462)
    - Be more safe on string/bytes when writing multipart user-data to disk.
      (LP: #1768600)
    - Fix get_proc_env for pids that have non-utf8 content in environment.
      (LP: #1775371)
    - tests: fix salt_minion integration test on bionic and later
    - tests: provide human-readable integration test summary when --verbose
    - tests: skip chrony integration tests on lxd running artful or older
    - test: add optional --preserve-instance arg to integration tests
    - netplan: fix mtu if provided by network config for all rendered types
      (LP: #1774666)
    - tests: remove pip install workarounds for pylxd, take upstream fix.
    - subp: support combine_capture argument.
    - tests: ordered tox dependencies for pylxd install

To post a comment you must log in.
Revision history for this message
Server Team CI bot (server-team-bot) wrote :

PASSED: Continuous integration, rev:e0b108e961515e19f720b77b15b56daa63150f1a
https://jenkins.ubuntu.com/server/job/cloud-init-ci/104/
Executed test runs:
    SUCCESS: Checkout
    SUCCESS: Unit & Style Tests
    SUCCESS: Ubuntu LTS: Build
    SUCCESS: Ubuntu LTS: Integration
    SUCCESS: MAAS Compatibility Testing
    IN_PROGRESS: Declarative: Post Actions

Click here to trigger a rebuild:
https://jenkins.ubuntu.com/server/job/cloud-init-ci/104/rebuild

review: Approve (continuous-integration)
Revision history for this message
Scott Moser (smoser) wrote :

I'm fine with this, and will upload.
But before I do I wanted to see green on
 https://jenkins.ubuntu.com/server/view/cloud-init,%20curtin,%20streams/job/cloud-init-integration-lxd-c/
which was failing due to missing the fix for bug 1776958.

So this morning I kicked that off, but that ran with
 18.2-1891-gfef2616-0ubuntu1+1625~trunk~ubuntu18.10.1

which is missing the 2 top commits on master.

So... I did a 'request build' at
 https://code.launchpad.net/~cloud-init-dev/+recipe/cloud-init-daily-devel

then when that is into
 https://code.launchpad.net/~cloud-init-dev/+archive/ubuntu/daily
I'll kick off a cloud-init-integration-lxd-c and for good measure a cloud-init-integration-ec2-c.

Hopefully those will pass and then I'll do the upload.

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
2index 09374d2..ac72ac4 100644
3--- a/cloudinit/config/cc_lxd.py
4+++ b/cloudinit/config/cc_lxd.py
5@@ -47,11 +47,16 @@ lxd-bridge will be configured accordingly.
6 domain: <domain>
7 """
8
9+from cloudinit import log as logging
10 from cloudinit import util
11 import os
12
13 distros = ['ubuntu']
14
15+LOG = logging.getLogger(__name__)
16+
17+_DEFAULT_NETWORK_NAME = "lxdbr0"
18+
19
20 def handle(name, cfg, cloud, log, args):
21 # Get config
22@@ -109,6 +114,7 @@ def handle(name, cfg, cloud, log, args):
23 # Set up lxd-bridge if bridge config is given
24 dconf_comm = "debconf-communicate"
25 if bridge_cfg:
26+ net_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME)
27 if os.path.exists("/etc/default/lxd-bridge") \
28 and util.which(dconf_comm):
29 # Bridge configured through packaging
30@@ -135,15 +141,18 @@ def handle(name, cfg, cloud, log, args):
31 else:
32 # Built-in LXD bridge support
33 cmd_create, cmd_attach = bridge_to_cmd(bridge_cfg)
34+ maybe_cleanup_default(
35+ net_name=net_name, did_init=bool(init_cfg),
36+ create=bool(cmd_create), attach=bool(cmd_attach))
37 if cmd_create:
38 log.debug("Creating lxd bridge: %s" %
39 " ".join(cmd_create))
40- util.subp(cmd_create)
41+ _lxc(cmd_create)
42
43 if cmd_attach:
44 log.debug("Setting up default lxd bridge: %s" %
45 " ".join(cmd_create))
46- util.subp(cmd_attach)
47+ _lxc(cmd_attach)
48
49 elif bridge_cfg:
50 raise RuntimeError(
51@@ -204,10 +213,10 @@ def bridge_to_cmd(bridge_cfg):
52 if bridge_cfg.get("mode") == "none":
53 return None, None
54
55- bridge_name = bridge_cfg.get("name", "lxdbr0")
56+ bridge_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME)
57 cmd_create = []
58- cmd_attach = ["lxc", "network", "attach-profile", bridge_name,
59- "default", "eth0", "--force-local"]
60+ cmd_attach = ["network", "attach-profile", bridge_name,
61+ "default", "eth0"]
62
63 if bridge_cfg.get("mode") == "existing":
64 return None, cmd_attach
65@@ -215,7 +224,7 @@ def bridge_to_cmd(bridge_cfg):
66 if bridge_cfg.get("mode") != "new":
67 raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode"))
68
69- cmd_create = ["lxc", "network", "create", bridge_name]
70+ cmd_create = ["network", "create", bridge_name]
71
72 if bridge_cfg.get("ipv4_address") and bridge_cfg.get("ipv4_netmask"):
73 cmd_create.append("ipv4.address=%s/%s" %
74@@ -247,8 +256,47 @@ def bridge_to_cmd(bridge_cfg):
75 if bridge_cfg.get("domain"):
76 cmd_create.append("dns.domain=%s" % bridge_cfg.get("domain"))
77
78- cmd_create.append("--force-local")
79-
80 return cmd_create, cmd_attach
81
82+
83+def _lxc(cmd):
84+ env = {'LC_ALL': 'C'}
85+ util.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env)
86+
87+
88+def maybe_cleanup_default(net_name, did_init, create, attach,
89+ profile="default", nic_name="eth0"):
90+ """Newer versions of lxc (3.0.1+) create a lxdbr0 network when
91+ 'lxd init --auto' is run. Older versions did not.
92+
 93+ By removing any network that lxd-init created, we simply leave the add/attach
 94+ code intact.
95+
96+ https://github.com/lxc/lxd/issues/4649"""
97+ if net_name != _DEFAULT_NETWORK_NAME or not did_init:
98+ return
99+
100+ fail_assume_enoent = " failed. Assuming it did not exist."
101+ succeeded = " succeeded."
102+ if create:
103+ msg = "Deletion of lxd network '%s'" % net_name
104+ try:
105+ _lxc(["network", "delete", net_name])
106+ LOG.debug(msg + succeeded)
107+ except util.ProcessExecutionError as e:
108+ if e.exit_code != 1:
109+ raise e
110+ LOG.debug(msg + fail_assume_enoent)
111+
112+ if attach:
113+ msg = "Removal of device '%s' from profile '%s'" % (nic_name, profile)
114+ try:
115+ _lxc(["profile", "device", "remove", profile, nic_name])
116+ LOG.debug(msg + succeeded)
117+ except util.ProcessExecutionError as e:
118+ if e.exit_code != 1:
119+ raise e
120+ LOG.debug(msg + fail_assume_enoent)
121+
122+
123 # vi: ts=4 expandtab
124diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
125index c6a71d1..bd20a36 100644
126--- a/cloudinit/net/eni.py
127+++ b/cloudinit/net/eni.py
128@@ -10,9 +10,12 @@ from . import ParserError
129 from . import renderer
130 from .network_state import subnet_is_ipv6
131
132+from cloudinit import log as logging
133 from cloudinit import util
134
135
136+LOG = logging.getLogger(__name__)
137+
138 NET_CONFIG_COMMANDS = [
139 "pre-up", "up", "post-up", "down", "pre-down", "post-down",
140 ]
141@@ -61,7 +64,7 @@ def _iface_add_subnet(iface, subnet):
142
143
144 # TODO: switch to valid_map for attrs
145-def _iface_add_attrs(iface, index):
146+def _iface_add_attrs(iface, index, ipv4_subnet_mtu):
147 # If the index is non-zero, this is an alias interface. Alias interfaces
148 # represent additional interface addresses, and should not have additional
149 # attributes. (extra attributes here are almost always either incorrect,
150@@ -100,6 +103,13 @@ def _iface_add_attrs(iface, index):
151 value = 'on' if iface[key] else 'off'
152 if not value or key in ignore_map:
153 continue
154+ if key == 'mtu' and ipv4_subnet_mtu:
155+ if value != ipv4_subnet_mtu:
156+ LOG.warning(
157+ "Network config: ignoring %s device-level mtu:%s because"
158+ " ipv4 subnet-level mtu:%s provided.",
159+ iface['name'], value, ipv4_subnet_mtu)
160+ continue
161 if key in multiline_keys:
162 for v in value:
163 content.append(" {0} {1}".format(renames.get(key, key), v))
164@@ -377,12 +387,15 @@ class Renderer(renderer.Renderer):
165 subnets = iface.get('subnets', {})
166 if subnets:
167 for index, subnet in enumerate(subnets):
168+ ipv4_subnet_mtu = None
169 iface['index'] = index
170 iface['mode'] = subnet['type']
171 iface['control'] = subnet.get('control', 'auto')
172 subnet_inet = 'inet'
173 if subnet_is_ipv6(subnet):
174 subnet_inet += '6'
175+ else:
176+ ipv4_subnet_mtu = subnet.get('mtu')
177 iface['inet'] = subnet_inet
178 if subnet['type'].startswith('dhcp'):
179 iface['mode'] = 'dhcp'
180@@ -397,7 +410,7 @@ class Renderer(renderer.Renderer):
181 _iface_start_entry(
182 iface, index, render_hwaddress=render_hwaddress) +
183 _iface_add_subnet(iface, subnet) +
184- _iface_add_attrs(iface, index)
185+ _iface_add_attrs(iface, index, ipv4_subnet_mtu)
186 )
187 for route in subnet.get('routes', []):
188 lines.extend(self._render_route(route, indent=" "))
189@@ -409,7 +422,8 @@ class Renderer(renderer.Renderer):
190 if 'bond-master' in iface or 'bond-slaves' in iface:
191 lines.append("auto {name}".format(**iface))
192 lines.append("iface {name} {inet} {mode}".format(**iface))
193- lines.extend(_iface_add_attrs(iface, index=0))
194+ lines.extend(
195+ _iface_add_attrs(iface, index=0, ipv4_subnet_mtu=None))
196 sections.append(lines)
197 return sections
198
199diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py
200index 6344348..4014363 100644
201--- a/cloudinit/net/netplan.py
202+++ b/cloudinit/net/netplan.py
203@@ -34,7 +34,7 @@ def _get_params_dict_by_match(config, match):
204 if key.startswith(match))
205
206
207-def _extract_addresses(config, entry):
208+def _extract_addresses(config, entry, ifname):
209 """This method parse a cloudinit.net.network_state dictionary (config) and
210 maps netstate keys/values into a dictionary (entry) to represent
211 netplan yaml.
212@@ -124,6 +124,15 @@ def _extract_addresses(config, entry):
213
214 addresses.append(addr)
215
216+ if 'mtu' in config:
217+ entry_mtu = entry.get('mtu')
218+ if entry_mtu and config['mtu'] != entry_mtu:
219+ LOG.warning(
220+ "Network config: ignoring %s device-level mtu:%s because"
221+ " ipv4 subnet-level mtu:%s provided.",
222+ ifname, config['mtu'], entry_mtu)
223+ else:
224+ entry['mtu'] = config['mtu']
225 if len(addresses) > 0:
226 entry.update({'addresses': addresses})
227 if len(routes) > 0:
228@@ -262,10 +271,7 @@ class Renderer(renderer.Renderer):
229 else:
230 del eth['match']
231 del eth['set-name']
232- if 'mtu' in ifcfg:
233- eth['mtu'] = ifcfg.get('mtu')
234-
235- _extract_addresses(ifcfg, eth)
236+ _extract_addresses(ifcfg, eth, ifname)
237 ethernets.update({ifname: eth})
238
239 elif if_type == 'bond':
240@@ -288,7 +294,7 @@ class Renderer(renderer.Renderer):
241 slave_interfaces = ifcfg.get('bond-slaves')
242 if slave_interfaces == 'none':
243 _extract_bond_slaves_by_name(interfaces, bond, ifname)
244- _extract_addresses(ifcfg, bond)
245+ _extract_addresses(ifcfg, bond, ifname)
246 bonds.update({ifname: bond})
247
248 elif if_type == 'bridge':
249@@ -321,7 +327,7 @@ class Renderer(renderer.Renderer):
250
251 if len(br_config) > 0:
252 bridge.update({'parameters': br_config})
253- _extract_addresses(ifcfg, bridge)
254+ _extract_addresses(ifcfg, bridge, ifname)
255 bridges.update({ifname: bridge})
256
257 elif if_type == 'vlan':
258@@ -333,7 +339,7 @@ class Renderer(renderer.Renderer):
259 macaddr = ifcfg.get('mac_address', None)
260 if macaddr is not None:
261 vlan['macaddress'] = macaddr.lower()
262- _extract_addresses(ifcfg, vlan)
263+ _extract_addresses(ifcfg, vlan, ifname)
264 vlans.update({ifname: vlan})
265
266 # inject global nameserver values under each all interface which
267diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
268index e53b9f1..3d71923 100644
269--- a/cloudinit/net/sysconfig.py
270+++ b/cloudinit/net/sysconfig.py
271@@ -304,6 +304,13 @@ class Renderer(renderer.Renderer):
272 mtu_key = 'IPV6_MTU'
273 iface_cfg['IPV6INIT'] = True
274 if 'mtu' in subnet:
275+ mtu_mismatch = bool(mtu_key in iface_cfg and
276+ subnet['mtu'] != iface_cfg[mtu_key])
277+ if mtu_mismatch:
278+ LOG.warning(
279+ 'Network config: ignoring %s device-level mtu:%s'
280+ ' because ipv4 subnet-level mtu:%s provided.',
281+ iface_cfg.name, iface_cfg[mtu_key], subnet['mtu'])
282 iface_cfg[mtu_key] = subnet['mtu']
283 elif subnet_type == 'manual':
284 # If the subnet has an MTU setting, then ONBOOT=True
285diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
286index 1a12a3f..365af96 100644
287--- a/cloudinit/sources/DataSourceOpenStack.py
288+++ b/cloudinit/sources/DataSourceOpenStack.py
289@@ -23,6 +23,13 @@ DEFAULT_METADATA = {
290 "instance-id": DEFAULT_IID,
291 }
292
293+# OpenStack DMI constants
294+DMI_PRODUCT_NOVA = 'OpenStack Nova'
295+DMI_PRODUCT_COMPUTE = 'OpenStack Compute'
296+VALID_DMI_PRODUCT_NAMES = [DMI_PRODUCT_NOVA, DMI_PRODUCT_COMPUTE]
297+DMI_ASSET_TAG_OPENTELEKOM = 'OpenTelekomCloud'
298+VALID_DMI_ASSET_TAGS = [DMI_ASSET_TAG_OPENTELEKOM]
299+
300
301 class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
302
303@@ -114,6 +121,8 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
304 False when unable to contact metadata service or when metadata
305 format is invalid or disabled.
306 """
307+ if not detect_openstack():
308+ return False
309 if self.perform_dhcp_setup: # Setup networking in init-local stage.
310 try:
311 with EphemeralDHCPv4(self.fallback_interface):
312@@ -205,6 +214,20 @@ def read_metadata_service(base_url, ssl_details=None,
313 return reader.read_v2()
314
315
316+def detect_openstack():
317+ """Return True when a potential OpenStack platform is detected."""
318+ if not util.is_x86():
319+ return True # Non-Intel cpus don't properly report dmi product names
320+ product_name = util.read_dmi_data('system-product-name')
321+ if product_name in VALID_DMI_PRODUCT_NAMES:
322+ return True
323+ elif util.read_dmi_data('chassis-asset-tag') in VALID_DMI_ASSET_TAGS:
324+ return True
325+ elif util.get_proc_env(1).get('product_name') == DMI_PRODUCT_NOVA:
326+ return True
327+ return False
328+
329+
330 # Used to match classes to dependencies
331 datasources = [
332 (DataSourceOpenStackLocal, (sources.DEP_FILESYSTEM,)),
333diff --git a/cloudinit/stages.py b/cloudinit/stages.py
334index 3998cf6..286607b 100644
335--- a/cloudinit/stages.py
336+++ b/cloudinit/stages.py
337@@ -697,7 +697,9 @@ class Modules(object):
338 module_list = []
339 if name not in self.cfg:
340 return module_list
341- cfg_mods = self.cfg[name]
342+ cfg_mods = self.cfg.get(name)
343+ if not cfg_mods:
344+ return module_list
345 # Create 'module_list', an array of hashes
346 # Where hash['mod'] = module name
347 # hash['freq'] = frequency
348diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
349index 8f6aba1..ed83d2d 100644
350--- a/cloudinit/user_data.py
351+++ b/cloudinit/user_data.py
352@@ -337,8 +337,10 @@ def is_skippable(part):
353
354 # Coverts a raw string into a mime message
355 def convert_string(raw_data, content_type=NOT_MULTIPART_TYPE):
356+ """convert a string (more likely bytes) or a message into
357+ a mime message."""
358 if not raw_data:
359- raw_data = ''
360+ raw_data = b''
361
362 def create_binmsg(data, content_type):
363 maintype, subtype = content_type.split("/", 1)
364@@ -346,15 +348,17 @@ def convert_string(raw_data, content_type=NOT_MULTIPART_TYPE):
365 msg.set_payload(data)
366 return msg
367
368- try:
369- data = util.decode_binary(util.decomp_gzip(raw_data))
370- if "mime-version:" in data[0:4096].lower():
371- msg = util.message_from_string(data)
372- else:
373- msg = create_binmsg(data, content_type)
374- except UnicodeDecodeError:
375- msg = create_binmsg(raw_data, content_type)
376+ if isinstance(raw_data, six.text_type):
377+ bdata = raw_data.encode('utf-8')
378+ else:
379+ bdata = raw_data
380+ bdata = util.decomp_gzip(bdata, decode=False)
381+ if b"mime-version:" in bdata[0:4096].lower():
382+ msg = util.message_from_string(bdata.decode('utf-8'))
383+ else:
384+ msg = create_binmsg(bdata, content_type)
385
386 return msg
387
388+
389 # vi: ts=4 expandtab
390diff --git a/cloudinit/util.py b/cloudinit/util.py
391index d9b61cf..6da9511 100644
392--- a/cloudinit/util.py
393+++ b/cloudinit/util.py
394@@ -1876,9 +1876,55 @@ def subp_blob_in_tempfile(blob, *args, **kwargs):
395 return subp(*args, **kwargs)
396
397
398-def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
399+def subp(args, data=None, rcs=None, env=None, capture=True,
400+ combine_capture=False, shell=False,
401 logstring=False, decode="replace", target=None, update_env=None,
402 status_cb=None):
403+ """Run a subprocess.
404+
405+ :param args: command to run in a list. [cmd, arg1, arg2...]
406+ :param data: input to the command, made available on its stdin.
407+ :param rcs:
408+ a list of allowed return codes. If subprocess exits with a value not
409+ in this list, a ProcessExecutionError will be raised. By default,
410+ data is returned as a string. See 'decode' parameter.
411+ :param env: a dictionary for the command's environment.
412+ :param capture:
413+ boolean indicating if output should be captured. If True, then stderr
414+ and stdout will be returned. If False, they will not be redirected.
415+ :param combine_capture:
416+ boolean indicating if stderr should be redirected to stdout. When True,
417+ interleaved stderr and stdout will be returned as the first element of
418+ a tuple, the second will be empty string or bytes (per decode).
419+ if combine_capture is True, then output is captured independent of
420+ the value of capture.
421+ :param shell: boolean indicating if this should be run with a shell.
422+ :param logstring:
423+ the command will be logged to DEBUG. If it contains info that should
424+ not be logged, then logstring will be logged instead.
425+ :param decode:
426+ if False, no decoding will be done and returned stdout and stderr will
427+ be bytes. Other allowed values are 'strict', 'ignore', and 'replace'.
428+ These values are passed through to bytes().decode() as the 'errors'
429+ parameter. There is no support for decoding to other than utf-8.
430+ :param target:
431+ not supported, kwarg present only to make function signature similar
432+ to curtin's subp.
433+ :param update_env:
 434+ update the environment for this command with this dictionary.
435+ this will not affect the current processes os.environ.
436+ :param status_cb:
 437+ call this function with a single string argument before starting
438+ and after finishing.
439+
440+ :return
441+ if not capturing, return is (None, None)
442+ if capturing, stdout and stderr are returned.
443+ if decode:
444+ entries in tuple will be python2 unicode or python3 string
445+ if not decode:
446+ entries in tuple will be python2 string or python3 bytes
447+ """
448
449 # not supported in cloud-init (yet), for now kept in the call signature
450 # to ease maintaining code shared between cloud-init and curtin
451@@ -1904,7 +1950,8 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
452 status_cb('Begin run command: {command}\n'.format(command=command))
453 if not logstring:
454 LOG.debug(("Running command %s with allowed return codes %s"
455- " (shell=%s, capture=%s)"), args, rcs, shell, capture)
456+ " (shell=%s, capture=%s)"),
457+ args, rcs, shell, 'combine' if combine_capture else capture)
458 else:
459 LOG.debug(("Running hidden command to protect sensitive "
460 "input/output logstring: %s"), logstring)
461@@ -1915,6 +1962,9 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
462 if capture:
463 stdout = subprocess.PIPE
464 stderr = subprocess.PIPE
465+ if combine_capture:
466+ stdout = subprocess.PIPE
467+ stderr = subprocess.STDOUT
468 if data is None:
469 # using devnull assures any reads get null, rather
470 # than possibly waiting on input.
471@@ -1953,10 +2003,11 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
472 devnull_fp.close()
473
474 # Just ensure blank instead of none.
475- if not out and capture:
476- out = b''
477- if not err and capture:
478- err = b''
479+ if capture or combine_capture:
480+ if not out:
481+ out = b''
482+ if not err:
483+ err = b''
484 if decode:
485 def ldecode(data, m='utf-8'):
486 if not isinstance(data, bytes):
487@@ -2080,24 +2131,33 @@ def is_container():
488 return False
489
490
491-def get_proc_env(pid):
492+def get_proc_env(pid, encoding='utf-8', errors='replace'):
493 """
494 Return the environment in a dict that a given process id was started with.
495- """
496
497- env = {}
498- fn = os.path.join("/proc/", str(pid), "environ")
499+ @param encoding: if true, then decoding will be done with
500+ .decode(encoding, errors) and text will be returned.
501+ if false then binary will be returned.
502+ @param errors: only used if encoding is true."""
503+ fn = os.path.join("/proc", str(pid), "environ")
504+
505 try:
506- contents = load_file(fn)
507- toks = contents.split("\x00")
508- for tok in toks:
509- if tok == "":
510- continue
511- (name, val) = tok.split("=", 1)
512- if name:
513- env[name] = val
514+ contents = load_file(fn, decode=False)
515 except (IOError, OSError):
516- pass
517+ return {}
518+
519+ env = {}
520+ null, equal = (b"\x00", b"=")
521+ if encoding:
522+ null, equal = ("\x00", "=")
523+ contents = contents.decode(encoding, errors)
524+
525+ for tok in contents.split(null):
526+ if not tok:
527+ continue
528+ (name, val) = tok.split(equal, 1)
529+ if name:
530+ env[name] = val
531 return env
532
533
534@@ -2569,6 +2629,16 @@ def _call_dmidecode(key, dmidecode_path):
535 return None
536
537
538+def is_x86(uname_arch=None):
539+ """Return True if platform is x86-based"""
540+ if uname_arch is None:
541+ uname_arch = os.uname()[4]
542+ x86_arch_match = (
543+ uname_arch == 'x86_64' or
544+ (uname_arch[0] == 'i' and uname_arch[2:] == '86'))
545+ return x86_arch_match
546+
547+
548 def read_dmi_data(key):
549 """
550 Wrapper for reading DMI data.
551@@ -2596,8 +2666,7 @@ def read_dmi_data(key):
552
553 # running dmidecode can be problematic on some arches (LP: #1243287)
554 uname_arch = os.uname()[4]
555- if not (uname_arch == "x86_64" or
556- (uname_arch.startswith("i") and uname_arch[2:] == "86") or
557+ if not (is_x86(uname_arch) or
558 uname_arch == 'aarch64' or
559 uname_arch == 'amd64'):
560 LOG.debug("dmidata is not supported on %s", uname_arch)
561diff --git a/debian/changelog b/debian/changelog
562index b529b78..e419f47 100644
563--- a/debian/changelog
564+++ b/debian/changelog
565@@ -1,3 +1,28 @@
566+cloud-init (18.2-77-g4ce67201-0ubuntu1) cosmic; urgency=medium
567+
568+ * New upstream snapshot.
569+ - lxd: Delete default network and detach device if lxd-init created them.
570+ (LP: #1776958)
571+ - openstack: avoid unneeded metadata probe on non-openstack platforms
572+ (LP: #1776701)
573+ - stages: fix tracebacks if a module stage is undefined or empty
574+ [Robert Schweikert] (LP: #1770462)
575+ - Be more safe on string/bytes when writing multipart user-data to disk.
576+ (LP: #1768600)
577+ - Fix get_proc_env for pids that have non-utf8 content in environment.
578+ (LP: #1775371)
579+ - tests: fix salt_minion integration test on bionic and later
580+ - tests: provide human-readable integration test summary when --verbose
581+ - tests: skip chrony integration tests on lxd running artful or older
 582+ - test: add optional --preserve-instance arg to integration tests
583+ - netplan: fix mtu if provided by network config for all rendered types
584+ (LP: #1774666)
585+ - tests: remove pip install workarounds for pylxd, take upstream fix.
586+ - subp: support combine_capture argument.
587+ - tests: ordered tox dependencies for pylxd install
588+
589+ -- Chad Smith <chad.smith@canonical.com> Fri, 15 Jun 2018 20:05:07 -0600
590+
591 cloud-init (18.2-64-gbbcc5e82-0ubuntu1) cosmic; urgency=medium
592
593 * debian/rules: update version.version_string to contain packaged version.
594diff --git a/doc/rtd/topics/datasources/openstack.rst b/doc/rtd/topics/datasources/openstack.rst
595index 0ea8994..421da08 100644
596--- a/doc/rtd/topics/datasources/openstack.rst
597+++ b/doc/rtd/topics/datasources/openstack.rst
598@@ -7,6 +7,21 @@ This datasource supports reading data from the
599 `OpenStack Metadata Service
600 <https://docs.openstack.org/nova/latest/admin/networking-nova.html#metadata-service>`_.
601
602+Discovery
603+-------------
604+To determine whether a platform looks like it may be OpenStack, cloud-init
605+checks the following environment attributes as a potential OpenStack platform:
606+
607+ * Maybe OpenStack if
608+
609+ * **non-x86 cpu architecture**: because DMI data is buggy on some arches
610+ * Is OpenStack **if x86 architecture and ANY** of the following
611+
612+ * **/proc/1/environ**: Nova-lxd contains *product_name=OpenStack Nova*
613+ * **DMI product_name**: Either *Openstack Nova* or *OpenStack Compute*
614+ * **DMI chassis_asset_tag** is *OpenTelekomCloud*
615+
616+
617 Configuration
618 -------------
619 The following configuration can be set for the datasource in system
620diff --git a/doc/rtd/topics/network-config-format-v1.rst b/doc/rtd/topics/network-config-format-v1.rst
621index 2f8ab54..3b0148c 100644
622--- a/doc/rtd/topics/network-config-format-v1.rst
623+++ b/doc/rtd/topics/network-config-format-v1.rst
624@@ -130,6 +130,18 @@ the bond interfaces.
625 The ``bond_interfaces`` key accepts a list of network device ``name`` values
626 from the configuration. This list may be empty.
627
628+**mtu**: *<MTU SizeBytes>*
629+
630+The MTU key represents a device's Maximum Transmission Unit, the largest size
631+packet or frame, specified in octets (eight-bit bytes), that can be sent in a
632+packet- or frame-based network. Specifying ``mtu`` is optional.
633+
634+.. note::
635+
 636+ The possible supported values of a device's MTU are not available at
 637+ configuration time. It's possible to specify a value too large or too
 638+ small for a device and may be ignored by the device.
639+
640 **params**: *<Dictionary of key: value bonding parameter pairs>*
641
642 The ``params`` key in a bond holds a dictionary of bonding parameters.
643@@ -268,6 +280,21 @@ Type ``vlan`` requires the following keys:
644 - ``vlan_link``: Specify the underlying link via its ``name``.
645 - ``vlan_id``: Specify the VLAN numeric id.
646
647+The following optional keys are supported:
648+
649+**mtu**: *<MTU SizeBytes>*
650+
651+The MTU key represents a device's Maximum Transmission Unit, the largest size
652+packet or frame, specified in octets (eight-bit bytes), that can be sent in a
653+packet- or frame-based network. Specifying ``mtu`` is optional.
654+
655+.. note::
656+
 657+ The possible supported values of a device's MTU are not available at
 658+ configuration time. It's possible to specify a value too large or too
 659+ small for a device and may be ignored by the device.
660+
661+
662 **VLAN Example**::
663
664 network:
665diff --git a/doc/rtd/topics/network-config-format-v2.rst b/doc/rtd/topics/network-config-format-v2.rst
666index 335d236..ea370ef 100644
667--- a/doc/rtd/topics/network-config-format-v2.rst
668+++ b/doc/rtd/topics/network-config-format-v2.rst
669@@ -174,6 +174,12 @@ recognized by ``inet_pton(3)``
670 Example for IPv4: ``gateway4: 172.16.0.1``
671 Example for IPv6: ``gateway6: 2001:4::1``
672
673+**mtu**: *<MTU SizeBytes>*
674+
675+The MTU key represents a device's Maximum Transmission Unit, the largest size
676+packet or frame, specified in octets (eight-bit bytes), that can be sent in a
677+packet- or frame-based network. Specifying ``mtu`` is optional.
678+
679 **nameservers**: *<(mapping)>*
680
681 Set DNS servers and search domains, for manual address configuration. There
682diff --git a/doc/rtd/topics/tests.rst b/doc/rtd/topics/tests.rst
683index cac4a6e..b83bd89 100644
684--- a/doc/rtd/topics/tests.rst
685+++ b/doc/rtd/topics/tests.rst
686@@ -58,7 +58,8 @@ explaining how to run one or the other independently.
687 $ tox -e citest -- run --verbose \
688 --os-name stretch --os-name xenial \
689 --deb cloud-init_0.7.8~my_patch_all.deb \
690- --preserve-data --data-dir ~/collection
691+ --preserve-data --data-dir ~/collection \
692+ --preserve-instance
693
694 The above command will do the following:
695
696@@ -76,6 +77,10 @@ The above command will do the following:
697 * ``--preserve-data`` always preserve collected data, do not remove data
698 after successful test run
699
700+* ``--preserve-instance`` do not destroy the instance after test to allow
701+ for debugging the stopped instance during integration test development. By
702+ default, test instances are destroyed after the test completes.
703+
704 * ``--data-dir ~/collection`` write collected data into `~/collection`,
705 rather than using a temporary directory
706
707diff --git a/integration-requirements.txt b/integration-requirements.txt
708index df3a73e..e5bb5b2 100644
709--- a/integration-requirements.txt
710+++ b/integration-requirements.txt
711@@ -13,7 +13,7 @@ paramiko==2.4.0
712
713 # lxd backend
714 # 04/03/2018: enables use of lxd 3.0
715-git+https://github.com/lxc/pylxd.git@1a85a12a23401de6e96b1aeaf59ecbff2e88f49d
716+git+https://github.com/lxc/pylxd.git@4b8ab1802f9aee4eb29cf7b119dae0aa47150779
717
718
719 # finds latest image information
720diff --git a/tests/cloud_tests/args.py b/tests/cloud_tests/args.py
721index c6c1877..ab34549 100644
722--- a/tests/cloud_tests/args.py
723+++ b/tests/cloud_tests/args.py
724@@ -62,6 +62,9 @@ ARG_SETS = {
725 (('-d', '--data-dir'),
726 {'help': 'directory to store test data in',
727 'action': 'store', 'metavar': 'DIR', 'required': False}),
728+ (('--preserve-instance',),
729+ {'help': 'do not destroy the instance under test',
730+ 'action': 'store_true', 'default': False, 'required': False}),
731 (('--preserve-data',),
732 {'help': 'do not remove collected data after successful run',
733 'action': 'store_true', 'default': False, 'required': False}),),
734diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py
735index 78263bf..75b5061 100644
736--- a/tests/cloud_tests/collect.py
737+++ b/tests/cloud_tests/collect.py
738@@ -93,7 +93,8 @@ def collect_test_data(args, snapshot, os_name, test_name):
739 # create test instance
740 component = PlatformComponent(
741 partial(platforms.get_instance, snapshot, user_data,
742- block=True, start=False, use_desc=test_name))
743+ block=True, start=False, use_desc=test_name),
744+ preserve_instance=args.preserve_instance)
745
746 LOG.info('collecting test data for test: %s', test_name)
747 with component as instance:
748diff --git a/tests/cloud_tests/stage.py b/tests/cloud_tests/stage.py
749index 74a7d46..d64a1dc 100644
750--- a/tests/cloud_tests/stage.py
751+++ b/tests/cloud_tests/stage.py
752@@ -12,9 +12,15 @@ from tests.cloud_tests import LOG
753 class PlatformComponent(object):
754 """Context manager to safely handle platform components."""
755
756- def __init__(self, get_func):
757- """Store get_<platform component> function as partial with no args."""
758+ def __init__(self, get_func, preserve_instance=False):
759+ """Store get_<platform component> function as partial with no args.
760+
761+ @param get_func: Callable returning an instance from the platform.
762+ @param preserve_instance: Boolean, when True, do not destroy instance
763+ after test. Used for test development.
764+ """
765 self.get_func = get_func
766+ self.preserve_instance = preserve_instance
767
768 def __enter__(self):
769 """Create instance of platform component."""
770@@ -24,7 +30,10 @@ class PlatformComponent(object):
771 def __exit__(self, etype, value, trace):
772 """Destroy instance."""
773 if self.instance is not None:
774- self.instance.destroy()
775+ if self.preserve_instance:
776+ LOG.info('Preserving test instance %s', self.instance.name)
777+ else:
778+ self.instance.destroy()
779
780
781 def run_single(name, call):
782diff --git a/tests/cloud_tests/testcases/modules/ntp_chrony.py b/tests/cloud_tests/testcases/modules/ntp_chrony.py
783index 461630a..7d34177 100644
784--- a/tests/cloud_tests/testcases/modules/ntp_chrony.py
785+++ b/tests/cloud_tests/testcases/modules/ntp_chrony.py
786@@ -1,13 +1,24 @@
787 # This file is part of cloud-init. See LICENSE file for license information.
788
789 """cloud-init Integration Test Verify Script."""
790+import unittest
791+
792 from tests.cloud_tests.testcases import base
793
794
795 class TestNtpChrony(base.CloudTestCase):
796 """Test ntp module with chrony client"""
797
798- def test_chrony_entires(self):
799+ def setUp(self):
800+ """Skip this suite of tests on lxd and artful or older."""
801+ if self.platform == 'lxd':
802+ if self.is_distro('ubuntu') and self.os_version_cmp('artful') <= 0:
803+ raise unittest.SkipTest(
804+ 'No support for chrony on containers <= artful.'
805+ ' LP: #1589780')
806+ return super(TestNtpChrony, self).setUp()
807+
808+ def test_chrony_entries(self):
809 """Test chrony config entries"""
810 out = self.get_data_file('chrony_conf')
811 self.assertIn('.pool.ntp.org', out)
812diff --git a/tests/cloud_tests/testcases/modules/salt_minion.yaml b/tests/cloud_tests/testcases/modules/salt_minion.yaml
813index c24aa17..9227147 100644
814--- a/tests/cloud_tests/testcases/modules/salt_minion.yaml
815+++ b/tests/cloud_tests/testcases/modules/salt_minion.yaml
816@@ -28,10 +28,20 @@ collect_scripts:
817 cat /etc/salt/minion_id
818 minion.pem: |
819 #!/bin/bash
820- cat /etc/salt/pki/minion/minion.pem
821+ PRIV_KEYFILE=/etc/salt/pki/minion/minion.pem
822+ if [ ! -f $PRIV_KEYFILE ]; then
823+ # Bionic and later automatically moves /etc/salt/pki/minion/*
824+ PRIV_KEYFILE=/var/lib/salt/pki/minion/minion.pem
825+ fi
826+ cat $PRIV_KEYFILE
827 minion.pub: |
828 #!/bin/bash
829- cat /etc/salt/pki/minion/minion.pub
830+ PUB_KEYFILE=/etc/salt/pki/minion/minion.pub
831+ if [ ! -f $PUB_KEYFILE ]; then
832+ # Bionic and later automatically moves /etc/salt/pki/minion/*
833+ PUB_KEYFILE=/var/lib/salt/pki/minion/minion.pub
834+ fi
835+ cat $PUB_KEYFILE
836 grains: |
837 #!/bin/bash
838 cat /etc/salt/grains
839diff --git a/tests/cloud_tests/verify.py b/tests/cloud_tests/verify.py
840index 5a68a48..bfb2744 100644
841--- a/tests/cloud_tests/verify.py
842+++ b/tests/cloud_tests/verify.py
843@@ -56,6 +56,51 @@ def verify_data(data_dir, platform, os_name, tests):
844 return res
845
846
847+def format_test_failures(test_result):
848+ """Return a human-readable printable format of test failures."""
849+ if not test_result['failures']:
850+ return ''
851+ failure_hdr = ' test failures:'
852+ failure_fmt = ' * {module}.{class}.{function}\n {error}'
853+ output = []
854+ for failure in test_result['failures']:
855+ if not output:
856+ output = [failure_hdr]
857+ output.append(failure_fmt.format(**failure))
858+ return '\n'.join(output)
859+
860+
861+def format_results(res):
862+ """Return human-readable results as a string"""
863+ platform_hdr = 'Platform: {platform}'
864+ distro_hdr = ' Distro: {distro}'
865+ distro_summary_fmt = (
866+ ' test modules passed:{passed} tests failed:{failed}')
867+ output = ['']
868+ counts = {}
869+ for platform, platform_data in res.items():
870+ output.append(platform_hdr.format(platform=platform))
871+ counts[platform] = {}
872+ for distro, distro_data in platform_data.items():
873+ distro_failure_output = []
874+ output.append(distro_hdr.format(distro=distro))
875+ counts[platform][distro] = {'passed': 0, 'failed': 0}
876+ for _, test_result in distro_data.items():
877+ if test_result['passed']:
878+ counts[platform][distro]['passed'] += 1
879+ else:
880+ counts[platform][distro]['failed'] += len(
881+ test_result['failures'])
882+ failure_output = format_test_failures(test_result)
883+ if failure_output:
884+ distro_failure_output.append(failure_output)
885+ output.append(
886+ distro_summary_fmt.format(**counts[platform][distro]))
887+ if distro_failure_output:
888+ output.extend(distro_failure_output)
889+ return '\n'.join(output)
890+
891+
892 def verify(args):
893 """Verify test data.
894
895@@ -90,7 +135,7 @@ def verify(args):
896 failed += len(fail_list)
897
898 # dump results
899- LOG.debug('verify results: %s', res)
900+ LOG.debug('\n---- Verify summarized results:\n%s', format_results(res))
901 if args.result:
902 util.merge_results({'verify': res}, args.result)
903
904diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py
905index 91d35cb..3efe7ad 100644
906--- a/tests/unittests/test_data.py
907+++ b/tests/unittests/test_data.py
908@@ -606,8 +606,10 @@ class TestUDProcess(helpers.ResourceUsingTestCase):
909
910
911 class TestConvertString(helpers.TestCase):
912+
913 def test_handles_binary_non_utf8_decodable(self):
914- blob = b'\x32\x99'
915+ """Printable unicode (not utf8-decodable) is safely converted."""
916+ blob = b'#!/bin/bash\necho \xc3\x84\n'
917 msg = ud.convert_string(blob)
918 self.assertEqual(blob, msg.get_payload(decode=True))
919
920@@ -621,6 +623,13 @@ class TestConvertString(helpers.TestCase):
921 msg = ud.convert_string(text)
922 self.assertEqual(text, msg.get_payload(decode=False))
923
924+ def test_handle_mime_parts(self):
925+ """Mime parts are properly returned as a mime message."""
926+ message = MIMEBase("text", "plain")
927+ message.set_payload("Just text")
928+ msg = ud.convert_string(str(message))
929+ self.assertEqual("Just text", msg.get_payload(decode=False))
930+
931
932 class TestFetchBaseConfig(helpers.TestCase):
933 def test_only_builtin_gets_builtin(self):
934diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py
935index fad73b2..585acc3 100644
936--- a/tests/unittests/test_datasource/test_openstack.py
937+++ b/tests/unittests/test_datasource/test_openstack.py
938@@ -69,6 +69,8 @@ EC2_VERSIONS = [
939 'latest',
940 ]
941
942+MOCK_PATH = 'cloudinit.sources.DataSourceOpenStack.'
943+
944
945 # TODO _register_uris should leverage test_ec2.register_mock_metaserver.
946 def _register_uris(version, ec2_files, ec2_meta, os_files):
947@@ -231,7 +233,10 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
948 ds_os = ds.DataSourceOpenStack(
949 settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
950 self.assertIsNone(ds_os.version)
951- found = ds_os.get_data()
952+ mock_path = MOCK_PATH + 'detect_openstack'
953+ with test_helpers.mock.patch(mock_path) as m_detect_os:
954+ m_detect_os.return_value = True
955+ found = ds_os.get_data()
956 self.assertTrue(found)
957 self.assertEqual(2, ds_os.version)
958 md = dict(ds_os.metadata)
959@@ -260,7 +265,10 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
960 'broadcast-address': '192.168.2.255'}]
961
962 self.assertIsNone(ds_os_local.version)
963- found = ds_os_local.get_data()
964+ mock_path = MOCK_PATH + 'detect_openstack'
965+ with test_helpers.mock.patch(mock_path) as m_detect_os:
966+ m_detect_os.return_value = True
967+ found = ds_os_local.get_data()
968 self.assertTrue(found)
969 self.assertEqual(2, ds_os_local.version)
970 md = dict(ds_os_local.metadata)
971@@ -284,7 +292,10 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
972 None,
973 helpers.Paths({'run_dir': self.tmp}))
974 self.assertIsNone(ds_os.version)
975- found = ds_os.get_data()
976+ mock_path = MOCK_PATH + 'detect_openstack'
977+ with test_helpers.mock.patch(mock_path) as m_detect_os:
978+ m_detect_os.return_value = True
979+ found = ds_os.get_data()
980 self.assertFalse(found)
981 self.assertIsNone(ds_os.version)
982 self.assertIn(
983@@ -306,15 +317,16 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
984 'timeout': 0,
985 }
986 self.assertIsNone(ds_os.version)
987- found = ds_os.get_data()
988+ mock_path = MOCK_PATH + 'detect_openstack'
989+ with test_helpers.mock.patch(mock_path) as m_detect_os:
990+ m_detect_os.return_value = True
991+ found = ds_os.get_data()
992 self.assertFalse(found)
993 self.assertIsNone(ds_os.version)
994
995 def test_network_config_disabled_by_datasource_config(self):
996 """The network_config can be disabled from datasource config."""
997- mock_path = (
998- 'cloudinit.sources.DataSourceOpenStack.openstack.'
999- 'convert_net_json')
1000+ mock_path = MOCK_PATH + 'openstack.convert_net_json'
1001 ds_os = ds.DataSourceOpenStack(
1002 settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
1003 ds_os.ds_cfg = {'apply_network_config': False}
1004@@ -327,9 +339,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
1005
1006 def test_network_config_from_network_json(self):
1007 """The datasource gets network_config from network_data.json."""
1008- mock_path = (
1009- 'cloudinit.sources.DataSourceOpenStack.openstack.'
1010- 'convert_net_json')
1011+ mock_path = MOCK_PATH + 'openstack.convert_net_json'
1012 example_cfg = {'version': 1, 'config': []}
1013 ds_os = ds.DataSourceOpenStack(
1014 settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
1015@@ -345,9 +355,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
1016
1017 def test_network_config_cached(self):
1018 """The datasource caches the network_config property."""
1019- mock_path = (
1020- 'cloudinit.sources.DataSourceOpenStack.openstack.'
1021- 'convert_net_json')
1022+ mock_path = MOCK_PATH + 'openstack.convert_net_json'
1023 example_cfg = {'version': 1, 'config': []}
1024 ds_os = ds.DataSourceOpenStack(
1025 settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
1026@@ -374,7 +382,10 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
1027 'timeout': 0,
1028 }
1029 self.assertIsNone(ds_os.version)
1030- found = ds_os.get_data()
1031+ mock_path = MOCK_PATH + 'detect_openstack'
1032+ with test_helpers.mock.patch(mock_path) as m_detect_os:
1033+ m_detect_os.return_value = True
1034+ found = ds_os.get_data()
1035 self.assertFalse(found)
1036 self.assertIsNone(ds_os.version)
1037
1038@@ -438,4 +449,89 @@ class TestVendorDataLoading(test_helpers.TestCase):
1039 data = {'foo': 'bar', 'cloud-init': ['VD_1', 'VD_2']}
1040 self.assertEqual(self.cvj(data), data['cloud-init'])
1041
1042+
1043+@test_helpers.mock.patch(MOCK_PATH + 'util.is_x86')
1044+class TestDetectOpenStack(test_helpers.CiTestCase):
1045+
1046+ def test_detect_openstack_non_intel_x86(self, m_is_x86):
1047+ """Return True on non-intel platforms because dmi isn't conclusive."""
1048+ m_is_x86.return_value = False
1049+ self.assertTrue(
1050+ ds.detect_openstack(), 'Expected detect_openstack == True')
1051+
1052+ @test_helpers.mock.patch(MOCK_PATH + 'util.get_proc_env')
1053+ @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data')
1054+ def test_not_detect_openstack_intel_x86_ec2(self, m_dmi, m_proc_env,
1055+ m_is_x86):
1056+ """Return False on EC2 platforms."""
1057+ m_is_x86.return_value = True
1058+ # No product_name in proc/1/environ
1059+ m_proc_env.return_value = {'HOME': '/'}
1060+
1061+ def fake_dmi_read(dmi_key):
1062+ if dmi_key == 'system-product-name':
1063+ return 'HVM domU' # Nothing 'openstackish' on EC2
1064+ if dmi_key == 'chassis-asset-tag':
1065+ return '' # Empty string on EC2
1066+ assert False, 'Unexpected dmi read of %s' % dmi_key
1067+
1068+ m_dmi.side_effect = fake_dmi_read
1069+ self.assertFalse(
1070+ ds.detect_openstack(), 'Expected detect_openstack == False on EC2')
1071+ m_proc_env.assert_called_with(1)
1072+
1073+ @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data')
1074+ def test_detect_openstack_intel_product_name_compute(self, m_dmi,
1075+ m_is_x86):
1076+ """Return True on OpenStack compute and nova instances."""
1077+ m_is_x86.return_value = True
1078+ openstack_product_names = ['OpenStack Nova', 'OpenStack Compute']
1079+
1080+ for product_name in openstack_product_names:
1081+ m_dmi.return_value = product_name
1082+ self.assertTrue(
1083+ ds.detect_openstack(), 'Failed to detect_openstack')
1084+
1085+ @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data')
1086+ def test_detect_openstack_opentelekomcloud_chassis_asset_tag(self, m_dmi,
1087+ m_is_x86):
1088+ """Return True on OpenStack reporting OpenTelekomCloud asset-tag."""
1089+ m_is_x86.return_value = True
1090+
1091+ def fake_dmi_read(dmi_key):
1092+ if dmi_key == 'system-product-name':
1093+ return 'HVM domU' # Nothing 'openstackish' on OpenTelekomCloud
1094+ if dmi_key == 'chassis-asset-tag':
1095+ return 'OpenTelekomCloud'
1096+ assert False, 'Unexpected dmi read of %s' % dmi_key
1097+
1098+ m_dmi.side_effect = fake_dmi_read
1099+ self.assertTrue(
1100+ ds.detect_openstack(),
1101+ 'Expected detect_openstack == True on OpenTelekomCloud')
1102+
1103+ @test_helpers.mock.patch(MOCK_PATH + 'util.get_proc_env')
1104+ @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data')
1105+ def test_detect_openstack_by_proc_1_environ(self, m_dmi, m_proc_env,
1106+ m_is_x86):
1107+ """Return True when nova product_name specified in /proc/1/environ."""
1108+ m_is_x86.return_value = True
1109+ # Nova product_name in proc/1/environ
1110+ m_proc_env.return_value = {
1111+ 'HOME': '/', 'product_name': 'OpenStack Nova'}
1112+
1113+ def fake_dmi_read(dmi_key):
1114+ if dmi_key == 'system-product-name':
1115+ return 'HVM domU' # Nothing 'openstackish'
1116+ if dmi_key == 'chassis-asset-tag':
1117+ return '' # Nothing 'openstackish'
1118+ assert False, 'Unexpected dmi read of %s' % dmi_key
1119+
1120+ m_dmi.side_effect = fake_dmi_read
1121+ self.assertTrue(
1122+ ds.detect_openstack(),
1123+ 'Expected detect_openstack == True when nova product_name in /proc/1/environ')
1124+ m_proc_env.assert_called_with(1)
1125+
1126+
1127 # vi: ts=4 expandtab
1128diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/test_handler/test_handler_lxd.py
1129index a205498..4dd7e09 100644
1130--- a/tests/unittests/test_handler/test_handler_lxd.py
1131+++ b/tests/unittests/test_handler/test_handler_lxd.py
1132@@ -33,12 +33,16 @@ class TestLxd(t_help.CiTestCase):
1133 cc = cloud.Cloud(ds, paths, {}, d, None)
1134 return cc
1135
1136+ @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
1137 @mock.patch("cloudinit.config.cc_lxd.util")
1138- def test_lxd_init(self, mock_util):
1139+ def test_lxd_init(self, mock_util, m_maybe_clean):
1140 cc = self._get_cloud('ubuntu')
1141 mock_util.which.return_value = True
1142+ m_maybe_clean.return_value = None
1143 cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, [])
1144 self.assertTrue(mock_util.which.called)
1145+ # no bridge config, so maybe_cleanup should not be called.
1146+ self.assertFalse(m_maybe_clean.called)
1147 init_call = mock_util.subp.call_args_list[0][0][0]
1148 self.assertEqual(init_call,
1149 ['lxd', 'init', '--auto',
1150@@ -46,32 +50,39 @@ class TestLxd(t_help.CiTestCase):
1151 '--storage-backend=zfs',
1152 '--storage-pool=poolname'])
1153
1154+ @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
1155 @mock.patch("cloudinit.config.cc_lxd.util")
1156- def test_lxd_install(self, mock_util):
1157+ def test_lxd_install(self, mock_util, m_maybe_clean):
1158 cc = self._get_cloud('ubuntu')
1159 cc.distro = mock.MagicMock()
1160 mock_util.which.return_value = None
1161 cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, [])
1162 self.assertNotIn('WARN', self.logs.getvalue())
1163 self.assertTrue(cc.distro.install_packages.called)
1164+ cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, [])
1165+ self.assertFalse(m_maybe_clean.called)
1166 install_pkg = cc.distro.install_packages.call_args_list[0][0][0]
1167 self.assertEqual(sorted(install_pkg), ['lxd', 'zfs'])
1168
1169+ @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
1170 @mock.patch("cloudinit.config.cc_lxd.util")
1171- def test_no_init_does_nothing(self, mock_util):
1172+ def test_no_init_does_nothing(self, mock_util, m_maybe_clean):
1173 cc = self._get_cloud('ubuntu')
1174 cc.distro = mock.MagicMock()
1175 cc_lxd.handle('cc_lxd', {'lxd': {}}, cc, self.logger, [])
1176 self.assertFalse(cc.distro.install_packages.called)
1177 self.assertFalse(mock_util.subp.called)
1178+ self.assertFalse(m_maybe_clean.called)
1179
1180+ @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
1181 @mock.patch("cloudinit.config.cc_lxd.util")
1182- def test_no_lxd_does_nothing(self, mock_util):
1183+ def test_no_lxd_does_nothing(self, mock_util, m_maybe_clean):
1184 cc = self._get_cloud('ubuntu')
1185 cc.distro = mock.MagicMock()
1186 cc_lxd.handle('cc_lxd', {'package_update': True}, cc, self.logger, [])
1187 self.assertFalse(cc.distro.install_packages.called)
1188 self.assertFalse(mock_util.subp.called)
1189+ self.assertFalse(m_maybe_clean.called)
1190
1191 def test_lxd_debconf_new_full(self):
1192 data = {"mode": "new",
1193@@ -147,14 +158,13 @@ class TestLxd(t_help.CiTestCase):
1194 "domain": "lxd"}
1195 self.assertEqual(
1196 cc_lxd.bridge_to_cmd(data),
1197- (["lxc", "network", "create", "testbr0",
1198+ (["network", "create", "testbr0",
1199 "ipv4.address=10.0.8.1/24", "ipv4.nat=true",
1200 "ipv4.dhcp.ranges=10.0.8.2-10.0.8.254",
1201 "ipv6.address=fd98:9e0:3744::1/64",
1202- "ipv6.nat=true", "dns.domain=lxd",
1203- "--force-local"],
1204- ["lxc", "network", "attach-profile",
1205- "testbr0", "default", "eth0", "--force-local"]))
1206+ "ipv6.nat=true", "dns.domain=lxd"],
1207+ ["network", "attach-profile",
1208+ "testbr0", "default", "eth0"]))
1209
1210 def test_lxd_cmd_new_partial(self):
1211 data = {"mode": "new",
1212@@ -163,19 +173,18 @@ class TestLxd(t_help.CiTestCase):
1213 "ipv6_nat": "true"}
1214 self.assertEqual(
1215 cc_lxd.bridge_to_cmd(data),
1216- (["lxc", "network", "create", "lxdbr0", "ipv4.address=none",
1217- "ipv6.address=fd98:9e0:3744::1/64", "ipv6.nat=true",
1218- "--force-local"],
1219- ["lxc", "network", "attach-profile",
1220- "lxdbr0", "default", "eth0", "--force-local"]))
1221+ (["network", "create", "lxdbr0", "ipv4.address=none",
1222+ "ipv6.address=fd98:9e0:3744::1/64", "ipv6.nat=true"],
1223+ ["network", "attach-profile",
1224+ "lxdbr0", "default", "eth0"]))
1225
1226 def test_lxd_cmd_existing(self):
1227 data = {"mode": "existing",
1228 "name": "testbr0"}
1229 self.assertEqual(
1230 cc_lxd.bridge_to_cmd(data),
1231- (None, ["lxc", "network", "attach-profile",
1232- "testbr0", "default", "eth0", "--force-local"]))
1233+ (None, ["network", "attach-profile",
1234+ "testbr0", "default", "eth0"]))
1235
1236 def test_lxd_cmd_none(self):
1237 data = {"mode": "none"}
1238@@ -183,4 +192,43 @@ class TestLxd(t_help.CiTestCase):
1239 cc_lxd.bridge_to_cmd(data),
1240 (None, None))
1241
1242+
1243+class TestLxdMaybeCleanupDefault(t_help.CiTestCase):
1244+ """Test the implementation of maybe_cleanup_default."""
1245+
1246+ defnet = cc_lxd._DEFAULT_NETWORK_NAME
1247+
1248+ @mock.patch("cloudinit.config.cc_lxd._lxc")
1249+ def test_network_other_than_default_not_deleted(self, m_lxc):
1250+ """deletion or removal should only occur if bridge is default."""
1251+ cc_lxd.maybe_cleanup_default(
1252+ net_name="lxdbr1", did_init=True, create=True, attach=True)
1253+ m_lxc.assert_not_called()
1254+
1255+ @mock.patch("cloudinit.config.cc_lxd._lxc")
1256+ def test_did_init_false_does_not_delete(self, m_lxc):
1257+ """deletion or removal should only occur if did_init is True."""
1258+ cc_lxd.maybe_cleanup_default(
1259+ net_name=self.defnet, did_init=False, create=True, attach=True)
1260+ m_lxc.assert_not_called()
1261+
1262+ @mock.patch("cloudinit.config.cc_lxd._lxc")
1263+ def test_network_deleted_if_create_true(self, m_lxc):
1264+ """deletion of network should occur if create is True."""
1265+ cc_lxd.maybe_cleanup_default(
1266+ net_name=self.defnet, did_init=True, create=True, attach=False)
1267+ m_lxc.assert_called_once_with(["network", "delete", self.defnet])
1268+
1269+ @mock.patch("cloudinit.config.cc_lxd._lxc")
1270+ def test_device_removed_if_attach_true(self, m_lxc):
1271+ """removal of the nic device should occur if attach is True."""
1272+ nic_name = "my_nic"
1273+ profile = "my_profile"
1274+ cc_lxd.maybe_cleanup_default(
1275+ net_name=self.defnet, did_init=True, create=False, attach=True,
1276+ profile=profile, nic_name=nic_name)
1277+ m_lxc.assert_called_once_with(
1278+ ["profile", "device", "remove", profile, nic_name])
1279+
1280+
1281 # vi: ts=4 expandtab
1282diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
1283index e13ca3c..5ab61cf 100644
1284--- a/tests/unittests/test_net.py
1285+++ b/tests/unittests/test_net.py
1286@@ -525,6 +525,7 @@ NETWORK_CONFIGS = {
1287 config:
1288 - type: 'physical'
1289 name: 'iface0'
1290+ mtu: 8999
1291 subnets:
1292 - type: static
1293 address: 192.168.14.2/24
1294@@ -660,8 +661,8 @@ iface eth0.101 inet static
1295 dns-nameservers 192.168.0.10 10.23.23.134
1296 dns-search barley.maas sacchromyces.maas brettanomyces.maas
1297 gateway 192.168.0.1
1298- hwaddress aa:bb:cc:dd:ee:11
1299 mtu 1500
1300+ hwaddress aa:bb:cc:dd:ee:11
1301 vlan-raw-device eth0
1302 vlan_id 101
1303
1304@@ -757,6 +758,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
1305 id: 101
1306 link: eth0
1307 macaddress: aa:bb:cc:dd:ee:11
1308+ mtu: 1500
1309 nameservers:
1310 addresses:
1311 - 192.168.0.10
1312@@ -920,6 +922,8 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
1313 mtu: 1500
1314 subnets:
1315 - type: static
1316+ # When 'mtu' matches device-level mtu, no warnings
1317+ mtu: 1500
1318 address: 192.168.0.2/24
1319 gateway: 192.168.0.1
1320 dns_nameservers:
1321@@ -1028,6 +1032,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
1322 - type: bond
1323 name: bond0
1324 mac_address: "aa:bb:cc:dd:e8:ff"
1325+ mtu: 9000
1326 bond_interfaces:
1327 - bond0s0
1328 - bond0s1
1329@@ -1070,6 +1075,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
1330 interfaces:
1331 - bond0s0
1332 - bond0s1
1333+ mtu: 9000
1334 parameters:
1335 mii-monitor-interval: 100
1336 mode: active-backup
1337@@ -1157,6 +1163,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
1338 IPADDR1=192.168.1.2
1339 IPV6ADDR=2001:1::1/92
1340 IPV6INIT=yes
1341+ MTU=9000
1342 NETMASK=255.255.255.0
1343 NETMASK1=255.255.255.0
1344 NM_CONTROLLED=no
1345@@ -1203,6 +1210,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
1346 name: en0
1347 mac_address: "aa:bb:cc:dd:e8:00"
1348 - type: vlan
1349+ mtu: 2222
1350 name: en0.99
1351 vlan_link: en0
1352 vlan_id: 99
1353@@ -1238,6 +1246,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
1354 IPV6ADDR=2001:1::bbbb/96
1355 IPV6INIT=yes
1356 IPV6_DEFAULTGW=2001:1::1
1357+ MTU=2222
1358 NETMASK=255.255.255.0
1359 NETMASK1=255.255.255.0
1360 NM_CONTROLLED=no
1361@@ -1669,6 +1678,8 @@ iface eth1 inet dhcp
1362
1363 class TestSysConfigRendering(CiTestCase):
1364
1365+ with_logs = True
1366+
1367 scripts_dir = '/etc/sysconfig/network-scripts'
1368 header = ('# Created by cloud-init on instance boot automatically, '
1369 'do not edit.\n#\n')
1370@@ -1917,6 +1928,9 @@ USERCTL=no
1371 found = self._render_and_read(network_config=yaml.load(entry['yaml']))
1372 self._compare_files_to_expected(entry['expected_sysconfig'], found)
1373 self._assert_headers(found)
1374+ self.assertNotIn(
1375+ 'WARNING: Network config: ignoring eth0.101 device-level mtu',
1376+ self.logs.getvalue())
1377
1378 def test_small_config(self):
1379 entry = NETWORK_CONFIGS['small']
1380@@ -1929,6 +1943,10 @@ USERCTL=no
1381 found = self._render_and_read(network_config=yaml.load(entry['yaml']))
1382 self._compare_files_to_expected(entry['expected_sysconfig'], found)
1383 self._assert_headers(found)
1384+ expected_msg = (
1385+ 'WARNING: Network config: ignoring iface0 device-level mtu:8999'
1386+ ' because ipv4 subnet-level mtu:9000 provided.')
1387+ self.assertIn(expected_msg, self.logs.getvalue())
1388
1389 def test_dhcpv6_only_config(self):
1390 entry = NETWORK_CONFIGS['dhcpv6_only']
1391@@ -2410,6 +2428,7 @@ class TestNetplanRoundTrip(CiTestCase):
1392
1393
1394 class TestEniRoundTrip(CiTestCase):
1395+
1396 def _render_and_read(self, network_config=None, state=None, eni_path=None,
1397 netrules_path=None, dir=None):
1398 if dir is None:
1399diff --git a/tests/unittests/test_runs/test_simple_run.py b/tests/unittests/test_runs/test_simple_run.py
1400index 762974e..d67c422 100644
1401--- a/tests/unittests/test_runs/test_simple_run.py
1402+++ b/tests/unittests/test_runs/test_simple_run.py
1403@@ -1,5 +1,6 @@
1404 # This file is part of cloud-init. See LICENSE file for license information.
1405
1406+import copy
1407 import os
1408
1409
1410@@ -127,8 +128,9 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
1411 """run_section forced skipped modules by using unverified_modules."""
1412
1413 # re-write cloud.cfg with unverified_modules override
1414- self.cfg['unverified_modules'] = ['spacewalk'] # Would have skipped
1415- cloud_cfg = util.yaml_dumps(self.cfg)
1416+ cfg = copy.deepcopy(self.cfg)
1417+ cfg['unverified_modules'] = ['spacewalk'] # Would have skipped
1418+ cloud_cfg = util.yaml_dumps(cfg)
1419 util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud'))
1420 util.write_file(os.path.join(self.new_root, 'etc',
1421 'cloud', 'cloud.cfg'), cloud_cfg)
1422@@ -150,4 +152,30 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
1423 "running unverified_modules: 'spacewalk'",
1424 self.logs.getvalue())
1425
1426+ def test_none_ds_run_with_no_config_modules(self):
1427+ """run_section will report no modules run when none are configured."""
1428+
1429+ # re-write cloud.cfg with unverified_modules override
1430+ cfg = copy.deepcopy(self.cfg)
1431+ # Represent empty configuration in /etc/cloud/cloud.cfg
1432+ cfg['cloud_init_modules'] = None
1433+ cloud_cfg = util.yaml_dumps(cfg)
1434+ util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud'))
1435+ util.write_file(os.path.join(self.new_root, 'etc',
1436+ 'cloud', 'cloud.cfg'), cloud_cfg)
1437+
1438+ initer = stages.Init()
1439+ initer.read_cfg()
1440+ initer.initialize()
1441+ initer.fetch()
1442+ initer.instancify()
1443+ initer.update()
1444+ initer.cloudify().run('consume_data', initer.consume_data,
1445+ args=[PER_INSTANCE], freq=PER_INSTANCE)
1446+
1447+ mods = stages.Modules(initer)
1448+ (which_ran, failures) = mods.run_section('cloud_init_modules')
1449+ self.assertTrue(len(failures) == 0)
1450+ self.assertEqual([], which_ran)
1451+
1452 # vi: ts=4 expandtab
1453diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
1454index d774f3d..7a203ce 100644
1455--- a/tests/unittests/test_util.py
1456+++ b/tests/unittests/test_util.py
1457@@ -468,6 +468,29 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase):
1458 self.assertIsNone(ret)
1459
1460
1461+class TestIsX86(helpers.CiTestCase):
1462+
1463+ def test_is_x86_matches_x86_types(self):
1464+ """is_x86 returns True if CPU architecture matches."""
1465+ matched_arches = ['x86_64', 'i386', 'i586', 'i686']
1466+ for arch in matched_arches:
1467+ self.assertTrue(
1468+ util.is_x86(arch), 'Expected is_x86 for arch "%s"' % arch)
1469+
1470+ def test_is_x86_unmatched_types(self):
1471+ """is_x86 returns False on non-intel x86 architectures."""
1472+ unmatched_arches = ['ia64', '9000/800', 'arm64v71']
1473+ for arch in unmatched_arches:
1474+ self.assertFalse(
1475+ util.is_x86(arch), 'Expected not is_x86 for arch "%s"' % arch)
1476+
1477+ @mock.patch('cloudinit.util.os.uname')
1478+ def test_is_x86_calls_uname_for_architecture(self, m_uname):
1479+ """is_x86 returns True if platform from uname matches."""
1480+ m_uname.return_value = [0, 1, 2, 3, 'x86_64']
1481+ self.assertTrue(util.is_x86())
1482+
1483+
1484 class TestReadDMIData(helpers.FilesystemMockingTestCase):
1485
1486 def setUp(self):
1487@@ -829,6 +852,14 @@ class TestSubp(helpers.CiTestCase):
1488 r'Missing #! in script\?',
1489 util.subp, (noshebang,))
1490
1491+ def test_subp_combined_stderr_stdout(self):
1492+ """Providing combine_capture as True redirects stderr to stdout."""
1493+ data = b'hello world'
1494+ (out, err) = util.subp(self.stdin2err, capture=True,
1495+ combine_capture=True, decode=False, data=data)
1496+ self.assertEqual(b'', err)
1497+ self.assertEqual(data, out)
1498+
1499 def test_returns_none_if_no_capture(self):
1500 (out, err) = util.subp(self.stdin2out, data=b'', capture=False)
1501 self.assertIsNone(err)
1502@@ -1081,4 +1112,60 @@ class TestLoadShellContent(helpers.TestCase):
1503 ''])))
1504
1505
1506+class TestGetProcEnv(helpers.TestCase):
1507+ """test get_proc_env."""
1508+ null = b'\x00'
1509+ simple1 = b'HOME=/'
1510+ simple2 = b'PATH=/bin:/sbin'
1511+ bootflag = b'BOOTABLE_FLAG=\x80' # from LP: #1775371
1512+ mixed = b'MIXED=' + b'ab\xccde'
1513+
1514+ def _val_decoded(self, blob, encoding='utf-8', errors='replace'):
1515+ # return the value portion of key=val decoded.
1516+ return blob.split(b'=', 1)[1].decode(encoding, errors)
1517+
1518+ @mock.patch("cloudinit.util.load_file")
1519+ def test_non_utf8_in_environment(self, m_load_file):
1520+ """env may have non utf-8 decodable content."""
1521+ content = self.null.join(
1522+ (self.bootflag, self.simple1, self.simple2, self.mixed))
1523+ m_load_file.return_value = content
1524+
1525+ self.assertEqual(
1526+ {'BOOTABLE_FLAG': self._val_decoded(self.bootflag),
1527+ 'HOME': '/', 'PATH': '/bin:/sbin',
1528+ 'MIXED': self._val_decoded(self.mixed)},
1529+ util.get_proc_env(1))
1530+ self.assertEqual(1, m_load_file.call_count)
1531+
1532+ @mock.patch("cloudinit.util.load_file")
1533+ def test_encoding_none_returns_bytes(self, m_load_file):
1534+ """encoding none returns bytes."""
1535+ lines = (self.bootflag, self.simple1, self.simple2, self.mixed)
1536+ content = self.null.join(lines)
1537+ m_load_file.return_value = content
1538+
1539+ self.assertEqual(
1540+ dict([t.split(b'=') for t in lines]),
1541+ util.get_proc_env(1, encoding=None))
1542+ self.assertEqual(1, m_load_file.call_count)
1543+
1544+ @mock.patch("cloudinit.util.load_file")
1545+ def test_all_utf8_encoded(self, m_load_file):
1546+ """common path where only utf-8 decodable content."""
1547+ content = self.null.join((self.simple1, self.simple2))
1548+ m_load_file.return_value = content
1549+ self.assertEqual(
1550+ {'HOME': '/', 'PATH': '/bin:/sbin'},
1551+ util.get_proc_env(1))
1552+ self.assertEqual(1, m_load_file.call_count)
1553+
1554+ @mock.patch("cloudinit.util.load_file")
1555+ def test_non_existing_file_returns_empty_dict(self, m_load_file):
1556+ """as implemented, a non-existing pid returns empty dict.
1557+ This is how it was originally implemented."""
1558+ m_load_file.side_effect = OSError("File does not exist.")
1559+ self.assertEqual({}, util.get_proc_env(1))
1560+ self.assertEqual(1, m_load_file.call_count)
1561+
1562 # vi: ts=4 expandtab

Subscribers

People subscribed via source and target branches