Merge ~chad.smith/cloud-init:ubuntu/artful into cloud-init:ubuntu/artful

Proposed by Chad Smith
Status: Merged
Merged at revision: 87041591ec51e779429f16454f5b406214bc3059
Proposed branch: ~chad.smith/cloud-init:ubuntu/artful
Merge into: cloud-init:ubuntu/artful
Diff against target: 1095 lines (+657/-91)
20 files modified
cloudinit/config/cc_bootcmd.py (+7/-1)
cloudinit/config/cc_runcmd.py (+5/-0)
cloudinit/config/cc_write_files.py (+6/-1)
cloudinit/event.py (+17/-0)
cloudinit/gpg.py (+42/-10)
cloudinit/sources/__init__.py (+77/-1)
cloudinit/sources/tests/test_init.py (+82/-1)
cloudinit/stages.py (+10/-4)
cloudinit/tests/test_gpg.py (+54/-0)
cloudinit/tests/test_stages.py (+231/-0)
cloudinit/tests/test_util.py (+68/-1)
cloudinit/util.py (+18/-10)
debian/changelog (+17/-0)
dev/null (+0/-49)
doc/examples/cloud-config-run-cmds.txt (+4/-1)
doc/examples/cloud-config.txt (+4/-1)
doc/rtd/topics/format.rst (+1/-1)
integration-requirements.txt (+1/-1)
tests/unittests/test_datasource/test_azure_helper.py (+3/-1)
tools/run-container (+10/-8)
Reviewer Review Type Date Requested Status
Server Team CI bot continuous-integration Approve
Scott Moser Pending
Review via email: mp+349222@code.launchpad.net

Commit message

Merge new-upstream-snapshot to get SRU bug fix for LP: #1780481

To post a comment you must log in.
Revision history for this message
Server Team CI bot (server-team-bot) wrote :

PASSED: Continuous integration, rev:87041591ec51e779429f16454f5b406214bc3059
https://jenkins.ubuntu.com/server/job/cloud-init-ci/152/
Executed test runs:
    SUCCESS: Checkout
    SUCCESS: Unit & Style Tests
    SUCCESS: Ubuntu LTS: Build
    SUCCESS: Ubuntu LTS: Integration
    SUCCESS: MAAS Compatibility Testing
    IN_PROGRESS: Declarative: Post Actions

Click here to trigger a rebuild:
https://jenkins.ubuntu.com/server/job/cloud-init-ci/152/rebuild

review: Approve (continuous-integration)

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
index db64f0a..6813f53 100644
--- a/cloudinit/config/cc_bootcmd.py
+++ b/cloudinit/config/cc_bootcmd.py
@@ -42,7 +42,13 @@ schema = {
4242
43 .. note::43 .. note::
44 bootcmd should only be used for things that could not be done later44 bootcmd should only be used for things that could not be done later
45 in the boot process."""),45 in the boot process.
46
47 .. note::
48
49 when writing files, do not use /tmp dir as it races with
50 systemd-tmpfiles-clean LP: #1707222. Use /run/somedir instead.
51 """),
46 'distros': distros,52 'distros': distros,
47 'examples': [dedent("""\53 'examples': [dedent("""\
48 bootcmd:54 bootcmd:
diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py
index b6f6c80..1f75d6c 100644
--- a/cloudinit/config/cc_runcmd.py
+++ b/cloudinit/config/cc_runcmd.py
@@ -42,6 +42,11 @@ schema = {
4242
43 all commands must be proper yaml, so you have to quote any characters43 all commands must be proper yaml, so you have to quote any characters
44 yaml would eat (':' can be problematic)44 yaml would eat (':' can be problematic)
45
46 .. note::
47
48 when writing files, do not use /tmp dir as it races with
49 systemd-tmpfiles-clean LP: #1707222. Use /run/somedir instead.
45 """),50 """),
46 'distros': distros,51 'distros': distros,
47 'examples': [dedent("""\52 'examples': [dedent("""\
diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py
index 54ae3a6..31d1db6 100644
--- a/cloudinit/config/cc_write_files.py
+++ b/cloudinit/config/cc_write_files.py
@@ -15,9 +15,14 @@ binary gzip data can be specified and will be decoded before being written.
1515
16.. note::16.. note::
17 if multiline data is provided, care should be taken to ensure that it17 if multiline data is provided, care should be taken to ensure that it
18 follows yaml formatting standargs. to specify binary data, use the yaml18 follows yaml formatting standards. to specify binary data, use the yaml
19 option ``!!binary``19 option ``!!binary``
2020
21.. note::
22 Do not write files under /tmp during boot because of a race with
23 systemd-tmpfiles-clean that can cause temp files to get cleaned during
24 the early boot process. Use /run/somedir instead to avoid race LP:1707222.
25
21**Internal name:** ``cc_write_files``26**Internal name:** ``cc_write_files``
2227
23**Module frequency:** per instance28**Module frequency:** per instance
diff --git a/cloudinit/event.py b/cloudinit/event.py
24new file mode 10064429new file mode 100644
index 0000000..f7b311f
--- /dev/null
+++ b/cloudinit/event.py
@@ -0,0 +1,17 @@
1# This file is part of cloud-init. See LICENSE file for license information.
2
3"""Classes and functions related to event handling."""
4
5
6# Event types which can generate maintenance requests for cloud-init.
7class EventType(object):
8 BOOT = "System boot"
9 BOOT_NEW_INSTANCE = "New instance first boot"
10
11 # TODO: Cloud-init will grow support for the follow event types:
12 # UDEV
13 # METADATA_CHANGE
14 # USER_REQUEST
15
16
17# vi: ts=4 expandtab
diff --git a/cloudinit/gpg.py b/cloudinit/gpg.py
index d58d73e..7fe17a2 100644
--- a/cloudinit/gpg.py
+++ b/cloudinit/gpg.py
@@ -10,6 +10,8 @@
10from cloudinit import log as logging10from cloudinit import log as logging
11from cloudinit import util11from cloudinit import util
1212
13import time
14
13LOG = logging.getLogger(__name__)15LOG = logging.getLogger(__name__)
1416
1517
@@ -25,16 +27,46 @@ def export_armour(key):
25 return armour27 return armour
2628
2729
28def recv_key(key, keyserver):30def recv_key(key, keyserver, retries=(1, 1)):
29 """Receive gpg key from the specified keyserver"""31 """Receive gpg key from the specified keyserver.
30 LOG.debug('Receive gpg key "%s"', key)32
31 try:33 Retries are done by default because keyservers can be unreliable.
32 util.subp(["gpg", "--keyserver", keyserver, "--recv", key],34 Additionally, there is no way to determine the difference between
33 capture=True)35 a non-existant key and a failure. In both cases gpg (at least 2.2.4)
34 except util.ProcessExecutionError as error:36 exits with status 2 and stderr: "keyserver receive failed: No data"
35 raise ValueError(('Failed to import key "%s" '37 It is assumed that a key provided to cloud-init exists on the keyserver
36 'from server "%s" - error %s') %38 so re-trying makes better sense than failing.
37 (key, keyserver, error))39
40 @param key: a string key fingerprint (as passed to gpg --recv-keys).
41 @param keyserver: the keyserver to request keys from.
42 @param retries: an iterable of sleep lengths for retries.
43 Use None to indicate no retries."""
44 LOG.debug("Importing key '%s' from keyserver '%s'", key, keyserver)
45 cmd = ["gpg", "--keyserver=%s" % keyserver, "--recv-keys", key]
46 if retries is None:
47 retries = []
48 trynum = 0
49 error = None
50 sleeps = iter(retries)
51 while True:
52 trynum += 1
53 try:
54 util.subp(cmd, capture=True)
55 LOG.debug("Imported key '%s' from keyserver '%s' on try %d",
56 key, keyserver, trynum)
57 return
58 except util.ProcessExecutionError as e:
59 error = e
60 try:
61 naplen = next(sleeps)
62 LOG.debug(
63 "Import failed with exit code %d, will try again in %ss",
64 error.exit_code, naplen)
65 time.sleep(naplen)
66 except StopIteration:
67 raise ValueError(
68 ("Failed to import key '%s' from keyserver '%s' "
69 "after %d tries: %s") % (key, keyserver, trynum, error))
3870
3971
40def delete_key(key):72def delete_key(key):
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 90d7457..f424316 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -19,6 +19,7 @@ from cloudinit.atomic_helper import write_json
19from cloudinit import importer19from cloudinit import importer
20from cloudinit import log as logging20from cloudinit import log as logging
21from cloudinit import net21from cloudinit import net
22from cloudinit.event import EventType
22from cloudinit import type_utils23from cloudinit import type_utils
23from cloudinit import user_data as ud24from cloudinit import user_data as ud
24from cloudinit import util25from cloudinit import util
@@ -102,6 +103,25 @@ class DataSource(object):
102 url_timeout = 10 # timeout for each metadata url read attempt103 url_timeout = 10 # timeout for each metadata url read attempt
103 url_retries = 5 # number of times to retry url upon 404104 url_retries = 5 # number of times to retry url upon 404
104105
106 # The datasource defines a list of supported EventTypes during which
107 # the datasource can react to changes in metadata and regenerate
108 # network configuration on metadata changes.
109 # A datasource which supports writing network config on each system boot
110 # would set update_events = {'network': [EventType.BOOT]}
111
112 # Default: generate network config on new instance id (first boot).
113 update_events = {'network': [EventType.BOOT_NEW_INSTANCE]}
114
115 # N-tuple listing default values for any metadata-related class
116 # attributes cached on an instance by a process_data runs. These attribute
117 # values are reset via clear_cached_attrs during any update_metadata call.
118 cached_attr_defaults = (
119 ('ec2_metadata', UNSET), ('network_json', UNSET),
120 ('metadata', {}), ('userdata', None), ('userdata_raw', None),
121 ('vendordata', None), ('vendordata_raw', None))
122
123 _dirty_cache = False
124
105 def __init__(self, sys_cfg, distro, paths, ud_proc=None):125 def __init__(self, sys_cfg, distro, paths, ud_proc=None):
106 self.sys_cfg = sys_cfg126 self.sys_cfg = sys_cfg
107 self.distro = distro127 self.distro = distro
@@ -134,11 +154,31 @@ class DataSource(object):
134 'region': self.region,154 'region': self.region,
135 'availability-zone': self.availability_zone}}155 'availability-zone': self.availability_zone}}
136156
157 def clear_cached_attrs(self, attr_defaults=()):
158 """Reset any cached metadata attributes to datasource defaults.
159
160 @param attr_defaults: Optional tuple of (attr, value) pairs to
161 set instead of cached_attr_defaults.
162 """
163 if not self._dirty_cache:
164 return
165 if attr_defaults:
166 attr_values = attr_defaults
167 else:
168 attr_values = self.cached_attr_defaults
169
170 for attribute, value in attr_values:
171 if hasattr(self, attribute):
172 setattr(self, attribute, value)
173 if not attr_defaults:
174 self._dirty_cache = False
175
137 def get_data(self):176 def get_data(self):
138 """Datasources implement _get_data to setup metadata and userdata_raw.177 """Datasources implement _get_data to setup metadata and userdata_raw.
139178
140 Minimally, the datasource should return a boolean True on success.179 Minimally, the datasource should return a boolean True on success.
141 """180 """
181 self._dirty_cache = True
142 return_value = self._get_data()182 return_value = self._get_data()
143 json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE)183 json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE)
144 if not return_value:184 if not return_value:
@@ -174,6 +214,7 @@ class DataSource(object):
174 return return_value214 return return_value
175215
176 def _get_data(self):216 def _get_data(self):
217 """Walk metadata sources, process crawled data and save attributes."""
177 raise NotImplementedError(218 raise NotImplementedError(
178 'Subclasses of DataSource must implement _get_data which'219 'Subclasses of DataSource must implement _get_data which'
179 ' sets self.metadata, vendordata_raw and userdata_raw.')220 ' sets self.metadata, vendordata_raw and userdata_raw.')
@@ -416,6 +457,41 @@ class DataSource(object):
416 def get_package_mirror_info(self):457 def get_package_mirror_info(self):
417 return self.distro.get_package_mirror_info(data_source=self)458 return self.distro.get_package_mirror_info(data_source=self)
418459
460 def update_metadata(self, source_event_types):
461 """Refresh cached metadata if the datasource supports this event.
462
463 The datasource has a list of update_events which
464 trigger refreshing all cached metadata as well as refreshing the
465 network configuration.
466
467 @param source_event_types: List of EventTypes which may trigger a
468 metadata update.
469
470 @return True if the datasource did successfully update cached metadata
471 due to source_event_type.
472 """
473 supported_events = {}
474 for event in source_event_types:
475 for update_scope, update_events in self.update_events.items():
476 if event in update_events:
477 if not supported_events.get(update_scope):
478 supported_events[update_scope] = []
479 supported_events[update_scope].append(event)
480 for scope, matched_events in supported_events.items():
481 LOG.debug(
482 "Update datasource metadata and %s config due to events: %s",
483 scope, ', '.join(matched_events))
484 # Each datasource has a cached config property which needs clearing
485 # Once cleared that config property will be regenerated from
486 # current metadata.
487 self.clear_cached_attrs((('_%s_config' % scope, UNSET),))
488 if supported_events:
489 self.clear_cached_attrs()
490 result = self.get_data()
491 if result:
492 return True
493 return False
494
419 def check_instance_id(self, sys_cfg):495 def check_instance_id(self, sys_cfg):
420 # quickly (local check only) if self.instance_id is still496 # quickly (local check only) if self.instance_id is still
421 return False497 return False
@@ -520,7 +596,7 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
520 with myrep:596 with myrep:
521 LOG.debug("Seeing if we can get any data from %s", cls)597 LOG.debug("Seeing if we can get any data from %s", cls)
522 s = cls(sys_cfg, distro, paths)598 s = cls(sys_cfg, distro, paths)
523 if s.get_data():599 if s.update_metadata([EventType.BOOT_NEW_INSTANCE]):
524 myrep.message = "found %s data from %s" % (mode, name)600 myrep.message = "found %s data from %s" % (mode, name)
525 return (s, type_utils.obj_name(cls))601 return (s, type_utils.obj_name(cls))
526 except Exception:602 except Exception:
diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py
index d5bc98a..dcd221b 100644
--- a/cloudinit/sources/tests/test_init.py
+++ b/cloudinit/sources/tests/test_init.py
@@ -5,10 +5,11 @@ import os
5import six5import six
6import stat6import stat
77
8from cloudinit.event import EventType
8from cloudinit.helpers import Paths9from cloudinit.helpers import Paths
9from cloudinit import importer10from cloudinit import importer
10from cloudinit.sources import (11from cloudinit.sources import (
11 INSTANCE_JSON_FILE, DataSource)12 INSTANCE_JSON_FILE, DataSource, UNSET)
12from cloudinit.tests.helpers import CiTestCase, skipIf, mock13from cloudinit.tests.helpers import CiTestCase, skipIf, mock
13from cloudinit.user_data import UserDataProcessor14from cloudinit.user_data import UserDataProcessor
14from cloudinit import util15from cloudinit import util
@@ -381,3 +382,83 @@ class TestDataSource(CiTestCase):
381 get_args(grandchild.get_hostname), # pylint: disable=W1505382 get_args(grandchild.get_hostname), # pylint: disable=W1505
382 '%s does not implement DataSource.get_hostname params'383 '%s does not implement DataSource.get_hostname params'
383 % grandchild)384 % grandchild)
385
386 def test_clear_cached_attrs_resets_cached_attr_class_attributes(self):
387 """Class attributes listed in cached_attr_defaults are reset."""
388 count = 0
389 # Setup values for all cached class attributes
390 for attr, value in self.datasource.cached_attr_defaults:
391 setattr(self.datasource, attr, count)
392 count += 1
393 self.datasource._dirty_cache = True
394 self.datasource.clear_cached_attrs()
395 for attr, value in self.datasource.cached_attr_defaults:
396 self.assertEqual(value, getattr(self.datasource, attr))
397
398 def test_clear_cached_attrs_noops_on_clean_cache(self):
399 """Class attributes listed in cached_attr_defaults are reset."""
400 count = 0
401 # Setup values for all cached class attributes
402 for attr, _ in self.datasource.cached_attr_defaults:
403 setattr(self.datasource, attr, count)
404 count += 1
405 self.datasource._dirty_cache = False # Fake clean cache
406 self.datasource.clear_cached_attrs()
407 count = 0
408 for attr, _ in self.datasource.cached_attr_defaults:
409 self.assertEqual(count, getattr(self.datasource, attr))
410 count += 1
411
412 def test_clear_cached_attrs_skips_non_attr_class_attributes(self):
413 """Skip any cached_attr_defaults which aren't class attributes."""
414 self.datasource._dirty_cache = True
415 self.datasource.clear_cached_attrs()
416 for attr in ('ec2_metadata', 'network_json'):
417 self.assertFalse(hasattr(self.datasource, attr))
418
419 def test_clear_cached_attrs_of_custom_attrs(self):
420 """Custom attr_values can be passed to clear_cached_attrs."""
421 self.datasource._dirty_cache = True
422 cached_attr_name = self.datasource.cached_attr_defaults[0][0]
423 setattr(self.datasource, cached_attr_name, 'himom')
424 self.datasource.myattr = 'orig'
425 self.datasource.clear_cached_attrs(
426 attr_defaults=(('myattr', 'updated'),))
427 self.assertEqual('himom', getattr(self.datasource, cached_attr_name))
428 self.assertEqual('updated', self.datasource.myattr)
429
430 def test_update_metadata_only_acts_on_supported_update_events(self):
431 """update_metadata won't get_data on unsupported update events."""
432 self.assertEqual(
433 {'network': [EventType.BOOT_NEW_INSTANCE]},
434 self.datasource.update_events)
435
436 def fake_get_data():
437 raise Exception('get_data should not be called')
438
439 self.datasource.get_data = fake_get_data
440 self.assertFalse(
441 self.datasource.update_metadata(
442 source_event_types=[EventType.BOOT]))
443
444 def test_update_metadata_returns_true_on_supported_update_event(self):
445 """update_metadata returns get_data response on supported events."""
446
447 def fake_get_data():
448 return True
449
450 self.datasource.get_data = fake_get_data
451 self.datasource._network_config = 'something'
452 self.datasource._dirty_cache = True
453 self.assertTrue(
454 self.datasource.update_metadata(
455 source_event_types=[
456 EventType.BOOT, EventType.BOOT_NEW_INSTANCE]))
457 self.assertEqual(UNSET, self.datasource._network_config)
458 self.assertIn(
459 "DEBUG: Update datasource metadata and network config due to"
460 " events: New instance first boot",
461 self.logs.getvalue())
462
463
464# vi: ts=4 expandtab
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 286607b..c132b57 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -22,6 +22,8 @@ from cloudinit.handlers import cloud_config as cc_part
22from cloudinit.handlers import shell_script as ss_part22from cloudinit.handlers import shell_script as ss_part
23from cloudinit.handlers import upstart_job as up_part23from cloudinit.handlers import upstart_job as up_part
2424
25from cloudinit.event import EventType
26
25from cloudinit import cloud27from cloudinit import cloud
26from cloudinit import config28from cloudinit import config
27from cloudinit import distros29from cloudinit import distros
@@ -648,10 +650,14 @@ class Init(object):
648 except Exception as e:650 except Exception as e:
649 LOG.warning("Failed to rename devices: %s", e)651 LOG.warning("Failed to rename devices: %s", e)
650652
651 if (self.datasource is not NULL_DATA_SOURCE and653 if self.datasource is not NULL_DATA_SOURCE:
652 not self.is_new_instance()):654 if not self.is_new_instance():
653 LOG.debug("not a new instance. network config is not applied.")655 if not self.datasource.update_metadata([EventType.BOOT]):
654 return656 LOG.debug(
657 "No network config applied. Neither a new instance"
658 " nor datasource network update on '%s' event",
659 EventType.BOOT)
660 return
655661
656 LOG.info("Applying network configuration from %s bringup=%s: %s",662 LOG.info("Applying network configuration from %s bringup=%s: %s",
657 src, bring_up, netcfg)663 src, bring_up, netcfg)
diff --git a/cloudinit/tests/test_gpg.py b/cloudinit/tests/test_gpg.py
658new file mode 100644664new file mode 100644
index 0000000..0562b96
--- /dev/null
+++ b/cloudinit/tests/test_gpg.py
@@ -0,0 +1,54 @@
1# This file is part of cloud-init. See LICENSE file for license information.
2"""Test gpg module."""
3
4from cloudinit import gpg
5from cloudinit import util
6from cloudinit.tests.helpers import CiTestCase
7
8import mock
9
10
11@mock.patch("cloudinit.gpg.time.sleep")
12@mock.patch("cloudinit.gpg.util.subp")
13class TestReceiveKeys(CiTestCase):
14 """Test the recv_key method."""
15
16 def test_retries_on_subp_exc(self, m_subp, m_sleep):
17 """retry should be done on gpg receive keys failure."""
18 retries = (1, 2, 4)
19 my_exc = util.ProcessExecutionError(
20 stdout='', stderr='', exit_code=2, cmd=['mycmd'])
21 m_subp.side_effect = (my_exc, my_exc, ('', ''))
22 gpg.recv_key("ABCD", "keyserver.example.com", retries=retries)
23 self.assertEqual([mock.call(1), mock.call(2)], m_sleep.call_args_list)
24
25 def test_raises_error_after_retries(self, m_subp, m_sleep):
26 """If the final run fails, error should be raised."""
27 naplen = 1
28 keyid, keyserver = ("ABCD", "keyserver.example.com")
29 m_subp.side_effect = util.ProcessExecutionError(
30 stdout='', stderr='', exit_code=2, cmd=['mycmd'])
31 with self.assertRaises(ValueError) as rcm:
32 gpg.recv_key(keyid, keyserver, retries=(naplen,))
33 self.assertIn(keyid, str(rcm.exception))
34 self.assertIn(keyserver, str(rcm.exception))
35 m_sleep.assert_called_with(naplen)
36
37 def test_no_retries_on_none(self, m_subp, m_sleep):
38 """retry should not be done if retries is None."""
39 m_subp.side_effect = util.ProcessExecutionError(
40 stdout='', stderr='', exit_code=2, cmd=['mycmd'])
41 with self.assertRaises(ValueError):
42 gpg.recv_key("ABCD", "keyserver.example.com", retries=None)
43 m_sleep.assert_not_called()
44
45 def test_expected_gpg_command(self, m_subp, m_sleep):
46 """Verify gpg is called with expected args."""
47 key, keyserver = ("DEADBEEF", "keyserver.example.com")
48 retries = (1, 2, 4)
49 m_subp.return_value = ('', '')
50 gpg.recv_key(key, keyserver, retries=retries)
51 m_subp.assert_called_once_with(
52 ['gpg', '--keyserver=%s' % keyserver, '--recv-keys', key],
53 capture=True)
54 m_sleep.assert_not_called()
diff --git a/cloudinit/tests/test_stages.py b/cloudinit/tests/test_stages.py
0new file mode 10064455new file mode 100644
index 0000000..94b6b25
--- /dev/null
+++ b/cloudinit/tests/test_stages.py
@@ -0,0 +1,231 @@
1# This file is part of cloud-init. See LICENSE file for license information.
2
3"""Tests related to cloudinit.stages module."""
4
5import os
6
7from cloudinit import stages
8from cloudinit import sources
9
10from cloudinit.event import EventType
11from cloudinit.util import write_file
12
13from cloudinit.tests.helpers import CiTestCase, mock
14
15TEST_INSTANCE_ID = 'i-testing'
16
17
18class FakeDataSource(sources.DataSource):
19
20 def __init__(self, paths=None, userdata=None, vendordata=None,
21 network_config=''):
22 super(FakeDataSource, self).__init__({}, None, paths=paths)
23 self.metadata = {'instance-id': TEST_INSTANCE_ID}
24 self.userdata_raw = userdata
25 self.vendordata_raw = vendordata
26 self._network_config = None
27 if network_config: # Permit for None value to setup attribute
28 self._network_config = network_config
29
30 @property
31 def network_config(self):
32 return self._network_config
33
34 def _get_data(self):
35 return True
36
37
38class TestInit(CiTestCase):
39 with_logs = True
40
41 def setUp(self):
42 super(TestInit, self).setUp()
43 self.tmpdir = self.tmp_dir()
44 self.init = stages.Init()
45 # Setup fake Paths for Init to reference
46 self.init._cfg = {'system_info': {
47 'distro': 'ubuntu', 'paths': {'cloud_dir': self.tmpdir,
48 'run_dir': self.tmpdir}}}
49 self.init.datasource = FakeDataSource(paths=self.init.paths)
50
51 def test_wb__find_networking_config_disabled(self):
52 """find_networking_config returns no config when disabled."""
53 disable_file = os.path.join(
54 self.init.paths.get_cpath('data'), 'upgraded-network')
55 write_file(disable_file, '')
56 self.assertEqual(
57 (None, disable_file),
58 self.init._find_networking_config())
59
60 @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
61 def test_wb__find_networking_config_disabled_by_kernel(self, m_cmdline):
62 """find_networking_config returns when disabled by kernel cmdline."""
63 m_cmdline.return_value = {'config': 'disabled'}
64 self.assertEqual(
65 (None, 'cmdline'),
66 self.init._find_networking_config())
67 self.assertEqual('DEBUG: network config disabled by cmdline\n',
68 self.logs.getvalue())
69
70 @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
71 def test_wb__find_networking_config_disabled_by_datasrc(self, m_cmdline):
72 """find_networking_config returns when disabled by datasource cfg."""
73 m_cmdline.return_value = {} # Kernel doesn't disable networking
74 self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}},
75 'network': {}} # system config doesn't disable
76
77 self.init.datasource = FakeDataSource(
78 network_config={'config': 'disabled'})
79 self.assertEqual(
80 (None, 'ds'),
81 self.init._find_networking_config())
82 self.assertEqual('DEBUG: network config disabled by ds\n',
83 self.logs.getvalue())
84
85 @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
86 def test_wb__find_networking_config_disabled_by_sysconfig(self, m_cmdline):
87 """find_networking_config returns when disabled by system config."""
88 m_cmdline.return_value = {} # Kernel doesn't disable networking
89 self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}},
90 'network': {'config': 'disabled'}}
91 self.assertEqual(
92 (None, 'system_cfg'),
93 self.init._find_networking_config())
94 self.assertEqual('DEBUG: network config disabled by system_cfg\n',
95 self.logs.getvalue())
96
97 @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
98 def test_wb__find_networking_config_returns_kernel(self, m_cmdline):
99 """find_networking_config returns kernel cmdline config if present."""
100 expected_cfg = {'config': ['fakekernel']}
101 m_cmdline.return_value = expected_cfg
102 self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}},
103 'network': {'config': ['fakesys_config']}}
104 self.init.datasource = FakeDataSource(
105 network_config={'config': ['fakedatasource']})
106 self.assertEqual(
107 (expected_cfg, 'cmdline'),
108 self.init._find_networking_config())
109
110 @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
111 def test_wb__find_networking_config_returns_system_cfg(self, m_cmdline):
112 """find_networking_config returns system config when present."""
113 m_cmdline.return_value = {} # No kernel network config
114 expected_cfg = {'config': ['fakesys_config']}
115 self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}},
116 'network': expected_cfg}
117 self.init.datasource = FakeDataSource(
118 network_config={'config': ['fakedatasource']})
119 self.assertEqual(
120 (expected_cfg, 'system_cfg'),
121 self.init._find_networking_config())
122
123 @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
124 def test_wb__find_networking_config_returns_datasrc_cfg(self, m_cmdline):
125 """find_networking_config returns datasource net config if present."""
126 m_cmdline.return_value = {} # No kernel network config
127 # No system config for network in setUp
128 expected_cfg = {'config': ['fakedatasource']}
129 self.init.datasource = FakeDataSource(network_config=expected_cfg)
130 self.assertEqual(
131 (expected_cfg, 'ds'),
132 self.init._find_networking_config())
133
134 @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
135 def test_wb__find_networking_config_returns_fallback(self, m_cmdline):
136 """find_networking_config returns fallback config if not defined."""
137 m_cmdline.return_value = {} # Kernel doesn't disable networking
138 # Neither datasource nor system_info disable or provide network
139
140 fake_cfg = {'config': [{'type': 'physical', 'name': 'eth9'}],
141 'version': 1}
142
143 def fake_generate_fallback():
144 return fake_cfg
145
146 # Monkey patch distro which gets cached on self.init
147 distro = self.init.distro
148 distro.generate_fallback_config = fake_generate_fallback
149 self.assertEqual(
150 (fake_cfg, 'fallback'),
151 self.init._find_networking_config())
152 self.assertNotIn('network config disabled', self.logs.getvalue())
153
154 def test_apply_network_config_disabled(self):
155 """Log when network is disabled by upgraded-network."""
156 disable_file = os.path.join(
157 self.init.paths.get_cpath('data'), 'upgraded-network')
158
159 def fake_network_config():
160 return (None, disable_file)
161
162 self.init._find_networking_config = fake_network_config
163
164 self.init.apply_network_config(True)
165 self.assertIn(
166 'INFO: network config is disabled by %s' % disable_file,
167 self.logs.getvalue())
168
169 @mock.patch('cloudinit.distros.ubuntu.Distro')
170 def test_apply_network_on_new_instance(self, m_ubuntu):
171 """Call distro apply_network_config methods on is_new_instance."""
172 net_cfg = {
173 'version': 1, 'config': [
174 {'subnets': [{'type': 'dhcp'}], 'type': 'physical',
175 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]}
176
177 def fake_network_config():
178 return net_cfg, 'fallback'
179
180 self.init._find_networking_config = fake_network_config
181 self.init.apply_network_config(True)
182 self.init.distro.apply_network_config_names.assert_called_with(net_cfg)
183 self.init.distro.apply_network_config.assert_called_with(
184 net_cfg, bring_up=True)
185
186 @mock.patch('cloudinit.distros.ubuntu.Distro')
187 def test_apply_network_on_same_instance_id(self, m_ubuntu):
188 """Only call distro.apply_network_config_names on same instance id."""
189 old_instance_id = os.path.join(
190 self.init.paths.get_cpath('data'), 'instance-id')
191 write_file(old_instance_id, TEST_INSTANCE_ID)
192 net_cfg = {
193 'version': 1, 'config': [
194 {'subnets': [{'type': 'dhcp'}], 'type': 'physical',
195 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]}
196
197 def fake_network_config():
198 return net_cfg, 'fallback'
199
200 self.init._find_networking_config = fake_network_config
201 self.init.apply_network_config(True)
202 self.init.distro.apply_network_config_names.assert_called_with(net_cfg)
203 self.init.distro.apply_network_config.assert_not_called()
204 self.assertIn(
205 'No network config applied. Neither a new instance'
206 " nor datasource network update on '%s' event" % EventType.BOOT,
207 self.logs.getvalue())
208
209 @mock.patch('cloudinit.distros.ubuntu.Distro')
210 def test_apply_network_on_datasource_allowed_event(self, m_ubuntu):
211 """Apply network if datasource.update_metadata permits BOOT event."""
212 old_instance_id = os.path.join(
213 self.init.paths.get_cpath('data'), 'instance-id')
214 write_file(old_instance_id, TEST_INSTANCE_ID)
215 net_cfg = {
216 'version': 1, 'config': [
217 {'subnets': [{'type': 'dhcp'}], 'type': 'physical',
218 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]}
219
220 def fake_network_config():
221 return net_cfg, 'fallback'
222
223 self.init._find_networking_config = fake_network_config
224 self.init.datasource = FakeDataSource(paths=self.init.paths)
225 self.init.datasource.update_events = {'network': [EventType.BOOT]}
226 self.init.apply_network_config(True)
227 self.init.distro.apply_network_config_names.assert_called_with(net_cfg)
228 self.init.distro.apply_network_config.assert_called_with(
229 net_cfg, bring_up=True)
230
231# vi: ts=4 expandtab
diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py
index 17853fc..6a31e50 100644
--- a/cloudinit/tests/test_util.py
+++ b/cloudinit/tests/test_util.py
@@ -26,8 +26,51 @@ OS_RELEASE_SLES = dedent("""\
26 CPE_NAME="cpe:/o:suse:sles:12:sp3"\n26 CPE_NAME="cpe:/o:suse:sles:12:sp3"\n
27""")27""")
2828
29OS_RELEASE_OPENSUSE = dedent("""\
30NAME="openSUSE Leap"
31VERSION="42.3"
32ID=opensuse
33ID_LIKE="suse"
34VERSION_ID="42.3"
35PRETTY_NAME="openSUSE Leap 42.3"
36ANSI_COLOR="0;32"
37CPE_NAME="cpe:/o:opensuse:leap:42.3"
38BUG_REPORT_URL="https://bugs.opensuse.org"
39HOME_URL="https://www.opensuse.org/"
40""")
41
42OS_RELEASE_CENTOS = dedent("""\
43 NAME="CentOS Linux"
44 VERSION="7 (Core)"
45 ID="centos"
46 ID_LIKE="rhel fedora"
47 VERSION_ID="7"
48 PRETTY_NAME="CentOS Linux 7 (Core)"
49 ANSI_COLOR="0;31"
50 CPE_NAME="cpe:/o:centos:centos:7"
51 HOME_URL="https://www.centos.org/"
52 BUG_REPORT_URL="https://bugs.centos.org/"
53
54 CENTOS_MANTISBT_PROJECT="CentOS-7"
55 CENTOS_MANTISBT_PROJECT_VERSION="7"
56 REDHAT_SUPPORT_PRODUCT="centos"
57 REDHAT_SUPPORT_PRODUCT_VERSION="7"
58""")
59
60OS_RELEASE_DEBIAN = dedent("""\
61 PRETTY_NAME="Debian GNU/Linux 9 (stretch)"
62 NAME="Debian GNU/Linux"
63 VERSION_ID="9"
64 VERSION="9 (stretch)"
65 ID=debian
66 HOME_URL="https://www.debian.org/"
67 SUPPORT_URL="https://www.debian.org/support"
68 BUG_REPORT_URL="https://bugs.debian.org/"
69""")
70
29OS_RELEASE_UBUNTU = dedent("""\71OS_RELEASE_UBUNTU = dedent("""\
30 NAME="Ubuntu"\n72 NAME="Ubuntu"\n
73 # comment test
31 VERSION="16.04.3 LTS (Xenial Xerus)"\n74 VERSION="16.04.3 LTS (Xenial Xerus)"\n
32 ID=ubuntu\n75 ID=ubuntu\n
33 ID_LIKE=debian\n76 ID_LIKE=debian\n
@@ -310,7 +353,31 @@ class TestGetLinuxDistro(CiTestCase):
310 m_os_release.return_value = OS_RELEASE_UBUNTU353 m_os_release.return_value = OS_RELEASE_UBUNTU
311 m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists354 m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
312 dist = util.get_linux_distro()355 dist = util.get_linux_distro()
313 self.assertEqual(('ubuntu', '16.04', platform.machine()), dist)356 self.assertEqual(('ubuntu', '16.04', 'xenial'), dist)
357
358 @mock.patch('cloudinit.util.load_file')
359 def test_get_linux_centos(self, m_os_release, m_path_exists):
360 """Verify we get the correct name and release name on CentOS."""
361 m_os_release.return_value = OS_RELEASE_CENTOS
362 m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
363 dist = util.get_linux_distro()
364 self.assertEqual(('centos', '7', 'Core'), dist)
365
366 @mock.patch('cloudinit.util.load_file')
367 def test_get_linux_debian(self, m_os_release, m_path_exists):
368 """Verify we get the correct name and release name on Debian."""
369 m_os_release.return_value = OS_RELEASE_DEBIAN
370 m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
371 dist = util.get_linux_distro()
372 self.assertEqual(('debian', '9', 'stretch'), dist)
373
374 @mock.patch('cloudinit.util.load_file')
375 def test_get_linux_opensuse(self, m_os_release, m_path_exists):
376 """Verify we get the correct name and machine arch on OpenSUSE."""
377 m_os_release.return_value = OS_RELEASE_OPENSUSE
378 m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
379 dist = util.get_linux_distro()
380 self.assertEqual(('opensuse', '42.3', platform.machine()), dist)
314381
315 @mock.patch('platform.dist')382 @mock.patch('platform.dist')
316 def test_get_linux_distro_no_data(self, m_platform_dist, m_path_exists):383 def test_get_linux_distro_no_data(self, m_platform_dist, m_path_exists):
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 6da9511..d0b0e90 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -579,16 +579,24 @@ def get_cfg_option_int(yobj, key, default=0):
579def get_linux_distro():579def get_linux_distro():
580 distro_name = ''580 distro_name = ''
581 distro_version = ''581 distro_version = ''
582 flavor = ''
582 if os.path.exists('/etc/os-release'):583 if os.path.exists('/etc/os-release'):
583 os_release = load_file('/etc/os-release')584 os_release = load_shell_content(load_file('/etc/os-release'))
584 for line in os_release.splitlines():585 distro_name = os_release.get('ID', '')
585 if line.strip().startswith('ID='):586 distro_version = os_release.get('VERSION_ID', '')
586 distro_name = line.split('=')[-1]587 if 'sles' in distro_name or 'suse' in distro_name:
587 distro_name = distro_name.replace('"', '')588 # RELEASE_BLOCKER: We will drop this sles divergent behavior in
588 if line.strip().startswith('VERSION_ID='):589 # before 18.4 so that get_linux_distro returns a named tuple
589 # Lets hope for the best that distros stay consistent ;)590 # which will include both version codename and architecture
590 distro_version = line.split('=')[-1]591 # on all distributions.
591 distro_version = distro_version.replace('"', '')592 flavor = platform.machine()
593 else:
594 flavor = os_release.get('VERSION_CODENAME', '')
595 if not flavor:
596 match = re.match(r'[^ ]+ \((?P<codename>[^)]+)\)',
597 os_release.get('VERSION'))
598 if match:
599 flavor = match.groupdict()['codename']
592 else:600 else:
593 dist = ('', '', '')601 dist = ('', '', '')
594 try:602 try:
@@ -606,7 +614,7 @@ def get_linux_distro():
606 'expansion may have unexpected results')614 'expansion may have unexpected results')
607 return dist615 return dist
608616
609 return (distro_name, distro_version, platform.machine())617 return (distro_name, distro_version, flavor)
610618
611619
612def system_info():620def system_info():
diff --git a/debian/changelog b/debian/changelog
index 7951bdb..6d9cf37 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,20 @@
1cloud-init (18.3-9-g2e62cb8a-0ubuntu1~17.10.1) artful-proposed; urgency=medium
2
3 * New upstream snapshot.
4 - docs: note in rtd about avoiding /tmp when writing files
5 - ubuntu,centos,debian: get_linux_distro to align with platform.dist
6 (LP: #1780481)
7 - Fix boothook docs on environment variable name (INSTANCE_I ->
8 INSTANCE_ID) [Marc Tamsky]
9 - update_metadata: a datasource can support network re-config every boot
10 - tests: drop salt-minion integration test
11 - Retry on failed import of gpg receive keys.
12 - tools: Fix run-container when neither source or binary package requested.
13 - docs: Fix a small spelling error. [Oz N Tiram]
14 - tox: use simplestreams from git repository rather than bzr.
15
16 -- Chad Smith <chad.smith@canonical.com> Mon, 09 Jul 2018 15:33:35 -0600
17
1cloud-init (18.3-0ubuntu1~17.10.1) artful-proposed; urgency=medium18cloud-init (18.3-0ubuntu1~17.10.1) artful-proposed; urgency=medium
219
3 * debian/rules: update version.version_string to contain packaged version.20 * debian/rules: update version.version_string to contain packaged version.
diff --git a/doc/examples/cloud-config-run-cmds.txt b/doc/examples/cloud-config-run-cmds.txt
index 3bb0686..002398f 100644
--- a/doc/examples/cloud-config-run-cmds.txt
+++ b/doc/examples/cloud-config-run-cmds.txt
@@ -18,5 +18,8 @@ runcmd:
18 - [ sh, -xc, "echo $(date) ': hello world!'" ]18 - [ sh, -xc, "echo $(date) ': hello world!'" ]
19 - [ sh, -c, echo "=========hello world'=========" ]19 - [ sh, -c, echo "=========hello world'=========" ]
20 - ls -l /root20 - ls -l /root
21 - [ wget, "http://slashdot.org", -O, /tmp/index.html ]21 # Note: Don't write files to /tmp from cloud-init use /run/somedir instead.
22 # Early boot environments can race systemd-tmpfiles-clean LP: #1707222.
23 - mkdir /run/mydir
24 - [ wget, "http://slashdot.org", -O, /run/mydir/index.html ]
2225
diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt
index bd84c64..774f66b 100644
--- a/doc/examples/cloud-config.txt
+++ b/doc/examples/cloud-config.txt
@@ -127,7 +127,10 @@ runcmd:
127 - [ sh, -xc, "echo $(date) ': hello world!'" ]127 - [ sh, -xc, "echo $(date) ': hello world!'" ]
128 - [ sh, -c, echo "=========hello world'=========" ]128 - [ sh, -c, echo "=========hello world'=========" ]
129 - ls -l /root129 - ls -l /root
130 - [ wget, "http://slashdot.org", -O, /tmp/index.html ]130 # Note: Don't write files to /tmp from cloud-init use /run/somedir instead.
131 # Early boot environments can race systemd-tmpfiles-clean LP: #1707222.
132 - mkdir /run/mydir
133 - [ wget, "http://slashdot.org", -O, /run/mydir/index.html ]
131134
132135
133# boot commands136# boot commands
diff --git a/doc/rtd/topics/format.rst b/doc/rtd/topics/format.rst
index e25289a..1b0ff36 100644
--- a/doc/rtd/topics/format.rst
+++ b/doc/rtd/topics/format.rst
@@ -121,7 +121,7 @@ Cloud Boothook
121121
122This content is ``boothook`` data. It is stored in a file under ``/var/lib/cloud`` and then executed immediately.122This content is ``boothook`` data. It is stored in a file under ``/var/lib/cloud`` and then executed immediately.
123This is the earliest ``hook`` available. Note, that there is no mechanism provided for running only once. The boothook must take care of this itself.123This is the earliest ``hook`` available. Note, that there is no mechanism provided for running only once. The boothook must take care of this itself.
124It is provided with the instance id in the environment variable ``INSTANCE_I``. This could be made use of to provide a 'once-per-instance' type of functionality.124It is provided with the instance id in the environment variable ``INSTANCE_ID``. This could be made use of to provide a 'once-per-instance' type of functionality.
125125
126Begins with: ``#cloud-boothook`` or ``Content-Type: text/cloud-boothook`` when using a MIME archive.126Begins with: ``#cloud-boothook`` or ``Content-Type: text/cloud-boothook`` when using a MIME archive.
127127
diff --git a/integration-requirements.txt b/integration-requirements.txt
index e5bb5b2..01baebd 100644
--- a/integration-requirements.txt
+++ b/integration-requirements.txt
@@ -17,4 +17,4 @@ git+https://github.com/lxc/pylxd.git@4b8ab1802f9aee4eb29cf7b119dae0aa47150779
1717
1818
19# finds latest image information19# finds latest image information
20bzr+lp:simplestreams20git+https://git.launchpad.net/simplestreams
diff --git a/tests/cloud_tests/testcases/modules/salt_minion.py b/tests/cloud_tests/testcases/modules/salt_minion.py
21deleted file mode 10064421deleted file mode 100644
index fc9688e..0000000
--- a/tests/cloud_tests/testcases/modules/salt_minion.py
+++ /dev/null
@@ -1,38 +0,0 @@
1# This file is part of cloud-init. See LICENSE file for license information.
2
3"""cloud-init Integration Test Verify Script."""
4from tests.cloud_tests.testcases import base
5
6
7class Test(base.CloudTestCase):
8 """Test salt minion module."""
9
10 def test_minon_master(self):
11 """Test master value in config."""
12 out = self.get_data_file('minion')
13 self.assertIn('master: salt.mydomain.com', out)
14
15 def test_minion_pem(self):
16 """Test private key."""
17 out = self.get_data_file('minion.pem')
18 self.assertIn('------BEGIN PRIVATE KEY------', out)
19 self.assertIn('<key data>', out)
20 self.assertIn('------END PRIVATE KEY-------', out)
21
22 def test_minion_pub(self):
23 """Test public key."""
24 out = self.get_data_file('minion.pub')
25 self.assertIn('------BEGIN PUBLIC KEY-------', out)
26 self.assertIn('<key data>', out)
27 self.assertIn('------END PUBLIC KEY-------', out)
28
29 def test_grains(self):
30 """Test master value in config."""
31 out = self.get_data_file('grains')
32 self.assertIn('role: web', out)
33
34 def test_minion_installed(self):
35 """Test if the salt-minion package is installed"""
36 self.assertPackageInstalled('salt-minion')
37
38# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/salt_minion.yaml b/tests/cloud_tests/testcases/modules/salt_minion.yaml
39deleted file mode 1006440deleted file mode 100644
index 9227147..0000000
--- a/tests/cloud_tests/testcases/modules/salt_minion.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
1#
2# Create config for a salt minion
3#
4# 2016-11-17: Currently takes >60 seconds results in test failure
5#
6enabled: True
7cloud_config: |
8 #cloud-config
9 salt_minion:
10 conf:
11 master: salt.mydomain.com
12 public_key: |
13 ------BEGIN PUBLIC KEY-------
14 <key data>
15 ------END PUBLIC KEY-------
16 private_key: |
17 ------BEGIN PRIVATE KEY------
18 <key data>
19 ------END PRIVATE KEY-------
20 grains:
21 role: web
22collect_scripts:
23 minion: |
24 #!/bin/bash
25 cat /etc/salt/minion
26 minion_id: |
27 #!/bin/bash
28 cat /etc/salt/minion_id
29 minion.pem: |
30 #!/bin/bash
31 PRIV_KEYFILE=/etc/salt/pki/minion/minion.pem
32 if [ ! -f $PRIV_KEYFILE ]; then
33 # Bionic and later automatically moves /etc/salt/pki/minion/*
34 PRIV_KEYFILE=/var/lib/salt/pki/minion/minion.pem
35 fi
36 cat $PRIV_KEYFILE
37 minion.pub: |
38 #!/bin/bash
39 PUB_KEYFILE=/etc/salt/pki/minion/minion.pub
40 if [ ! -f $PUB_KEYFILE ]; then
41 # Bionic and later automatically moves /etc/salt/pki/minion/*
42 PUB_KEYFILE=/var/lib/salt/pki/minion/minion.pub
43 fi
44 cat $PUB_KEYFILE
45 grains: |
46 #!/bin/bash
47 cat /etc/salt/grains
48
49# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py
index af9d3e1..26b2b93 100644
--- a/tests/unittests/test_datasource/test_azure_helper.py
+++ b/tests/unittests/test_datasource/test_azure_helper.py
@@ -85,7 +85,9 @@ class TestFindEndpoint(CiTestCase):
85 self.dhcp_options.return_value = {"eth0": {"unknown_245": "5:4:3:2"}}85 self.dhcp_options.return_value = {"eth0": {"unknown_245": "5:4:3:2"}}
86 self.assertEqual('5.4.3.2', wa_shim.find_endpoint(None))86 self.assertEqual('5.4.3.2', wa_shim.find_endpoint(None))
8787
88 def test_latest_lease_used(self):88 @mock.patch('cloudinit.sources.helpers.azure.util.is_FreeBSD')
89 def test_latest_lease_used(self, m_is_freebsd):
90 m_is_freebsd.return_value = False # To avoid hitting load_file
89 encoded_addresses = ['5:4:3:2', '4:3:2:1']91 encoded_addresses = ['5:4:3:2', '4:3:2:1']
90 file_content = '\n'.join([self._build_lease_content(encoded_address)92 file_content = '\n'.join([self._build_lease_content(encoded_address)
91 for encoded_address in encoded_addresses])93 for encoded_address in encoded_addresses])
diff --git a/tools/run-container b/tools/run-container
index 499e85b..6dedb75 100755
--- a/tools/run-container
+++ b/tools/run-container
@@ -418,7 +418,7 @@ main() {
418 { bad_Usage; return; }418 { bad_Usage; return; }
419419
420 local cur="" next=""420 local cur="" next=""
421 local package="" source_package="" unittest="" name=""421 local package=false srcpackage=false unittest="" name=""
422 local dirty=false pyexe="auto" artifact_d="."422 local dirty=false pyexe="auto" artifact_d="."
423423
424 while [ $# -ne 0 ]; do424 while [ $# -ne 0 ]; do
@@ -430,8 +430,8 @@ main() {
430 -k|--keep) KEEP=true;;430 -k|--keep) KEEP=true;;
431 -n|--name) name="$next"; shift;;431 -n|--name) name="$next"; shift;;
432 --pyexe) pyexe=$next; shift;;432 --pyexe) pyexe=$next; shift;;
433 -p|--package) package=1;;433 -p|--package) package=true;;
434 -s|--source-package) source_package=1;;434 -s|--source-package) srcpackage=true;;
435 -u|--unittest) unittest=1;;435 -u|--unittest) unittest=1;;
436 -v|--verbose) VERBOSITY=$((VERBOSITY+1));;436 -v|--verbose) VERBOSITY=$((VERBOSITY+1));;
437 --) shift; break;;437 --) shift; break;;
@@ -529,8 +529,8 @@ main() {
529 build_srcpkg="./packages/brpm $distflag --srpm"529 build_srcpkg="./packages/brpm $distflag --srpm"
530 pkg_ext=".rpm";;530 pkg_ext=".rpm";;
531 esac531 esac
532 if [ -n "$source_package" ]; then532 if [ "$srcpackage" = "true" ]; then
533 [ -n "$build_pkg" ] || {533 [ -n "$build_srcpkg" ] || {
534 error "Unknown package command for $OS_NAME"534 error "Unknown package command for $OS_NAME"
535 return 1535 return 1
536 }536 }
@@ -542,19 +542,21 @@ main() {
542 }542 }
543 fi543 fi
544544
545 if [ -n "$package" ]; then545 if [ "$package" = "true" ]; then
546 [ -n "$build_srcpkg" ] || {546 [ -n "$build_pkg" ] || {
547 error "Unknown build source command for $OS_NAME"547 error "Unknown build source command for $OS_NAME"
548 return 1548 return 1
549 }549 }
550 debug 1 "building binary package with $build_pkg."550 debug 1 "building binary package with $build_pkg."
551 # shellcheck disable=SC2086
551 inside_as_cd "$name" "$user" "$cdir" $pyexe $build_pkg || {552 inside_as_cd "$name" "$user" "$cdir" $pyexe $build_pkg || {
552 errorrc "failed: $build_pkg";553 errorrc "failed: $build_pkg";
553 errors[${#errors[@]}]="binary package"554 errors[${#errors[@]}]="binary package"
554 }555 }
555 fi556 fi
556557
557 if [ -n "$artifact_d" ]; then558 if [ -n "$artifact_d" ] &&
559 [ "$package" = "true" -o "$srcpackage" = "true" ]; then
558 local art=""560 local art=""
559 artifact_d="${artifact_d%/}/"561 artifact_d="${artifact_d%/}/"
560 [ -d "${artifact_d}" ] || mkdir -p "$artifact_d" || {562 [ -d "${artifact_d}" ] || mkdir -p "$artifact_d" || {

Subscribers

People subscribed via source and target branches