Merge ~chad.smith/cloud-init:ubuntu/artful into cloud-init:ubuntu/artful
Proposed by: Chad Smith
Status: Merged
Merged at revision: 903f02e16735bfa0c745330c1be63363c0798fcf
Proposed branch: ~chad.smith/cloud-init:ubuntu/artful
Merge into: cloud-init:ubuntu/artful
Diff against target: 7933 lines (+4629/-634), 103 files modified:
.pylintrc (+11/-1) ChangeLog (+110/-0) cloudinit/apport.py (+3/-3) cloudinit/cloud.py (+3/-2) cloudinit/cmd/main.py (+29/-6) cloudinit/cmd/tests/test_clean.py (+2/-1) cloudinit/cmd/tests/test_main.py (+161/-0) cloudinit/cmd/tests/test_status.py (+2/-1) cloudinit/config/cc_keys_to_console.py (+1/-3) cloudinit/config/cc_puppet.py (+44/-15) cloudinit/config/cc_resizefs.py (+22/-0) cloudinit/config/cc_runcmd.py (+4/-2) cloudinit/config/cc_salt_minion.py (+65/-20) cloudinit/config/cc_set_hostname.py (+35/-6) cloudinit/config/cc_snap.py (+230/-0) cloudinit/config/cc_snap_config.py (+7/-0) cloudinit/config/cc_snappy.py (+8/-0) cloudinit/config/cc_ssh_authkey_fingerprints.py (+4/-5) cloudinit/config/cc_ubuntu_advantage.py (+173/-0) cloudinit/config/tests/test_snap.py (+490/-0) cloudinit/config/tests/test_ubuntu_advantage.py (+269/-0) cloudinit/distros/arch.py (+1/-4) cloudinit/distros/freebsd.py (+6/-0) cloudinit/distros/opensuse.py (+2/-3) cloudinit/ec2_utils.py (+2/-4) cloudinit/net/cmdline.py (+22/-2) cloudinit/net/netplan.py (+14/-21) cloudinit/net/network_state.py (+11/-1) cloudinit/settings.py (+2/-0) cloudinit/sources/DataSourceAliYun.py (+1/-1) cloudinit/sources/DataSourceAzure.py (+11/-22) cloudinit/sources/DataSourceCloudSigma.py (+1/-1) cloudinit/sources/DataSourceConfigDrive.py (+10/-0) cloudinit/sources/DataSourceGCE.py (+8/-9) cloudinit/sources/DataSourceHetzner.py (+106/-0) cloudinit/sources/DataSourceIBMCloud.py (+325/-0) cloudinit/sources/DataSourceOVF.py (+16/-5) cloudinit/sources/DataSourceOpenNebula.py (+75/-34) cloudinit/sources/DataSourceScaleway.py (+4/-4) cloudinit/sources/__init__.py (+17/-4) cloudinit/sources/helpers/hetzner.py (+26/-0) cloudinit/sources/tests/test_init.py (+97/-1) cloudinit/stages.py (+1/-2) cloudinit/subp.py (+57/-0) cloudinit/tests/helpers.py (+35/-19) cloudinit/tests/test_subp.py (+61/-0) cloudinit/tests/test_util.py (+169/-0) cloudinit/url_helper.py (+14/-10) cloudinit/util.py (+115/-24) cloudinit/version.py (+1/-1) config/cloud.cfg.tmpl (+9/-3) debian/changelog (+69/-3) doc/examples/cloud-config-chef.txt (+2/-2) doc/rtd/conf.py (+1/-0) doc/rtd/topics/capabilities.rst (+8/-6) doc/rtd/topics/debugging.rst (+31/-26) doc/rtd/topics/modules.rst (+2/-0) doc/rtd/topics/network-config.rst (+2/-2) doc/rtd/topics/tests.rst (+10/-10) packages/debian/control.in (+2/-1) tests/cloud_tests/bddeb.py (+1/-1) tests/cloud_tests/platforms/ec2/__init__.py (+0/-0) tests/cloud_tests/platforms/lxd/__init__.py (+0/-0) tests/cloud_tests/platforms/lxd/platform.py (+0/-4) tests/cloud_tests/platforms/nocloudkvm/__init__.py (+0/-0) tests/cloud_tests/platforms/nocloudkvm/instance.py (+1/-1) tests/cloud_tests/platforms/nocloudkvm/platform.py (+0/-4) tests/cloud_tests/platforms/platforms.py (+12/-2) tests/cloud_tests/releases.yaml (+3/-0) tests/cloud_tests/testcases.yaml (+3/-0) tests/cloud_tests/testcases/__init__.py (+3/-0) tests/cloud_tests/testcases/base.py (+168/-5) tests/cloud_tests/testcases/main/command_output_simple.py (+2/-15) tests/cloud_tests/testcases/modules/salt_minion.py (+10/-0) tests/cloud_tests/testcases/modules/salt_minion.yaml (+9/-1) tests/cloud_tests/testcases/modules/snap.py (+16/-0) tests/cloud_tests/testcases/modules/snap.yaml (+18/-0) tests/cloud_tests/testcases/modules/snappy.py (+2/-0) tests/cloud_tests/util.py (+5/-1) tests/cloud_tests/verify.py (+7/-4) tests/data/mount_parse_ext.txt (+19/-0) tests/data/mount_parse_zfs.txt (+21/-0) tests/data/zpool_status_simple.txt (+10/-0) tests/unittests/test_datasource/test_azure.py (+22/-15) 
tests/unittests/test_datasource/test_common.py (+4/-0) tests/unittests/test_datasource/test_gce.py (+19/-1) tests/unittests/test_datasource/test_hetzner.py (+117/-0) tests/unittests/test_datasource/test_ibmcloud.py (+262/-0) tests/unittests/test_datasource/test_opennebula.py (+177/-89) tests/unittests/test_ds_identify.py (+161/-6) tests/unittests/test_handler/test_handler_apt_source_v1.py (+2/-1) tests/unittests/test_handler/test_handler_bootcmd.py (+7/-12) tests/unittests/test_handler/test_handler_ntp.py (+6/-12) tests/unittests/test_handler/test_handler_resizefs.py (+60/-12) tests/unittests/test_handler/test_handler_runcmd.py (+4/-10) tests/unittests/test_handler/test_handler_set_hostname.py (+53/-4) tests/unittests/test_handler/test_schema.py (+16/-19) tests/unittests/test_net.py (+58/-81) tests/unittests/test_util.py (+135/-0) tools/ds-identify (+99/-28) tools/pipremove (+14/-0) tools/run-centos (+78/-13) tox.ini (+6/-3)
Related bugs:
Reviewer | Review Type | Date Requested | Status
---|---|---|---
Server Team CI bot | continuous-integration | | Approve
Scott Moser | | | Pending
Review via email:
Commit message
Sync tip of cloud-init for SRU into Artful.
Also git cherry-pick the isc-dhcp-client package dependency per 5b630c3419c5e28
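
For reference, a cherry-pick of this kind is usually applied along the following lines. This is only an illustrative sketch: the checked-out branch name and the changelog step are assumptions about the packaging workflow, not part of this proposal.

    # Sketch: apply one upstream commit onto the Ubuntu packaging branch.
    git checkout ubuntu/artful
    # Pick the isc-dhcp-client dependency commit referenced in the message.
    git cherry-pick 5b630c3419c5e28
    # Record the change in debian/changelog (dch from devscripts; assumed step).
    dch -i "cherry-pick isc-dhcp-client packaging dependency"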
Description of the change
Server Team CI bot (server-team-bot) wrote:
review: Approve (continuous-integration)
Scott Moser (smoser) wrote:
Server Team CI bot (server-team-bot) wrote:
PASSED: Continuous integration, rev:903f02e1673
https:/
Executed test runs:
SUCCESS: Checkout
SUCCESS: Unit & Style Tests
SUCCESS: Ubuntu LTS: Build
SUCCESS: Ubuntu LTS: Integration
SUCCESS: MAAS Compatability Testing
IN_PROGRESS: Declarative: Post Actions
review: Approve (continuous-integration)
Preview Diff
1 | diff --git a/.pylintrc b/.pylintrc |
2 | index 05a086d..0bdfa59 100644 |
3 | --- a/.pylintrc |
4 | +++ b/.pylintrc |
5 | @@ -46,7 +46,17 @@ reports=no |
6 | # (useful for modules/projects where namespaces are manipulated during runtime |
7 | # and thus existing member attributes cannot be deduced by static analysis. It |
8 | # supports qualified module names, as well as Unix pattern matching. |
9 | -ignored-modules=six.moves,pkg_resources,httplib,http.client,paramiko,simplestreams |
10 | +ignored-modules= |
11 | + http.client, |
12 | + httplib, |
13 | + pkg_resources, |
14 | + six.moves, |
15 | + # cloud_tests requirements. |
16 | + boto3, |
17 | + botocore, |
18 | + paramiko, |
19 | + pylxd, |
20 | + simplestreams |
21 | |
22 | # List of class names for which member attributes should not be checked (useful |
23 | # for classes with dynamically set attributes). This supports the use of |
24 | diff --git a/ChangeLog b/ChangeLog |
25 | index 31c2dcb..daa7ccf 100644 |
26 | --- a/ChangeLog |
27 | +++ b/ChangeLog |
28 | @@ -1,3 +1,113 @@ |
29 | +18.2: |
30 | + - Hetzner: Exit early if dmi system-manufacturer is not Hetzner. |
31 | + - Add missing dependency on isc-dhcp-client to trunk ubuntu packaging. |
32 | + (LP: #1759307) |
33 | + - FreeBSD: resizefs module now able to handle zfs/zpool. |
34 | + [Dominic Schlegel] (LP: #1721243) |
35 | + - cc_puppet: Revert regression of puppet creating ssl and ssl_cert dirs |
36 | + - Enable IBMCloud datasource in settings.py. |
37 | + - IBMCloud: Initial IBM Cloud datasource. |
38 | + - tests: remove jsonschema from xenial tox environment. |
39 | + - tests: Fix newly added schema unit tests to skip if no jsonschema. |
40 | + - ec2: Adjust ec2 datasource after exception_cb change. |
41 | + - Reduce AzurePreprovisioning HTTP timeouts. |
42 | + [Douglas Jordan] (LP: #1752977) |
43 | + - Revert the logic of exception_cb in read_url. |
44 | + [Kurt Garloff] (LP: #1702160, #1298921) |
45 | + - ubuntu-advantage: Add new config module to support |
46 | + ubuntu-advantage-tools |
47 | + - Handle global dns entries in netplan (LP: #1750884) |
48 | + - Identify OpenTelekomCloud Xen as OpenStack DS. |
49 | + [Kurt Garloff] (LP: #1756471) |
50 | + - datasources: fix DataSource subclass get_hostname method signature |
51 | + (LP: #1757176) |
52 | + - OpenNebula: Update network to return v2 config rather than ENI. |
53 | + [Akihiko Ota] |
54 | + - Add Hetzner Cloud DataSource |
55 | + - net: recognize iscsi root cases without ip= on kernel command line. |
56 | + (LP: #1752391) |
57 | + - tests: fix flakes warning for unused variable |
58 | + - tests: patch leaked stderr messages from snap unit tests |
59 | + - cc_snap: Add new module to install and configure snapd and snap |
60 | + packages. |
61 | + - tests: Make pylint happy and fix python2.6 uses of assertRaisesRegex. |
62 | + - netplan: render bridge port-priority values (LP: #1735821) |
63 | + - util: Fix subp regression. Allow specifying subp command as a string. |
64 | + (LP: #1755965) |
65 | + - doc: fix all warnings issued by 'tox -e doc' |
66 | + - FreeBSD: Set hostname to FQDN. [Dominic Schlegel] (LP: #1753499) |
67 | + - tests: fix run_tree and bddeb |
68 | + - tests: Fix some warnings in tests that popped up with newer python. |
69 | + - set_hostname: When present in metadata, set it before network bringup. |
70 | + (LP: #1746455) |
71 | + - tests: Centralize and re-use skipTest based on json schema presense. |
72 | + - This commit fixes get_hostname on the AzureDataSource. |
73 | + [Douglas Jordan] (LP: #1754495) |
74 | + - shellify: raise TypeError on bad input. |
75 | + - Make salt minion module work on FreeBSD. |
76 | + [Dominic Schlegel] (LP: #1721503) |
77 | + - Simplify some comparisions. [Rémy Léone] |
78 | + - Change some list creation and population to literal. [Rémy Léone] |
79 | + - GCE: fix reading of user-data that is not base64 encoded. (LP: #1752711) |
80 | + - doc: fix chef install from apt packages example in RTD. |
81 | + - Implement puppet 4 support [Romanos Skiadas] (LP: #1446804) |
82 | + - subp: Fix subp usage with non-ascii characters when no system locale. |
83 | + (LP: #1751051) |
84 | + - salt: configure grains in grains file rather than in minion config. |
85 | + [Daniel Wallace] |
86 | + |
87 | +18.1: |
88 | + - OVF: Fix VMware support for 64-bit platforms. [Sankar Tanguturi] |
89 | + - ds-identify: Fix searching for iso9660 OVF cdroms. (LP: #1749980) |
90 | + - SUSE: Fix groups used for ownership of cloud-init.log [Robert Schweikert] |
91 | + - ds-identify: check /writable/system-data/ for nocloud seed. |
92 | + (LP: #1747070) |
93 | + - tests: run nosetests in cloudinit/ directory, fix py26 fallout. |
94 | + - tools: run-centos: git clone rather than tar. |
95 | + - tests: add support for logs with lxd from snap and future lxd 3. |
96 | + (LP: #1745663) |
97 | + - EC2: Fix get_instance_id called against cached datasource pickle. |
98 | + (LP: #1748354) |
99 | + - cli: fix cloud-init status to report running when before result.json |
100 | + (LP: #1747965) |
101 | + - net: accept network-config in netplan format for renaming interfaces |
102 | + (LP: #1709715) |
103 | + - Fix ssh keys validation in ssh_util [Tatiana Kholkina] |
104 | + - docs: Update RTD content for cloud-init subcommands. |
105 | + - OVF: Extend well-known labels to include OVFENV. (LP: #1698669) |
106 | + - Fix potential cases of uninitialized variables. (LP: #1744796) |
107 | + - tests: Collect script output as binary, collect systemd journal, fix lxd. |
108 | + - HACKING.rst: mention setting user name and email via git config. |
109 | + - Azure VM Preprovisioning support. [Douglas Jordan] (LP: #1734991) |
110 | + - tools/read-version: Fix read-version when in a git worktree. |
111 | + - docs: Fix typos in docs and one debug message. [Florian Grignon] |
112 | + - btrfs: support resizing if root is mounted ro. |
113 | + [Robert Schweikert] (LP: #1734787) |
114 | + - OpenNebula: Improve network configuration support. |
115 | + [Akihiko Ota] (LP: #1719157, #1716397, #1736750) |
116 | + - tests: Fix EC2 Platform to return console output as bytes. |
117 | + - tests: Fix attempted use of /run in a test case. |
118 | + - GCE: Improvements and changes to ssh key behavior for default user. |
119 | + [Max Illfelder] (LP: #1670456, #1707033, #1707037, #1707039) |
120 | + - subp: make ProcessExecutionError have expected types in stderr, stdout. |
121 | + - tests: when querying ntp server, do not do dns resolution. |
122 | + - Recognize uppercase vfat disk labels [James Penick] (LP: #1598783) |
123 | + - tests: remove zesty as supported OS to test [Joshua Powers] |
124 | + - Do not log warning on config files that represent None. (LP: #1742479) |
125 | + - tests: Use git hash pip dependency format for pylxd. |
126 | + - tests: add integration requirements text file [Joshua Powers] |
127 | + - MAAS: add check_instance_id based off oauth tokens. (LP: #1712680) |
128 | + - tests: update apt sources list test [Joshua Powers] |
129 | + - tests: clean up image properties [Joshua Powers] |
130 | + - tests: rename test ssh keys to avoid appearance of leaking private keys. |
131 | + [Joshua Powers] |
132 | + - tests: Enable AWS EC2 Integration Testing [Joshua Powers] |
133 | + - cli: cloud-init clean handles symlinks (LP: #1741093) |
134 | + - SUSE: Add a basic test of network config rendering. [Robert Schweikert] |
135 | + - Azure: Only bounce network when necessary. (LP: #1722668) |
136 | + - lint: Fix lints seen by pylint version 1.8.1. |
137 | + - cli: Fix error in cloud-init modules --mode=init. (LP: #1736600) |
138 | + |
139 | 17.2: |
140 | - ds-identify: failure in NoCloud due to unset variable usage. |
141 | (LP: #1737704) |
142 | diff --git a/cloudinit/apport.py b/cloudinit/apport.py |
143 | index 221f341..618b016 100644 |
144 | --- a/cloudinit/apport.py |
145 | +++ b/cloudinit/apport.py |
146 | @@ -14,9 +14,9 @@ except ImportError: |
147 | |
148 | KNOWN_CLOUD_NAMES = [ |
149 | 'Amazon - Ec2', 'AliYun', 'AltCloud', 'Azure', 'Bigstep', 'CloudSigma', |
150 | - 'CloudStack', 'DigitalOcean', 'GCE - Google Compute Engine', 'MAAS', |
151 | - 'NoCloud', 'OpenNebula', 'OpenStack', 'OVF', 'Scaleway', 'SmartOS', |
152 | - 'VMware', 'Other'] |
153 | + 'CloudStack', 'DigitalOcean', 'GCE - Google Compute Engine', |
154 | + 'Hetzner Cloud', 'MAAS', 'NoCloud', 'OpenNebula', 'OpenStack', 'OVF', |
155 | + 'Scaleway', 'SmartOS', 'VMware', 'Other'] |
156 | |
157 | # Potentially clear text collected logs |
158 | CLOUDINIT_LOG = '/var/log/cloud-init.log' |
159 | diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py |
160 | index ba61678..6d12c43 100644 |
161 | --- a/cloudinit/cloud.py |
162 | +++ b/cloudinit/cloud.py |
163 | @@ -78,8 +78,9 @@ class Cloud(object): |
164 | def get_locale(self): |
165 | return self.datasource.get_locale() |
166 | |
167 | - def get_hostname(self, fqdn=False): |
168 | - return self.datasource.get_hostname(fqdn=fqdn) |
169 | + def get_hostname(self, fqdn=False, metadata_only=False): |
170 | + return self.datasource.get_hostname( |
171 | + fqdn=fqdn, metadata_only=metadata_only) |
172 | |
173 | def device_name_to_device(self, name): |
174 | return self.datasource.device_name_to_device(name) |
175 | diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py |
176 | index d2f1b77..3f2dbb9 100644 |
177 | --- a/cloudinit/cmd/main.py |
178 | +++ b/cloudinit/cmd/main.py |
179 | @@ -40,6 +40,7 @@ from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE, |
180 | |
181 | from cloudinit import atomic_helper |
182 | |
183 | +from cloudinit.config import cc_set_hostname |
184 | from cloudinit.dhclient_hook import LogDhclient |
185 | |
186 | |
187 | @@ -215,12 +216,10 @@ def main_init(name, args): |
188 | if args.local: |
189 | deps = [sources.DEP_FILESYSTEM] |
190 | |
191 | - early_logs = [] |
192 | - early_logs.append( |
193 | - attempt_cmdline_url( |
194 | - path=os.path.join("%s.d" % CLOUD_CONFIG, |
195 | - "91_kernel_cmdline_url.cfg"), |
196 | - network=not args.local)) |
197 | + early_logs = [attempt_cmdline_url( |
198 | + path=os.path.join("%s.d" % CLOUD_CONFIG, |
199 | + "91_kernel_cmdline_url.cfg"), |
200 | + network=not args.local)] |
201 | |
202 | # Cloud-init 'init' stage is broken up into the following sub-stages |
203 | # 1. Ensure that the init object fetches its config without errors |
204 | @@ -354,6 +353,11 @@ def main_init(name, args): |
205 | LOG.debug("[%s] %s will now be targeting instance id: %s. new=%s", |
206 | mode, name, iid, init.is_new_instance()) |
207 | |
208 | + if mode == sources.DSMODE_LOCAL: |
209 | + # Before network comes up, set any configured hostname to allow |
210 | + # dhcp clients to advertize this hostname to any DDNS services |
211 | + # LP: #1746455. |
212 | + _maybe_set_hostname(init, stage='local', retry_stage='network') |
213 | init.apply_network_config(bring_up=bool(mode != sources.DSMODE_LOCAL)) |
214 | |
215 | if mode == sources.DSMODE_LOCAL: |
216 | @@ -370,6 +374,7 @@ def main_init(name, args): |
217 | init.setup_datasource() |
218 | # update fully realizes user-data (pulling in #include if necessary) |
219 | init.update() |
220 | + _maybe_set_hostname(init, stage='init-net', retry_stage='modules:config') |
221 | # Stage 7 |
222 | try: |
223 | # Attempt to consume the data per instance. |
224 | @@ -683,6 +688,24 @@ def status_wrapper(name, args, data_d=None, link_d=None): |
225 | return len(v1[mode]['errors']) |
226 | |
227 | |
228 | +def _maybe_set_hostname(init, stage, retry_stage): |
229 | + """Call set-hostname if metadata, vendordata or userdata provides it. |
230 | + |
231 | + @param stage: String representing current stage in which we are running. |
232 | + @param retry_stage: String represented logs upon error setting hostname. |
233 | + """ |
234 | + cloud = init.cloudify() |
235 | + (hostname, _fqdn) = util.get_hostname_fqdn( |
236 | + init.cfg, cloud, metadata_only=True) |
237 | + if hostname: # meta-data or user-data hostname content |
238 | + try: |
239 | + cc_set_hostname.handle('set-hostname', init.cfg, cloud, LOG, None) |
240 | + except cc_set_hostname.SetHostnameError as e: |
241 | + LOG.debug( |
242 | + 'Failed setting hostname in %s stage. Will' |
243 | + ' retry in %s stage. Error: %s.', stage, retry_stage, str(e)) |
244 | + |
245 | + |
246 | def main_features(name, args): |
247 | sys.stdout.write('\n'.join(sorted(version.FEATURES)) + '\n') |
248 | |
249 | diff --git a/cloudinit/cmd/tests/test_clean.py b/cloudinit/cmd/tests/test_clean.py |
250 | index 6713af4..5a3ec3b 100644 |
251 | --- a/cloudinit/cmd/tests/test_clean.py |
252 | +++ b/cloudinit/cmd/tests/test_clean.py |
253 | @@ -165,10 +165,11 @@ class TestClean(CiTestCase): |
254 | wrap_and_call( |
255 | 'cloudinit.cmd.clean', |
256 | {'Init': {'side_effect': self.init_class}, |
257 | + 'sys.exit': {'side_effect': self.sys_exit}, |
258 | 'sys.argv': {'new': ['clean', '--logs']}}, |
259 | clean.main) |
260 | |
261 | - self.assertRaisesCodeEqual(0, context_manager.exception.code) |
262 | + self.assertEqual(0, context_manager.exception.code) |
263 | self.assertFalse( |
264 | os.path.exists(self.log1), 'Unexpected log {0}'.format(self.log1)) |
265 | |
266 | diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py |
267 | new file mode 100644 |
268 | index 0000000..dbe421c |
269 | --- /dev/null |
270 | +++ b/cloudinit/cmd/tests/test_main.py |
271 | @@ -0,0 +1,161 @@ |
272 | +# This file is part of cloud-init. See LICENSE file for license information. |
273 | + |
274 | +from collections import namedtuple |
275 | +import copy |
276 | +import os |
277 | +from six import StringIO |
278 | + |
279 | +from cloudinit.cmd import main |
280 | +from cloudinit.util import ( |
281 | + ensure_dir, load_file, write_file, yaml_dumps) |
282 | +from cloudinit.tests.helpers import ( |
283 | + FilesystemMockingTestCase, wrap_and_call) |
284 | + |
285 | +mypaths = namedtuple('MyPaths', 'run_dir') |
286 | +myargs = namedtuple('MyArgs', 'debug files force local reporter subcommand') |
287 | + |
288 | + |
289 | +class TestMain(FilesystemMockingTestCase): |
290 | + |
291 | + with_logs = True |
292 | + |
293 | + def setUp(self): |
294 | + super(TestMain, self).setUp() |
295 | + self.new_root = self.tmp_dir() |
296 | + self.cloud_dir = self.tmp_path('var/lib/cloud/', dir=self.new_root) |
297 | + os.makedirs(self.cloud_dir) |
298 | + self.replicateTestRoot('simple_ubuntu', self.new_root) |
299 | + self.cfg = { |
300 | + 'datasource_list': ['None'], |
301 | + 'runcmd': ['ls /etc'], # test ALL_DISTROS |
302 | + 'system_info': {'paths': {'cloud_dir': self.cloud_dir, |
303 | + 'run_dir': self.new_root}}, |
304 | + 'write_files': [ |
305 | + { |
306 | + 'path': '/etc/blah.ini', |
307 | + 'content': 'blah', |
308 | + 'permissions': 0o755, |
309 | + }, |
310 | + ], |
311 | + 'cloud_init_modules': ['write-files', 'runcmd'], |
312 | + } |
313 | + cloud_cfg = yaml_dumps(self.cfg) |
314 | + ensure_dir(os.path.join(self.new_root, 'etc', 'cloud')) |
315 | + self.cloud_cfg_file = os.path.join( |
316 | + self.new_root, 'etc', 'cloud', 'cloud.cfg') |
317 | + write_file(self.cloud_cfg_file, cloud_cfg) |
318 | + self.patchOS(self.new_root) |
319 | + self.patchUtils(self.new_root) |
320 | + self.stderr = StringIO() |
321 | + self.patchStdoutAndStderr(stderr=self.stderr) |
322 | + |
323 | + def test_main_init_run_net_stops_on_file_no_net(self): |
324 | + """When no-net file is present, main_init does not process modules.""" |
325 | + stop_file = os.path.join(self.cloud_dir, 'data', 'no-net') # stop file |
326 | + write_file(stop_file, '') |
327 | + cmdargs = myargs( |
328 | + debug=False, files=None, force=False, local=False, reporter=None, |
329 | + subcommand='init') |
330 | + (item1, item2) = wrap_and_call( |
331 | + 'cloudinit.cmd.main', |
332 | + {'util.close_stdin': True, |
333 | + 'netinfo.debug_info': 'my net debug info', |
334 | + 'util.fixup_output': ('outfmt', 'errfmt')}, |
335 | + main.main_init, 'init', cmdargs) |
336 | + # We should not run write_files module |
337 | + self.assertFalse( |
338 | + os.path.exists(os.path.join(self.new_root, 'etc/blah.ini')), |
339 | + 'Unexpected run of write_files module produced blah.ini') |
340 | + self.assertEqual([], item2) |
341 | + # Instancify is called |
342 | + instance_id_path = 'var/lib/cloud/data/instance-id' |
343 | + self.assertFalse( |
344 | + os.path.exists(os.path.join(self.new_root, instance_id_path)), |
345 | + 'Unexpected call to datasource.instancify produced instance-id') |
346 | + expected_logs = [ |
347 | + "Exiting. stop file ['{stop_file}'] existed\n".format( |
348 | + stop_file=stop_file), |
349 | + 'my net debug info' # netinfo.debug_info |
350 | + ] |
351 | + for log in expected_logs: |
352 | + self.assertIn(log, self.stderr.getvalue()) |
353 | + |
354 | + def test_main_init_run_net_runs_modules(self): |
355 | + """Modules like write_files are run in 'net' mode.""" |
356 | + cmdargs = myargs( |
357 | + debug=False, files=None, force=False, local=False, reporter=None, |
358 | + subcommand='init') |
359 | + (item1, item2) = wrap_and_call( |
360 | + 'cloudinit.cmd.main', |
361 | + {'util.close_stdin': True, |
362 | + 'netinfo.debug_info': 'my net debug info', |
363 | + 'util.fixup_output': ('outfmt', 'errfmt')}, |
364 | + main.main_init, 'init', cmdargs) |
365 | + self.assertEqual([], item2) |
366 | + # Instancify is called |
367 | + instance_id_path = 'var/lib/cloud/data/instance-id' |
368 | + self.assertEqual( |
369 | + 'iid-datasource-none\n', |
370 | + os.path.join(load_file( |
371 | + os.path.join(self.new_root, instance_id_path)))) |
372 | + # modules are run (including write_files) |
373 | + self.assertEqual( |
374 | + 'blah', load_file(os.path.join(self.new_root, 'etc/blah.ini'))) |
375 | + expected_logs = [ |
376 | + 'network config is disabled by fallback', # apply_network_config |
377 | + 'my net debug info', # netinfo.debug_info |
378 | + 'no previous run detected' |
379 | + ] |
380 | + for log in expected_logs: |
381 | + self.assertIn(log, self.stderr.getvalue()) |
382 | + |
383 | + def test_main_init_run_net_calls_set_hostname_when_metadata_present(self): |
384 | + """When local-hostname metadata is present, call cc_set_hostname.""" |
385 | + self.cfg['datasource'] = { |
386 | + 'None': {'metadata': {'local-hostname': 'md-hostname'}}} |
387 | + cloud_cfg = yaml_dumps(self.cfg) |
388 | + write_file(self.cloud_cfg_file, cloud_cfg) |
389 | + cmdargs = myargs( |
390 | + debug=False, files=None, force=False, local=False, reporter=None, |
391 | + subcommand='init') |
392 | + |
393 | + def set_hostname(name, cfg, cloud, log, args): |
394 | + self.assertEqual('set-hostname', name) |
395 | + updated_cfg = copy.deepcopy(self.cfg) |
396 | + updated_cfg.update( |
397 | + {'def_log_file': '/var/log/cloud-init.log', |
398 | + 'log_cfgs': [], |
399 | + 'syslog_fix_perms': ['syslog:adm', 'root:adm', 'root:wheel'], |
400 | + 'vendor_data': {'enabled': True, 'prefix': []}}) |
401 | + updated_cfg.pop('system_info') |
402 | + |
403 | + self.assertEqual(updated_cfg, cfg) |
404 | + self.assertEqual(main.LOG, log) |
405 | + self.assertIsNone(args) |
406 | + |
407 | + (item1, item2) = wrap_and_call( |
408 | + 'cloudinit.cmd.main', |
409 | + {'util.close_stdin': True, |
410 | + 'netinfo.debug_info': 'my net debug info', |
411 | + 'cc_set_hostname.handle': {'side_effect': set_hostname}, |
412 | + 'util.fixup_output': ('outfmt', 'errfmt')}, |
413 | + main.main_init, 'init', cmdargs) |
414 | + self.assertEqual([], item2) |
415 | + # Instancify is called |
416 | + instance_id_path = 'var/lib/cloud/data/instance-id' |
417 | + self.assertEqual( |
418 | + 'iid-datasource-none\n', |
419 | + os.path.join(load_file( |
420 | + os.path.join(self.new_root, instance_id_path)))) |
421 | + # modules are run (including write_files) |
422 | + self.assertEqual( |
423 | + 'blah', load_file(os.path.join(self.new_root, 'etc/blah.ini'))) |
424 | + expected_logs = [ |
425 | + 'network config is disabled by fallback', # apply_network_config |
426 | + 'my net debug info', # netinfo.debug_info |
427 | + 'no previous run detected' |
428 | + ] |
429 | + for log in expected_logs: |
430 | + self.assertIn(log, self.stderr.getvalue()) |
431 | + |
432 | +# vi: ts=4 expandtab |
433 | diff --git a/cloudinit/cmd/tests/test_status.py b/cloudinit/cmd/tests/test_status.py |
434 | index 4a5a8c0..37a8993 100644 |
435 | --- a/cloudinit/cmd/tests/test_status.py |
436 | +++ b/cloudinit/cmd/tests/test_status.py |
437 | @@ -380,10 +380,11 @@ class TestStatus(CiTestCase): |
438 | wrap_and_call( |
439 | 'cloudinit.cmd.status', |
440 | {'sys.argv': {'new': ['status']}, |
441 | + 'sys.exit': {'side_effect': self.sys_exit}, |
442 | '_is_cloudinit_disabled': (False, ''), |
443 | 'Init': {'side_effect': self.init_class}}, |
444 | status.main) |
445 | - self.assertRaisesCodeEqual(0, context_manager.exception.code) |
446 | + self.assertEqual(0, context_manager.exception.code) |
447 | self.assertEqual('status: running\n', m_stdout.getvalue()) |
448 | |
449 | # vi: ts=4 expandtab syntax=python |
450 | diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py |
451 | index efedd4a..aff4010 100644 |
452 | --- a/cloudinit/config/cc_keys_to_console.py |
453 | +++ b/cloudinit/config/cc_keys_to_console.py |
454 | @@ -63,9 +63,7 @@ def handle(name, cfg, cloud, log, _args): |
455 | ["ssh-dss"]) |
456 | |
457 | try: |
458 | - cmd = [helper_path] |
459 | - cmd.append(','.join(fp_blacklist)) |
460 | - cmd.append(','.join(key_blacklist)) |
461 | + cmd = [helper_path, ','.join(fp_blacklist), ','.join(key_blacklist)] |
462 | (stdout, _stderr) = util.subp(cmd) |
463 | util.multi_log("%s\n" % (stdout.strip()), |
464 | stderr=False, console=True) |
465 | diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py |
466 | index 28b1d56..4190a20 100644 |
467 | --- a/cloudinit/config/cc_puppet.py |
468 | +++ b/cloudinit/config/cc_puppet.py |
469 | @@ -21,6 +21,13 @@ under ``version``, and defaults to ``none``, which selects the latest version |
470 | in the repos. If the ``puppet`` config key exists in the config archive, this |
471 | module will attempt to start puppet even if no installation was performed. |
472 | |
473 | +The module also provides keys for configuring the new puppet 4 paths and |
474 | +installing the puppet package from the puppetlabs repositories: |
475 | +https://docs.puppet.com/puppet/4.2/reference/whered_it_go.html |
476 | +The keys are ``package_name``, ``conf_file`` and ``ssl_dir``. If unset, their |
477 | +values will default to ones that work with puppet 3.x and with distributions |
478 | +that ship modified puppet 4.x that uses the old paths. |
479 | + |
480 | Puppet configuration can be specified under the ``conf`` key. The |
481 | configuration is specified as a dictionary containing high-level ``<section>`` |
482 | keys and lists of ``<key>=<value>`` pairs within each section. Each section |
483 | @@ -44,6 +51,9 @@ in pem format as a multi-line string (using the ``|`` yaml notation). |
484 | puppet: |
485 | install: <true/false> |
486 | version: <version> |
487 | + conf_file: '/etc/puppet/puppet.conf' |
488 | + ssl_dir: '/var/lib/puppet/ssl' |
489 | + package_name: 'puppet' |
490 | conf: |
491 | agent: |
492 | server: "puppetmaster.example.org" |
493 | @@ -63,9 +73,17 @@ from cloudinit import helpers |
494 | from cloudinit import util |
495 | |
496 | PUPPET_CONF_PATH = '/etc/puppet/puppet.conf' |
497 | -PUPPET_SSL_CERT_DIR = '/var/lib/puppet/ssl/certs/' |
498 | PUPPET_SSL_DIR = '/var/lib/puppet/ssl' |
499 | -PUPPET_SSL_CERT_PATH = '/var/lib/puppet/ssl/certs/ca.pem' |
500 | +PUPPET_PACKAGE_NAME = 'puppet' |
501 | + |
502 | + |
503 | +class PuppetConstants(object): |
504 | + |
505 | + def __init__(self, puppet_conf_file, puppet_ssl_dir, log): |
506 | + self.conf_path = puppet_conf_file |
507 | + self.ssl_dir = puppet_ssl_dir |
508 | + self.ssl_cert_dir = os.path.join(puppet_ssl_dir, "certs") |
509 | + self.ssl_cert_path = os.path.join(self.ssl_cert_dir, "ca.pem") |
510 | |
511 | |
512 | def _autostart_puppet(log): |
513 | @@ -92,22 +110,29 @@ def handle(name, cfg, cloud, log, _args): |
514 | return |
515 | |
516 | puppet_cfg = cfg['puppet'] |
517 | - |
518 | # Start by installing the puppet package if necessary... |
519 | install = util.get_cfg_option_bool(puppet_cfg, 'install', True) |
520 | version = util.get_cfg_option_str(puppet_cfg, 'version', None) |
521 | + package_name = util.get_cfg_option_str( |
522 | + puppet_cfg, 'package_name', PUPPET_PACKAGE_NAME) |
523 | + conf_file = util.get_cfg_option_str( |
524 | + puppet_cfg, 'conf_file', PUPPET_CONF_PATH) |
525 | + ssl_dir = util.get_cfg_option_str(puppet_cfg, 'ssl_dir', PUPPET_SSL_DIR) |
526 | + |
527 | + p_constants = PuppetConstants(conf_file, ssl_dir, log) |
528 | if not install and version: |
529 | log.warn(("Puppet install set false but version supplied," |
530 | " doing nothing.")) |
531 | elif install: |
532 | log.debug(("Attempting to install puppet %s,"), |
533 | version if version else 'latest') |
534 | - cloud.distro.install_packages(('puppet', version)) |
535 | + |
536 | + cloud.distro.install_packages((package_name, version)) |
537 | |
538 | # ... and then update the puppet configuration |
539 | if 'conf' in puppet_cfg: |
540 | # Add all sections from the conf object to puppet.conf |
541 | - contents = util.load_file(PUPPET_CONF_PATH) |
542 | + contents = util.load_file(p_constants.conf_path) |
543 | # Create object for reading puppet.conf values |
544 | puppet_config = helpers.DefaultingConfigParser() |
545 | # Read puppet.conf values from original file in order to be able to |
546 | @@ -115,20 +140,23 @@ def handle(name, cfg, cloud, log, _args): |
547 | # (TODO(harlowja) is this really needed??) |
548 | cleaned_lines = [i.lstrip() for i in contents.splitlines()] |
549 | cleaned_contents = '\n'.join(cleaned_lines) |
550 | - puppet_config.readfp(StringIO(cleaned_contents), |
551 | - filename=PUPPET_CONF_PATH) |
552 | + # Move to puppet_config.read_file when dropping py2.7 |
553 | + puppet_config.readfp( # pylint: disable=W1505 |
554 | + StringIO(cleaned_contents), |
555 | + filename=p_constants.conf_path) |
556 | for (cfg_name, cfg) in puppet_cfg['conf'].items(): |
557 | # Cert configuration is a special case |
558 | # Dump the puppet master ca certificate in the correct place |
559 | if cfg_name == 'ca_cert': |
560 | # Puppet ssl sub-directory isn't created yet |
561 | # Create it with the proper permissions and ownership |
562 | - util.ensure_dir(PUPPET_SSL_DIR, 0o771) |
563 | - util.chownbyname(PUPPET_SSL_DIR, 'puppet', 'root') |
564 | - util.ensure_dir(PUPPET_SSL_CERT_DIR) |
565 | - util.chownbyname(PUPPET_SSL_CERT_DIR, 'puppet', 'root') |
566 | - util.write_file(PUPPET_SSL_CERT_PATH, cfg) |
567 | - util.chownbyname(PUPPET_SSL_CERT_PATH, 'puppet', 'root') |
568 | + util.ensure_dir(p_constants.ssl_dir, 0o771) |
569 | + util.chownbyname(p_constants.ssl_dir, 'puppet', 'root') |
570 | + util.ensure_dir(p_constants.ssl_cert_dir) |
571 | + |
572 | + util.chownbyname(p_constants.ssl_cert_dir, 'puppet', 'root') |
573 | + util.write_file(p_constants.ssl_cert_path, cfg) |
574 | + util.chownbyname(p_constants.ssl_cert_path, 'puppet', 'root') |
575 | else: |
576 | # Iterate through the config items, we'll use ConfigParser.set |
577 | # to overwrite or create new items as needed |
578 | @@ -144,8 +172,9 @@ def handle(name, cfg, cloud, log, _args): |
579 | puppet_config.set(cfg_name, o, v) |
580 | # We got all our config as wanted we'll rename |
581 | # the previous puppet.conf and create our new one |
582 | - util.rename(PUPPET_CONF_PATH, "%s.old" % (PUPPET_CONF_PATH)) |
583 | - util.write_file(PUPPET_CONF_PATH, puppet_config.stringify()) |
584 | + util.rename(p_constants.conf_path, "%s.old" |
585 | + % (p_constants.conf_path)) |
586 | + util.write_file(p_constants.conf_path, puppet_config.stringify()) |
587 | |
588 | # Set it up so it autostarts |
589 | _autostart_puppet(log) |
590 | diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py |
591 | index cec22bb..c8e1752 100644 |
592 | --- a/cloudinit/config/cc_resizefs.py |
593 | +++ b/cloudinit/config/cc_resizefs.py |
594 | @@ -84,6 +84,10 @@ def _resize_ufs(mount_point, devpth): |
595 | return ('growfs', devpth) |
596 | |
597 | |
598 | +def _resize_zfs(mount_point, devpth): |
599 | + return ('zpool', 'online', '-e', mount_point, devpth) |
600 | + |
601 | + |
602 | def _get_dumpfs_output(mount_point): |
603 | dumpfs_res, err = util.subp(['dumpfs', '-m', mount_point]) |
604 | return dumpfs_res |
605 | @@ -148,6 +152,7 @@ RESIZE_FS_PREFIXES_CMDS = [ |
606 | ('ext', _resize_ext), |
607 | ('xfs', _resize_xfs), |
608 | ('ufs', _resize_ufs), |
609 | + ('zfs', _resize_zfs), |
610 | ] |
611 | |
612 | RESIZE_FS_PRECHECK_CMDS = { |
613 | @@ -188,6 +193,13 @@ def maybe_get_writable_device_path(devpath, info, log): |
614 | log.debug("Not attempting to resize devpath '%s': %s", devpath, info) |
615 | return None |
616 | |
617 | + # FreeBSD zpool can also just use gpt/<label> |
618 | + # with that in mind we can not do an os.stat on "gpt/whatever" |
619 | + # therefore return the devpath already here. |
620 | + if devpath.startswith('gpt/'): |
621 | + log.debug('We have a gpt label - just go ahead') |
622 | + return devpath |
623 | + |
624 | try: |
625 | statret = os.stat(devpath) |
626 | except OSError as exc: |
627 | @@ -231,6 +243,16 @@ def handle(name, cfg, _cloud, log, args): |
628 | |
629 | (devpth, fs_type, mount_point) = result |
630 | |
631 | + # if we have a zfs then our device path at this point |
632 | + # is the zfs label. For example: vmzroot/ROOT/freebsd |
633 | + # we will have to get the zpool name out of this |
634 | + # and set the resize_what variable to the zpool |
635 | + # so the _resize_zfs function gets the right attribute. |
636 | + if fs_type == 'zfs': |
637 | + zpool = devpth.split('/')[0] |
638 | + devpth = util.get_device_info_from_zpool(zpool) |
639 | + resize_what = zpool |
640 | + |
641 | info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what) |
642 | log.debug("resize_info: %s" % info) |
643 | |
644 | diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py |
645 | index 449872f..539cbd5 100644 |
646 | --- a/cloudinit/config/cc_runcmd.py |
647 | +++ b/cloudinit/config/cc_runcmd.py |
648 | @@ -39,8 +39,10 @@ schema = { |
649 | using ``sh``. |
650 | |
651 | .. note:: |
652 | - all commands must be proper yaml, so you have to quote any characters |
653 | - yaml would eat (':' can be problematic)"""), |
654 | + |
655 | + all commands must be proper yaml, so you have to quote any characters |
656 | + yaml would eat (':' can be problematic) |
657 | + """), |
658 | 'distros': distros, |
659 | 'examples': [dedent("""\ |
660 | runcmd: |
661 | diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py |
662 | index 2b38837..d6a21d7 100644 |
663 | --- a/cloudinit/config/cc_salt_minion.py |
664 | +++ b/cloudinit/config/cc_salt_minion.py |
665 | @@ -12,7 +12,9 @@ key is present in the config parts, then salt minion will be installed and |
666 | started. Configuration for salt minion can be specified in the ``conf`` key |
667 | under ``salt_minion``. Any conf values present there will be assigned in |
668 | ``/etc/salt/minion``. The public and private keys to use for salt minion can be |
669 | -specified with ``public_key`` and ``private_key`` respectively. |
670 | +specified with ``public_key`` and ``private_key`` respectively. Optionally if |
671 | +you have a custom package name, service name or config directory you can |
672 | +specify them with ``pkg_name``, ``service_name`` and ``config_dir``. |
673 | |
674 | **Internal name:** ``cc_salt_minion`` |
675 | |
676 | @@ -23,8 +25,14 @@ specified with ``public_key`` and ``private_key`` respectively. |
677 | **Config keys**:: |
678 | |
679 | salt_minion: |
680 | + pkg_name: 'salt-minion' |
681 | + service_name: 'salt-minion' |
682 | + config_dir: '/etc/salt' |
683 | conf: |
684 | master: salt.example.com |
685 | + grains: |
686 | + role: |
687 | + - web |
688 | public_key: | |
689 | ------BEGIN PUBLIC KEY------- |
690 | <key data> |
691 | @@ -39,7 +47,34 @@ import os |
692 | |
693 | from cloudinit import util |
694 | |
695 | -# Note: see http://saltstack.org/topics/installation/ |
696 | +# Note: see https://docs.saltstack.com/en/latest/topics/installation/ |
697 | +# Note: see https://docs.saltstack.com/en/latest/ref/configuration/ |
698 | + |
699 | + |
700 | +class SaltConstants(object): |
701 | + """ |
702 | + defines default distribution specific salt variables |
703 | + """ |
704 | + def __init__(self, cfg): |
705 | + |
706 | + # constants tailored for FreeBSD |
707 | + if util.is_FreeBSD(): |
708 | + self.pkg_name = 'py27-salt' |
709 | + self.srv_name = 'salt_minion' |
710 | + self.conf_dir = '/usr/local/etc/salt' |
711 | + # constants for any other OS |
712 | + else: |
713 | + self.pkg_name = 'salt-minion' |
714 | + self.srv_name = 'salt-minion' |
715 | + self.conf_dir = '/etc/salt' |
716 | + |
717 | + # if there are constants given in cloud config use those |
718 | + self.pkg_name = util.get_cfg_option_str(cfg, 'pkg_name', |
719 | + self.pkg_name) |
720 | + self.conf_dir = util.get_cfg_option_str(cfg, 'config_dir', |
721 | + self.conf_dir) |
722 | + self.srv_name = util.get_cfg_option_str(cfg, 'service_name', |
723 | + self.srv_name) |
724 | |
725 | |
726 | def handle(name, cfg, cloud, log, _args): |
727 | @@ -49,39 +84,49 @@ def handle(name, cfg, cloud, log, _args): |
728 | " no 'salt_minion' key in configuration"), name) |
729 | return |
730 | |
731 | - salt_cfg = cfg['salt_minion'] |
732 | + s_cfg = cfg['salt_minion'] |
733 | + const = SaltConstants(cfg=s_cfg) |
734 | |
735 | # Start by installing the salt package ... |
736 | - cloud.distro.install_packages(('salt-minion',)) |
737 | + cloud.distro.install_packages(const.pkg_name) |
738 | |
739 | # Ensure we can configure files at the right dir |
740 | - config_dir = salt_cfg.get("config_dir", '/etc/salt') |
741 | - util.ensure_dir(config_dir) |
742 | + util.ensure_dir(const.conf_dir) |
743 | |
744 | # ... and then update the salt configuration |
745 | - if 'conf' in salt_cfg: |
746 | - # Add all sections from the conf object to /etc/salt/minion |
747 | - minion_config = os.path.join(config_dir, 'minion') |
748 | - minion_data = util.yaml_dumps(salt_cfg.get('conf')) |
749 | + if 'conf' in s_cfg: |
750 | + # Add all sections from the conf object to minion config file |
751 | + minion_config = os.path.join(const.conf_dir, 'minion') |
752 | + minion_data = util.yaml_dumps(s_cfg.get('conf')) |
753 | util.write_file(minion_config, minion_data) |
754 | |
755 | + if 'grains' in s_cfg: |
756 | + # add grains to /etc/salt/grains |
757 | + grains_config = os.path.join(const.conf_dir, 'grains') |
758 | + grains_data = util.yaml_dumps(s_cfg.get('grains')) |
759 | + util.write_file(grains_config, grains_data) |
760 | + |
761 | # ... copy the key pair if specified |
762 | - if 'public_key' in salt_cfg and 'private_key' in salt_cfg: |
763 | - if os.path.isdir("/etc/salt/pki/minion"): |
764 | - pki_dir_default = "/etc/salt/pki/minion" |
765 | - else: |
766 | - pki_dir_default = "/etc/salt/pki" |
767 | + if 'public_key' in s_cfg and 'private_key' in s_cfg: |
768 | + pki_dir_default = os.path.join(const.conf_dir, "pki/minion") |
769 | + if not os.path.isdir(pki_dir_default): |
770 | + pki_dir_default = os.path.join(const.conf_dir, "pki") |
771 | |
772 | - pki_dir = salt_cfg.get('pki_dir', pki_dir_default) |
773 | + pki_dir = s_cfg.get('pki_dir', pki_dir_default) |
774 | with util.umask(0o77): |
775 | util.ensure_dir(pki_dir) |
776 | pub_name = os.path.join(pki_dir, 'minion.pub') |
777 | pem_name = os.path.join(pki_dir, 'minion.pem') |
778 | - util.write_file(pub_name, salt_cfg['public_key']) |
779 | - util.write_file(pem_name, salt_cfg['private_key']) |
780 | + util.write_file(pub_name, s_cfg['public_key']) |
781 | + util.write_file(pem_name, s_cfg['private_key']) |
782 | + |
783 | + # we need to have the salt minion service enabled in rc in order to be |
784 | + # able to start the service. this does only apply on FreeBSD servers. |
785 | + if cloud.distro.osfamily == 'freebsd': |
786 | + cloud.distro.updatercconf('salt_minion_enable', 'YES') |
787 | |
788 | - # restart salt-minion. 'service' will start even if not started. if it |
789 | + # restart salt-minion. 'service' will start even if not started. if it |
790 | # was started, it needs to be restarted for config change. |
791 | - util.subp(['service', 'salt-minion', 'restart'], capture=False) |
792 | + util.subp(['service', const.srv_name, 'restart'], capture=False) |
793 | |
794 | # vi: ts=4 expandtab |
795 | diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py |
796 | index aa3dfe5..3d2b2da 100644 |
797 | --- a/cloudinit/config/cc_set_hostname.py |
798 | +++ b/cloudinit/config/cc_set_hostname.py |
799 | @@ -32,22 +32,51 @@ will be used. |
800 | hostname: <fqdn/hostname> |
801 | """ |
802 | |
803 | +import os |
804 | + |
805 | + |
806 | +from cloudinit.atomic_helper import write_json |
807 | from cloudinit import util |
808 | |
809 | |
810 | +class SetHostnameError(Exception): |
811 | + """Raised when the distro runs into an exception when setting hostname. |
812 | + |
813 | + This may happen if we attempt to set the hostname early in cloud-init's |
814 | + init-local timeframe as certain services may not be running yet. |
815 | + """ |
816 | + pass |
817 | + |
818 | + |
819 | def handle(name, cfg, cloud, log, _args): |
820 | if util.get_cfg_option_bool(cfg, "preserve_hostname", False): |
821 | log.debug(("Configuration option 'preserve_hostname' is set," |
822 | " not setting the hostname in module %s"), name) |
823 | return |
824 | - |
825 | (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) |
826 | + # Check for previous successful invocation of set-hostname |
827 | + |
828 | + # set-hostname artifact file accounts for both hostname and fqdn |
829 | + # deltas. As such, it's format is different than cc_update_hostname's |
830 | + # previous-hostname file which only contains the base hostname. |
831 | + # TODO consolidate previous-hostname and set-hostname artifact files and |
832 | + # distro._read_hostname implementation so we only validate one artifact. |
833 | + prev_fn = os.path.join(cloud.get_cpath('data'), "set-hostname") |
834 | + prev_hostname = {} |
835 | + if os.path.exists(prev_fn): |
836 | + prev_hostname = util.load_json(util.load_file(prev_fn)) |
837 | + hostname_changed = (hostname != prev_hostname.get('hostname') or |
838 | + fqdn != prev_hostname.get('fqdn')) |
839 | + if not hostname_changed: |
840 | + log.debug('No hostname changes. Skipping set-hostname') |
841 | + return |
842 | + log.debug("Setting the hostname to %s (%s)", fqdn, hostname) |
843 | try: |
844 | - log.debug("Setting the hostname to %s (%s)", fqdn, hostname) |
845 | cloud.distro.set_hostname(hostname, fqdn) |
846 | - except Exception: |
847 | - util.logexc(log, "Failed to set the hostname to %s (%s)", fqdn, |
848 | - hostname) |
849 | - raise |
850 | + except Exception as e: |
851 | + msg = "Failed to set the hostname to %s (%s)" % (fqdn, hostname) |
852 | + util.logexc(log, msg) |
853 | + raise SetHostnameError("%s: %s" % (msg, e)) |
854 | + write_json(prev_fn, {'hostname': hostname, 'fqdn': fqdn}) |
855 | |
856 | # vi: ts=4 expandtab |
857 | diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py |
858 | new file mode 100644 |
859 | index 0000000..34a53fd |
860 | --- /dev/null |
861 | +++ b/cloudinit/config/cc_snap.py |
862 | @@ -0,0 +1,230 @@ |
863 | +# Copyright (C) 2018 Canonical Ltd. |
864 | +# |
865 | +# This file is part of cloud-init. See LICENSE file for license information. |
866 | + |
867 | +"""Snap: Install, configure and manage snapd and snap packages.""" |
868 | + |
869 | +import sys |
870 | +from textwrap import dedent |
871 | + |
872 | +from cloudinit import log as logging |
873 | +from cloudinit.config.schema import ( |
874 | + get_schema_doc, validate_cloudconfig_schema) |
875 | +from cloudinit.settings import PER_INSTANCE |
876 | +from cloudinit.subp import prepend_base_command |
877 | +from cloudinit import util |
878 | + |
879 | + |
880 | +distros = ['ubuntu'] |
881 | +frequency = PER_INSTANCE |
882 | + |
883 | +LOG = logging.getLogger(__name__) |
884 | + |
885 | +schema = { |
886 | + 'id': 'cc_snap', |
887 | + 'name': 'Snap', |
888 | + 'title': 'Install, configure and manage snapd and snap packages', |
889 | + 'description': dedent("""\ |
890 | + This module provides a simple configuration namespace in cloud-init to |
891 | + both setup snapd and install snaps. |
892 | + |
893 | + .. note:: |
894 | + Both ``assertions`` and ``commands`` values can be either a |
895 | + dictionary or a list. If these configs are provided as a |
896 | + dictionary, the keys are only used to order the execution of the |
897 | + assertions or commands and the dictionary is merged with any |
898 | + vendor-data snap configuration provided. If a list is provided by |
899 | + the user instead of a dict, any vendor-data snap configuration is |
900 | + ignored. |
901 | + |
902 | + The ``assertions`` configuration option is a dictionary or list of |
903 | + properly-signed snap assertions which will run before any snap |
904 | + ``commands``. They will be added to snapd's assertion database by |
905 | + invoking ``snap ack <aggregate_assertion_file>``. |
906 | + |
907 | + Snap ``commands`` is a dictionary or list of individual snap |
908 | + commands to run on the target system. These commands can be used to |
909 | + create snap users, install snaps and provide snap configuration. |
910 | + |
911 | + .. note:: |
912 | + If 'side-loading' private/unpublished snaps on an instance, it is |
913 | + best to create a snap seed directory and seed.yaml manifest in |
914 | + **/var/lib/snapd/seed/** which snapd automatically installs on |
915 | + startup. |
916 | + |
917 | + **Development only**: The ``squashfuse_in_container`` boolean can be |
918 | + set true to install squashfuse package when in a container to enable |
919 | + snap installs. Default is false. |
920 | + """), |
921 | + 'distros': distros, |
922 | + 'examples': [dedent("""\ |
923 | + snap: |
924 | + assertions: |
925 | + 00: | |
926 | + signed_assertion_blob_here |
927 | + 02: | |
928 | + signed_assertion_blob_here |
929 | + commands: |
930 | + 00: snap create-user --sudoer --known <snap-user>@mydomain.com |
931 | + 01: snap install canonical-livepatch |
932 | + 02: canonical-livepatch enable <AUTH_TOKEN> |
933 | + """), dedent("""\ |
934 | + # LXC-based containers require squashfuse before snaps can be installed |
935 | + snap: |
936 | + commands: |
937 | + 00: apt-get install squashfuse -y |
938 | + 11: snap install emoj |
939 | + |
940 | + """), dedent("""\ |
941 | + # Convenience: the snap command can be omitted when specifying commands |
942 | + # as a list and 'snap' will automatically be prepended. |
943 | + # The following commands are equivalent: |
944 | + snap: |
945 | + commands: |
946 | + 00: ['install', 'vlc'] |
947 | + 01: ['snap', 'install', 'vlc'] |
948 | + 02: snap install vlc |
949 | + 03: 'snap install vlc' |
950 | + """)], |
951 | + 'frequency': PER_INSTANCE, |
952 | + 'type': 'object', |
953 | + 'properties': { |
954 | + 'snap': { |
955 | + 'type': 'object', |
956 | + 'properties': { |
957 | + 'assertions': { |
958 | + 'type': ['object', 'array'], # Array of strings or dict |
959 | + 'items': {'type': 'string'}, |
960 | + 'additionalItems': False, # Reject items non-string |
961 | + 'minItems': 1, |
962 | + 'minProperties': 1, |
963 | + 'uniqueItems': True |
964 | + }, |
965 | + 'commands': { |
966 | + 'type': ['object', 'array'], # Array of strings or dict |
967 | + 'items': { |
968 | + 'oneOf': [ |
969 | + {'type': 'array', 'items': {'type': 'string'}}, |
970 | + {'type': 'string'}] |
971 | + }, |
972 | + 'additionalItems': False, # Reject non-string & non-list |
973 | + 'minItems': 1, |
974 | + 'minProperties': 1, |
975 | + 'uniqueItems': True |
976 | + }, |
977 | + 'squashfuse_in_container': { |
978 | + 'type': 'boolean' |
979 | + } |
980 | + }, |
981 | + 'additionalProperties': False, # Reject keys not in schema |
982 | + 'required': [], |
983 | + 'minProperties': 1 |
984 | + } |
985 | + } |
986 | +} |
987 | + |
988 | +# TODO schema for 'assertions' and 'commands' are too permissive at the moment. |
989 | +# Once python-jsonschema supports schema draft 6 add support for arbitrary |
990 | +# object keys with 'patternProperties' constraint to validate string values. |
991 | + |
992 | +__doc__ = get_schema_doc(schema) # Supplement python help() |
993 | + |
994 | +SNAP_CMD = "snap" |
995 | +ASSERTIONS_FILE = "/var/lib/cloud/instance/snapd.assertions" |
996 | + |
997 | + |
998 | +def add_assertions(assertions): |
999 | + """Import list of assertions. |
1000 | + |
1001 | + Import assertions by concatenating each assertion into a |
1002 | + string separated by a '\n'. Write this string to a instance file and |
1003 | + then invoke `snap ack /path/to/file` and check for errors. |
1004 | + If snap exits 0, then all assertions are imported. |
1005 | + """ |
1006 | + if not assertions: |
1007 | + return |
1008 | + LOG.debug('Importing user-provided snap assertions') |
1009 | + if isinstance(assertions, dict): |
1010 | + assertions = assertions.values() |
1011 | + elif not isinstance(assertions, list): |
1012 | + raise TypeError( |
1013 | + 'assertion parameter was not a list or dict: {assertions}'.format( |
1014 | + assertions=assertions)) |
1015 | + |
1016 | + snap_cmd = [SNAP_CMD, 'ack'] |
1017 | + combined = "\n".join(assertions) |
1018 | + |
1019 | + for asrt in assertions: |
1020 | + LOG.debug('Snap acking: %s', asrt.split('\n')[0:2]) |
1021 | + |
1022 | + util.write_file(ASSERTIONS_FILE, combined.encode('utf-8')) |
1023 | + util.subp(snap_cmd + [ASSERTIONS_FILE], capture=True) |
1024 | + |
1025 | + |
1026 | +def run_commands(commands): |
1027 | + """Run the provided commands provided in snap:commands configuration. |
1028 | + |
1029 | + Commands are run individually. Any errors are collected and reported |
1030 | + after attempting all commands. |
1031 | + |
1032 | + @param commands: A list or dict containing commands to run. Keys of a |
1033 | + dict will be used to order the commands provided as dict values. |
1034 | + """ |
1035 | + if not commands: |
1036 | + return |
1037 | + LOG.debug('Running user-provided snap commands') |
1038 | + if isinstance(commands, dict): |
1039 | + # Sort commands based on dictionary key |
1040 | + commands = [v for _, v in sorted(commands.items())] |
1041 | + elif not isinstance(commands, list): |
1042 | + raise TypeError( |
1043 | + 'commands parameter was not a list or dict: {commands}'.format( |
1044 | + commands=commands)) |
1045 | + |
1046 | + fixed_snap_commands = prepend_base_command('snap', commands) |
1047 | + |
1048 | + cmd_failures = [] |
1049 | + for command in fixed_snap_commands: |
1050 | + shell = isinstance(command, str) |
1051 | + try: |
1052 | + util.subp(command, shell=shell, status_cb=sys.stderr.write) |
1053 | + except util.ProcessExecutionError as e: |
1054 | + cmd_failures.append(str(e)) |
1055 | + if cmd_failures: |
1056 | + msg = 'Failures running snap commands:\n{cmd_failures}'.format( |
1057 | + cmd_failures=cmd_failures) |
1058 | + util.logexc(LOG, msg) |
1059 | + raise RuntimeError(msg) |
1060 | + |
1061 | + |
1062 | +# RELEASE_BLOCKER: Once LP: #1628289 is released on xenial, drop this function. |
1063 | +def maybe_install_squashfuse(cloud): |
1064 | + """Install squashfuse if we are in a container.""" |
1065 | + if not util.is_container(): |
1066 | + return |
1067 | + try: |
1068 | + cloud.distro.update_package_sources() |
1069 | + except Exception as e: |
1070 | + util.logexc(LOG, "Package update failed") |
1071 | + raise |
1072 | + try: |
1073 | + cloud.distro.install_packages(['squashfuse']) |
1074 | + except Exception as e: |
1075 | + util.logexc(LOG, "Failed to install squashfuse") |
1076 | + raise |
1077 | + |
1078 | + |
1079 | +def handle(name, cfg, cloud, log, args): |
1080 | + cfgin = cfg.get('snap', {}) |
1081 | + if not cfgin: |
1082 | + LOG.debug(("Skipping module named %s," |
1083 | + " no 'snap' key in configuration"), name) |
1084 | + return |
1085 | + |
1086 | + validate_cloudconfig_schema(cfg, schema) |
1087 | + if util.is_true(cfgin.get('squashfuse_in_container', False)): |
1088 | + maybe_install_squashfuse(cloud) |
1089 | + add_assertions(cfgin.get('assertions', [])) |
1090 | + run_commands(cfgin.get('commands', [])) |
1091 | + |
1092 | +# vi: ts=4 expandtab |
1093 | diff --git a/cloudinit/config/cc_snap_config.py b/cloudinit/config/cc_snap_config.py |
1094 | index e82c081..afe297e 100644 |
1095 | --- a/cloudinit/config/cc_snap_config.py |
1096 | +++ b/cloudinit/config/cc_snap_config.py |
1097 | @@ -4,11 +4,15 @@ |
1098 | # |
1099 | # This file is part of cloud-init. See LICENSE file for license information. |
1100 | |
1101 | +# RELEASE_BLOCKER: Remove this deprecated module in 18.3 |
1102 | """ |
1103 | Snap Config |
1104 | ----------- |
1105 | **Summary:** snap_config modules allows configuration of snapd. |
1106 | |
1107 | +**Deprecated**: Use :ref:`snap` module instead. This module will not exist |
1108 | +in cloud-init 18.3. |
1109 | + |
1110 | This module uses the same ``snappy`` namespace for configuration but |
1111 | acts only only a subset of the configuration. |
1112 | |
1113 | @@ -154,6 +158,9 @@ def handle(name, cfg, cloud, log, args): |
1114 | LOG.debug('No snappy config provided, skipping') |
1115 | return |
1116 | |
1117 | + log.warning( |
1118 | + 'DEPRECATION: snap_config module will be dropped in 18.3 release.' |
1119 | + ' Use snap module instead') |
1120 | if not(util.system_is_snappy()): |
1121 | LOG.debug("%s: system not snappy", name) |
1122 | return |
1123 | diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py |
1124 | index eecb817..bab80bb 100644 |
1125 | --- a/cloudinit/config/cc_snappy.py |
1126 | +++ b/cloudinit/config/cc_snappy.py |
1127 | @@ -1,10 +1,14 @@ |
1128 | # This file is part of cloud-init. See LICENSE file for license information. |
1129 | |
1130 | +# RELEASE_BLOCKER: Remove this deprecated module in 18.3 |
1131 | """ |
1132 | Snappy |
1133 | ------ |
1134 | **Summary:** snappy modules allows configuration of snappy. |
1135 | |
1136 | +**Deprecated**: Use :ref:`snap` module instead. This module will not exist |
1137 | +in cloud-init 18.3. |
1138 | + |
1139 | The below example config would install ``etcd``, and then install |
1140 | ``pkg2.smoser`` with a ``<config-file>`` argument where ``config-file`` has |
1141 | ``config-blob`` inside it. If ``pkgname`` is installed already, then |
1142 | @@ -271,6 +275,10 @@ def handle(name, cfg, cloud, log, args): |
1143 | LOG.debug("%s: 'auto' mode, and system not snappy", name) |
1144 | return |
1145 | |
1146 | + log.warning( |
1147 | + 'DEPRECATION: snappy module will be dropped in 18.3 release.' |
1148 | + ' Use snap module instead') |
1149 | + |
1150 | set_snappy_command() |
1151 | |
1152 | pkg_ops = get_package_ops(packages=mycfg['packages'], |
1153 | diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py |
1154 | index 35d8c57..98b0e66 100755 |
1155 | --- a/cloudinit/config/cc_ssh_authkey_fingerprints.py |
1156 | +++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py |
1157 | @@ -77,11 +77,10 @@ def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5', |
1158 | tbl = SimpleTable(tbl_fields) |
1159 | for entry in key_entries: |
1160 | if _is_printable_key(entry): |
1161 | - row = [] |
1162 | - row.append(entry.keytype or '-') |
1163 | - row.append(_gen_fingerprint(entry.base64, hash_meth) or '-') |
1164 | - row.append(entry.options or '-') |
1165 | - row.append(entry.comment or '-') |
1166 | + row = [entry.keytype or '-', |
1167 | + _gen_fingerprint(entry.base64, hash_meth) or '-', |
1168 | + entry.options or '-', |
1169 | + entry.comment or '-'] |
1170 | tbl.add_row(row) |
1171 | authtbl_s = tbl.get_string() |
1172 | authtbl_lines = authtbl_s.splitlines() |
1173 | diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py |
1174 | new file mode 100644 |
1175 | index 0000000..16b1868 |
1176 | --- /dev/null |
1177 | +++ b/cloudinit/config/cc_ubuntu_advantage.py |
1178 | @@ -0,0 +1,173 @@ |
1179 | +# Copyright (C) 2018 Canonical Ltd. |
1180 | +# |
1181 | +# This file is part of cloud-init. See LICENSE file for license information. |
1182 | + |
1183 | +"""Ubuntu advantage: manage ubuntu-advantage offerings from Canonical.""" |
1184 | + |
1185 | +import sys |
1186 | +from textwrap import dedent |
1187 | + |
1188 | +from cloudinit import log as logging |
1189 | +from cloudinit.config.schema import ( |
1190 | + get_schema_doc, validate_cloudconfig_schema) |
1191 | +from cloudinit.settings import PER_INSTANCE |
1192 | +from cloudinit.subp import prepend_base_command |
1193 | +from cloudinit import util |
1194 | + |
1195 | + |
1196 | +distros = ['ubuntu'] |
1197 | +frequency = PER_INSTANCE |
1198 | + |
1199 | +LOG = logging.getLogger(__name__) |
1200 | + |
1201 | +schema = { |
1202 | + 'id': 'cc_ubuntu_advantage', |
1203 | + 'name': 'Ubuntu Advantage', |
1204 | + 'title': 'Install, configure and manage ubuntu-advantage offerings', |
1205 | + 'description': dedent("""\ |
1206 | +    This module provides configuration options to set up ubuntu-advantage |
1207 | + subscriptions. |
1208 | + |
1209 | + .. note:: |
1210 | +        The ``commands`` value can be either a dictionary or a list. If |
1211 | + the configuration provided is a dictionary, the keys are only used |
1212 | + to order the execution of the commands and the dictionary is |
1213 | + merged with any vendor-data ubuntu-advantage configuration |
1214 | +        provided. If ``commands`` is provided as a list, any vendor-data |
1215 | + ubuntu-advantage ``commands`` are ignored. |
1216 | + |
1217 | + Ubuntu-advantage ``commands`` is a dictionary or list of |
1218 | + ubuntu-advantage commands to run on the deployed machine. |
1219 | + These commands can be used to enable or disable subscriptions to |
1220 | + various ubuntu-advantage products. See 'man ubuntu-advantage' for more |
1221 | + information on supported subcommands. |
1222 | + |
1223 | + .. note:: |
1224 | + Each command item can be a string or list. If the item is a list, |
1225 | + 'ubuntu-advantage' can be omitted and it will automatically be |
1226 | + inserted as part of the command. |
1227 | + """), |
1228 | + 'distros': distros, |
1229 | + 'examples': [dedent("""\ |
1230 | + # Enable Extended Security Maintenance using your service auth token |
1231 | + ubuntu-advantage: |
1232 | + commands: |
1233 | + 00: ubuntu-advantage enable-esm <token> |
1234 | + """), dedent("""\ |
1235 | + # Enable livepatch by providing your livepatch token |
1236 | + ubuntu-advantage: |
1237 | + commands: |
1238 | + 00: ubuntu-advantage enable-livepatch <livepatch-token> |
1239 | + |
1240 | + """), dedent("""\ |
1241 | + # Convenience: the ubuntu-advantage command can be omitted when |
1242 | + # specifying commands as a list and 'ubuntu-advantage' will |
1243 | + # automatically be prepended. |
1244 | + # The following commands are equivalent |
1245 | + ubuntu-advantage: |
1246 | + commands: |
1247 | + 00: ['enable-livepatch', 'my-token'] |
1248 | + 01: ['ubuntu-advantage', 'enable-livepatch', 'my-token'] |
1249 | + 02: ubuntu-advantage enable-livepatch my-token |
1250 | + 03: 'ubuntu-advantage enable-livepatch my-token' |
1251 | + """)], |
1252 | + 'frequency': PER_INSTANCE, |
1253 | + 'type': 'object', |
1254 | + 'properties': { |
1255 | + 'ubuntu-advantage': { |
1256 | + 'type': 'object', |
1257 | + 'properties': { |
1258 | + 'commands': { |
1259 | + 'type': ['object', 'array'], # Array of strings or dict |
1260 | + 'items': { |
1261 | + 'oneOf': [ |
1262 | + {'type': 'array', 'items': {'type': 'string'}}, |
1263 | + {'type': 'string'}] |
1264 | + }, |
1265 | + 'additionalItems': False, # Reject non-string & non-list |
1266 | + 'minItems': 1, |
1267 | + 'minProperties': 1, |
1268 | + 'uniqueItems': True |
1269 | + } |
1270 | + }, |
1271 | + 'additionalProperties': False, # Reject keys not in schema |
1272 | + 'required': ['commands'] |
1273 | + } |
1274 | + } |
1275 | +} |
1276 | + |
1277 | +# TODO schema for 'commands' is too permissive at the moment. |
1278 | +# Once python-jsonschema supports schema draft 6 add support for arbitrary |
1279 | +# object keys with 'patternProperties' constraint to validate string values. |
1280 | + |
1281 | +__doc__ = get_schema_doc(schema) # Supplement python help() |
1282 | + |
1283 | +UA_CMD = "ubuntu-advantage" |
1284 | + |
1285 | + |
1286 | +def run_commands(commands): |
1287 | + """Run the commands provided in ubuntu-advantage:commands config. |
1288 | + |
1289 | + Commands are run individually. Any errors are collected and reported |
1290 | + after attempting all commands. |
1291 | + |
1292 | + @param commands: A list or dict containing commands to run. Keys of a |
1293 | + dict will be used to order the commands provided as dict values. |
1294 | + """ |
1295 | + if not commands: |
1296 | + return |
1297 | + LOG.debug('Running user-provided ubuntu-advantage commands') |
1298 | + if isinstance(commands, dict): |
1299 | + # Sort commands based on dictionary key |
1300 | + commands = [v for _, v in sorted(commands.items())] |
1301 | + elif not isinstance(commands, list): |
1302 | + raise TypeError( |
1303 | + 'commands parameter was not a list or dict: {commands}'.format( |
1304 | + commands=commands)) |
1305 | + |
1306 | + fixed_ua_commands = prepend_base_command('ubuntu-advantage', commands) |
1307 | + |
1308 | + cmd_failures = [] |
1309 | + for command in fixed_ua_commands: |
1310 | + shell = isinstance(command, str) |
1311 | + try: |
1312 | + util.subp(command, shell=shell, status_cb=sys.stderr.write) |
1313 | + except util.ProcessExecutionError as e: |
1314 | + cmd_failures.append(str(e)) |
1315 | + if cmd_failures: |
1316 | + msg = ( |
1317 | + 'Failures running ubuntu-advantage commands:\n' |
1318 | + '{cmd_failures}'.format( |
1319 | + cmd_failures=cmd_failures)) |
1320 | + util.logexc(LOG, msg) |
1321 | + raise RuntimeError(msg) |
1322 | + |
1323 | + |
1324 | +def maybe_install_ua_tools(cloud): |
1325 | + """Install ubuntu-advantage-tools if not present.""" |
1326 | + if util.which('ubuntu-advantage'): |
1327 | + return |
1328 | + try: |
1329 | + cloud.distro.update_package_sources() |
1330 | + except Exception as e: |
1331 | + util.logexc(LOG, "Package update failed") |
1332 | + raise |
1333 | + try: |
1334 | + cloud.distro.install_packages(['ubuntu-advantage-tools']) |
1335 | + except Exception as e: |
1336 | + util.logexc(LOG, "Failed to install ubuntu-advantage-tools") |
1337 | + raise |
1338 | + |
1339 | + |
1340 | +def handle(name, cfg, cloud, log, args): |
1341 | + cfgin = cfg.get('ubuntu-advantage') |
1342 | + if cfgin is None: |
1343 | + LOG.debug(("Skipping module named %s," |
1344 | + " no 'ubuntu-advantage' key in configuration"), name) |
1345 | + return |
1346 | + |
1347 | + validate_cloudconfig_schema(cfg, schema) |
1348 | + maybe_install_ua_tools(cloud) |
1349 | + run_commands(cfgin.get('commands', [])) |
1350 | + |
1351 | +# vi: ts=4 expandtab |
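Before execution the module normalizes the ``commands`` config: a dictionary is flattened into a list ordered by key, and list-form commands get ``ubuntu-advantage`` prepended (prepend_base_command itself is added to cloudinit/subp.py elsewhere in this diff). A rough standalone sketch of that normalization, written as an assumption about the helper's behaviour rather than its exact implementation:

    def normalize_ua_commands(commands, base='ubuntu-advantage'):
        # Dicts are ordered by key; list-form commands missing the base
        # command get it prepended; string commands pass through as-is.
        if isinstance(commands, dict):
            commands = [value for _, value in sorted(commands.items())]
        fixed = []
        for command in commands:
            if isinstance(command, list) and command[:1] != [base]:
                command = [base] + command
            fixed.append(command)
        return fixed

    # normalize_ua_commands({'01': ['enable-livepatch', 'tok']})
    # -> [['ubuntu-advantage', 'enable-livepatch', 'tok']]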
1352 | diff --git a/cloudinit/config/tests/test_snap.py b/cloudinit/config/tests/test_snap.py |
1353 | new file mode 100644 |
1354 | index 0000000..c5b4a9d |
1355 | --- /dev/null |
1356 | +++ b/cloudinit/config/tests/test_snap.py |
1357 | @@ -0,0 +1,490 @@ |
1358 | +# This file is part of cloud-init. See LICENSE file for license information. |
1359 | + |
1360 | +import re |
1361 | +from six import StringIO |
1362 | + |
1363 | +from cloudinit.config.cc_snap import ( |
1364 | + ASSERTIONS_FILE, add_assertions, handle, maybe_install_squashfuse, |
1365 | + run_commands, schema) |
1366 | +from cloudinit.config.schema import validate_cloudconfig_schema |
1367 | +from cloudinit import util |
1368 | +from cloudinit.tests.helpers import ( |
1369 | + CiTestCase, mock, wrap_and_call, skipUnlessJsonSchema) |
1370 | + |
1371 | + |
1372 | +SYSTEM_USER_ASSERTION = """\ |
1373 | +type: system-user |
1374 | +authority-id: LqvZQdfyfGlYvtep4W6Oj6pFXP9t1Ksp |
1375 | +brand-id: LqvZQdfyfGlYvtep4W6Oj6pFXP9t1Ksp |
1376 | +email: foo@bar.com |
1377 | +password: $6$E5YiAuMIPAwX58jG$miomhVNui/vf7f/3ctB/f0RWSKFxG0YXzrJ9rtJ1ikvzt |
1378 | +series: |
1379 | +- 16 |
1380 | +since: 2016-09-10T16:34:00+03:00 |
1381 | +until: 2017-11-10T16:34:00+03:00 |
1382 | +username: baz |
1383 | +sign-key-sha3-384: RuVvnp4n52GilycjfbbTCI3_L8Y6QlIE75wxMc0KzGV3AUQqVd9GuXoj |
1384 | + |
1385 | +AcLBXAQAAQoABgUCV/UU1wAKCRBKnlMoJQLkZVeLD/9/+hIeVywtzsDA3oxl+P+u9D13y9s6svP |
1386 | +Jd6Wnf4FTw6sq1GjBE4ZA7lrwSaRCUJ9Vcsvf2q9OGPY7mOb2TBxaDe0PbUMjrSrqllSSQwhpNI |
1387 | +zG+NxkkKuxsUmLzFa+k9m6cyojNbw5LFhQZBQCGlr3JYqC0tIREq/UsZxj+90TUC87lDJwkU8GF |
1388 | +s4CR+rejZj4itIcDcVxCSnJH6hv6j2JrJskJmvObqTnoOlcab+JXdamXqbldSP3UIhWoyVjqzkj |
1389 | ++to7mXgx+cCUA9+ngNCcfUG+1huGGTWXPCYkZ78HvErcRlIdeo4d3xwtz1cl/w3vYnq9og1XwsP |
1390 | +Yfetr3boig2qs1Y+j/LpsfYBYncgWjeDfAB9ZZaqQz/oc8n87tIPZDJHrusTlBfop8CqcM4xsKS |
1391 | +d+wnEY8e/F24mdSOYmS1vQCIDiRU3MKb6x138Ud6oHXFlRBbBJqMMctPqWDunWzb5QJ7YR0I39q |
1392 | +BrnEqv5NE0G7w6HOJ1LSPG5Hae3P4T2ea+ATgkb03RPr3KnXnzXg4TtBbW1nytdlgoNc/BafE1H |
1393 | +f3NThcq9gwX4xWZ2PAWnqVPYdDMyCtzW3Ck+o6sIzx+dh4gDLPHIi/6TPe/pUuMop9CBpWwez7V |
1394 | +v1z+1+URx6Xlq3Jq18y5pZ6fY3IDJ6km2nQPMzcm4Q==""" |
1395 | + |
1396 | +ACCOUNT_ASSERTION = """\ |
1397 | +type: account-key |
1398 | +authority-id: canonical |
1399 | +revision: 2 |
1400 | +public-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0 |
1401 | +account-id: canonical |
1402 | +name: store |
1403 | +since: 2016-04-01T00:00:00.0Z |
1404 | +body-length: 717 |
1405 | +sign-key-sha3-384: -CvQKAwRQ5h3Ffn10FILJoEZUXOv6km9FwA80-Rcj-f-6jadQ89VRswH |
1406 | + |
1407 | +AcbBTQRWhcGAARAA0KKYYQWuHOrsFVi4p4l7ZzSvX7kLgJFFeFgOkzdWKBTHEnsMKjl5mefFe9j |
1408 | +qe8NlmJdfY7BenP7XeBtwKp700H/t9lLrZbpTNAPHXYxEWFJp5bPqIcJYBZ+29oLVLN1Tc5X482 |
1409 | +vCiDqL8+pPYqBrK2fNlyPlNNSum9wI70rDDL4r6FVvr+osTnGejibdV8JphWX+lrSQDnRSdM8KJ |
1410 | +UM43vTgLGTi9W54oRhsA2OFexRfRksTrnqGoonCjqX5wO3OFSaMDzMsO2MJ/hPfLgDqw53qjzuK |
1411 | +Iec9OL3k5basvu2cj5u9tKwVFDsCKK2GbKUsWWpx2KTpOifmhmiAbzkTHbH9KaoMS7p0kJwhTQG |
1412 | +o9aJ9VMTWHJc/NCBx7eu451u6d46sBPCXS/OMUh2766fQmoRtO1OwCTxsRKG2kkjbMn54UdFULl |
1413 | +VfzvyghMNRKIezsEkmM8wueTqGUGZWa6CEZqZKwhe/PROxOPYzqtDH18XZknbU1n5lNb7vNfem9 |
1414 | +2ai+3+JyFnW9UhfvpVF7gzAgdyCqNli4C6BIN43uwoS8HkykocZS/+Gv52aUQ/NZ8BKOHLw+7an |
1415 | +Q0o8W9ltSLZbEMxFIPSN0stiZlkXAp6DLyvh1Y4wXSynDjUondTpej2fSvSlCz/W5v5V7qA4nIc |
1416 | +vUvV7RjVzv17ut0AEQEAAQ== |
1417 | + |
1418 | +AcLDXAQAAQoABgUCV83k9QAKCRDUpVvql9g3IBT8IACKZ7XpiBZ3W4lqbPssY6On81WmxQLtvsM |
1419 | +WTp6zZpl/wWOSt2vMNUk9pvcmrNq1jG9CuhDfWFLGXEjcrrmVkN3YuCOajMSPFCGrxsIBLSRt/b |
1420 | +nrKykdLAAzMfG8rP1d82bjFFiIieE+urQ0Kcv09Jtdvavq3JT1Tek5mFyyfhHNlQEKOzWqmRWiL |
1421 | +3c3VOZUs1ZD8TSlnuq/x+5T0X0YtOyGjSlVxk7UybbyMNd6MZfNaMpIG4x+mxD3KHFtBAC7O6kL |
1422 | +eX3i6j5nCY5UABfA3DZEAkWP4zlmdBEOvZ9t293NaDdOpzsUHRkoi0Zez/9BHQ/kwx/uNc2WqrY |
1423 | +inCmu16JGNeXqsyinnLl7Ghn2RwhvDMlLxF6RTx8xdx1yk6p3PBTwhZMUvuZGjUtN/AG8BmVJQ1 |
1424 | +rsGSRkkSywvnhVJRB2sudnrMBmNS2goJbzSbmJnOlBrd2WsV0T9SgNMWZBiov3LvU4o2SmAb6b+ |
1425 | +rYwh8H5QHcuuYJuxDjFhPswIp6Wes5T6hUicf3SWtObcDS4HSkVS4ImBjjX9YgCuFy7QdnooOWE |
1426 | +aPvkRw3XCVeYq0K6w9GRsk1YFErD4XmXXZjDYY650MX9v42Sz5MmphHV8jdIY5ssbadwFSe2rCQ |
1427 | +6UX08zy7RsIb19hTndE6ncvSNDChUR9eEnCm73eYaWTWTnq1cxdVP/s52r8uss++OYOkPWqh5nO |
1428 | +haRn7INjH/yZX4qXjNXlTjo0PnHH0q08vNKDwLhxS+D9du+70FeacXFyLIbcWllSbJ7DmbumGpF |
1429 | +yYbtj3FDDPzachFQdIG3lSt+cSUGeyfSs6wVtc3cIPka/2Urx7RprfmoWSI6+a5NcLdj0u2z8O9 |
1430 | +HxeIgxDpg/3gT8ZIuFKePMcLDM19Fh/p0ysCsX+84B9chNWtsMSmIaE57V+959MVtsLu7SLb9gi |
1431 | +skrju0pQCwsu2wHMLTNd1f3PTHmrr49hxetTus07HSQUApMtAGKzQilF5zqFjbyaTd4xgQbd+PK |
1432 | +CjFyzQTDOcUhXpuUGt/IzlqiFfsCsmbj2K4KdSNYMlqIgZ3Azu8KvZLIhsyN7v5vNIZSPfEbjde |
1433 | +ClU9r0VRiJmtYBUjcSghD9LWn+yRLwOxhfQVjm0cBwIt5R/yPF/qC76yIVuWUtM5Y2/zJR1J8OF |
1434 | +qWchvlImHtvDzS9FQeLyzJAOjvZ2CnWp2gILgUz0WQdOk1Dq8ax7KS9BQ42zxw9EZAEPw3PEFqR |
1435 | +IQsRTONp+iVS8YxSmoYZjDlCgRMWUmawez/Fv5b9Fb/XkO5Eq4e+KfrpUujXItaipb+tV8h5v3t |
1436 | +oG3Ie3WOHrVjCLXIdYslpL1O4nadqR6Xv58pHj6k""" |
1437 | + |
1438 | + |
1439 | +class FakeCloud(object): |
1440 | + def __init__(self, distro): |
1441 | + self.distro = distro |
1442 | + |
1443 | + |
1444 | +class TestAddAssertions(CiTestCase): |
1445 | + |
1446 | + with_logs = True |
1447 | + |
1448 | + def setUp(self): |
1449 | + super(TestAddAssertions, self).setUp() |
1450 | + self.tmp = self.tmp_dir() |
1451 | + |
1452 | + @mock.patch('cloudinit.config.cc_snap.util.subp') |
1453 | + def test_add_assertions_on_empty_list(self, m_subp): |
1454 | + """When provided with an empty list, add_assertions does nothing.""" |
1455 | + add_assertions([]) |
1456 | + self.assertEqual('', self.logs.getvalue()) |
1457 | + m_subp.assert_not_called() |
1458 | + |
1459 | + def test_add_assertions_on_non_list_or_dict(self): |
1460 | + """When provided an invalid type, add_assertions raises an error.""" |
1461 | + with self.assertRaises(TypeError) as context_manager: |
1462 | + add_assertions(assertions="I'm Not Valid") |
1463 | + self.assertEqual( |
1464 | + "assertion parameter was not a list or dict: I'm Not Valid", |
1465 | + str(context_manager.exception)) |
1466 | + |
1467 | + @mock.patch('cloudinit.config.cc_snap.util.subp') |
1468 | + def test_add_assertions_adds_assertions_as_list(self, m_subp): |
1469 | + """When provided with a list, add_assertions adds all assertions.""" |
1470 | + self.assertEqual( |
1471 | + ASSERTIONS_FILE, '/var/lib/cloud/instance/snapd.assertions') |
1472 | + assert_file = self.tmp_path('snapd.assertions', dir=self.tmp) |
1473 | + assertions = [SYSTEM_USER_ASSERTION, ACCOUNT_ASSERTION] |
1474 | + wrap_and_call( |
1475 | + 'cloudinit.config.cc_snap', |
1476 | + {'ASSERTIONS_FILE': {'new': assert_file}}, |
1477 | + add_assertions, assertions) |
1478 | + self.assertIn( |
1479 | + 'Importing user-provided snap assertions', self.logs.getvalue()) |
1480 | + self.assertIn( |
1481 | + 'sertions', self.logs.getvalue()) |
1482 | + self.assertEqual( |
1483 | + [mock.call(['snap', 'ack', assert_file], capture=True)], |
1484 | + m_subp.call_args_list) |
1485 | + compare_file = self.tmp_path('comparison', dir=self.tmp) |
1486 | + util.write_file(compare_file, '\n'.join(assertions).encode('utf-8')) |
1487 | + self.assertEqual( |
1488 | + util.load_file(compare_file), util.load_file(assert_file)) |
1489 | + |
1490 | + @mock.patch('cloudinit.config.cc_snap.util.subp') |
1491 | + def test_add_assertions_adds_assertions_as_dict(self, m_subp): |
1492 | + """When provided with a dict, add_assertions adds all assertions.""" |
1493 | + self.assertEqual( |
1494 | + ASSERTIONS_FILE, '/var/lib/cloud/instance/snapd.assertions') |
1495 | + assert_file = self.tmp_path('snapd.assertions', dir=self.tmp) |
1496 | + assertions = {'00': SYSTEM_USER_ASSERTION, '01': ACCOUNT_ASSERTION} |
1497 | + wrap_and_call( |
1498 | + 'cloudinit.config.cc_snap', |
1499 | + {'ASSERTIONS_FILE': {'new': assert_file}}, |
1500 | + add_assertions, assertions) |
1501 | + self.assertIn( |
1502 | + 'Importing user-provided snap assertions', self.logs.getvalue()) |
1503 | + self.assertIn( |
1504 | + "DEBUG: Snap acking: ['type: system-user', 'authority-id: Lqv", |
1505 | + self.logs.getvalue()) |
1506 | + self.assertIn( |
1507 | + "DEBUG: Snap acking: ['type: account-key', 'authority-id: canonic", |
1508 | + self.logs.getvalue()) |
1509 | + self.assertEqual( |
1510 | + [mock.call(['snap', 'ack', assert_file], capture=True)], |
1511 | + m_subp.call_args_list) |
1512 | + compare_file = self.tmp_path('comparison', dir=self.tmp) |
1513 | + combined = '\n'.join(assertions.values()) |
1514 | + util.write_file(compare_file, combined.encode('utf-8')) |
1515 | + self.assertEqual( |
1516 | + util.load_file(compare_file), util.load_file(assert_file)) |
1517 | + |
1518 | + |
1519 | +class TestRunCommands(CiTestCase): |
1520 | + |
1521 | + with_logs = True |
1522 | + |
1523 | + def setUp(self): |
1524 | + super(TestRunCommands, self).setUp() |
1525 | + self.tmp = self.tmp_dir() |
1526 | + |
1527 | + @mock.patch('cloudinit.config.cc_snap.util.subp') |
1528 | + def test_run_commands_on_empty_list(self, m_subp): |
1529 | + """When provided with an empty list, run_commands does nothing.""" |
1530 | + run_commands([]) |
1531 | + self.assertEqual('', self.logs.getvalue()) |
1532 | + m_subp.assert_not_called() |
1533 | + |
1534 | + def test_run_commands_on_non_list_or_dict(self): |
1535 | + """When provided an invalid type, run_commands raises an error.""" |
1536 | + with self.assertRaises(TypeError) as context_manager: |
1537 | + run_commands(commands="I'm Not Valid") |
1538 | + self.assertEqual( |
1539 | + "commands parameter was not a list or dict: I'm Not Valid", |
1540 | + str(context_manager.exception)) |
1541 | + |
1542 | + def test_run_command_logs_commands_and_exit_codes_to_stderr(self): |
1543 | + """All exit codes are logged to stderr.""" |
1544 | + outfile = self.tmp_path('output.log', dir=self.tmp) |
1545 | + |
1546 | + cmd1 = 'echo "HI" >> %s' % outfile |
1547 | + cmd2 = 'bogus command' |
1548 | + cmd3 = 'echo "MOM" >> %s' % outfile |
1549 | + commands = [cmd1, cmd2, cmd3] |
1550 | + |
1551 | + mock_path = 'cloudinit.config.cc_snap.sys.stderr' |
1552 | + with mock.patch(mock_path, new_callable=StringIO) as m_stderr: |
1553 | + with self.assertRaises(RuntimeError) as context_manager: |
1554 | + run_commands(commands=commands) |
1555 | + |
1556 | + self.assertIsNotNone( |
1557 | + re.search(r'bogus: (command )?not found', |
1558 | + str(context_manager.exception)), |
1559 | + msg='Expected bogus command not found') |
1560 | + expected_stderr_log = '\n'.join([ |
1561 | + 'Begin run command: {cmd}'.format(cmd=cmd1), |
1562 | + 'End run command: exit(0)', |
1563 | + 'Begin run command: {cmd}'.format(cmd=cmd2), |
1564 | + 'ERROR: End run command: exit(127)', |
1565 | + 'Begin run command: {cmd}'.format(cmd=cmd3), |
1566 | + 'End run command: exit(0)\n']) |
1567 | + self.assertEqual(expected_stderr_log, m_stderr.getvalue()) |
1568 | + |
1569 | + def test_run_command_as_lists(self): |
1570 | + """When commands are specified as a list, run them in order.""" |
1571 | + outfile = self.tmp_path('output.log', dir=self.tmp) |
1572 | + |
1573 | + cmd1 = 'echo "HI" >> %s' % outfile |
1574 | + cmd2 = 'echo "MOM" >> %s' % outfile |
1575 | + commands = [cmd1, cmd2] |
1576 | + mock_path = 'cloudinit.config.cc_snap.sys.stderr' |
1577 | + with mock.patch(mock_path, new_callable=StringIO): |
1578 | + run_commands(commands=commands) |
1579 | + |
1580 | + self.assertIn( |
1581 | + 'DEBUG: Running user-provided snap commands', |
1582 | + self.logs.getvalue()) |
1583 | + self.assertEqual('HI\nMOM\n', util.load_file(outfile)) |
1584 | + self.assertIn( |
1585 | + 'WARNING: Non-snap commands in snap config:', self.logs.getvalue()) |
1586 | + |
1587 | + def test_run_command_dict_sorted_as_command_script(self): |
1588 | + """When commands are a dict, sort them and run.""" |
1589 | + outfile = self.tmp_path('output.log', dir=self.tmp) |
1590 | + cmd1 = 'echo "HI" >> %s' % outfile |
1591 | + cmd2 = 'echo "MOM" >> %s' % outfile |
1592 | + commands = {'02': cmd1, '01': cmd2} |
1593 | + mock_path = 'cloudinit.config.cc_snap.sys.stderr' |
1594 | + with mock.patch(mock_path, new_callable=StringIO): |
1595 | + run_commands(commands=commands) |
1596 | + |
1597 | + expected_messages = [ |
1598 | + 'DEBUG: Running user-provided snap commands'] |
1599 | + for message in expected_messages: |
1600 | + self.assertIn(message, self.logs.getvalue()) |
1601 | + self.assertEqual('MOM\nHI\n', util.load_file(outfile)) |
1602 | + |
1603 | + |
1604 | +@skipUnlessJsonSchema() |
1605 | +class TestSchema(CiTestCase): |
1606 | + |
1607 | + with_logs = True |
1608 | + |
1609 | + def test_schema_warns_on_snap_not_as_dict(self): |
1610 | + """If the snap configuration is not a dict, emit a warning.""" |
1611 | + validate_cloudconfig_schema({'snap': 'wrong type'}, schema) |
1612 | + self.assertEqual( |
1613 | + "WARNING: Invalid config:\nsnap: 'wrong type' is not of type" |
1614 | + " 'object'\n", |
1615 | + self.logs.getvalue()) |
1616 | + |
1617 | + @mock.patch('cloudinit.config.cc_snap.run_commands') |
1618 | + def test_schema_disallows_unknown_keys(self, _): |
1619 | + """Unknown keys in the snap configuration emit warnings.""" |
1620 | + validate_cloudconfig_schema( |
1621 | + {'snap': {'commands': ['ls'], 'invalid-key': ''}}, schema) |
1622 | + self.assertIn( |
1623 | + 'WARNING: Invalid config:\nsnap: Additional properties are not' |
1624 | + " allowed ('invalid-key' was unexpected)", |
1625 | + self.logs.getvalue()) |
1626 | + |
1627 | + def test_warn_schema_requires_either_commands_or_assertions(self): |
1628 | + """Warn when snap configuration lacks both commands and assertions.""" |
1629 | + validate_cloudconfig_schema( |
1630 | + {'snap': {}}, schema) |
1631 | + self.assertIn( |
1632 | + 'WARNING: Invalid config:\nsnap: {} does not have enough' |
1633 | + ' properties', |
1634 | + self.logs.getvalue()) |
1635 | + |
1636 | + @mock.patch('cloudinit.config.cc_snap.run_commands') |
1637 | + def test_warn_schema_commands_is_not_list_or_dict(self, _): |
1638 | + """Warn when snap:commands config is not a list or dict.""" |
1639 | + validate_cloudconfig_schema( |
1640 | + {'snap': {'commands': 'broken'}}, schema) |
1641 | + self.assertEqual( |
1642 | + "WARNING: Invalid config:\nsnap.commands: 'broken' is not of type" |
1643 | + " 'object', 'array'\n", |
1644 | + self.logs.getvalue()) |
1645 | + |
1646 | + @mock.patch('cloudinit.config.cc_snap.run_commands') |
1647 | + def test_warn_schema_when_commands_is_empty(self, _): |
1648 | + """Emit warnings when snap:commands is an empty list or dict.""" |
1649 | + validate_cloudconfig_schema( |
1650 | + {'snap': {'commands': []}}, schema) |
1651 | + validate_cloudconfig_schema( |
1652 | + {'snap': {'commands': {}}}, schema) |
1653 | + self.assertEqual( |
1654 | + "WARNING: Invalid config:\nsnap.commands: [] is too short\n" |
1655 | + "WARNING: Invalid config:\nsnap.commands: {} does not have enough" |
1656 | + " properties\n", |
1657 | + self.logs.getvalue()) |
1658 | + |
1659 | + @mock.patch('cloudinit.config.cc_snap.run_commands') |
1660 | + def test_schema_when_commands_are_list_or_dict(self, _): |
1661 | + """No warnings when snap:commands are either a list or dict.""" |
1662 | + validate_cloudconfig_schema( |
1663 | + {'snap': {'commands': ['valid']}}, schema) |
1664 | + validate_cloudconfig_schema( |
1665 | + {'snap': {'commands': {'01': 'also valid'}}}, schema) |
1666 | + self.assertEqual('', self.logs.getvalue()) |
1667 | + |
1668 | + @mock.patch('cloudinit.config.cc_snap.add_assertions') |
1669 | + def test_warn_schema_assertions_is_not_list_or_dict(self, _): |
1670 | + """Warn when snap:assertions config is not a list or dict.""" |
1671 | + validate_cloudconfig_schema( |
1672 | + {'snap': {'assertions': 'broken'}}, schema) |
1673 | + self.assertEqual( |
1674 | + "WARNING: Invalid config:\nsnap.assertions: 'broken' is not of" |
1675 | + " type 'object', 'array'\n", |
1676 | + self.logs.getvalue()) |
1677 | + |
1678 | + @mock.patch('cloudinit.config.cc_snap.add_assertions') |
1679 | + def test_warn_schema_when_assertions_is_empty(self, _): |
1680 | + """Emit warnings when snap:assertions is an empty list or dict.""" |
1681 | + validate_cloudconfig_schema( |
1682 | + {'snap': {'assertions': []}}, schema) |
1683 | + validate_cloudconfig_schema( |
1684 | + {'snap': {'assertions': {}}}, schema) |
1685 | + self.assertEqual( |
1686 | + "WARNING: Invalid config:\nsnap.assertions: [] is too short\n" |
1687 | + "WARNING: Invalid config:\nsnap.assertions: {} does not have" |
1688 | + " enough properties\n", |
1689 | + self.logs.getvalue()) |
1690 | + |
1691 | + @mock.patch('cloudinit.config.cc_snap.add_assertions') |
1692 | + def test_schema_when_assertions_are_list_or_dict(self, _): |
1693 | + """No warnings when snap:assertions are a list or dict.""" |
1694 | + validate_cloudconfig_schema( |
1695 | + {'snap': {'assertions': ['valid']}}, schema) |
1696 | + validate_cloudconfig_schema( |
1697 | + {'snap': {'assertions': {'01': 'also valid'}}}, schema) |
1698 | + self.assertEqual('', self.logs.getvalue()) |
1699 | + |
1700 | + |
1701 | +class TestHandle(CiTestCase): |
1702 | + |
1703 | + with_logs = True |
1704 | + |
1705 | + def setUp(self): |
1706 | + super(TestHandle, self).setUp() |
1707 | + self.tmp = self.tmp_dir() |
1708 | + |
1709 | + @mock.patch('cloudinit.config.cc_snap.run_commands') |
1710 | + @mock.patch('cloudinit.config.cc_snap.add_assertions') |
1711 | + @mock.patch('cloudinit.config.cc_snap.validate_cloudconfig_schema') |
1712 | + def test_handle_no_config(self, m_schema, m_add, m_run): |
1713 | + """When no snap-related configuration is provided, nothing happens.""" |
1714 | + cfg = {} |
1715 | + handle('snap', cfg=cfg, cloud=None, log=self.logger, args=None) |
1716 | + self.assertIn( |
1717 | + "DEBUG: Skipping module named snap, no 'snap' key in config", |
1718 | + self.logs.getvalue()) |
1719 | + m_schema.assert_not_called() |
1720 | + m_add.assert_not_called() |
1721 | + m_run.assert_not_called() |
1722 | + |
1723 | + @mock.patch('cloudinit.config.cc_snap.run_commands') |
1724 | + @mock.patch('cloudinit.config.cc_snap.add_assertions') |
1725 | + @mock.patch('cloudinit.config.cc_snap.maybe_install_squashfuse') |
1726 | + def test_handle_skips_squashfuse_when_unconfigured(self, m_squash, m_add, |
1727 | + m_run): |
1728 | + """When squashfuse_in_container is unset, don't attempt to install.""" |
1729 | + handle( |
1730 | + 'snap', cfg={'snap': {}}, cloud=None, log=self.logger, args=None) |
1731 | + handle( |
1732 | + 'snap', cfg={'snap': {'squashfuse_in_container': None}}, |
1733 | + cloud=None, log=self.logger, args=None) |
1734 | + handle( |
1735 | + 'snap', cfg={'snap': {'squashfuse_in_container': False}}, |
1736 | + cloud=None, log=self.logger, args=None) |
1737 | + self.assertEqual([], m_squash.call_args_list) # No calls |
1738 | + # snap configuration missing assertions and commands will default to [] |
1739 | + self.assertIn(mock.call([]), m_add.call_args_list) |
1740 | + self.assertIn(mock.call([]), m_run.call_args_list) |
1741 | + |
1742 | + @mock.patch('cloudinit.config.cc_snap.maybe_install_squashfuse') |
1743 | + def test_handle_tries_to_install_squashfuse(self, m_squash): |
1744 | + """If squashfuse_in_container is True, try installing squashfuse.""" |
1745 | + cfg = {'snap': {'squashfuse_in_container': True}} |
1746 | + mycloud = FakeCloud(None) |
1747 | + handle('snap', cfg=cfg, cloud=mycloud, log=self.logger, args=None) |
1748 | + self.assertEqual( |
1749 | + [mock.call(mycloud)], m_squash.call_args_list) |
1750 | + |
1751 | + def test_handle_runs_commands_provided(self): |
1752 | + """If commands are specified as a list, run them.""" |
1753 | + outfile = self.tmp_path('output.log', dir=self.tmp) |
1754 | + |
1755 | + cfg = { |
1756 | + 'snap': {'commands': ['echo "HI" >> %s' % outfile, |
1757 | + 'echo "MOM" >> %s' % outfile]}} |
1758 | + mock_path = 'cloudinit.config.cc_snap.sys.stderr' |
1759 | + with mock.patch(mock_path, new_callable=StringIO): |
1760 | + handle('snap', cfg=cfg, cloud=None, log=self.logger, args=None) |
1761 | + self.assertEqual('HI\nMOM\n', util.load_file(outfile)) |
1762 | + |
1763 | + @mock.patch('cloudinit.config.cc_snap.util.subp') |
1764 | + def test_handle_adds_assertions(self, m_subp): |
1765 | + """Any configured snap assertions are provided to add_assertions.""" |
1766 | + assert_file = self.tmp_path('snapd.assertions', dir=self.tmp) |
1767 | + compare_file = self.tmp_path('comparison', dir=self.tmp) |
1768 | + cfg = { |
1769 | + 'snap': {'assertions': [SYSTEM_USER_ASSERTION, ACCOUNT_ASSERTION]}} |
1770 | + wrap_and_call( |
1771 | + 'cloudinit.config.cc_snap', |
1772 | + {'ASSERTIONS_FILE': {'new': assert_file}}, |
1773 | + handle, 'snap', cfg=cfg, cloud=None, log=self.logger, args=None) |
1774 | + content = '\n'.join(cfg['snap']['assertions']) |
1775 | + util.write_file(compare_file, content.encode('utf-8')) |
1776 | + self.assertEqual( |
1777 | + util.load_file(compare_file), util.load_file(assert_file)) |
1778 | + |
1779 | + @mock.patch('cloudinit.config.cc_snap.util.subp') |
1780 | + @skipUnlessJsonSchema() |
1781 | + def test_handle_validates_schema(self, m_subp): |
1782 | +        """Any provided configuration runs validate_cloudconfig_schema.""" |
1783 | + assert_file = self.tmp_path('snapd.assertions', dir=self.tmp) |
1784 | + cfg = {'snap': {'invalid': ''}} # Generates schema warning |
1785 | + wrap_and_call( |
1786 | + 'cloudinit.config.cc_snap', |
1787 | + {'ASSERTIONS_FILE': {'new': assert_file}}, |
1788 | + handle, 'snap', cfg=cfg, cloud=None, log=self.logger, args=None) |
1789 | + self.assertEqual( |
1790 | + "WARNING: Invalid config:\nsnap: Additional properties are not" |
1791 | + " allowed ('invalid' was unexpected)\n", |
1792 | + self.logs.getvalue()) |
1793 | + |
1794 | + |
1795 | +class TestMaybeInstallSquashFuse(CiTestCase): |
1796 | + |
1797 | + with_logs = True |
1798 | + |
1799 | + def setUp(self): |
1800 | + super(TestMaybeInstallSquashFuse, self).setUp() |
1801 | + self.tmp = self.tmp_dir() |
1802 | + |
1803 | + @mock.patch('cloudinit.config.cc_snap.util.is_container') |
1804 | + def test_maybe_install_squashfuse_skips_non_containers(self, m_container): |
1805 | + """maybe_install_squashfuse does nothing when not on a container.""" |
1806 | + m_container.return_value = False |
1807 | + maybe_install_squashfuse(cloud=FakeCloud(None)) |
1808 | + self.assertEqual([mock.call()], m_container.call_args_list) |
1809 | + self.assertEqual('', self.logs.getvalue()) |
1810 | + |
1811 | + @mock.patch('cloudinit.config.cc_snap.util.is_container') |
1812 | + def test_maybe_install_squashfuse_raises_install_errors(self, m_container): |
1813 | + """maybe_install_squashfuse logs and raises package install errors.""" |
1814 | + m_container.return_value = True |
1815 | + distro = mock.MagicMock() |
1816 | +        distro.install_packages.side_effect = RuntimeError( |
1817 | +            'Some install error') |
1818 | +        with self.assertRaises(RuntimeError) as context_manager: |
1819 | +            maybe_install_squashfuse(cloud=FakeCloud(distro)) |
1820 | +        self.assertEqual('Some install error', str(context_manager.exception)) |
1821 | +        self.assertIn('Failed to install squashfuse\n', self.logs.getvalue()) |
1822 | + |
1823 | + @mock.patch('cloudinit.config.cc_snap.util.is_container') |
1824 | + def test_maybe_install_squashfuse_raises_update_errors(self, m_container): |
1825 | + """maybe_install_squashfuse logs and raises package update errors.""" |
1826 | + m_container.return_value = True |
1827 | + distro = mock.MagicMock() |
1828 | + distro.update_package_sources.side_effect = RuntimeError( |
1829 | + 'Some apt error') |
1830 | + with self.assertRaises(RuntimeError) as context_manager: |
1831 | + maybe_install_squashfuse(cloud=FakeCloud(distro)) |
1832 | + self.assertEqual('Some apt error', str(context_manager.exception)) |
1833 | + self.assertIn('Package update failed\nTraceback', self.logs.getvalue()) |
1834 | + |
1835 | + @mock.patch('cloudinit.config.cc_snap.util.is_container') |
1836 | + def test_maybe_install_squashfuse_happy_path(self, m_container): |
1837 | +        """maybe_install_squashfuse installs squashfuse inside a container.""" |
1838 | + m_container.return_value = True |
1839 | + distro = mock.MagicMock() # No errors raised |
1840 | + maybe_install_squashfuse(cloud=FakeCloud(distro)) |
1841 | + self.assertEqual( |
1842 | + [mock.call()], distro.update_package_sources.call_args_list) |
1843 | + self.assertEqual( |
1844 | + [mock.call(['squashfuse'])], |
1845 | + distro.install_packages.call_args_list) |
1846 | + |
1847 | +# vi: ts=4 expandtab |
1848 | diff --git a/cloudinit/config/tests/test_ubuntu_advantage.py b/cloudinit/config/tests/test_ubuntu_advantage.py |
1849 | new file mode 100644 |
1850 | index 0000000..f2a59fa |
1851 | --- /dev/null |
1852 | +++ b/cloudinit/config/tests/test_ubuntu_advantage.py |
1853 | @@ -0,0 +1,269 @@ |
1854 | +# This file is part of cloud-init. See LICENSE file for license information. |
1855 | + |
1856 | +import re |
1857 | +from six import StringIO |
1858 | + |
1859 | +from cloudinit.config.cc_ubuntu_advantage import ( |
1860 | + handle, maybe_install_ua_tools, run_commands, schema) |
1861 | +from cloudinit.config.schema import validate_cloudconfig_schema |
1862 | +from cloudinit import util |
1863 | +from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJsonSchema |
1864 | + |
1865 | + |
1866 | +# Module path used in mocks |
1867 | +MPATH = 'cloudinit.config.cc_ubuntu_advantage' |
1868 | + |
1869 | + |
1870 | +class FakeCloud(object): |
1871 | + def __init__(self, distro): |
1872 | + self.distro = distro |
1873 | + |
1874 | + |
1875 | +class TestRunCommands(CiTestCase): |
1876 | + |
1877 | + with_logs = True |
1878 | + |
1879 | + def setUp(self): |
1880 | + super(TestRunCommands, self).setUp() |
1881 | + self.tmp = self.tmp_dir() |
1882 | + |
1883 | + @mock.patch('%s.util.subp' % MPATH) |
1884 | + def test_run_commands_on_empty_list(self, m_subp): |
1885 | + """When provided with an empty list, run_commands does nothing.""" |
1886 | + run_commands([]) |
1887 | + self.assertEqual('', self.logs.getvalue()) |
1888 | + m_subp.assert_not_called() |
1889 | + |
1890 | + def test_run_commands_on_non_list_or_dict(self): |
1891 | + """When provided an invalid type, run_commands raises an error.""" |
1892 | + with self.assertRaises(TypeError) as context_manager: |
1893 | + run_commands(commands="I'm Not Valid") |
1894 | + self.assertEqual( |
1895 | + "commands parameter was not a list or dict: I'm Not Valid", |
1896 | + str(context_manager.exception)) |
1897 | + |
1898 | + def test_run_command_logs_commands_and_exit_codes_to_stderr(self): |
1899 | + """All exit codes are logged to stderr.""" |
1900 | + outfile = self.tmp_path('output.log', dir=self.tmp) |
1901 | + |
1902 | + cmd1 = 'echo "HI" >> %s' % outfile |
1903 | + cmd2 = 'bogus command' |
1904 | + cmd3 = 'echo "MOM" >> %s' % outfile |
1905 | + commands = [cmd1, cmd2, cmd3] |
1906 | + |
1907 | + mock_path = '%s.sys.stderr' % MPATH |
1908 | + with mock.patch(mock_path, new_callable=StringIO) as m_stderr: |
1909 | + with self.assertRaises(RuntimeError) as context_manager: |
1910 | + run_commands(commands=commands) |
1911 | + |
1912 | + self.assertIsNotNone( |
1913 | + re.search(r'bogus: (command )?not found', |
1914 | + str(context_manager.exception)), |
1915 | + msg='Expected bogus command not found') |
1916 | + expected_stderr_log = '\n'.join([ |
1917 | + 'Begin run command: {cmd}'.format(cmd=cmd1), |
1918 | + 'End run command: exit(0)', |
1919 | + 'Begin run command: {cmd}'.format(cmd=cmd2), |
1920 | + 'ERROR: End run command: exit(127)', |
1921 | + 'Begin run command: {cmd}'.format(cmd=cmd3), |
1922 | + 'End run command: exit(0)\n']) |
1923 | + self.assertEqual(expected_stderr_log, m_stderr.getvalue()) |
1924 | + |
1925 | + def test_run_command_as_lists(self): |
1926 | + """When commands are specified as a list, run them in order.""" |
1927 | + outfile = self.tmp_path('output.log', dir=self.tmp) |
1928 | + |
1929 | + cmd1 = 'echo "HI" >> %s' % outfile |
1930 | + cmd2 = 'echo "MOM" >> %s' % outfile |
1931 | + commands = [cmd1, cmd2] |
1932 | + with mock.patch('%s.sys.stderr' % MPATH, new_callable=StringIO): |
1933 | + run_commands(commands=commands) |
1934 | + |
1935 | + self.assertIn( |
1936 | + 'DEBUG: Running user-provided ubuntu-advantage commands', |
1937 | + self.logs.getvalue()) |
1938 | + self.assertEqual('HI\nMOM\n', util.load_file(outfile)) |
1939 | + self.assertIn( |
1940 | + 'WARNING: Non-ubuntu-advantage commands in ubuntu-advantage' |
1941 | + ' config:', |
1942 | + self.logs.getvalue()) |
1943 | + |
1944 | + def test_run_command_dict_sorted_as_command_script(self): |
1945 | + """When commands are a dict, sort them and run.""" |
1946 | + outfile = self.tmp_path('output.log', dir=self.tmp) |
1947 | + cmd1 = 'echo "HI" >> %s' % outfile |
1948 | + cmd2 = 'echo "MOM" >> %s' % outfile |
1949 | + commands = {'02': cmd1, '01': cmd2} |
1950 | + with mock.patch('%s.sys.stderr' % MPATH, new_callable=StringIO): |
1951 | + run_commands(commands=commands) |
1952 | + |
1953 | + expected_messages = [ |
1954 | + 'DEBUG: Running user-provided ubuntu-advantage commands'] |
1955 | + for message in expected_messages: |
1956 | + self.assertIn(message, self.logs.getvalue()) |
1957 | + self.assertEqual('MOM\nHI\n', util.load_file(outfile)) |
1958 | + |
1959 | + |
1960 | +@skipUnlessJsonSchema() |
1961 | +class TestSchema(CiTestCase): |
1962 | + |
1963 | + with_logs = True |
1964 | + |
1965 | + def test_schema_warns_on_ubuntu_advantage_not_as_dict(self): |
1966 | + """If ubuntu-advantage configuration is not a dict, emit a warning.""" |
1967 | + validate_cloudconfig_schema({'ubuntu-advantage': 'wrong type'}, schema) |
1968 | + self.assertEqual( |
1969 | + "WARNING: Invalid config:\nubuntu-advantage: 'wrong type' is not" |
1970 | + " of type 'object'\n", |
1971 | + self.logs.getvalue()) |
1972 | + |
1973 | + @mock.patch('%s.run_commands' % MPATH) |
1974 | + def test_schema_disallows_unknown_keys(self, _): |
1975 | + """Unknown keys in ubuntu-advantage configuration emit warnings.""" |
1976 | + validate_cloudconfig_schema( |
1977 | + {'ubuntu-advantage': {'commands': ['ls'], 'invalid-key': ''}}, |
1978 | + schema) |
1979 | + self.assertIn( |
1980 | + 'WARNING: Invalid config:\nubuntu-advantage: Additional properties' |
1981 | + " are not allowed ('invalid-key' was unexpected)", |
1982 | + self.logs.getvalue()) |
1983 | + |
1984 | + def test_warn_schema_requires_commands(self): |
1985 | + """Warn when ubuntu-advantage configuration lacks commands.""" |
1986 | + validate_cloudconfig_schema( |
1987 | + {'ubuntu-advantage': {}}, schema) |
1988 | + self.assertEqual( |
1989 | + "WARNING: Invalid config:\nubuntu-advantage: 'commands' is a" |
1990 | + " required property\n", |
1991 | + self.logs.getvalue()) |
1992 | + |
1993 | + @mock.patch('%s.run_commands' % MPATH) |
1994 | + def test_warn_schema_commands_is_not_list_or_dict(self, _): |
1995 | + """Warn when ubuntu-advantage:commands config is not a list or dict.""" |
1996 | + validate_cloudconfig_schema( |
1997 | + {'ubuntu-advantage': {'commands': 'broken'}}, schema) |
1998 | + self.assertEqual( |
1999 | + "WARNING: Invalid config:\nubuntu-advantage.commands: 'broken' is" |
2000 | + " not of type 'object', 'array'\n", |
2001 | + self.logs.getvalue()) |
2002 | + |
2003 | + @mock.patch('%s.run_commands' % MPATH) |
2004 | + def test_warn_schema_when_commands_is_empty(self, _): |
2005 | + """Emit warnings when ubuntu-advantage:commands is empty.""" |
2006 | + validate_cloudconfig_schema( |
2007 | + {'ubuntu-advantage': {'commands': []}}, schema) |
2008 | + validate_cloudconfig_schema( |
2009 | + {'ubuntu-advantage': {'commands': {}}}, schema) |
2010 | + self.assertEqual( |
2011 | + "WARNING: Invalid config:\nubuntu-advantage.commands: [] is too" |
2012 | + " short\nWARNING: Invalid config:\nubuntu-advantage.commands: {}" |
2013 | + " does not have enough properties\n", |
2014 | + self.logs.getvalue()) |
2015 | + |
2016 | + @mock.patch('%s.run_commands' % MPATH) |
2017 | + def test_schema_when_commands_are_list_or_dict(self, _): |
2018 | + """No warnings when ubuntu-advantage:commands are a list or dict.""" |
2019 | + validate_cloudconfig_schema( |
2020 | + {'ubuntu-advantage': {'commands': ['valid']}}, schema) |
2021 | + validate_cloudconfig_schema( |
2022 | + {'ubuntu-advantage': {'commands': {'01': 'also valid'}}}, schema) |
2023 | + self.assertEqual('', self.logs.getvalue()) |
2024 | + |
2025 | + |
2026 | +class TestHandle(CiTestCase): |
2027 | + |
2028 | + with_logs = True |
2029 | + |
2030 | + def setUp(self): |
2031 | + super(TestHandle, self).setUp() |
2032 | + self.tmp = self.tmp_dir() |
2033 | + |
2034 | + @mock.patch('%s.run_commands' % MPATH) |
2035 | + @mock.patch('%s.validate_cloudconfig_schema' % MPATH) |
2036 | + def test_handle_no_config(self, m_schema, m_run): |
2037 | + """When no ua-related configuration is provided, nothing happens.""" |
2038 | + cfg = {} |
2039 | + handle('ua-test', cfg=cfg, cloud=None, log=self.logger, args=None) |
2040 | + self.assertIn( |
2041 | + "DEBUG: Skipping module named ua-test, no 'ubuntu-advantage' key" |
2042 | + " in config", |
2043 | + self.logs.getvalue()) |
2044 | + m_schema.assert_not_called() |
2045 | + m_run.assert_not_called() |
2046 | + |
2047 | + @mock.patch('%s.maybe_install_ua_tools' % MPATH) |
2048 | + def test_handle_tries_to_install_ubuntu_advantage_tools(self, m_install): |
2049 | + """If ubuntu_advantage is provided, try installing ua-tools package.""" |
2050 | + cfg = {'ubuntu-advantage': {}} |
2051 | + mycloud = FakeCloud(None) |
2052 | + handle('nomatter', cfg=cfg, cloud=mycloud, log=self.logger, args=None) |
2053 | + m_install.assert_called_once_with(mycloud) |
2054 | + |
2055 | + @mock.patch('%s.maybe_install_ua_tools' % MPATH) |
2056 | + def test_handle_runs_commands_provided(self, m_install): |
2057 | + """When commands are specified as a list, run them.""" |
2058 | + outfile = self.tmp_path('output.log', dir=self.tmp) |
2059 | + |
2060 | + cfg = { |
2061 | + 'ubuntu-advantage': {'commands': ['echo "HI" >> %s' % outfile, |
2062 | + 'echo "MOM" >> %s' % outfile]}} |
2063 | + mock_path = '%s.sys.stderr' % MPATH |
2064 | + with mock.patch(mock_path, new_callable=StringIO): |
2065 | + handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) |
2066 | + self.assertEqual('HI\nMOM\n', util.load_file(outfile)) |
2067 | + |
2068 | + |
2069 | +class TestMaybeInstallUATools(CiTestCase): |
2070 | + |
2071 | + with_logs = True |
2072 | + |
2073 | + def setUp(self): |
2074 | + super(TestMaybeInstallUATools, self).setUp() |
2075 | + self.tmp = self.tmp_dir() |
2076 | + |
2077 | + @mock.patch('%s.util.which' % MPATH) |
2078 | + def test_maybe_install_ua_tools_noop_when_ua_tools_present(self, m_which): |
2079 | + """Do nothing if ubuntu-advantage-tools already exists.""" |
2080 | + m_which.return_value = '/usr/bin/ubuntu-advantage' # already installed |
2081 | + distro = mock.MagicMock() |
2082 | + distro.update_package_sources.side_effect = RuntimeError( |
2083 | + 'Some apt error') |
2084 | + maybe_install_ua_tools(cloud=FakeCloud(distro)) # No RuntimeError |
2085 | + |
2086 | + @mock.patch('%s.util.which' % MPATH) |
2087 | + def test_maybe_install_ua_tools_raises_update_errors(self, m_which): |
2088 | + """maybe_install_ua_tools logs and raises apt update errors.""" |
2089 | + m_which.return_value = None |
2090 | + distro = mock.MagicMock() |
2091 | + distro.update_package_sources.side_effect = RuntimeError( |
2092 | + 'Some apt error') |
2093 | + with self.assertRaises(RuntimeError) as context_manager: |
2094 | + maybe_install_ua_tools(cloud=FakeCloud(distro)) |
2095 | + self.assertEqual('Some apt error', str(context_manager.exception)) |
2096 | + self.assertIn('Package update failed\nTraceback', self.logs.getvalue()) |
2097 | + |
2098 | + @mock.patch('%s.util.which' % MPATH) |
2099 | + def test_maybe_install_ua_raises_install_errors(self, m_which): |
2100 | + """maybe_install_ua_tools logs and raises package install errors.""" |
2101 | + m_which.return_value = None |
2102 | + distro = mock.MagicMock() |
2103 | + distro.update_package_sources.return_value = None |
2104 | + distro.install_packages.side_effect = RuntimeError( |
2105 | + 'Some install error') |
2106 | + with self.assertRaises(RuntimeError) as context_manager: |
2107 | + maybe_install_ua_tools(cloud=FakeCloud(distro)) |
2108 | + self.assertEqual('Some install error', str(context_manager.exception)) |
2109 | + self.assertIn( |
2110 | + 'Failed to install ubuntu-advantage-tools\n', self.logs.getvalue()) |
2111 | + |
2112 | + @mock.patch('%s.util.which' % MPATH) |
2113 | + def test_maybe_install_ua_tools_happy_path(self, m_which): |
2114 | + """maybe_install_ua_tools installs ubuntu-advantage-tools.""" |
2115 | + m_which.return_value = None |
2116 | + distro = mock.MagicMock() # No errors raised |
2117 | + maybe_install_ua_tools(cloud=FakeCloud(distro)) |
2118 | + distro.update_package_sources.assert_called_once_with() |
2119 | + distro.install_packages.assert_called_once_with( |
2120 | + ['ubuntu-advantage-tools']) |
2121 | + |
2122 | +# vi: ts=4 expandtab |
2123 | diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py |
2124 | index f87a343..b814c8b 100644 |
2125 | --- a/cloudinit/distros/arch.py |
2126 | +++ b/cloudinit/distros/arch.py |
2127 | @@ -129,11 +129,8 @@ class Distro(distros.Distro): |
2128 | if pkgs is None: |
2129 | pkgs = [] |
2130 | |
2131 | - cmd = ['pacman'] |
2132 | + cmd = ['pacman', "-Sy", "--quiet", "--noconfirm"] |
2133 | # Redirect output |
2134 | - cmd.append("-Sy") |
2135 | - cmd.append("--quiet") |
2136 | - cmd.append("--noconfirm") |
2137 | |
2138 | if args and isinstance(args, str): |
2139 | cmd.append(args) |
2140 | diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py |
2141 | index aa468bc..754d3df 100644 |
2142 | --- a/cloudinit/distros/freebsd.py |
2143 | +++ b/cloudinit/distros/freebsd.py |
2144 | @@ -132,6 +132,12 @@ class Distro(distros.Distro): |
2145 | LOG.debug("Using network interface %s", bsddev) |
2146 | return bsddev |
2147 | |
2148 | + def _select_hostname(self, hostname, fqdn): |
2149 | + # Should be FQDN if available. See rc.conf(5) in FreeBSD |
2150 | + if fqdn: |
2151 | + return fqdn |
2152 | + return hostname |
2153 | + |
2154 | def _read_system_hostname(self): |
2155 | sys_hostname = self._read_hostname(filename=None) |
2156 | return ('rc.conf', sys_hostname) |
2157 | diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py |
2158 | index a219e9f..162dfa0 100644 |
2159 | --- a/cloudinit/distros/opensuse.py |
2160 | +++ b/cloudinit/distros/opensuse.py |
2161 | @@ -67,11 +67,10 @@ class Distro(distros.Distro): |
2162 | if pkgs is None: |
2163 | pkgs = [] |
2164 | |
2165 | - cmd = ['zypper'] |
2166 | # No user interaction possible, enable non-interactive mode |
2167 | - cmd.append('--non-interactive') |
2168 | + cmd = ['zypper', '--non-interactive'] |
2169 | |
2170 | - # Comand is the operation, such as install |
2171 | + # Command is the operation, such as install |
2172 | if command == 'upgrade': |
2173 | command = 'update' |
2174 | cmd.append(command) |
2175 | diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py |
2176 | index d6c61e4..dc3f0fc 100644 |
2177 | --- a/cloudinit/ec2_utils.py |
2178 | +++ b/cloudinit/ec2_utils.py |
2179 | @@ -135,10 +135,8 @@ class MetadataMaterializer(object): |
2180 | |
2181 | |
2182 | def _skip_retry_on_codes(status_codes, _request_args, cause): |
2183 | - """Returns if a request should retry based on a given set of codes that |
2184 | - case retrying to be stopped/skipped. |
2185 | - """ |
2186 | - return cause.code in status_codes |
2187 | + """Returns False if cause.code is in status_codes.""" |
2188 | + return cause.code not in status_codes |
2189 | |
2190 | |
2191 | def get_instance_userdata(api_version='latest', |
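The rewritten callback now answers the question the retry loop actually asks, "should this request be retried?", so a status code the caller listed as non-retryable (such as a 404 for missing user-data) stops the retries instead of forcing them. A hedged sketch of how such a callback is typically consumed, using a generic loop rather than cloud-init's url_helper:

    def fetch_with_retries(request, should_retry, attempts=5):
        # Keep calling request() until it succeeds, the callback declines
        # another retry, or attempts run out; then re-raise the last error.
        last_error = None
        for _ in range(attempts):
            try:
                return request()
            except Exception as cause:  # e.g. an error carrying a .code
                last_error = cause
                if not should_retry(cause):
                    break
        raise last_error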
2192 | diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py |
2193 | index 7b2cc9d..9e9fe0f 100755 |
2194 | --- a/cloudinit/net/cmdline.py |
2195 | +++ b/cloudinit/net/cmdline.py |
2196 | @@ -9,12 +9,15 @@ import base64 |
2197 | import glob |
2198 | import gzip |
2199 | import io |
2200 | +import os |
2201 | |
2202 | from . import get_devicelist |
2203 | from . import read_sys_net_safe |
2204 | |
2205 | from cloudinit import util |
2206 | |
2207 | +_OPEN_ISCSI_INTERFACE_FILE = "/run/initramfs/open-iscsi.interface" |
2208 | + |
2209 | |
2210 | def _klibc_to_config_entry(content, mac_addrs=None): |
2211 | """Convert a klibc written shell content file to a 'config' entry |
2212 | @@ -103,9 +106,13 @@ def _klibc_to_config_entry(content, mac_addrs=None): |
2213 | return name, iface |
2214 | |
2215 | |
2216 | +def _get_klibc_net_cfg_files(): |
2217 | + return glob.glob('/run/net-*.conf') + glob.glob('/run/net6-*.conf') |
2218 | + |
2219 | + |
2220 | def config_from_klibc_net_cfg(files=None, mac_addrs=None): |
2221 | if files is None: |
2222 | - files = glob.glob('/run/net-*.conf') + glob.glob('/run/net6-*.conf') |
2223 | + files = _get_klibc_net_cfg_files() |
2224 | |
2225 | entries = [] |
2226 | names = {} |
2227 | @@ -160,10 +167,23 @@ def _b64dgz(b64str, gzipped="try"): |
2228 | return _decomp_gzip(blob, strict=gzipped != "try") |
2229 | |
2230 | |
2231 | +def _is_initramfs_netconfig(files, cmdline): |
2232 | + if files: |
2233 | + if 'ip=' in cmdline or 'ip6=' in cmdline: |
2234 | + return True |
2235 | + if os.path.exists(_OPEN_ISCSI_INTERFACE_FILE): |
2236 | +        # iBFT can configure networking without ip= |
2237 | + return True |
2238 | + return False |
2239 | + |
2240 | + |
2241 | def read_kernel_cmdline_config(files=None, mac_addrs=None, cmdline=None): |
2242 | if cmdline is None: |
2243 | cmdline = util.get_cmdline() |
2244 | |
2245 | + if files is None: |
2246 | + files = _get_klibc_net_cfg_files() |
2247 | + |
2248 | if 'network-config=' in cmdline: |
2249 | data64 = None |
2250 | for tok in cmdline.split(): |
2251 | @@ -172,7 +192,7 @@ def read_kernel_cmdline_config(files=None, mac_addrs=None, cmdline=None): |
2252 | if data64: |
2253 | return util.load_yaml(_b64dgz(data64)) |
2254 | |
2255 | - if 'ip=' not in cmdline and 'ip6=' not in cmdline: |
2256 | + if not _is_initramfs_netconfig(files, cmdline): |
2257 | return None |
2258 | |
2259 | if mac_addrs is None: |
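The net effect of the cmdline changes: initramfs network config is only honoured when klibc net-*.conf files actually exist, and either ip=/ip6= appears on the kernel command line or the open-iscsi interface marker is present (iBFT boots can configure networking without ip=). The same decision, condensed into a self-contained toy check with the marker path passed in so it is easy to exercise:

    import os

    OPEN_ISCSI_MARKER = '/run/initramfs/open-iscsi.interface'

    def initramfs_configured_network(files, cmdline, marker=OPEN_ISCSI_MARKER):
        # Toy equivalent of _is_initramfs_netconfig above.
        if not files:
            return False
        if 'ip=' in cmdline or 'ip6=' in cmdline:
            return True
        return os.path.exists(marker)

    # initramfs_configured_network(['/run/net-eth0.conf'], 'ro ip=dhcp') -> True
    # initramfs_configured_network([], 'ro ip=dhcp')                     -> False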
2260 | diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py |
2261 | index d3788af..6344348 100644 |
2262 | --- a/cloudinit/net/netplan.py |
2263 | +++ b/cloudinit/net/netplan.py |
2264 | @@ -311,12 +311,12 @@ class Renderer(renderer.Renderer): |
2265 | if newname is None: |
2266 | continue |
2267 | br_config.update({newname: value}) |
2268 | - if newname == 'path-cost': |
2269 | - # <interface> <cost> -> <interface>: int(<cost>) |
2270 | + if newname in ['path-cost', 'port-priority']: |
2271 | + # <interface> <value> -> <interface>: int(<value>) |
2272 | newvalue = {} |
2273 | - for costval in value: |
2274 | - (port, cost) = costval.split() |
2275 | - newvalue[port] = int(cost) |
2276 | + for val in value: |
2277 | + (port, portval) = val.split() |
2278 | + newvalue[port] = int(portval) |
2279 | br_config.update({newname: newvalue}) |
2280 | |
2281 | if len(br_config) > 0: |
2282 | @@ -336,22 +336,15 @@ class Renderer(renderer.Renderer): |
2283 | _extract_addresses(ifcfg, vlan) |
2284 | vlans.update({ifname: vlan}) |
2285 | |
2286 | - # inject global nameserver values under each physical interface |
2287 | - if nameservers: |
2288 | - for _eth, cfg in ethernets.items(): |
2289 | - nscfg = cfg.get('nameservers', {}) |
2290 | - addresses = nscfg.get('addresses', []) |
2291 | - addresses += nameservers |
2292 | - nscfg.update({'addresses': addresses}) |
2293 | - cfg.update({'nameservers': nscfg}) |
2294 | - |
2295 | - if searchdomains: |
2296 | - for _eth, cfg in ethernets.items(): |
2297 | - nscfg = cfg.get('nameservers', {}) |
2298 | - search = nscfg.get('search', []) |
2299 | - search += searchdomains |
2300 | - nscfg.update({'search': search}) |
2301 | - cfg.update({'nameservers': nscfg}) |
2302 | +        # inject global nameserver values under all interfaces which |
2303 | +        # have addresses and do not already have a DNS configuration |
2304 | + if nameservers or searchdomains: |
2305 | + nscfg = {'addresses': nameservers, 'search': searchdomains} |
2306 | + for section in [ethernets, wifis, bonds, bridges, vlans]: |
2307 | + for _name, cfg in section.items(): |
2308 | + if 'nameservers' in cfg or 'addresses' not in cfg: |
2309 | + continue |
2310 | + cfg.update({'nameservers': nscfg}) |
2311 | |
2312 | # workaround yaml dictionary key sorting when dumping |
2313 | def _render_section(name, section): |
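The nameserver handling now builds one global DNS block and attaches it to any interface (ethernet, wifi, bond, bridge or vlan) that has static addresses but no interface-level ``nameservers`` of its own, where previously only ethernets were patched, and unconditionally. The injection in isolation, on sample data rather than cloud-init's renderer state:

    def inject_global_dns(sections, nameservers, searchdomains):
        # Attach global DNS only where an interface has addresses and no
        # DNS configuration of its own.
        if not (nameservers or searchdomains):
            return
        nscfg = {'addresses': nameservers, 'search': searchdomains}
        for section in sections:
            for _name, cfg in section.items():
                if 'nameservers' in cfg or 'addresses' not in cfg:
                    continue
                cfg['nameservers'] = nscfg

    ethernets = {'eth0': {'addresses': ['192.168.1.5/24']},
                 'eth1': {'dhcp4': True}}
    inject_global_dns([ethernets], ['8.8.8.8'], ['example.com'])
    # eth0 gains the nameservers block; eth1 has no addresses, so it does not.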
2314 | diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py |
2315 | index fe667d8..6d63e5c 100644 |
2316 | --- a/cloudinit/net/network_state.py |
2317 | +++ b/cloudinit/net/network_state.py |
2318 | @@ -47,7 +47,7 @@ NET_CONFIG_TO_V2 = { |
2319 | 'bridge_maxage': 'max-age', |
2320 | 'bridge_maxwait': None, |
2321 | 'bridge_pathcost': 'path-cost', |
2322 | - 'bridge_portprio': None, |
2323 | + 'bridge_portprio': 'port-priority', |
2324 | 'bridge_stp': 'stp', |
2325 | 'bridge_waitport': None}} |
2326 | |
2327 | @@ -708,6 +708,7 @@ class NetworkStateInterpreter(object): |
2328 | |
2329 | gateway4 = None |
2330 | gateway6 = None |
2331 | + nameservers = {} |
2332 | for address in cfg.get('addresses', []): |
2333 | subnet = { |
2334 | 'type': 'static', |
2335 | @@ -723,6 +724,15 @@ class NetworkStateInterpreter(object): |
2336 | gateway4 = cfg.get('gateway4') |
2337 | subnet.update({'gateway': gateway4}) |
2338 | |
2339 | + if 'nameservers' in cfg and not nameservers: |
2340 | + addresses = cfg.get('nameservers').get('addresses') |
2341 | + if addresses: |
2342 | + nameservers['dns_nameservers'] = addresses |
2343 | + search = cfg.get('nameservers').get('search') |
2344 | + if search: |
2345 | + nameservers['dns_search'] = search |
2346 | + subnet.update(nameservers) |
2347 | + |
2348 | subnets.append(subnet) |
2349 | |
2350 | routes = [] |
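On the parsing side, a v2 interface's ``nameservers`` block is now copied onto each static subnet as ``dns_nameservers``/``dns_search`` so v1-style renderers pick it up. A condensed illustration of just that mapping (the real handler also deals with gateways, routes and other subnet fields):

    def v2_addresses_to_subnets(cfg):
        # Map a netplan v2 interface dict to v1-style static subnets,
        # carrying interface-level DNS down onto each subnet.
        nameservers = {}
        ns = cfg.get('nameservers', {})
        if ns.get('addresses'):
            nameservers['dns_nameservers'] = ns['addresses']
        if ns.get('search'):
            nameservers['dns_search'] = ns['search']
        subnets = []
        for address in cfg.get('addresses', []):
            subnet = {'type': 'static', 'address': address}
            subnet.update(nameservers)
            subnets.append(subnet)
        return subnets

    # v2_addresses_to_subnets({'addresses': ['10.0.0.5/24'],
    #                          'nameservers': {'addresses': ['10.0.0.1'],
    #                                          'search': ['example.com']}})
    # -> [{'type': 'static', 'address': '10.0.0.5/24',
    #      'dns_nameservers': ['10.0.0.1'], 'dns_search': ['example.com']}]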
2351 | diff --git a/cloudinit/settings.py b/cloudinit/settings.py |
2352 | index c120498..dde5749 100644 |
2353 | --- a/cloudinit/settings.py |
2354 | +++ b/cloudinit/settings.py |
2355 | @@ -36,6 +36,8 @@ CFG_BUILTIN = { |
2356 | 'SmartOS', |
2357 | 'Bigstep', |
2358 | 'Scaleway', |
2359 | + 'Hetzner', |
2360 | + 'IBMCloud', |
2361 | # At the end to act as a 'catch' when none of the above work... |
2362 | 'None', |
2363 | ], |
2364 | diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py |
2365 | index 7ac8288..22279d0 100644 |
2366 | --- a/cloudinit/sources/DataSourceAliYun.py |
2367 | +++ b/cloudinit/sources/DataSourceAliYun.py |
2368 | @@ -22,7 +22,7 @@ class DataSourceAliYun(EC2.DataSourceEc2): |
2369 | super(DataSourceAliYun, self).__init__(sys_cfg, distro, paths) |
2370 | self.seed_dir = os.path.join(paths.seed_dir, "AliYun") |
2371 | |
2372 | - def get_hostname(self, fqdn=False, _resolve_ip=False): |
2373 | + def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): |
2374 | return self.metadata.get('hostname', 'localhost.localdomain') |
2375 | |
2376 | def get_public_ssh_keys(self): |
2377 | diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py |
2378 | index 4bcbf3a..0ee622e 100644 |
2379 | --- a/cloudinit/sources/DataSourceAzure.py |
2380 | +++ b/cloudinit/sources/DataSourceAzure.py |
2381 | @@ -20,7 +20,7 @@ from cloudinit import net |
2382 | from cloudinit.net.dhcp import EphemeralDHCPv4 |
2383 | from cloudinit import sources |
2384 | from cloudinit.sources.helpers.azure import get_metadata_from_fabric |
2385 | -from cloudinit.url_helper import readurl, wait_for_url, UrlError |
2386 | +from cloudinit.url_helper import readurl, UrlError |
2387 | from cloudinit import util |
2388 | |
2389 | LOG = logging.getLogger(__name__) |
2390 | @@ -49,7 +49,6 @@ DEFAULT_FS = 'ext4' |
2391 | AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77' |
2392 | REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds" |
2393 | IMDS_URL = "http://169.254.169.254/metadata/reprovisiondata" |
2394 | -IMDS_RETRIES = 5 |
2395 | |
2396 | |
2397 | def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid): |
2398 | @@ -223,6 +222,8 @@ DEF_PASSWD_REDACTION = 'REDACTED' |
2399 | |
2400 | |
2401 | def get_hostname(hostname_command='hostname'): |
2402 | + if not isinstance(hostname_command, (list, tuple)): |
2403 | + hostname_command = (hostname_command,) |
2404 | return util.subp(hostname_command, capture=True)[0].strip() |
2405 | |
2406 | |
2407 | @@ -449,36 +450,24 @@ class DataSourceAzure(sources.DataSource): |
2408 | headers = {"Metadata": "true"} |
2409 | LOG.debug("Start polling IMDS") |
2410 | |
2411 | - def sleep_cb(response, loop_n): |
2412 | - return 1 |
2413 | - |
2414 | - def exception_cb(msg, exception): |
2415 | + def exc_cb(msg, exception): |
2416 | if isinstance(exception, UrlError) and exception.code == 404: |
2417 | - return |
2418 | - LOG.warning("Exception during polling. Will try DHCP.", |
2419 | - exc_info=True) |
2420 | - |
2421 | + return True |
2422 | # If we get an exception while trying to call IMDS, we |
2423 | # call DHCP and setup the ephemeral network to acquire the new IP. |
2424 | - raise exception |
2425 | + return False |
2426 | |
2427 | need_report = report_ready |
2428 | - for i in range(IMDS_RETRIES): |
2429 | + while True: |
2430 | try: |
2431 | with EphemeralDHCPv4() as lease: |
2432 | if need_report: |
2433 | self._report_ready(lease=lease) |
2434 | need_report = False |
2435 | - wait_for_url([url], max_wait=None, timeout=60, |
2436 | - status_cb=LOG.info, |
2437 | - headers_cb=lambda url: headers, sleep_time=1, |
2438 | - exception_cb=exception_cb, |
2439 | - sleep_time_cb=sleep_cb) |
2440 | - return str(readurl(url, headers=headers)) |
2441 | - except Exception: |
2442 | - LOG.debug("Exception during polling-retrying dhcp" + |
2443 | - " %d more time(s).", (IMDS_RETRIES - i), |
2444 | - exc_info=True) |
2445 | + return readurl(url, timeout=1, headers=headers, |
2446 | + exception_cb=exc_cb, infinite=True).contents |
2447 | + except UrlError: |
2448 | + pass |
2449 | |
2450 | def _report_ready(self, lease): |
2451 | """Tells the fabric provisioning has completed |
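
The Azure change above replaces the bounded wait_for_url loop with a single readurl call that retries indefinitely, delegating the decision to exc_cb: returning True keeps polling (the expected HTTP 404 while reprovision data is not yet published), returning False breaks out so the ephemeral DHCP lease can be rebuilt. A minimal standalone sketch of that callback contract; HttpError, poll and fetch are illustrative stand-ins, not the url_helper API:

class HttpError(Exception):
    def __init__(self, code):
        self.code = code

def poll(fetch, keep_waiting):
    # retry forever while keep_waiting() says the error is expected
    while True:
        try:
            return fetch()
        except HttpError as e:
            if not keep_waiting(e):
                raise  # caller re-establishes DHCP and polls again

def retry_on_404(error):
    return getattr(error, 'code', None) == 404
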
2452 | diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py |
2453 | index 4eaad47..c816f34 100644 |
2454 | --- a/cloudinit/sources/DataSourceCloudSigma.py |
2455 | +++ b/cloudinit/sources/DataSourceCloudSigma.py |
2456 | @@ -84,7 +84,7 @@ class DataSourceCloudSigma(sources.DataSource): |
2457 | |
2458 | return True |
2459 | |
2460 | - def get_hostname(self, fqdn=False, resolve_ip=False): |
2461 | + def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): |
2462 | """ |
2463 | Cleans up and uses the server's name if the latter is set. Otherwise |
2464 | the first part from uuid is being used. |
2465 | diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py |
2466 | index b8db626..c7b5fe5 100644 |
2467 | --- a/cloudinit/sources/DataSourceConfigDrive.py |
2468 | +++ b/cloudinit/sources/DataSourceConfigDrive.py |
2469 | @@ -14,6 +14,7 @@ from cloudinit import util |
2470 | |
2471 | from cloudinit.net import eni |
2472 | |
2473 | +from cloudinit.sources.DataSourceIBMCloud import get_ibm_platform |
2474 | from cloudinit.sources.helpers import openstack |
2475 | |
2476 | LOG = logging.getLogger(__name__) |
2477 | @@ -255,6 +256,15 @@ def find_candidate_devs(probe_optical=True): |
2478 | # an unpartitioned block device (ex sda, not sda1) |
2479 | devices = [d for d in candidates |
2480 | if d in by_label or not util.is_partition(d)] |
2481 | + |
2482 | + if devices: |
2483 | + # IBMCloud uses config-2 label, but limited to a single UUID. |
2484 | + ibm_platform, ibm_path = get_ibm_platform() |
2485 | + if ibm_path in devices: |
2486 | + devices.remove(ibm_path) |
2487 | + LOG.debug("IBMCloud device '%s' (%s) removed from candidate list", |
2488 | + ibm_path, ibm_platform) |
2489 | + |
2490 | return devices |
2491 | |
2492 | |
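
find_candidate_devs now consults the new IBMCloud module (added later in this diff) so that the IBMCloud OS-Code disk, which also carries a config-2 label, is never claimed by the ConfigDrive datasource. A tiny sketch of the filter; the device names are invented and get_ibm_platform's (platform, path) return shape is taken from the module below:

def drop_ibm_device(devices, ibm_platform, ibm_path):
    if ibm_path in devices:
        print("dropping %s (%s) from ConfigDrive candidates"
              % (ibm_path, ibm_platform))
        devices = [d for d in devices if d != ibm_path]
    return devices

print(drop_ibm_device(['/dev/sr0', '/dev/xvdh1'], 'OS-Code/Live', '/dev/xvdh1'))
# dropping /dev/xvdh1 (OS-Code/Live) from ConfigDrive candidates
# ['/dev/sr0']
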
2493 | diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py |
2494 | index 2da34a9..d816262 100644 |
2495 | --- a/cloudinit/sources/DataSourceGCE.py |
2496 | +++ b/cloudinit/sources/DataSourceGCE.py |
2497 | @@ -90,7 +90,7 @@ class DataSourceGCE(sources.DataSource): |
2498 | public_keys_data = self.metadata['public-keys-data'] |
2499 | return _parse_public_keys(public_keys_data, self.default_user) |
2500 | |
2501 | - def get_hostname(self, fqdn=False, resolve_ip=False): |
2502 | + def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): |
2503 | # GCE has long FDQN's and has asked for short hostnames. |
2504 | return self.metadata['local-hostname'].split('.')[0] |
2505 | |
2506 | @@ -213,16 +213,15 @@ def read_md(address=None, platform_check=True): |
2507 | if md['availability-zone']: |
2508 | md['availability-zone'] = md['availability-zone'].split('/')[-1] |
2509 | |
2510 | - encoding = instance_data.get('user-data-encoding') |
2511 | - if encoding: |
2512 | + if 'user-data' in instance_data: |
2513 | + # instance_data was json, so values are all utf-8 strings. |
2514 | + ud = instance_data['user-data'].encode("utf-8") |
2515 | + encoding = instance_data.get('user-data-encoding') |
2516 | if encoding == 'base64': |
2517 | - md['user-data'] = b64decode(instance_data.get('user-data')) |
2518 | - else: |
2519 | + ud = b64decode(ud) |
2520 | + elif encoding: |
2521 | LOG.warning('unknown user-data-encoding: %s, ignoring', encoding) |
2522 | - |
2523 | - if 'user-data' in md: |
2524 | - ret['user-data'] = md['user-data'] |
2525 | - del md['user-data'] |
2526 | + ret['user-data'] = ud |
2527 | |
2528 | ret['meta-data'] = md |
2529 | ret['success'] = True |
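
With the GCE change above, user-data is returned whenever the key is present; it is encoded to bytes and only base64-decoded when user-data-encoding says so. A standalone restatement of that rule (the function name is illustrative):

import base64

def decode_user_data(instance_data):
    if 'user-data' not in instance_data:
        return None
    ud = instance_data['user-data'].encode('utf-8')  # JSON values are text
    if instance_data.get('user-data-encoding') == 'base64':
        ud = base64.b64decode(ud)
    # unknown encodings fall through as raw bytes (and are logged upstream)
    return ud

print(decode_user_data({'user-data': 'aGkgbW9t', 'user-data-encoding': 'base64'}))
# b'hi mom'
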
2530 | diff --git a/cloudinit/sources/DataSourceHetzner.py b/cloudinit/sources/DataSourceHetzner.py |
2531 | new file mode 100644 |
2532 | index 0000000..5c75b65 |
2533 | --- /dev/null |
2534 | +++ b/cloudinit/sources/DataSourceHetzner.py |
2535 | @@ -0,0 +1,106 @@ |
2536 | +# Author: Jonas Keidel <jonas.keidel@hetzner.com> |
2537 | +# Author: Markus Schade <markus.schade@hetzner.com> |
2538 | +# |
2539 | +# This file is part of cloud-init. See LICENSE file for license information. |
2540 | +# |
2541 | +"""Hetzner Cloud API Documentation. |
2542 | + https://docs.hetzner.cloud/""" |
2543 | + |
2544 | +from cloudinit import log as logging |
2545 | +from cloudinit import net as cloudnet |
2546 | +from cloudinit import sources |
2547 | +from cloudinit import util |
2548 | + |
2549 | +import cloudinit.sources.helpers.hetzner as hc_helper |
2550 | + |
2551 | +LOG = logging.getLogger(__name__) |
2552 | + |
2553 | +BASE_URL_V1 = 'http://169.254.169.254/hetzner/v1' |
2554 | + |
2555 | +BUILTIN_DS_CONFIG = { |
2556 | + 'metadata_url': BASE_URL_V1 + '/metadata', |
2557 | + 'userdata_url': BASE_URL_V1 + '/userdata', |
2558 | +} |
2559 | + |
2560 | +MD_RETRIES = 60 |
2561 | +MD_TIMEOUT = 2 |
2562 | +MD_WAIT_RETRY = 2 |
2563 | + |
2564 | + |
2565 | +class DataSourceHetzner(sources.DataSource): |
2566 | + def __init__(self, sys_cfg, distro, paths): |
2567 | + sources.DataSource.__init__(self, sys_cfg, distro, paths) |
2568 | + self.distro = distro |
2569 | + self.metadata = dict() |
2570 | + self.ds_cfg = util.mergemanydict([ |
2571 | + util.get_cfg_by_path(sys_cfg, ["datasource", "Hetzner"], {}), |
2572 | + BUILTIN_DS_CONFIG]) |
2573 | + self.metadata_address = self.ds_cfg['metadata_url'] |
2574 | + self.userdata_address = self.ds_cfg['userdata_url'] |
2575 | + self.retries = self.ds_cfg.get('retries', MD_RETRIES) |
2576 | + self.timeout = self.ds_cfg.get('timeout', MD_TIMEOUT) |
2577 | + self.wait_retry = self.ds_cfg.get('wait_retry', MD_WAIT_RETRY) |
2578 | + self._network_config = None |
2579 | + self.dsmode = sources.DSMODE_NETWORK |
2580 | + |
2581 | + def get_data(self): |
2582 | + if not on_hetzner(): |
2583 | + return False |
2584 | + nic = cloudnet.find_fallback_nic() |
2585 | + with cloudnet.EphemeralIPv4Network(nic, "169.254.0.1", 16, |
2586 | + "169.254.255.255"): |
2587 | + md = hc_helper.read_metadata( |
2588 | + self.metadata_address, timeout=self.timeout, |
2589 | + sec_between=self.wait_retry, retries=self.retries) |
2590 | + ud = hc_helper.read_userdata( |
2591 | + self.userdata_address, timeout=self.timeout, |
2592 | + sec_between=self.wait_retry, retries=self.retries) |
2593 | + |
2594 | + self.userdata_raw = ud |
2595 | + self.metadata_full = md |
2596 | + |
2597 | + """hostname is name provided by user at launch. The API enforces |
2598 | + it is a valid hostname, but it is not guaranteed to be resolvable |
2599 | + in dns or fully qualified.""" |
2600 | + self.metadata['instance-id'] = md['instance-id'] |
2601 | + self.metadata['local-hostname'] = md['hostname'] |
2602 | + self.metadata['network-config'] = md.get('network-config', None) |
2603 | + self.metadata['public-keys'] = md.get('public-keys', None) |
2604 | + self.vendordata_raw = md.get("vendor_data", None) |
2605 | + |
2606 | + return True |
2607 | + |
2608 | + @property |
2609 | + def network_config(self): |
2610 | + """Configure the networking. This needs to be done each boot, since |
2611 | + the IP information may have changed due to snapshot and/or |
2612 | + migration. |
2613 | + """ |
2614 | + |
2615 | + if self._network_config: |
2616 | + return self._network_config |
2617 | + |
2618 | + _net_config = self.metadata['network-config'] |
2619 | + if not _net_config: |
2620 | + raise Exception("Unable to get meta-data from server....") |
2621 | + |
2622 | + self._network_config = _net_config |
2623 | + |
2624 | + return self._network_config |
2625 | + |
2626 | + |
2627 | +def on_hetzner(): |
2628 | + return util.read_dmi_data('system-manufacturer') == "Hetzner" |
2629 | + |
2630 | + |
2631 | +# Used to match classes to dependencies |
2632 | +datasources = [ |
2633 | + (DataSourceHetzner, (sources.DEP_FILESYSTEM, )), |
2634 | +] |
2635 | + |
2636 | + |
2637 | +# Return a list of data sources that match this set of dependencies |
2638 | +def get_datasource_list(depends): |
2639 | + return sources.list_from_depends(depends, datasources) |
2640 | + |
2641 | +# vi: ts=4 expandtab |
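
The new datasource only activates when the DMI system manufacturer reads "Hetzner", then brings up a link-local address on the fallback NIC to fetch metadata and userdata. A rough standalone equivalent of the on_hetzner() check; util.read_dmi_data ultimately consults the kernel's DMI export, so a plain sysfs read is a close approximation on Linux (the sysfs path is an assumption, not a cloud-init API):

def probe_hetzner(sysfs="/sys/class/dmi/id/sys_vendor"):
    try:
        with open(sysfs) as fh:
            return fh.read().strip() == "Hetzner"
    except OSError:
        return False

if __name__ == "__main__":
    print("running on Hetzner Cloud:", probe_hetzner())
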
2642 | diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py |
2643 | new file mode 100644 |
2644 | index 0000000..02b3d56 |
2645 | --- /dev/null |
2646 | +++ b/cloudinit/sources/DataSourceIBMCloud.py |
2647 | @@ -0,0 +1,325 @@ |
2648 | +# This file is part of cloud-init. See LICENSE file for license information. |
2649 | +"""Datasource for IBMCloud. |
2650 | + |
2651 | +IBMCloud is also known as SoftLayer or BlueMix. |
2652 | +IBMCloud hypervisor is xen (2018-03-10). |
2653 | + |
2654 | +There are 2 different api exposed launch methods. |
2655 | + * template: This is the legacy method of launching instances. |
2656 | + When booting from an image template, the system boots first into |
2657 | + a "provisioning" mode. There, host <-> guest mechanisms are utilized |
2658 | + to execute code in the guest and provision it. |
2659 | + |
2660 | + Cloud-init will disable itself when it detects that it is in the |
2661 | + provisioning mode. It detects this by the presence of |
2662 | + a file '/root/provisioningConfiguration.cfg'. |
2663 | + |
2664 | + When provided with user-data, the "first boot" will contain a |
2665 | + ConfigDrive-like disk labeled with 'METADATA'. If there is no user-data |
2666 | + provided, then there is no data-source. |
2667 | + |
2668 | + Cloud-init never does any network configuration in this mode. |
2669 | + |
2670 | + * os_code: Essentially "launch by OS Code" (Operating System Code). |
2671 | + This is a more modern approach. There is no specific "provisioning" boot. |
2672 | + Instead, cloud-init does all the customization. With or without |
2673 | + user-data provided, an OpenStack ConfigDrive like disk is attached. |
2674 | + |
2675 | + Only disks with label 'config-2' and UUID '9796-932E' are considered. |
2676 | + This is to avoid this datasource claiming ConfigDrive. This does |
2677 | + mean that 1 in 16^8 (~4 billion) Xen ConfigDrive systems will be |
2678 | + incorrectly identified as IBMCloud. |
2679 | + |
2680 | +TODO: |
2681 | + * is uuid (/sys/hypervisor/uuid) stable for life of an instance? |
2682 | + it seems it is not the same as data's uuid in the os_code case |
2683 | + but is in the template case. |
2684 | + |
2685 | +""" |
2686 | +import base64 |
2687 | +import json |
2688 | +import os |
2689 | + |
2690 | +from cloudinit import log as logging |
2691 | +from cloudinit import sources |
2692 | +from cloudinit.sources.helpers import openstack |
2693 | +from cloudinit import util |
2694 | + |
2695 | +LOG = logging.getLogger(__name__) |
2696 | + |
2697 | +IBM_CONFIG_UUID = "9796-932E" |
2698 | + |
2699 | + |
2700 | +class Platforms(object): |
2701 | + TEMPLATE_LIVE_METADATA = "Template/Live/Metadata" |
2702 | + TEMPLATE_LIVE_NODATA = "UNABLE TO BE IDENTIFIED." |
2703 | + TEMPLATE_PROVISIONING_METADATA = "Template/Provisioning/Metadata" |
2704 | + TEMPLATE_PROVISIONING_NODATA = "Template/Provisioning/No-Metadata" |
2705 | + OS_CODE = "OS-Code/Live" |
2706 | + |
2707 | + |
2708 | +PROVISIONING = ( |
2709 | + Platforms.TEMPLATE_PROVISIONING_METADATA, |
2710 | + Platforms.TEMPLATE_PROVISIONING_NODATA) |
2711 | + |
2712 | + |
2713 | +class DataSourceIBMCloud(sources.DataSource): |
2714 | + |
2715 | + dsname = 'IBMCloud' |
2716 | + system_uuid = None |
2717 | + |
2718 | + def __init__(self, sys_cfg, distro, paths): |
2719 | + super(DataSourceIBMCloud, self).__init__(sys_cfg, distro, paths) |
2720 | + self.source = None |
2721 | + self._network_config = None |
2722 | + self.network_json = None |
2723 | + self.platform = None |
2724 | + |
2725 | + def __str__(self): |
2726 | + root = super(DataSourceIBMCloud, self).__str__() |
2727 | + mstr = "%s [%s %s]" % (root, self.platform, self.source) |
2728 | + return mstr |
2729 | + |
2730 | + def _get_data(self): |
2731 | + results = read_md() |
2732 | + if results is None: |
2733 | + return False |
2734 | + |
2735 | + self.source = results['source'] |
2736 | + self.platform = results['platform'] |
2737 | + self.metadata = results['metadata'] |
2738 | + self.userdata_raw = results.get('userdata') |
2739 | + self.network_json = results.get('networkdata') |
2740 | + vd = results.get('vendordata') |
2741 | + self.vendordata_pure = vd |
2742 | + self.system_uuid = results['system-uuid'] |
2743 | + try: |
2744 | + self.vendordata_raw = sources.convert_vendordata(vd) |
2745 | + except ValueError as e: |
2746 | + LOG.warning("Invalid content in vendor-data: %s", e) |
2747 | + self.vendordata_raw = None |
2748 | + |
2749 | + return True |
2750 | + |
2751 | + def check_instance_id(self, sys_cfg): |
2752 | + """quickly check (local only) if self.instance_id is still valid |
2753 | + |
2754 | + in Template mode, the system uuid (/sys/hypervisor/uuid) is the |
2755 | + same as found in the METADATA disk. But that is not true in OS_CODE |
2756 | + mode. So we read the system_uuid and keep that for later compare.""" |
2757 | + if self.system_uuid is None: |
2758 | + return False |
2759 | + return self.system_uuid == _read_system_uuid() |
2760 | + |
2761 | + @property |
2762 | + def network_config(self): |
2763 | + if self.platform != Platforms.OS_CODE: |
2764 | + # If deployed from template, an agent in the provisioning |
2765 | + # environment handles networking configuration. Not cloud-init. |
2766 | + return {'config': 'disabled', 'version': 1} |
2767 | + if self._network_config is None: |
2768 | + if self.network_json is not None: |
2769 | + LOG.debug("network config provided via network_json") |
2770 | + self._network_config = openstack.convert_net_json( |
2771 | + self.network_json, known_macs=None) |
2772 | + else: |
2773 | + LOG.debug("no network configuration available.") |
2774 | + return self._network_config |
2775 | + |
2776 | + |
2777 | +def _read_system_uuid(): |
2778 | + uuid_path = "/sys/hypervisor/uuid" |
2779 | + if not os.path.isfile(uuid_path): |
2780 | + return None |
2781 | + return util.load_file(uuid_path).strip().lower() |
2782 | + |
2783 | + |
2784 | +def _is_xen(): |
2785 | + return os.path.exists("/proc/xen") |
2786 | + |
2787 | + |
2788 | +def _is_ibm_provisioning(): |
2789 | + return os.path.exists("/root/provisioningConfiguration.cfg") |
2790 | + |
2791 | + |
2792 | +def get_ibm_platform(): |
2793 | + """Return a tuple (Platform, path) |
2794 | + |
2795 | + If this is not IBM Cloud, then the return value is (None, None). |
2796 | + An instance in provisioning mode is considered running on IBM cloud.""" |
2797 | + label_mdata = "METADATA" |
2798 | + label_cfg2 = "CONFIG-2" |
2799 | + not_found = (None, None) |
2800 | + |
2801 | + if not _is_xen(): |
2802 | + return not_found |
2803 | + |
2804 | + # fslabels contains only the first entry with a given label. |
2805 | + fslabels = {} |
2806 | + try: |
2807 | + devs = util.blkid() |
2808 | + except util.ProcessExecutionError as e: |
2809 | + LOG.warning("Failed to run blkid: %s", e) |
2810 | + return (None, None) |
2811 | + |
2812 | + for dev in sorted(devs.keys()): |
2813 | + data = devs[dev] |
2814 | + label = data.get("LABEL", "").upper() |
2815 | + uuid = data.get("UUID", "").upper() |
2816 | + if label not in (label_mdata, label_cfg2): |
2817 | + continue |
2818 | + if label in fslabels: |
2819 | + LOG.warning("Duplicate fslabel '%s'. existing=%s current=%s", |
2820 | + label, fslabels[label], data) |
2821 | + continue |
2822 | + if label == label_cfg2 and uuid != IBM_CONFIG_UUID: |
2823 | + LOG.debug("Skipping %s with LABEL=%s due to uuid != %s: %s", |
2824 | + dev, label, uuid, data) |
2825 | + continue |
2826 | + fslabels[label] = data |
2827 | + |
2828 | + metadata_path = fslabels.get(label_mdata, {}).get('DEVNAME') |
2829 | + cfg2_path = fslabels.get(label_cfg2, {}).get('DEVNAME') |
2830 | + |
2831 | + if cfg2_path: |
2832 | + return (Platforms.OS_CODE, cfg2_path) |
2833 | + elif metadata_path: |
2834 | + if _is_ibm_provisioning(): |
2835 | + return (Platforms.TEMPLATE_PROVISIONING_METADATA, metadata_path) |
2836 | + else: |
2837 | + return (Platforms.TEMPLATE_LIVE_METADATA, metadata_path) |
2838 | + elif _is_ibm_provisioning(): |
2839 | + return (Platforms.TEMPLATE_PROVISIONING_NODATA, None) |
2840 | + return not_found |
2841 | + |
2842 | + |
2843 | +def read_md(): |
2844 | + """Read data from IBM Cloud. |
2845 | + |
2846 | + @return: None if not running on IBM Cloud. |
2847 | + dictionary with guaranteed fields: metadata, version |
2848 | + and optional fields: userdata, vendordata, networkdata. |
2849 | + Also includes the system uuid from /sys/hypervisor/uuid.""" |
2850 | + platform, path = get_ibm_platform() |
2851 | + if platform is None: |
2852 | + LOG.debug("This is not an IBMCloud platform.") |
2853 | + return None |
2854 | + elif platform in PROVISIONING: |
2855 | + LOG.debug("Cloud-init is disabled during provisioning: %s.", |
2856 | + platform) |
2857 | + return None |
2858 | + |
2859 | + ret = {'platform': platform, 'source': path, |
2860 | + 'system-uuid': _read_system_uuid()} |
2861 | + |
2862 | + try: |
2863 | + if os.path.isdir(path): |
2864 | + results = metadata_from_dir(path) |
2865 | + else: |
2866 | + results = util.mount_cb(path, metadata_from_dir) |
2867 | + except BrokenMetadata as e: |
2868 | + raise RuntimeError( |
2869 | + "Failed reading IBM config disk (platform=%s path=%s): %s" % |
2870 | + (platform, path, e)) |
2871 | + |
2872 | + ret.update(results) |
2873 | + return ret |
2874 | + |
2875 | + |
2876 | +class BrokenMetadata(IOError): |
2877 | + pass |
2878 | + |
2879 | + |
2880 | +def metadata_from_dir(source_dir): |
2881 | + """Walk source_dir extracting standardized metadata. |
2882 | + |
2883 | + Certain metadata keys are renamed to present a standardized set of metadata |
2884 | + keys. |
2885 | + |
2886 | + This function has a lot in common with ConfigDriveReader.read_v2 but |
2887 | + there are a number of inconsistencies, such as key renames and only |
2888 | + presenting a 'latest' version, which make it an unlikely candidate to share |
2889 | + code. |
2890 | + |
2891 | + @return: Dict containing translated metadata, userdata, vendordata, |
2892 | + networkdata as present. |
2893 | + """ |
2894 | + |
2895 | + def opath(fname): |
2896 | + return os.path.join("openstack", "latest", fname) |
2897 | + |
2898 | + def load_json_bytes(blob): |
2899 | + return json.loads(blob.decode('utf-8')) |
2900 | + |
2901 | + files = [ |
2902 | + # tuples of (results_name, path, translator) |
2903 | + ('metadata_raw', opath('meta_data.json'), load_json_bytes), |
2904 | + ('userdata', opath('user_data'), None), |
2905 | + ('vendordata', opath('vendor_data.json'), load_json_bytes), |
2906 | + ('networkdata', opath('network_data.json'), load_json_bytes), |
2907 | + ] |
2908 | + |
2909 | + results = {} |
2910 | + for (name, path, transl) in files: |
2911 | + fpath = os.path.join(source_dir, path) |
2912 | + raw = None |
2913 | + try: |
2914 | + raw = util.load_file(fpath, decode=False) |
2915 | + except IOError as e: |
2916 | + LOG.debug("Failed reading path '%s': %s", fpath, e) |
2917 | + |
2918 | + if raw is None or transl is None: |
2919 | + data = raw |
2920 | + else: |
2921 | + try: |
2922 | + data = transl(raw) |
2923 | + except Exception as e: |
2924 | + raise BrokenMetadata("Failed decoding %s: %s" % (path, e)) |
2925 | + |
2926 | + results[name] = data |
2927 | + |
2928 | + if results.get('metadata_raw') is None: |
2929 | + raise BrokenMetadata( |
2930 | + "%s missing required file 'meta_data.json'" % source_dir) |
2931 | + |
2932 | + results['metadata'] = {} |
2933 | + |
2934 | + md_raw = results['metadata_raw'] |
2935 | + md = results['metadata'] |
2936 | + if 'random_seed' in md_raw: |
2937 | + try: |
2938 | + md['random_seed'] = base64.b64decode(md_raw['random_seed']) |
2939 | + except (ValueError, TypeError) as e: |
2940 | + raise BrokenMetadata( |
2941 | + "Badly formatted metadata random_seed entry: %s" % e) |
2942 | + |
2943 | + renames = ( |
2944 | + ('public_keys', 'public-keys'), ('hostname', 'local-hostname'), |
2945 | + ('uuid', 'instance-id')) |
2946 | + for mdname, newname in renames: |
2947 | + if mdname in md_raw: |
2948 | + md[newname] = md_raw[mdname] |
2949 | + |
2950 | + return results |
2951 | + |
2952 | + |
2953 | +# Used to match classes to dependencies |
2954 | +datasources = [ |
2955 | + (DataSourceIBMCloud, (sources.DEP_FILESYSTEM,)), |
2956 | +] |
2957 | + |
2958 | + |
2959 | +# Return a list of data sources that match this set of dependencies |
2960 | +def get_datasource_list(depends): |
2961 | + return sources.list_from_depends(depends, datasources) |
2962 | + |
2963 | + |
2964 | +if __name__ == "__main__": |
2965 | + import argparse |
2966 | + |
2967 | + parser = argparse.ArgumentParser(description='Query IBM Cloud Metadata') |
2968 | + args = parser.parse_args() |
2969 | + data = read_md() |
2970 | + print(util.json_dumps(data)) |
2971 | + |
2972 | +# vi: ts=4 expandtab |
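
Detection in the new module hinges on blkid labels plus the fixed config-2 UUID. A condensed, self-contained sketch of the decision implemented by get_ibm_platform(); 'devs' mimics util.blkid()'s device -> {LABEL, UUID, ...} mapping and the sample values are invented:

IBM_CONFIG_UUID = "9796-932E"

def classify(devs, provisioning=False):
    metadata = cfg2 = None
    for dev, data in sorted(devs.items()):
        label = data.get("LABEL", "").upper()
        if label == "METADATA" and metadata is None:
            metadata = dev
        elif (label == "CONFIG-2" and cfg2 is None and
              data.get("UUID", "").upper() == IBM_CONFIG_UUID):
            cfg2 = dev
    if cfg2:
        return ("OS-Code/Live", cfg2)
    if metadata:
        return ("Template/Provisioning/Metadata" if provisioning
                else "Template/Live/Metadata", metadata)
    if provisioning:
        return ("Template/Provisioning/No-Metadata", None)
    return (None, None)

print(classify({"/dev/xvdh1": {"LABEL": "config-2", "UUID": "9796-932E"}}))
# ('OS-Code/Live', '/dev/xvdh1')
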
2973 | diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py |
2974 | index 6e62f98..dc914a7 100644 |
2975 | --- a/cloudinit/sources/DataSourceOVF.py |
2976 | +++ b/cloudinit/sources/DataSourceOVF.py |
2977 | @@ -95,11 +95,20 @@ class DataSourceOVF(sources.DataSource): |
2978 | "VMware Customization support") |
2979 | elif not util.get_cfg_option_bool( |
2980 | self.sys_cfg, "disable_vmware_customization", True): |
2981 | - deployPkgPluginPath = search_file("/usr/lib/vmware-tools", |
2982 | - "libdeployPkgPlugin.so") |
2983 | - if not deployPkgPluginPath: |
2984 | - deployPkgPluginPath = search_file("/usr/lib/open-vm-tools", |
2985 | - "libdeployPkgPlugin.so") |
2986 | + |
2987 | + search_paths = ( |
2988 | + "/usr/lib/vmware-tools", "/usr/lib64/vmware-tools", |
2989 | + "/usr/lib/open-vm-tools", "/usr/lib64/open-vm-tools") |
2990 | + |
2991 | + plugin = "libdeployPkgPlugin.so" |
2992 | + deployPkgPluginPath = None |
2993 | + for path in search_paths: |
2994 | + deployPkgPluginPath = search_file(path, plugin) |
2995 | + if deployPkgPluginPath: |
2996 | + LOG.debug("Found the customization plugin at %s", |
2997 | + deployPkgPluginPath) |
2998 | + break |
2999 | + |
3000 | if deployPkgPluginPath: |
3001 | # When the VM is powered on, the "VMware Tools" daemon |
3002 | # copies the customization specification file to |
3003 | @@ -111,6 +120,8 @@ class DataSourceOVF(sources.DataSource): |
3004 | msg="waiting for configuration file", |
3005 | func=wait_for_imc_cfg_file, |
3006 | args=("cust.cfg", max_wait)) |
3007 | + else: |
3008 | + LOG.debug("Did not find the customization plugin.") |
3009 | |
3010 | if vmwareImcConfigFilePath: |
3011 | LOG.debug("Found VMware Customization Config File at %s", |
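
The OVF change above turns the two hard-coded plugin locations into a loop over four candidate directories, taking the first hit. A generic first-match sketch; find_plugin is a local stand-in built on os.path, not the search_file helper used by the datasource:

import os

def find_plugin(dirs, name="libdeployPkgPlugin.so"):
    for d in dirs:
        candidate = os.path.join(d, name)
        if os.path.isfile(candidate):
            return candidate
    return None

print(find_plugin(("/usr/lib/vmware-tools", "/usr/lib64/vmware-tools",
                   "/usr/lib/open-vm-tools", "/usr/lib64/open-vm-tools")))
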
3012 | diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py |
3013 | index ce47b6b..d4a4111 100644 |
3014 | --- a/cloudinit/sources/DataSourceOpenNebula.py |
3015 | +++ b/cloudinit/sources/DataSourceOpenNebula.py |
3016 | @@ -20,7 +20,6 @@ import string |
3017 | |
3018 | from cloudinit import log as logging |
3019 | from cloudinit import net |
3020 | -from cloudinit.net import eni |
3021 | from cloudinit import sources |
3022 | from cloudinit import util |
3023 | |
3024 | @@ -91,19 +90,19 @@ class DataSourceOpenNebula(sources.DataSource): |
3025 | return False |
3026 | |
3027 | self.seed = seed |
3028 | - self.network_eni = results.get('network-interfaces') |
3029 | + self.network = results.get('network-interfaces') |
3030 | self.metadata = md |
3031 | self.userdata_raw = results.get('userdata') |
3032 | return True |
3033 | |
3034 | @property |
3035 | def network_config(self): |
3036 | - if self.network_eni is not None: |
3037 | - return eni.convert_eni_data(self.network_eni) |
3038 | + if self.network is not None: |
3039 | + return self.network |
3040 | else: |
3041 | return None |
3042 | |
3043 | - def get_hostname(self, fqdn=False, resolve_ip=None): |
3044 | + def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): |
3045 | if resolve_ip is None: |
3046 | if self.dsmode == sources.DSMODE_NETWORK: |
3047 | resolve_ip = True |
3048 | @@ -143,18 +142,42 @@ class OpenNebulaNetwork(object): |
3049 | def mac2network(self, mac): |
3050 | return self.mac2ip(mac).rpartition(".")[0] + ".0" |
3051 | |
3052 | - def get_dns(self, dev): |
3053 | - return self.get_field(dev, "dns", "").split() |
3054 | + def get_nameservers(self, dev): |
3055 | + nameservers = {} |
3056 | + dns = self.get_field(dev, "dns", "").split() |
3057 | + dns.extend(self.context.get('DNS', "").split()) |
3058 | + if dns: |
3059 | + nameservers['addresses'] = dns |
3060 | + search_domain = self.get_field(dev, "search_domain", "").split() |
3061 | + if search_domain: |
3062 | + nameservers['search'] = search_domain |
3063 | + return nameservers |
3064 | |
3065 | - def get_domain(self, dev): |
3066 | - return self.get_field(dev, "domain") |
3067 | + def get_mtu(self, dev): |
3068 | + return self.get_field(dev, "mtu") |
3069 | |
3070 | def get_ip(self, dev, mac): |
3071 | return self.get_field(dev, "ip", self.mac2ip(mac)) |
3072 | |
3073 | + def get_ip6(self, dev): |
3074 | + addresses6 = [] |
3075 | + ip6 = self.get_field(dev, "ip6") |
3076 | + if ip6: |
3077 | + addresses6.append(ip6) |
3078 | + ip6_ula = self.get_field(dev, "ip6_ula") |
3079 | + if ip6_ula: |
3080 | + addresses6.append(ip6_ula) |
3081 | + return addresses6 |
3082 | + |
3083 | + def get_ip6_prefix(self, dev): |
3084 | + return self.get_field(dev, "ip6_prefix_length", "64") |
3085 | + |
3086 | def get_gateway(self, dev): |
3087 | return self.get_field(dev, "gateway") |
3088 | |
3089 | + def get_gateway6(self, dev): |
3090 | + return self.get_field(dev, "gateway6") |
3091 | + |
3092 | def get_mask(self, dev): |
3093 | return self.get_field(dev, "mask", "255.255.255.0") |
3094 | |
3095 | @@ -171,13 +194,11 @@ class OpenNebulaNetwork(object): |
3096 | return default if val in (None, "") else val |
3097 | |
3098 | def gen_conf(self): |
3099 | - global_dns = self.context.get('DNS', "").split() |
3100 | - |
3101 | - conf = [] |
3102 | - conf.append('auto lo') |
3103 | - conf.append('iface lo inet loopback') |
3104 | - conf.append('') |
3105 | + netconf = {} |
3106 | + netconf['version'] = 2 |
3107 | + netconf['ethernets'] = {} |
3108 | |
3109 | + ethernets = {} |
3110 | for mac, dev in self.ifaces.items(): |
3111 | mac = mac.lower() |
3112 | |
3113 | @@ -185,29 +206,49 @@ class OpenNebulaNetwork(object): |
3114 | # dev stores the current system name. |
3115 | c_dev = self.context_devname.get(mac, dev) |
3116 | |
3117 | - conf.append('auto ' + dev) |
3118 | - conf.append('iface ' + dev + ' inet static') |
3119 | - conf.append(' #hwaddress %s' % mac) |
3120 | - conf.append(' address ' + self.get_ip(c_dev, mac)) |
3121 | - conf.append(' network ' + self.get_network(c_dev, mac)) |
3122 | - conf.append(' netmask ' + self.get_mask(c_dev)) |
3123 | + devconf = {} |
3124 | + |
3125 | + # Set MAC address |
3126 | + devconf['match'] = {'macaddress': mac} |
3127 | |
3128 | + # Set IPv4 address |
3129 | + devconf['addresses'] = [] |
3130 | + mask = self.get_mask(c_dev) |
3131 | + prefix = str(net.mask_to_net_prefix(mask)) |
3132 | + devconf['addresses'].append( |
3133 | + self.get_ip(c_dev, mac) + '/' + prefix) |
3134 | + |
3135 | + # Set IPv6 Global and ULA address |
3136 | + addresses6 = self.get_ip6(c_dev) |
3137 | + if addresses6: |
3138 | + prefix6 = self.get_ip6_prefix(c_dev) |
3139 | + devconf['addresses'].extend( |
3140 | + [i + '/' + prefix6 for i in addresses6]) |
3141 | + |
3142 | + # Set IPv4 default gateway |
3143 | gateway = self.get_gateway(c_dev) |
3144 | if gateway: |
3145 | - conf.append(' gateway ' + gateway) |
3146 | + devconf['gateway4'] = gateway |
3147 | + |
3148 | + # Set IPv6 default gateway |
3149 | + gateway6 = self.get_gateway6(c_dev) |
3150 | + if gateway6: |
3151 | + devconf['gateway6'] = gateway6 |
3152 | |
3153 | - domain = self.get_domain(c_dev) |
3154 | - if domain: |
3155 | - conf.append(' dns-search ' + domain) |
3156 | + # Set DNS servers and search domains |
3157 | + nameservers = self.get_nameservers(c_dev) |
3158 | + if nameservers: |
3159 | + devconf['nameservers'] = nameservers |
3160 | |
3161 | - # add global DNS servers to all interfaces |
3162 | - dns = self.get_dns(c_dev) |
3163 | - if global_dns or dns: |
3164 | - conf.append(' dns-nameservers ' + ' '.join(global_dns + dns)) |
3165 | + # Set MTU size |
3166 | + mtu = self.get_mtu(c_dev) |
3167 | + if mtu: |
3168 | + devconf['mtu'] = mtu |
3169 | |
3170 | - conf.append('') |
3171 | + ethernets[dev] = devconf |
3172 | |
3173 | - return "\n".join(conf) |
3174 | + netconf['ethernets'] = ethernets |
3175 | + return netconf |
3176 | |
3177 | |
3178 | def find_candidate_devs(): |
3179 | @@ -393,10 +434,10 @@ def read_context_disk_dir(source_dir, asuser=None): |
3180 | except TypeError: |
3181 | LOG.warning("Failed base64 decoding of userdata") |
3182 | |
3183 | - # generate static /etc/network/interfaces |
3184 | + # generate Network Configuration v2 |
3185 | # only if there are any required context variables |
3186 | - # http://opennebula.org/documentation:rel3.8:cong#network_configuration |
3187 | - ipaddr_keys = [k for k in context if re.match(r'^ETH\d+_IP$', k)] |
3188 | + # http://docs.opennebula.org/5.4/operation/references/template.html#context-section |
3189 | + ipaddr_keys = [k for k in context if re.match(r'^ETH\d+_IP.*$', k)] |
3190 | if ipaddr_keys: |
3191 | onet = OpenNebulaNetwork(context) |
3192 | results['network-interfaces'] = onet.gen_conf() |
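
gen_conf() now emits a Network Configuration v2 dict instead of ENI text. For a single-NIC context the result has roughly the shape below; every address, the MAC and the device name are invented example values:

example_netconf = {
    'version': 2,
    'ethernets': {
        'eth0': {
            'match': {'macaddress': '02:00:0a:00:00:05'},
            'addresses': ['10.0.0.5/24', 'fd00::5/64'],
            'gateway4': '10.0.0.1',
            'nameservers': {'addresses': ['10.0.0.2'],
                            'search': ['example.com']},
            'mtu': '1500',
        },
    },
}
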
3193 | diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py |
3194 | index b0b19c9..e2502b0 100644 |
3195 | --- a/cloudinit/sources/DataSourceScaleway.py |
3196 | +++ b/cloudinit/sources/DataSourceScaleway.py |
3197 | @@ -113,9 +113,9 @@ def query_data_api_once(api_address, timeout, requests_session): |
3198 | retries=0, |
3199 | session=requests_session, |
3200 | # If the error is a HTTP/404 or a ConnectionError, go into raise |
3201 | - # block below. |
3202 | - exception_cb=lambda _, exc: exc.code == 404 or ( |
3203 | - isinstance(exc.cause, requests.exceptions.ConnectionError) |
3204 | + # block below and don't bother retrying. |
3205 | + exception_cb=lambda _, exc: exc.code != 404 and ( |
3206 | + not isinstance(exc.cause, requests.exceptions.ConnectionError) |
3207 | ) |
3208 | ) |
3209 | return util.decode_binary(resp.contents) |
3210 | @@ -215,7 +215,7 @@ class DataSourceScaleway(sources.DataSource): |
3211 | def get_public_ssh_keys(self): |
3212 | return [key['key'] for key in self.metadata['ssh_public_keys']] |
3213 | |
3214 | - def get_hostname(self, fqdn=False, resolve_ip=False): |
3215 | + def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): |
3216 | return self.metadata['hostname'] |
3217 | |
3218 | @property |
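
The inverted Scaleway predicate now answers "keep retrying?": it returns True only when the failure is neither an HTTP 404 nor a ConnectionError, so those two cases fall straight through to the raise block. A standalone restatement with a dummy error type (UrlError and requests are not imported here):

class DummyError(Exception):
    def __init__(self, code=None, cause=None):
        self.code, self.cause = code, cause

def should_retry(exc):
    return exc.code != 404 and not isinstance(exc.cause, ConnectionError)

print(should_retry(DummyError(code=404)))                   # False -> raise
print(should_retry(DummyError(code=500)))                   # True  -> retry
print(should_retry(DummyError(cause=ConnectionError())))    # False -> raise
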
3219 | diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py |
3220 | index a05ca2f..df0b374 100644 |
3221 | --- a/cloudinit/sources/__init__.py |
3222 | +++ b/cloudinit/sources/__init__.py |
3223 | @@ -276,21 +276,34 @@ class DataSource(object): |
3224 | return "iid-datasource" |
3225 | return str(self.metadata['instance-id']) |
3226 | |
3227 | - def get_hostname(self, fqdn=False, resolve_ip=False): |
3228 | + def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): |
3229 | + """Get hostname or fqdn from the datasource. Look it up if desired. |
3230 | + |
3231 | + @param fqdn: Boolean, set True to return hostname with domain. |
3232 | + @param resolve_ip: Boolean, set True to attempt to resolve an ipv4 |
3233 | + address provided in local-hostname meta-data. |
3234 | + @param metadata_only: Boolean, set True to avoid looking up hostname |
3235 | + if meta-data doesn't have local-hostname present. |
3236 | + |
3237 | + @return: hostname or qualified hostname. Optionally return None when |
3238 | + metadata_only is True and local-hostname data is not available. |
3239 | + """ |
3240 | defdomain = "localdomain" |
3241 | defhost = "localhost" |
3242 | domain = defdomain |
3243 | |
3244 | if not self.metadata or 'local-hostname' not in self.metadata: |
3245 | + if metadata_only: |
3246 | + return None |
3247 | # this is somewhat questionable really. |
3248 | # the cloud datasource was asked for a hostname |
3249 | # and didn't have one. raising error might be more appropriate |
3250 | # but instead, basically look up the existing hostname |
3251 | toks = [] |
3252 | hostname = util.get_hostname() |
3253 | - fqdn = util.get_fqdn_from_hosts(hostname) |
3254 | - if fqdn and fqdn.find(".") > 0: |
3255 | - toks = str(fqdn).split(".") |
3256 | + hosts_fqdn = util.get_fqdn_from_hosts(hostname) |
3257 | + if hosts_fqdn and hosts_fqdn.find(".") > 0: |
3258 | + toks = str(hosts_fqdn).split(".") |
3259 | elif hostname and hostname.find(".") > 0: |
3260 | toks = str(hostname).split(".") |
3261 | elif hostname: |
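
With the widened signature, callers that only want a metadata-provided name can pass metadata_only=True and treat None as "nothing configured" instead of falling back to the running system's hostname. A small calling-pattern sketch; 'datasource' stands for any concrete DataSource instance and pick_hostname is illustrative:

def pick_hostname(datasource, fallback="localhost"):
    name = datasource.get_hostname(fqdn=True, metadata_only=True)
    if name is None:
        # no local-hostname in meta-data; do not guess from the live system
        return fallback
    return name
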
3262 | diff --git a/cloudinit/sources/helpers/hetzner.py b/cloudinit/sources/helpers/hetzner.py |
3263 | new file mode 100644 |
3264 | index 0000000..2554530 |
3265 | --- /dev/null |
3266 | +++ b/cloudinit/sources/helpers/hetzner.py |
3267 | @@ -0,0 +1,26 @@ |
3268 | +# Author: Jonas Keidel <jonas.keidel@hetzner.com> |
3269 | +# Author: Markus Schade <markus.schade@hetzner.com> |
3270 | +# |
3271 | +# This file is part of cloud-init. See LICENSE file for license information. |
3272 | + |
3273 | +from cloudinit import log as logging |
3274 | +from cloudinit import url_helper |
3275 | +from cloudinit import util |
3276 | + |
3277 | +LOG = logging.getLogger(__name__) |
3278 | + |
3279 | + |
3280 | +def read_metadata(url, timeout=2, sec_between=2, retries=30): |
3281 | + response = url_helper.readurl(url, timeout=timeout, |
3282 | + sec_between=sec_between, retries=retries) |
3283 | + if not response.ok(): |
3284 | + raise RuntimeError("unable to read metadata at %s" % url) |
3285 | + return util.load_yaml(response.contents.decode()) |
3286 | + |
3287 | + |
3288 | +def read_userdata(url, timeout=2, sec_between=2, retries=30): |
3289 | + response = url_helper.readurl(url, timeout=timeout, |
3290 | + sec_between=sec_between, retries=retries) |
3291 | + if not response.ok(): |
3292 | + raise RuntimeError("unable to read userdata at %s" % url) |
3293 | + return response.contents |
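
Both helpers wrap url_helper.readurl and differ only in how the body is interpreted: YAML for metadata, raw bytes for userdata. A hypothetical direct use, which only works from a Hetzner instance with the link-local route up; the URL is the one defined in DataSourceHetzner above:

from cloudinit.sources.helpers import hetzner as hc_helper

md = hc_helper.read_metadata('http://169.254.169.254/hetzner/v1/metadata',
                             timeout=2, sec_between=2, retries=30)
print(md.get('hostname'), md.get('instance-id'))
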
3294 | diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py |
3295 | index af15115..e7fda22 100644 |
3296 | --- a/cloudinit/sources/tests/test_init.py |
3297 | +++ b/cloudinit/sources/tests/test_init.py |
3298 | @@ -1,13 +1,15 @@ |
3299 | # This file is part of cloud-init. See LICENSE file for license information. |
3300 | |
3301 | +import inspect |
3302 | import os |
3303 | import six |
3304 | import stat |
3305 | |
3306 | from cloudinit.helpers import Paths |
3307 | +from cloudinit import importer |
3308 | from cloudinit.sources import ( |
3309 | INSTANCE_JSON_FILE, DataSource) |
3310 | -from cloudinit.tests.helpers import CiTestCase, skipIf |
3311 | +from cloudinit.tests.helpers import CiTestCase, skipIf, mock |
3312 | from cloudinit.user_data import UserDataProcessor |
3313 | from cloudinit import util |
3314 | |
3315 | @@ -108,6 +110,74 @@ class TestDataSource(CiTestCase): |
3316 | self.assertEqual('userdata_raw', datasource.userdata_raw) |
3317 | self.assertEqual('vendordata_raw', datasource.vendordata_raw) |
3318 | |
3319 | + def test_get_hostname_strips_local_hostname_without_domain(self): |
3320 | + """Datasource.get_hostname strips metadata local-hostname of domain.""" |
3321 | + tmp = self.tmp_dir() |
3322 | + datasource = DataSourceTestSubclassNet( |
3323 | + self.sys_cfg, self.distro, Paths({'run_dir': tmp})) |
3324 | + self.assertTrue(datasource.get_data()) |
3325 | + self.assertEqual( |
3326 | + 'test-subclass-hostname', datasource.metadata['local-hostname']) |
3327 | + self.assertEqual('test-subclass-hostname', datasource.get_hostname()) |
3328 | + datasource.metadata['local-hostname'] = 'hostname.my.domain.com' |
3329 | + self.assertEqual('hostname', datasource.get_hostname()) |
3330 | + |
3331 | + def test_get_hostname_with_fqdn_returns_local_hostname_with_domain(self): |
3332 | + """Datasource.get_hostname with fqdn set gets qualified hostname.""" |
3333 | + tmp = self.tmp_dir() |
3334 | + datasource = DataSourceTestSubclassNet( |
3335 | + self.sys_cfg, self.distro, Paths({'run_dir': tmp})) |
3336 | + self.assertTrue(datasource.get_data()) |
3337 | + datasource.metadata['local-hostname'] = 'hostname.my.domain.com' |
3338 | + self.assertEqual( |
3339 | + 'hostname.my.domain.com', datasource.get_hostname(fqdn=True)) |
3340 | + |
3341 | + def test_get_hostname_without_metadata_uses_system_hostname(self): |
3342 | + """Datasource.get_hostname runs util.get_hostname when no metadata.""" |
3343 | + tmp = self.tmp_dir() |
3344 | + datasource = DataSourceTestSubclassNet( |
3345 | + self.sys_cfg, self.distro, Paths({'run_dir': tmp})) |
3346 | + self.assertEqual({}, datasource.metadata) |
3347 | + mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts' |
3348 | + with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost: |
3349 | + with mock.patch(mock_fqdn) as m_fqdn: |
3350 | + m_gethost.return_value = 'systemhostname.domain.com' |
3351 | + m_fqdn.return_value = None # No matching fqdn in /etc/hosts |
3352 | + self.assertEqual('systemhostname', datasource.get_hostname()) |
3353 | + self.assertEqual( |
3354 | + 'systemhostname.domain.com', |
3355 | + datasource.get_hostname(fqdn=True)) |
3356 | + |
3357 | + def test_get_hostname_without_metadata_returns_none(self): |
3358 | + """Datasource.gethostname returns None when metadata_only and no MD.""" |
3359 | + """Datasource.get_hostname returns None when metadata_only and no MD.""" |
3360 | + datasource = DataSourceTestSubclassNet( |
3361 | + self.sys_cfg, self.distro, Paths({'run_dir': tmp})) |
3362 | + self.assertEqual({}, datasource.metadata) |
3363 | + mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts' |
3364 | + with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost: |
3365 | + with mock.patch(mock_fqdn) as m_fqdn: |
3366 | + self.assertIsNone(datasource.get_hostname(metadata_only=True)) |
3367 | + self.assertIsNone( |
3368 | + datasource.get_hostname(fqdn=True, metadata_only=True)) |
3369 | + self.assertEqual([], m_gethost.call_args_list) |
3370 | + self.assertEqual([], m_fqdn.call_args_list) |
3371 | + |
3372 | + def test_get_hostname_without_metadata_prefers_etc_hosts(self): |
3373 | + """Datasource.get_hostname prefers /etc/hosts to util.get_hostname.""" |
3374 | + tmp = self.tmp_dir() |
3375 | + datasource = DataSourceTestSubclassNet( |
3376 | + self.sys_cfg, self.distro, Paths({'run_dir': tmp})) |
3377 | + self.assertEqual({}, datasource.metadata) |
3378 | + mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts' |
3379 | + with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost: |
3380 | + with mock.patch(mock_fqdn) as m_fqdn: |
3381 | + m_gethost.return_value = 'systemhostname.domain.com' |
3382 | + m_fqdn.return_value = 'fqdnhostname.domain.com' |
3383 | + self.assertEqual('fqdnhostname', datasource.get_hostname()) |
3384 | + self.assertEqual('fqdnhostname.domain.com', |
3385 | + datasource.get_hostname(fqdn=True)) |
3386 | + |
3387 | def test_get_data_write_json_instance_data(self): |
3388 | """get_data writes INSTANCE_JSON_FILE to run_dir as readonly root.""" |
3389 | tmp = self.tmp_dir() |
3390 | @@ -200,3 +270,29 @@ class TestDataSource(CiTestCase): |
3391 | "WARNING: Error persisting instance-data.json: 'utf8' codec can't" |
3392 | " decode byte 0xaa in position 2: invalid start byte", |
3393 | self.logs.getvalue()) |
3394 | + |
3395 | + def test_get_hostname_subclass_support(self): |
3396 | + """Validate get_hostname signature on all subclasses of DataSource.""" |
3397 | + # Use inspect.getfullargspec when we drop py2.6 and py2.7 |
3398 | + get_args = inspect.getargspec # pylint: disable=W1505 |
3399 | + base_args = get_args(DataSource.get_hostname) # pylint: disable=W1505 |
3400 | + # Import all DataSource subclasses so we can inspect them. |
3401 | + modules = util.find_modules(os.path.dirname(os.path.dirname(__file__))) |
3402 | + for loc, name in modules.items(): |
3403 | + mod_locs, _ = importer.find_module(name, ['cloudinit.sources'], []) |
3404 | + if mod_locs: |
3405 | + importer.import_module(mod_locs[0]) |
3406 | + for child in DataSource.__subclasses__(): |
3407 | + if 'Test' in child.dsname: |
3408 | + continue |
3409 | + self.assertEqual( |
3410 | + base_args, |
3411 | + get_args(child.get_hostname), # pylint: disable=W1505 |
3412 | + '%s does not implement DataSource.get_hostname params' |
3413 | + % child) |
3414 | + for grandchild in child.__subclasses__(): |
3415 | + self.assertEqual( |
3416 | + base_args, |
3417 | + get_args(grandchild.get_hostname), # pylint: disable=W1505 |
3418 | + '%s does not implement DataSource.get_hostname params' |
3419 | + % grandchild) |
3420 | diff --git a/cloudinit/stages.py b/cloudinit/stages.py |
3421 | index d045268..bc4ebc8 100644 |
3422 | --- a/cloudinit/stages.py |
3423 | +++ b/cloudinit/stages.py |
3424 | @@ -132,8 +132,7 @@ class Init(object): |
3425 | return initial_dirs |
3426 | |
3427 | def purge_cache(self, rm_instance_lnk=False): |
3428 | - rm_list = [] |
3429 | - rm_list.append(self.paths.boot_finished) |
3430 | + rm_list = [self.paths.boot_finished] |
3431 | if rm_instance_lnk: |
3432 | rm_list.append(self.paths.instance_link) |
3433 | for f in rm_list: |
3434 | diff --git a/cloudinit/subp.py b/cloudinit/subp.py |
3435 | new file mode 100644 |
3436 | index 0000000..0ad0930 |
3437 | --- /dev/null |
3438 | +++ b/cloudinit/subp.py |
3439 | @@ -0,0 +1,57 @@ |
3440 | +# This file is part of cloud-init. See LICENSE file for license information. |
3441 | +"""Common utility functions for interacting with subprocess.""" |
3442 | + |
3443 | +# TODO move subp shellify and runparts related functions out of util.py |
3444 | + |
3445 | +import logging |
3446 | + |
3447 | +LOG = logging.getLogger(__name__) |
3448 | + |
3449 | + |
3450 | +def prepend_base_command(base_command, commands): |
3451 | + """Ensure user-provided commands start with base_command; warn otherwise. |
3452 | + |
3453 | + Each command is either a list or string. Perform the following: |
3454 | + - If the command is a list, pop the first element if it is None |
3455 | + - If the command is a list, insert base_command as the first element if |
3456 | + not present. |
3457 | + - When the command is a string not starting with 'base-command', warn. |
3458 | + |
3459 | + Allow flexibility to provide non-base-command environment/config setup if |
3460 | + needed. |
3461 | + |
3462 | + @commands: List of commands. Each command element is a list or string. |
3463 | + |
3464 | + @return: List of 'fixed up' commands. |
3465 | + @raise: TypeError on invalid config item type. |
3466 | + """ |
3467 | + warnings = [] |
3468 | + errors = [] |
3469 | + fixed_commands = [] |
3470 | + for command in commands: |
3471 | + if isinstance(command, list): |
3472 | + if command[0] is None: # Avoid warnings by specifying None |
3473 | + command = command[1:] |
3474 | + elif command[0] != base_command: # Automatically prepend |
3475 | + command.insert(0, base_command) |
3476 | + elif isinstance(command, str): |
3477 | + if not command.startswith('%s ' % base_command): |
3478 | + warnings.append(command) |
3479 | + else: |
3480 | + errors.append(str(command)) |
3481 | + continue |
3482 | + fixed_commands.append(command) |
3483 | + |
3484 | + if warnings: |
3485 | + LOG.warning( |
3486 | + 'Non-%s commands in %s config:\n%s', |
3487 | + base_command, base_command, '\n'.join(warnings)) |
3488 | + if errors: |
3489 | + raise TypeError( |
3490 | + 'Invalid {name} config.' |
3491 | + ' These commands are not a string or list:\n{errors}'.format( |
3492 | + name=base_command, errors='\n'.join(errors))) |
3493 | + return fixed_commands |
3494 | + |
3495 | + |
3496 | +# vi: ts=4 expandtab |
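
prepend_base_command's behaviour in one picture, following the rules in its docstring ('snap' is just an example base command, not special-cased anywhere):

from cloudinit import subp

cmds = [['list'], 'snap install core', [None, 'sh', '-c', 'echo hi'], 'apt update']
print(subp.prepend_base_command('snap', cmds))
# [['snap', 'list'], 'snap install core', ['sh', '-c', 'echo hi'], 'apt update']
# plus a logged warning about the non-snap string command 'apt update'
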
3497 | diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py |
3498 | index 0080c72..999b1d7 100644 |
3499 | --- a/cloudinit/tests/helpers.py |
3500 | +++ b/cloudinit/tests/helpers.py |
3501 | @@ -173,17 +173,15 @@ class CiTestCase(TestCase): |
3502 | dir = self.tmp_dir() |
3503 | return os.path.normpath(os.path.abspath(os.path.join(dir, path))) |
3504 | |
3505 | - def assertRaisesCodeEqual(self, expected, found): |
3506 | - """Handle centos6 having different context manager for assertRaises. |
3507 | - with assertRaises(Exception) as e: |
3508 | - raise Exception("BOO") |
3509 | - |
3510 | - centos6 will have e.exception as an integer. |
3511 | - anything nwere will have it as something with a '.code'""" |
3512 | - if isinstance(found, int): |
3513 | - self.assertEqual(expected, found) |
3514 | - else: |
3515 | - self.assertEqual(expected, found.code) |
3516 | + def sys_exit(self, code): |
3517 | + """Provide a wrapper around sys.exit for python 2.6 |
3518 | + |
3519 | + In 2.6, this code would produce 'cm.exception' with value int(2) |
3520 | + rather than the SystemExit that was raised by sys.exit(2). |
3521 | + with assertRaises(SystemExit) as cm: |
3522 | + sys.exit(2) |
3523 | + """ |
3524 | + raise SystemExit(code) |
3525 | |
3526 | |
3527 | class ResourceUsingTestCase(CiTestCase): |
3528 | @@ -285,10 +283,15 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): |
3529 | def patchOS(self, new_root): |
3530 | patch_funcs = { |
3531 | os.path: [('isfile', 1), ('exists', 1), |
3532 | - ('islink', 1), ('isdir', 1)], |
3533 | + ('islink', 1), ('isdir', 1), ('lexists', 1)], |
3534 | os: [('listdir', 1), ('mkdir', 1), |
3535 | - ('lstat', 1), ('symlink', 2)], |
3536 | + ('lstat', 1), ('symlink', 2)] |
3537 | } |
3538 | + |
3539 | + if hasattr(os, 'scandir'): |
3540 | + # py27 does not have scandir |
3541 | + patch_funcs[os].append(('scandir', 1)) |
3542 | + |
3543 | for (mod, funcs) in patch_funcs.items(): |
3544 | for f, nargs in funcs: |
3545 | func = getattr(mod, f) |
3546 | @@ -411,6 +414,19 @@ except AttributeError: |
3547 | return decorator |
3548 | |
3549 | |
3550 | +try: |
3551 | + import jsonschema |
3552 | + assert jsonschema # avoid pyflakes error F401: import unused |
3553 | + _missing_jsonschema_dep = False |
3554 | +except ImportError: |
3555 | + _missing_jsonschema_dep = True |
3556 | + |
3557 | + |
3558 | +def skipUnlessJsonSchema(): |
3559 | + return skipIf( |
3560 | + _missing_jsonschema_dep, "No python-jsonschema dependency present.") |
3561 | + |
3562 | + |
3563 | # older versions of mock do not have the useful 'assert_not_called' |
3564 | if not hasattr(mock.Mock, 'assert_not_called'): |
3565 | def __mock_assert_not_called(mmock): |
3566 | @@ -422,12 +438,12 @@ if not hasattr(mock.Mock, 'assert_not_called'): |
3567 | mock.Mock.assert_not_called = __mock_assert_not_called |
3568 | |
3569 | |
3570 | -# older unittest2.TestCase (centos6) do not have assertRaisesRegex |
3571 | -# And setting assertRaisesRegex to assertRaisesRegexp causes |
3572 | -# https://github.com/PyCQA/pylint/issues/1653 . So the workaround. |
3573 | +# older unittest2.TestCase (centos6) have only the now-deprecated |
3574 | +# assertRaisesRegexp. Simple assignment makes pylint complain about |
3575 | +# users of assertRaisesRegex, so we use getattr to trick it. |
3576 | +# https://github.com/PyCQA/pylint/issues/1946 |
3577 | if not hasattr(unittest2.TestCase, 'assertRaisesRegex'): |
3578 | - def _tricky(*args, **kwargs): |
3579 | - return unittest2.TestCase.assertRaisesRegexp |
3580 | - unittest2.TestCase.assertRaisesRegex = _tricky |
3581 | + unittest2.TestCase.assertRaisesRegex = ( |
3582 | + getattr(unittest2.TestCase, 'assertRaisesRegexp')) |
3583 | |
3584 | # vi: ts=4 expandtab |
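
The new skipUnlessJsonSchema() helper lets schema-dependent tests skip cleanly when python-jsonschema is absent. Typical use, with an invented test class and body:

from cloudinit.tests.helpers import CiTestCase, skipUnlessJsonSchema

class TestSchemaDependent(CiTestCase):

    @skipUnlessJsonSchema()
    def test_runs_only_when_jsonschema_is_installed(self):
        self.assertTrue(True)
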
3585 | diff --git a/cloudinit/tests/test_subp.py b/cloudinit/tests/test_subp.py |
3586 | new file mode 100644 |
3587 | index 0000000..448097d |
3588 | --- /dev/null |
3589 | +++ b/cloudinit/tests/test_subp.py |
3590 | @@ -0,0 +1,61 @@ |
3591 | +# This file is part of cloud-init. See LICENSE file for license information. |
3592 | + |
3593 | +"""Tests for cloudinit.subp utility functions""" |
3594 | + |
3595 | +from cloudinit import subp |
3596 | +from cloudinit.tests.helpers import CiTestCase |
3597 | + |
3598 | + |
3599 | +class TestPrependBaseCommands(CiTestCase): |
3600 | + |
3601 | + with_logs = True |
3602 | + |
3603 | + def test_prepend_base_command_errors_on_neither_string_nor_list(self): |
3604 | + """Raise an error for each command which is not a string or list.""" |
3605 | + orig_commands = ['ls', 1, {'not': 'gonna work'}, ['basecmd', 'list']] |
3606 | + with self.assertRaises(TypeError) as context_manager: |
3607 | + subp.prepend_base_command( |
3608 | + base_command='basecmd', commands=orig_commands) |
3609 | + self.assertEqual( |
3610 | + "Invalid basecmd config. These commands are not a string or" |
3611 | + " list:\n1\n{'not': 'gonna work'}", |
3612 | + str(context_manager.exception)) |
3613 | + |
3614 | + def test_prepend_base_command_warns_on_non_base_string_commands(self): |
3615 | + """Warn on each non-base command of type string.""" |
3616 | + orig_commands = [ |
3617 | + 'ls', 'basecmd list', 'touch /blah', 'basecmd install x'] |
3618 | + fixed_commands = subp.prepend_base_command( |
3619 | + base_command='basecmd', commands=orig_commands) |
3620 | + self.assertEqual( |
3621 | + 'WARNING: Non-basecmd commands in basecmd config:\n' |
3622 | + 'ls\ntouch /blah\n', |
3623 | + self.logs.getvalue()) |
3624 | + self.assertEqual(orig_commands, fixed_commands) |
3625 | + |
3626 | + def test_prepend_base_command_prepends_on_non_base_list_commands(self): |
3627 | + """Prepend 'basecmd' for each non-basecmd command of type list.""" |
3628 | + orig_commands = [['ls'], ['basecmd', 'list'], ['basecmda', '/blah'], |
3629 | + ['basecmd', 'install', 'x']] |
3630 | + expected = [['basecmd', 'ls'], ['basecmd', 'list'], |
3631 | + ['basecmd', 'basecmda', '/blah'], |
3632 | + ['basecmd', 'install', 'x']] |
3633 | + fixed_commands = subp.prepend_base_command( |
3634 | + base_command='basecmd', commands=orig_commands) |
3635 | + self.assertEqual('', self.logs.getvalue()) |
3636 | + self.assertEqual(expected, fixed_commands) |
3637 | + |
3638 | + def test_prepend_base_command_removes_first_item_when_none(self): |
3639 | + """Remove the first element of a non-basecmd when it is None.""" |
3640 | + orig_commands = [[None, 'ls'], ['basecmd', 'list'], |
3641 | + [None, 'touch', '/blah'], |
3642 | + ['basecmd', 'install', 'x']] |
3643 | + expected = [['ls'], ['basecmd', 'list'], |
3644 | + ['touch', '/blah'], |
3645 | + ['basecmd', 'install', 'x']] |
3646 | + fixed_commands = subp.prepend_base_command( |
3647 | + base_command='basecmd', commands=orig_commands) |
3648 | + self.assertEqual('', self.logs.getvalue()) |
3649 | + self.assertEqual(expected, fixed_commands) |
3650 | + |
3651 | +# vi: ts=4 expandtab |
3652 | diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py |
3653 | index ba6bf69..3f37dbb 100644 |
3654 | --- a/cloudinit/tests/test_util.py |
3655 | +++ b/cloudinit/tests/test_util.py |
3656 | @@ -3,6 +3,7 @@ |
3657 | """Tests for cloudinit.util""" |
3658 | |
3659 | import logging |
3660 | +from textwrap import dedent |
3661 | |
3662 | import cloudinit.util as util |
3663 | |
3664 | @@ -16,6 +17,25 @@ MOUNT_INFO = [ |
3665 | ] |
3666 | |
3667 | |
3668 | +class FakeCloud(object): |
3669 | + |
3670 | + def __init__(self, hostname, fqdn): |
3671 | + self.hostname = hostname |
3672 | + self.fqdn = fqdn |
3673 | + self.calls = [] |
3674 | + |
3675 | + def get_hostname(self, fqdn=None, metadata_only=None): |
3676 | + myargs = {} |
3677 | + if fqdn is not None: |
3678 | + myargs['fqdn'] = fqdn |
3679 | + if metadata_only is not None: |
3680 | + myargs['metadata_only'] = metadata_only |
3681 | + self.calls.append(myargs) |
3682 | + if fqdn: |
3683 | + return self.fqdn |
3684 | + return self.hostname |
3685 | + |
3686 | + |
3687 | class TestUtil(CiTestCase): |
3688 | |
3689 | def test_parse_mount_info_no_opts_no_arg(self): |
3690 | @@ -44,3 +64,152 @@ class TestUtil(CiTestCase): |
3691 | m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'ro,relatime') |
3692 | is_rw = util.mount_is_read_write('/') |
3693 | self.assertEqual(is_rw, False) |
3694 | + |
3695 | + |
3696 | +class TestShellify(CiTestCase): |
3697 | + |
3698 | + def test_input_dict_raises_type_error(self): |
3699 | + self.assertRaisesRegex( |
3700 | + TypeError, 'Input.*was.*dict.*xpected', |
3701 | + util.shellify, {'mykey': 'myval'}) |
3702 | + |
3703 | + def test_input_str_raises_type_error(self): |
3704 | + self.assertRaisesRegex( |
3705 | + TypeError, 'Input.*was.*str.*xpected', util.shellify, "foobar") |
3706 | + |
3707 | + def test_value_with_int_raises_type_error(self): |
3708 | + self.assertRaisesRegex( |
3709 | + TypeError, 'shellify.*int', util.shellify, ["foo", 1]) |
3710 | + |
3711 | + def test_supports_strings_and_lists(self): |
3712 | + self.assertEqual( |
3713 | + '\n'.join(["#!/bin/sh", "echo hi mom", "'echo' 'hi dad'", |
3714 | + "'echo' 'hi' 'sis'", ""]), |
3715 | + util.shellify(["echo hi mom", ["echo", "hi dad"], |
3716 | + ('echo', 'hi', 'sis')])) |
3717 | + |
3718 | + |
3719 | +class TestGetHostnameFqdn(CiTestCase): |
3720 | + |
3721 | + def test_get_hostname_fqdn_from_only_cfg_fqdn(self): |
3722 | + """When cfg only has the fqdn key, derive hostname and fqdn from it.""" |
3723 | + hostname, fqdn = util.get_hostname_fqdn( |
3724 | + cfg={'fqdn': 'myhost.domain.com'}, cloud=None) |
3725 | + self.assertEqual('myhost', hostname) |
3726 | + self.assertEqual('myhost.domain.com', fqdn) |
3727 | + |
3728 | + def test_get_hostname_fqdn_from_cfg_fqdn_and_hostname(self): |
3729 | + """When cfg has both fqdn and hostname keys, return them.""" |
3730 | + hostname, fqdn = util.get_hostname_fqdn( |
3731 | + cfg={'fqdn': 'myhost.domain.com', 'hostname': 'other'}, cloud=None) |
3732 | + self.assertEqual('other', hostname) |
3733 | + self.assertEqual('myhost.domain.com', fqdn) |
3734 | + |
3735 | + def test_get_hostname_fqdn_from_cfg_hostname_with_domain(self): |
3736 | + """When cfg has only hostname key which represents a fqdn, use that.""" |
3737 | + hostname, fqdn = util.get_hostname_fqdn( |
3738 | + cfg={'hostname': 'myhost.domain.com'}, cloud=None) |
3739 | + self.assertEqual('myhost', hostname) |
3740 | + self.assertEqual('myhost.domain.com', fqdn) |
3741 | + |
3742 | + def test_get_hostname_fqdn_from_cfg_hostname_without_domain(self): |
3743 | + """When cfg has a hostname without a '.' query cloud.get_hostname.""" |
3744 | + mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com') |
3745 | + hostname, fqdn = util.get_hostname_fqdn( |
3746 | + cfg={'hostname': 'myhost'}, cloud=mycloud) |
3747 | + self.assertEqual('myhost', hostname) |
3748 | + self.assertEqual('cloudhost.mycloud.com', fqdn) |
3749 | + self.assertEqual( |
3750 | + [{'fqdn': True, 'metadata_only': False}], mycloud.calls) |
3751 | + |
3752 | + def test_get_hostname_fqdn_from_without_fqdn_or_hostname(self): |
3753 | + """When cfg has neither hostname nor fqdn cloud.get_hostname.""" |
3754 | + mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com') |
3755 | + hostname, fqdn = util.get_hostname_fqdn(cfg={}, cloud=mycloud) |
3756 | + self.assertEqual('cloudhost', hostname) |
3757 | + self.assertEqual('cloudhost.mycloud.com', fqdn) |
3758 | + self.assertEqual( |
3759 | + [{'fqdn': True, 'metadata_only': False}, |
3760 | + {'metadata_only': False}], mycloud.calls) |
3761 | + |
3762 | + def test_get_hostname_fqdn_from_passes_metadata_only_to_cloud(self): |
3763 | + """Calls to cloud.get_hostname pass the metadata_only parameter.""" |
3764 | + mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com') |
3765 | + hostname, fqdn = util.get_hostname_fqdn( |
3766 | + cfg={}, cloud=mycloud, metadata_only=True) |
3767 | + self.assertEqual( |
3768 | + [{'fqdn': True, 'metadata_only': True}, |
3769 | + {'metadata_only': True}], mycloud.calls) |
3770 | + |
3771 | + |
3772 | +class TestBlkid(CiTestCase): |
3773 | + ids = { |
3774 | + "id01": "1111-1111", |
3775 | + "id02": "22222222-2222", |
3776 | + "id03": "33333333-3333", |
3777 | + "id04": "44444444-4444", |
3778 | + "id05": "55555555-5555-5555-5555-555555555555", |
3779 | + "id06": "66666666-6666-6666-6666-666666666666", |
3780 | + "id07": "52894610484658920398", |
3781 | + "id08": "86753098675309867530", |
3782 | + "id09": "99999999-9999-9999-9999-999999999999", |
3783 | + } |
3784 | + |
3785 | + blkid_out = dedent("""\ |
3786 | + /dev/loop0: TYPE="squashfs" |
3787 | + /dev/loop1: TYPE="squashfs" |
3788 | + /dev/loop2: TYPE="squashfs" |
3789 | + /dev/loop3: TYPE="squashfs" |
3790 | + /dev/sda1: UUID="{id01}" TYPE="vfat" PARTUUID="{id02}" |
3791 | + /dev/sda2: UUID="{id03}" TYPE="ext4" PARTUUID="{id04}" |
3792 | + /dev/sda3: UUID="{id05}" TYPE="ext4" PARTUUID="{id06}" |
3793 | + /dev/sda4: LABEL="default" UUID="{id07}" UUID_SUB="{id08}" """ |
3794 | + """TYPE="zfs_member" PARTUUID="{id09}" |
3795 | + /dev/loop4: TYPE="squashfs" |
3796 | + """) |
3797 | + |
3798 | + maxDiff = None |
3799 | + |
3800 | + def _get_expected(self): |
3801 | + return ({ |
3802 | + "/dev/loop0": {"DEVNAME": "/dev/loop0", "TYPE": "squashfs"}, |
3803 | + "/dev/loop1": {"DEVNAME": "/dev/loop1", "TYPE": "squashfs"}, |
3804 | + "/dev/loop2": {"DEVNAME": "/dev/loop2", "TYPE": "squashfs"}, |
3805 | + "/dev/loop3": {"DEVNAME": "/dev/loop3", "TYPE": "squashfs"}, |
3806 | + "/dev/loop4": {"DEVNAME": "/dev/loop4", "TYPE": "squashfs"}, |
3807 | + "/dev/sda1": {"DEVNAME": "/dev/sda1", "TYPE": "vfat", |
3808 | + "UUID": self.ids["id01"], |
3809 | + "PARTUUID": self.ids["id02"]}, |
3810 | + "/dev/sda2": {"DEVNAME": "/dev/sda2", "TYPE": "ext4", |
3811 | + "UUID": self.ids["id03"], |
3812 | + "PARTUUID": self.ids["id04"]}, |
3813 | + "/dev/sda3": {"DEVNAME": "/dev/sda3", "TYPE": "ext4", |
3814 | + "UUID": self.ids["id05"], |
3815 | + "PARTUUID": self.ids["id06"]}, |
3816 | + "/dev/sda4": {"DEVNAME": "/dev/sda4", "TYPE": "zfs_member", |
3817 | + "LABEL": "default", |
3818 | + "UUID": self.ids["id07"], |
3819 | + "UUID_SUB": self.ids["id08"], |
3820 | + "PARTUUID": self.ids["id09"]}, |
3821 | + }) |
3822 | + |
3823 | + @mock.patch("cloudinit.util.subp") |
3824 | + def test_functional_blkid(self, m_subp): |
3825 | + m_subp.return_value = ( |
3826 | + self.blkid_out.format(**self.ids), "") |
3827 | + self.assertEqual(self._get_expected(), util.blkid()) |
3828 | + m_subp.assert_called_with(["blkid", "-o", "full"], capture=True, |
3829 | + decode="replace") |
3830 | + |
3831 | + @mock.patch("cloudinit.util.subp") |
3832 | + def test_blkid_no_cache_uses_no_cache(self, m_subp): |
3833 | + """blkid should turn off cache if disable_cache is true.""" |
3834 | + m_subp.return_value = ( |
3835 | + self.blkid_out.format(**self.ids), "") |
3836 | + self.assertEqual(self._get_expected(), |
3837 | + util.blkid(disable_cache=True)) |
3838 | + m_subp.assert_called_with(["blkid", "-o", "full", "-c", "/dev/null"], |
3839 | + capture=True, decode="replace") |
3840 | + |
3841 | + |
3842 | +# vi: ts=4 expandtab |
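Aside from the diff itself: the new TestGetHostnameFqdn cases above exercise the precedence rules of util.get_hostname_fqdn(). A minimal sketch of those rules, assuming a hypothetical StubCloud stand-in for an init.cloudify() cloud (illustrative only, not part of the patch):

    from cloudinit import util

    class StubCloud(object):
        # Hypothetical stand-in; mirrors the FakeCloud helper used by the tests.
        def get_hostname(self, fqdn=False, metadata_only=False):
            return 'stub.example.invalid' if fqdn else 'stub'

    # An fqdn in cfg wins and also supplies the short hostname.
    print(util.get_hostname_fqdn(cfg={'fqdn': 'myhost.domain.com'}, cloud=None))
    # ('myhost', 'myhost.domain.com')

    # A dotless cfg hostname is kept, but the cloud is asked for the fqdn.
    print(util.get_hostname_fqdn(cfg={'hostname': 'myhost'}, cloud=StubCloud()))
    # ('myhost', 'stub.example.invalid')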
3843 | diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py |
3844 | index 0a5be0b..03a573a 100644 |
3845 | --- a/cloudinit/url_helper.py |
3846 | +++ b/cloudinit/url_helper.py |
3847 | @@ -16,7 +16,7 @@ import time |
3848 | |
3849 | from email.utils import parsedate |
3850 | from functools import partial |
3851 | - |
3852 | +from itertools import count |
3853 | from requests import exceptions |
3854 | |
3855 | from six.moves.urllib.parse import ( |
3856 | @@ -47,7 +47,7 @@ try: |
3857 | _REQ_VER = LooseVersion(_REQ.version) # pylint: disable=no-member |
3858 | if _REQ_VER >= LooseVersion('0.8.8'): |
3859 | SSL_ENABLED = True |
3860 | - if _REQ_VER >= LooseVersion('0.7.0') and _REQ_VER < LooseVersion('1.0.0'): |
3861 | + if LooseVersion('0.7.0') <= _REQ_VER < LooseVersion('1.0.0'): |
3862 | CONFIG_ENABLED = True |
3863 | except ImportError: |
3864 | pass |
3865 | @@ -121,7 +121,7 @@ class UrlResponse(object): |
3866 | upper = 300 |
3867 | if redirects_ok: |
3868 | upper = 400 |
3869 | - if self.code >= 200 and self.code < upper: |
3870 | + if 200 <= self.code < upper: |
3871 | return True |
3872 | else: |
3873 | return False |
3874 | @@ -172,7 +172,7 @@ def _get_ssl_args(url, ssl_details): |
3875 | def readurl(url, data=None, timeout=None, retries=0, sec_between=1, |
3876 | headers=None, headers_cb=None, ssl_details=None, |
3877 | check_status=True, allow_redirects=True, exception_cb=None, |
3878 | - session=None): |
3879 | + session=None, infinite=False): |
3880 | url = _cleanurl(url) |
3881 | req_args = { |
3882 | 'url': url, |
3883 | @@ -220,7 +220,8 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, |
3884 | excps = [] |
3885 | # Handle retrying ourselves since the built-in support |
3886 | # doesn't handle sleeping between tries... |
3887 | - for i in range(0, manual_tries): |
3888 | + # Infinitely retry if infinite is True |
3889 | + for i in count() if infinite else range(0, manual_tries): |
3890 | req_args['headers'] = headers_cb(url) |
3891 | filtered_req_args = {} |
3892 | for (k, v) in req_args.items(): |
3893 | @@ -229,7 +230,8 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, |
3894 | filtered_req_args[k] = v |
3895 | try: |
3896 | LOG.debug("[%s/%s] open '%s' with %s configuration", i, |
3897 | - manual_tries, url, filtered_req_args) |
3898 | + "infinite" if infinite else manual_tries, url, |
3899 | + filtered_req_args) |
3900 | |
3901 | if session is None: |
3902 | session = requests.Session() |
3903 | @@ -258,11 +260,13 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, |
3904 | # ssl exceptions are not going to get fixed by waiting a |
3905 | # few seconds |
3906 | break |
3907 | - if exception_cb and exception_cb(req_args.copy(), excps[-1]): |
3908 | - # if an exception callback was given it should return None |
3909 | - # a true-ish value means to break and re-raise the exception |
3910 | + if exception_cb and not exception_cb(req_args.copy(), excps[-1]): |
3911 | + # if an exception callback was given, it should return True |
3912 | + # to continue retrying and False to break and re-raise the |
3913 | + # exception |
3914 | break |
3915 | - if i + 1 < manual_tries and sec_between > 0: |
3916 | + if (infinite and sec_between > 0) or \ |
3917 | + (i + 1 < manual_tries and sec_between > 0): |
3918 | LOG.debug("Please wait %s seconds while we wait to try again", |
3919 | sec_between) |
3920 | time.sleep(sec_between) |
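The readurl() changes above add an infinite retry mode and revert the exception_cb contract: the callback now returns True to keep retrying and False (or None) to stop and re-raise. A minimal caller sketch, assuming a hypothetical metadata URL and callback (illustrative only):

    from cloudinit import url_helper

    def keep_trying(req_args, exception):
        # True means retry again; False/None means stop and re-raise.
        print('retrying after: %s' % exception)
        return True

    # Retry forever, sleeping 5 seconds between attempts.
    response = url_helper.readurl(
        'http://169.254.169.254/latest/meta-data/', timeout=10,
        sec_between=5, infinite=True, exception_cb=keep_trying)
    print(response.code, response.contents)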
3921 | diff --git a/cloudinit/util.py b/cloudinit/util.py |
3922 | index 338fb97..0ab2c48 100644 |
3923 | --- a/cloudinit/util.py |
3924 | +++ b/cloudinit/util.py |
3925 | @@ -546,7 +546,7 @@ def is_ipv4(instr): |
3926 | return False |
3927 | |
3928 | try: |
3929 | - toks = [x for x in toks if int(x) < 256 and int(x) >= 0] |
3930 | + toks = [x for x in toks if 0 <= int(x) < 256] |
3931 | except Exception: |
3932 | return False |
3933 | |
3934 | @@ -716,8 +716,7 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None): |
3935 | def make_url(scheme, host, port=None, |
3936 | path='', params='', query='', fragment=''): |
3937 | |
3938 | - pieces = [] |
3939 | - pieces.append(scheme or '') |
3940 | + pieces = [scheme or ''] |
3941 | |
3942 | netloc = '' |
3943 | if host: |
3944 | @@ -1026,9 +1025,16 @@ def dos2unix(contents): |
3945 | return contents.replace('\r\n', '\n') |
3946 | |
3947 | |
3948 | -def get_hostname_fqdn(cfg, cloud): |
3949 | - # return the hostname and fqdn from 'cfg'. If not found in cfg, |
3950 | - # then fall back to data from cloud |
3951 | +def get_hostname_fqdn(cfg, cloud, metadata_only=False): |
3952 | + """Get hostname and fqdn from config if present and fallback to cloud. |
3953 | + |
3954 | + @param cfg: Dictionary of merged user-data configuration (from init.cfg). |
3955 | + @param cloud: Cloud instance from init.cloudify(). |
3956 | + @param metadata_only: Boolean, set True to only query cloud meta-data, |
3957 | + returning None if not present in meta-data. |
3958 | + @return: a Tuple of strings <hostname>, <fqdn>. Values can be none when |
3959 | + metadata_only is True and no cfg or metadata provides hostname info. |
3960 | + """ |
3961 | if "fqdn" in cfg: |
3962 | # user specified a fqdn. Default hostname then is based off that |
3963 | fqdn = cfg['fqdn'] |
3964 | @@ -1042,11 +1048,11 @@ def get_hostname_fqdn(cfg, cloud): |
3965 | else: |
3966 | # no fqdn set, get fqdn from cloud. |
3967 | # get hostname from cfg if available otherwise cloud |
3968 | - fqdn = cloud.get_hostname(fqdn=True) |
3969 | + fqdn = cloud.get_hostname(fqdn=True, metadata_only=metadata_only) |
3970 | if "hostname" in cfg: |
3971 | hostname = cfg['hostname'] |
3972 | else: |
3973 | - hostname = cloud.get_hostname() |
3974 | + hostname = cloud.get_hostname(metadata_only=metadata_only) |
3975 | return (hostname, fqdn) |
3976 | |
3977 | |
3978 | @@ -1231,6 +1237,37 @@ def find_devs_with(criteria=None, oformat='device', |
3979 | return entries |
3980 | |
3981 | |
3982 | +def blkid(devs=None, disable_cache=False): |
3983 | + """Get all device tags details from blkid. |
3984 | + |
3985 | + @param devs: Optional list of device paths you wish to query. |
3986 | + @param disable_cache: Bool, set True to start with clean cache. |
3987 | + |
3988 | + @return: Dict of key value pairs of info for the device. |
3989 | + """ |
3990 | + if devs is None: |
3991 | + devs = [] |
3992 | + else: |
3993 | + devs = list(devs) |
3994 | + |
3995 | + cmd = ['blkid', '-o', 'full'] |
3996 | + if disable_cache: |
3997 | + cmd.extend(['-c', '/dev/null']) |
3998 | + cmd.extend(devs) |
3999 | + |
4000 | + # we have to decode with 'replace' as shlex.split (called by |
4001 | + # load_shell_content) can't take bytes. So this is potentially |
4002 | + # lossy of non-utf-8 chars in blkid output. |
4003 | + out, _ = subp(cmd, capture=True, decode="replace") |
4004 | + ret = {} |
4005 | + for line in out.splitlines(): |
4006 | + dev, _, data = line.partition(":") |
4007 | + ret[dev] = load_shell_content(data) |
4008 | + ret[dev]["DEVNAME"] = dev |
4009 | + |
4010 | + return ret |
4011 | + |
4012 | + |
4013 | def peek_file(fname, max_bytes): |
4014 | LOG.debug("Peeking at %s (max_bytes=%s)", fname, max_bytes) |
4015 | with open(fname, 'rb') as ifh: |
4016 | @@ -1746,7 +1783,7 @@ def chmod(path, mode): |
4017 | def write_file(filename, content, mode=0o644, omode="wb", copy_mode=False): |
4018 | """ |
4019 | Writes a file with the given content and sets the file mode as specified. |
4020 | - Resotres the SELinux context if possible. |
4021 | + Restores the SELinux context if possible. |
4022 | |
4023 | @param filename: The full path of the file to write. |
4024 | @param content: The content to write to the file. |
4025 | @@ -1821,7 +1858,8 @@ def subp_blob_in_tempfile(blob, *args, **kwargs): |
4026 | |
4027 | |
4028 | def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, |
4029 | - logstring=False, decode="replace", target=None, update_env=None): |
4030 | + logstring=False, decode="replace", target=None, update_env=None, |
4031 | + status_cb=None): |
4032 | |
4033 | # not supported in cloud-init (yet), for now kept in the call signature |
4034 | # to ease maintaining code shared between cloud-init and curtin |
4035 | @@ -1842,6 +1880,9 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, |
4036 | if target_path(target) != "/": |
4037 | args = ['chroot', target] + list(args) |
4038 | |
4039 | + if status_cb: |
4040 | + command = ' '.join(args) if isinstance(args, list) else args |
4041 | + status_cb('Begin run command: {command}\n'.format(command=command)) |
4042 | if not logstring: |
4043 | LOG.debug(("Running command %s with allowed return codes %s" |
4044 | " (shell=%s, capture=%s)"), args, rcs, shell, capture) |
4045 | @@ -1865,12 +1906,25 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, |
4046 | if not isinstance(data, bytes): |
4047 | data = data.encode() |
4048 | |
4049 | + # Popen converts entries in the arguments array from non-bytes to bytes. |
4050 | + # When locale is unset it may use ascii for that encoding which can |
4051 | + # cause UnicodeDecodeErrors. (LP: #1751051) |
4052 | + if isinstance(args, six.binary_type): |
4053 | + bytes_args = args |
4054 | + elif isinstance(args, six.string_types): |
4055 | + bytes_args = args.encode("utf-8") |
4056 | + else: |
4057 | + bytes_args = [ |
4058 | + x if isinstance(x, six.binary_type) else x.encode("utf-8") |
4059 | + for x in args] |
4060 | try: |
4061 | - sp = subprocess.Popen(args, stdout=stdout, |
4062 | + sp = subprocess.Popen(bytes_args, stdout=stdout, |
4063 | stderr=stderr, stdin=stdin, |
4064 | env=env, shell=shell) |
4065 | (out, err) = sp.communicate(data) |
4066 | except OSError as e: |
4067 | + if status_cb: |
4068 | + status_cb('ERROR: End run command: invalid command provided\n') |
4069 | raise ProcessExecutionError( |
4070 | cmd=args, reason=e, errno=e.errno, |
4071 | stdout="-" if decode else b"-", |
4072 | @@ -1895,9 +1949,14 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, |
4073 | |
4074 | rc = sp.returncode |
4075 | if rc not in rcs: |
4076 | + if status_cb: |
4077 | + status_cb( |
4078 | + 'ERROR: End run command: exit({code})\n'.format(code=rc)) |
4079 | raise ProcessExecutionError(stdout=out, stderr=err, |
4080 | exit_code=rc, |
4081 | cmd=args) |
4082 | + if status_cb: |
4083 | + status_cb('End run command: exit({code})\n'.format(code=rc)) |
4084 | return (out, err) |
4085 | |
4086 | |
4087 | @@ -1918,6 +1977,11 @@ def abs_join(*paths): |
4088 | # if it is an array, shell protect it (with single ticks) |
4089 | # if it is a string, do nothing |
4090 | def shellify(cmdlist, add_header=True): |
4091 | + if not isinstance(cmdlist, (tuple, list)): |
4092 | + raise TypeError( |
4093 | + "Input to shellify was type '%s'. Expected list or tuple." % |
4094 | + (type_utils.obj_name(cmdlist))) |
4095 | + |
4096 | content = '' |
4097 | if add_header: |
4098 | content += "#!/bin/sh\n" |
4099 | @@ -1926,7 +1990,7 @@ def shellify(cmdlist, add_header=True): |
4100 | for args in cmdlist: |
4101 | # If the item is a list, wrap all items in single tick. |
4102 | # If its not, then just write it directly. |
4103 | - if isinstance(args, list): |
4104 | + if isinstance(args, (list, tuple)): |
4105 | fixed = [] |
4106 | for f in args: |
4107 | fixed.append("'%s'" % (six.text_type(f).replace("'", escaped))) |
4108 | @@ -1936,9 +2000,10 @@ def shellify(cmdlist, add_header=True): |
4109 | content = "%s%s\n" % (content, args) |
4110 | cmds_made += 1 |
4111 | else: |
4112 | - raise RuntimeError(("Unable to shellify type %s" |
4113 | - " which is not a list or string") |
4114 | - % (type_utils.obj_name(args))) |
4115 | + raise TypeError( |
4116 | + "Unable to shellify type '%s'. Expected list, string, tuple. " |
4117 | + "Got: %s" % (type_utils.obj_name(args), args)) |
4118 | + |
4119 | LOG.debug("Shellified %s commands.", cmds_made) |
4120 | return content |
4121 | |
4122 | @@ -2169,7 +2234,7 @@ def get_path_dev_freebsd(path, mnt_list): |
4123 | return path_found |
4124 | |
4125 | |
4126 | -def get_mount_info_freebsd(path, log=LOG): |
4127 | +def get_mount_info_freebsd(path): |
4128 | (result, err) = subp(['mount', '-p', path], rcs=[0, 1]) |
4129 | if len(err): |
4130 | # find a path if the input is not a mounting point |
4131 | @@ -2183,23 +2248,49 @@ def get_mount_info_freebsd(path, log=LOG): |
4132 | return "/dev/" + label_part, ret[2], ret[1] |
4133 | |
4134 | |
4135 | +def get_device_info_from_zpool(zpool): |
4136 | + (zpoolstatus, err) = subp(['zpool', 'status', zpool]) |
4137 | + if len(err): |
4138 | + return None |
4139 | + r = r'.*(ONLINE).*' |
4140 | + for line in zpoolstatus.split("\n"): |
4141 | + if re.search(r, line) and zpool not in line and "state" not in line: |
4142 | + disk = line.split()[0] |
4143 | + LOG.debug('found zpool "%s" on disk %s', zpool, disk) |
4144 | + return disk |
4145 | + |
4146 | + |
4147 | def parse_mount(path): |
4148 | - (mountoutput, _err) = subp("mount") |
4149 | + (mountoutput, _err) = subp(['mount']) |
4150 | mount_locs = mountoutput.splitlines() |
4151 | + # there are 2 types of mount outputs we have to parse therefore |
4152 | + # the regex is a bit complex. to better understand this regex see: |
4153 | + # https://regex101.com/r/2F6c1k/1 |
4154 | + # https://regex101.com/r/T2en7a/1 |
4155 | + regex = r'^(/dev/[\S]+|.*zroot\S*?) on (/[\S]*) ' + \ |
4156 | + '(?=(?:type)[\s]+([\S]+)|\(([^,]*))' |
4157 | for line in mount_locs: |
4158 | - m = re.search(r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$', line) |
4159 | + m = re.search(regex, line) |
4160 | if not m: |
4161 | continue |
4162 | + devpth = m.group(1) |
4163 | + mount_point = m.group(2) |
4164 | + # above regex will either fill the fs_type in group(3) |
4165 | + # or group(4) depending on the format we have. |
4166 | + fs_type = m.group(3) |
4167 | + if fs_type is None: |
4168 | + fs_type = m.group(4) |
4169 | + LOG.debug('found line in mount -> devpth: %s, mount_point: %s, ' |
4170 | + 'fs_type: %s', devpth, mount_point, fs_type) |
4171 | # check whether the dev refers to a label on FreeBSD |
4172 | # for example, if dev is '/dev/label/rootfs', we should |
4173 | # continue finding the real device like '/dev/da0'. |
4174 | - devm = re.search('^(/dev/.+)p([0-9])$', m.group(1)) |
4175 | - if (not devm and is_FreeBSD()): |
4176 | + # this is only valid for non zfs file systems as a zpool |
4177 | + # can have gpt labels as disk. |
4178 | + devm = re.search('^(/dev/.+)p([0-9])$', devpth) |
4179 | + if not devm and is_FreeBSD() and fs_type != 'zfs': |
4180 | return get_mount_info_freebsd(path) |
4181 | - devpth = m.group(1) |
4182 | - mount_point = m.group(2) |
4183 | - fs_type = m.group(3) |
4184 | - if mount_point == path: |
4185 | + elif mount_point == path: |
4186 | return devpth, fs_type, mount_point |
4187 | return None |
4188 | |
4189 | diff --git a/cloudinit/version.py b/cloudinit/version.py |
4190 | index be6262d..ccd0f84 100644 |
4191 | --- a/cloudinit/version.py |
4192 | +++ b/cloudinit/version.py |
4193 | @@ -4,7 +4,7 @@ |
4194 | # |
4195 | # This file is part of cloud-init. See LICENSE file for license information. |
4196 | |
4197 | -__VERSION__ = "17.2" |
4198 | +__VERSION__ = "18.2" |
4199 | |
4200 | FEATURES = [ |
4201 | # supports network config version 1 |
4202 | diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl |
4203 | index 32de9c9..3129d4e 100644 |
4204 | --- a/config/cloud.cfg.tmpl |
4205 | +++ b/config/cloud.cfg.tmpl |
4206 | @@ -4,6 +4,8 @@ |
4207 | |
4208 | {% if variant in ["freebsd"] %} |
4209 | syslog_fix_perms: root:wheel |
4210 | +{% elif variant in ["suse"] %} |
4211 | +syslog_fix_perms: root:root |
4212 | {% endif %} |
4213 | # A set of users which may be applied and/or used by various modules |
4214 | # when a 'default' entry is found it will reference the 'default_user' |
4215 | @@ -70,7 +72,8 @@ cloud_config_modules: |
4216 | # Emit the cloud config ready event |
4217 | # this can be used by upstart jobs for 'start on cloud-config'. |
4218 | - emit_upstart |
4219 | - - snap_config |
4220 | + - snap |
4221 | + - snap_config # DEPRECATED- Drop in version 18.2 |
4222 | {% endif %} |
4223 | - ssh-import-id |
4224 | - locale |
4225 | @@ -84,6 +87,9 @@ cloud_config_modules: |
4226 | - apt-pipelining |
4227 | - apt-configure |
4228 | {% endif %} |
4229 | +{% if variant in ["ubuntu"] %} |
4230 | + - ubuntu-advantage |
4231 | +{% endif %} |
4232 | {% if variant in ["suse"] %} |
4233 | - zypper-add-repo |
4234 | {% endif %} |
4235 | @@ -100,7 +106,7 @@ cloud_config_modules: |
4236 | # The modules that run in the 'final' stage |
4237 | cloud_final_modules: |
4238 | {% if variant in ["ubuntu", "unknown", "debian"] %} |
4239 | - - snappy |
4240 | + - snappy # DEPRECATED- Drop in version 18.2 |
4241 | {% endif %} |
4242 | - package-update-upgrade-install |
4243 | {% if variant in ["ubuntu", "unknown", "debian"] %} |
4244 | @@ -111,9 +117,9 @@ cloud_final_modules: |
4245 | {% if variant not in ["freebsd"] %} |
4246 | - puppet |
4247 | - chef |
4248 | - - salt-minion |
4249 | - mcollective |
4250 | {% endif %} |
4251 | + - salt-minion |
4252 | - rightscale_userdata |
4253 | - scripts-vendor |
4254 | - scripts-per-once |
4255 | diff --git a/debian/changelog b/debian/changelog |
4256 | index a319c5e..c7fc4fc 100644 |
4257 | --- a/debian/changelog |
4258 | +++ b/debian/changelog |
4259 | @@ -1,10 +1,76 @@ |
4260 | -cloud-init (17.2-35-gf576b2a2-0ubuntu1~17.10.3) UNRELEASED; urgency=medium |
4261 | +cloud-init (18.2-0ubuntu1~17.10.1) artful-proposed; urgency=medium |
4262 | |
4263 | * Drop the following cherry picks in debian/patches. They are now |
3264 | incorporated in the upstream source: |
4265 | + cpick-40e7738-GCE-fix-reading-of-user-data-that-is-not-base64-encoded |
4266 | - |
4267 | - -- Scott Moser <smoser@ubuntu.com> Wed, 14 Mar 2018 15:43:25 -0400 |
4268 | + * New upstream snapshot. (LP: #1759406) |
4269 | + - release 18.2 (LP: #1759318) |
4270 | + - Hetzner: Exit early if dmi system-manufacturer is not Hetzner. |
4271 | + - Add missing dependency on isc-dhcp-client to trunk ubuntu packaging. |
4272 | + (LP: #1759307) |
4273 | + - FreeBSD: resizefs module now able to handle zfs/zpool. |
4274 | + [Dominic Schlegel] (LP: #1721243) |
4275 | + - cc_puppet: Revert regression of puppet creating ssl and ssl_cert dirs |
4276 | + - Enable IBMCloud datasource in settings.py. |
4277 | + - IBMCloud: Initial IBM Cloud datasource. |
4278 | + - tests: remove jsonschema from xenial tox environment. |
4279 | + - tests: Fix newly added schema unit tests to skip if no jsonschema. |
4280 | + - ec2: Adjust ec2 datasource after exception_cb change. |
4281 | + - Reduce AzurePreprovisioning HTTP timeouts. |
4282 | + [Douglas Jordan] (LP: #1752977) |
4283 | + - Revert the logic of exception_cb in read_url. |
4284 | + [Kurt Garloff] (LP: #1702160, #1298921) |
4285 | + - ubuntu-advantage: Add new config module to support |
4286 | + ubuntu-advantage-tools |
4287 | + - Handle global dns entries in netplan [Ryan Harper] (LP: #1750884) |
4288 | + - Identify OpenTelekomCloud Xen as OpenStack DS. |
4289 | + [Kurt Garloff] (LP: #1756471) |
4290 | + - datasources: fix DataSource subclass get_hostname method signature |
4291 | + (LP: #1757176) |
4292 | + - OpenNebula: Update network to return v2 config rather than ENI. |
4293 | + [Akihiko Ota] |
4294 | + - Add Hetzner Cloud DataSource |
4295 | + - net: recognize iscsi root cases without ip= on kernel command line. |
4296 | + (LP: #1752391) |
4297 | + - tests: fix flakes warning for unused variable |
4298 | + - tests: patch leaked stderr messages from snap unit tests |
4299 | + - cc_snap: Add new module to install and configure snapd and snap |
4300 | + packages. |
4301 | + - tests: Make pylint happy and fix python2.6 uses of assertRaisesRegex. |
4302 | + - netplan: render bridge port-priority values (LP: #1735821) |
4303 | + - util: Fix subp regression. Allow specifying subp command as a string. |
4304 | + (LP: #1755965) |
4305 | + - doc: fix all warnings issued by 'tox -e doc' |
4306 | + - FreeBSD: Set hostname to FQDN. [Dominic Schlegel] (LP: #1753499) |
4307 | + - tests: fix run_tree and bddeb |
4308 | + - tests: Fix some warnings in tests that popped up with newer python. |
4309 | + - set_hostname: When present in metadata, set it before network bringup. |
4310 | + (LP: #1746455) |
4311 | + - tests: Centralize and re-use skipTest based on json schema presense. |
4312 | + - This commit fixes get_hostname on the AzureDataSource. |
4313 | + [Douglas Jordan] (LP: #1754495) |
4314 | + - shellify: raise TypeError on bad input. |
4315 | + - Make salt minion module work on FreeBSD. |
4316 | + [Dominic Schlegel] (LP: #1721503) |
4317 | + - Simplify some comparisions. [Rémy Léone] |
4318 | + - Change some list creation and population to literal. [Rémy Léone] |
4319 | + - GCE: fix reading of user-data that is not base64 encoded. (LP: #1752711) |
4320 | + - doc: fix chef install from apt packages example in RTD. |
4321 | + - Implement puppet 4 support [Romanos Skiadas] (LP: #1446804) |
4322 | + - subp: Fix subp usage with non-ascii characters when no system locale. |
4323 | + (LP: #1751051) |
4324 | + - salt: configure grains in grains file rather than in minion config. |
4325 | + [Daniel Wallace] |
4326 | + - release 18.1 (LP: #1751145) |
4327 | + - OVF: Fix VMware support for 64-bit platforms. [Sankar Tanguturi] |
4328 | + - ds-identify: Fix searching for iso9660 OVF cdroms. (LP: #1749980) |
4329 | + - SUSE: Fix groups used for ownership of cloud-init.log [Robert Schweikert] |
4330 | + - ds-identify: check /writable/system-data/ for nocloud seed. |
4331 | + (LP: #1747070) |
4332 | + - tests: run nosetests in cloudinit/ directory, fix py26 fallout. |
4333 | + - tools: run-centos: git clone rather than tar. |
4334 | + |
4335 | + -- Chad Smith <chad.smith@canonical.com> Tue, 27 Mar 2018 20:21:42 -0600 |
4336 | |
4337 | cloud-init (17.2-35-gf576b2a2-0ubuntu1~17.10.2) artful-proposed; urgency=medium |
4338 | |
4339 | diff --git a/doc/examples/cloud-config-chef.txt b/doc/examples/cloud-config-chef.txt |
4340 | index 58d5fdc..defc5a5 100644 |
4341 | --- a/doc/examples/cloud-config-chef.txt |
4342 | +++ b/doc/examples/cloud-config-chef.txt |
4343 | @@ -12,8 +12,8 @@ |
4344 | |
4345 | # Key from https://packages.chef.io/chef.asc |
4346 | apt: |
4347 | - source1: |
4348 | - source: "deb http://packages.chef.io/repos/apt/stable $RELEASE main" |
4349 | + sources: |
4350 | + source1: "deb http://packages.chef.io/repos/apt/stable $RELEASE main" |
4351 | key: | |
4352 | -----BEGIN PGP PUBLIC KEY BLOCK----- |
4353 | Version: GnuPG v1.4.12 (Darwin) |
4354 | diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py |
4355 | index 0ea3b6b..50eb05c 100644 |
4356 | --- a/doc/rtd/conf.py |
4357 | +++ b/doc/rtd/conf.py |
4358 | @@ -29,6 +29,7 @@ project = 'Cloud-Init' |
4359 | extensions = [ |
4360 | 'sphinx.ext.intersphinx', |
4361 | 'sphinx.ext.autodoc', |
4362 | + 'sphinx.ext.autosectionlabel', |
4363 | 'sphinx.ext.viewcode', |
4364 | ] |
4365 | |
4366 | diff --git a/doc/rtd/topics/capabilities.rst b/doc/rtd/topics/capabilities.rst |
4367 | index ae3a0c7..3e2c9e3 100644 |
4368 | --- a/doc/rtd/topics/capabilities.rst |
4369 | +++ b/doc/rtd/topics/capabilities.rst |
4370 | @@ -44,13 +44,14 @@ Currently defined feature names include: |
4371 | CLI Interface |
4372 | ============= |
4373 | |
4374 | - The command line documentation is accessible on any cloud-init |
4375 | -installed system: |
4376 | +The command line documentation is accessible on any cloud-init installed |
4377 | +system: |
4378 | |
4379 | -.. code-block:: bash |
4380 | +.. code-block:: shell-session |
4381 | |
4382 | % cloud-init --help |
4383 | usage: cloud-init [-h] [--version] [--file FILES] |
4384 | + |
4385 | [--debug] [--force] |
4386 | {init,modules,single,dhclient-hook,features,analyze,devel,collect-logs,clean,status} |
4387 | ... |
4388 | @@ -88,7 +89,7 @@ Print out each feature supported. If cloud-init does not have the |
4389 | features subcommand, it also does not support any features described in |
4390 | this document. |
4391 | |
4392 | -.. code-block:: bash |
4393 | +.. code-block:: shell-session |
4394 | |
4395 | % cloud-init features |
4396 | NETWORK_CONFIG_V1 |
4397 | @@ -100,10 +101,11 @@ cloud-init status |
4398 | ----------------- |
4399 | Report whether cloud-init is running, done, disabled or errored. Exits |
4400 | non-zero if an error is detected in cloud-init. |
4401 | + |
4402 | * **--long**: Detailed status information. |
4403 | * **--wait**: Block until cloud-init completes. |
4404 | |
4405 | -.. code-block:: bash |
4406 | +.. code-block:: shell-session |
4407 | |
4408 | % cloud-init status --long |
4409 | status: done |
4410 | @@ -214,7 +216,7 @@ of once-per-instance: |
4411 | * **--frequency**: Optionally override the declared module frequency |
4412 | with one of (always|once-per-instance|once) |
4413 | |
4414 | -.. code-block:: bash |
4415 | +.. code-block:: shell-session |
4416 | |
4417 | % cloud-init single --name set_hostname --frequency always |
4418 | |
4419 | diff --git a/doc/rtd/topics/debugging.rst b/doc/rtd/topics/debugging.rst |
4420 | index c2b47ed..cacc8a2 100644 |
4421 | --- a/doc/rtd/topics/debugging.rst |
4422 | +++ b/doc/rtd/topics/debugging.rst |
4423 | @@ -1,6 +1,6 @@ |
4424 | -********************** |
4425 | +******************************** |
4426 | Testing and debugging cloud-init |
4427 | -********************** |
4428 | +******************************** |
4429 | |
4430 | Overview |
4431 | ======== |
4432 | @@ -10,7 +10,7 @@ deployed instances. |
4433 | .. _boot_time_analysis: |
4434 | |
4435 | Boot Time Analysis - cloud-init analyze |
4436 | -====================================== |
4437 | +======================================= |
4438 | Occasionally instances don't appear as performant as we would like and |
4439 | cloud-init packages a simple facility to inspect what operations took |
4440 | cloud-init the longest during boot and setup. |
4441 | @@ -22,9 +22,9 @@ determine the long-pole in cloud-init configuration and setup. These |
4442 | subcommands default to reading /var/log/cloud-init.log. |
4443 | |
4444 | * ``analyze show`` Parse and organize cloud-init.log events by stage and |
4445 | -include each sub-stage granularity with time delta reports. |
4446 | + include each sub-stage granularity with time delta reports. |
4447 | |
4448 | -.. code-block:: bash |
4449 | +.. code-block:: shell-session |
4450 | |
4451 | $ cloud-init analyze show -i my-cloud-init.log |
4452 | -- Boot Record 01 -- |
4453 | @@ -41,9 +41,9 @@ include each sub-stage granularity with time delta reports. |
4454 | |
4455 | |
4456 | * ``analyze dump`` Parse cloud-init.log into event records and return a list of |
4457 | -dictionaries that can be consumed for other reporting needs. |
4458 | + dictionaries that can be consumed for other reporting needs. |
4459 | |
4460 | -.. code-block:: bash |
4461 | +.. code-block:: shell-session |
4462 | |
4463 | $ cloud-init analyze blame -i my-cloud-init.log |
4464 | [ |
4465 | @@ -56,10 +56,10 @@ dictionaries that can be consumed for other reporting needs. |
4466 | },... |
4467 | |
4468 | * ``analyze blame`` Parse cloud-init.log into event records and sort them based |
4469 | -on highest time cost for quick assessment of areas of cloud-init that may need |
4470 | -improvement. |
4471 | + on highest time cost for quick assessment of areas of cloud-init that may |
4472 | + need improvement. |
4473 | |
4474 | -.. code-block:: bash |
4475 | +.. code-block:: shell-session |
4476 | |
4477 | $ cloud-init analyze blame -i my-cloud-init.log |
4478 | -- Boot Record 11 -- |
4479 | @@ -73,31 +73,36 @@ Analyze quickstart - LXC |
4480 | --------------------------- |
4481 | To quickly obtain a cloud-init log try using lxc on any ubuntu system: |
4482 | |
4483 | -.. code-block:: bash |
4484 | +.. code-block:: shell-session |
4485 | + |
4486 | + $ lxc init ubuntu-daily:xenial x1 |
4487 | + $ lxc start x1 |
4488 | + $ # Take lxc's cloud-init.log and pipe it to the analyzer |
4489 | + $ lxc file pull x1/var/log/cloud-init.log - | cloud-init analyze dump -i - |
4490 | + $ lxc file pull x1/var/log/cloud-init.log - | \ |
4491 | + python3 -m cloudinit.analyze dump -i - |
4492 | |
4493 | - $ lxc init ubuntu-daily:xenial x1 |
4494 | - $ lxc start x1 |
4495 | - # Take lxc's cloud-init.log and pipe it to the analyzer |
4496 | - $ lxc file pull x1/var/log/cloud-init.log - | cloud-init analyze dump -i - |
4497 | - $ lxc file pull x1/var/log/cloud-init.log - | \ |
4498 | - python3 -m cloudinit.analyze dump -i - |
4499 | |
4500 | Analyze quickstart - KVM |
4501 | --------------------------- |
4502 | To quickly analyze a KVM a cloud-init log: |
4503 | |
4504 | 1. Download the current cloud image |
4505 | - wget https://cloud-images.ubuntu.com/daily/server/xenial/current/xenial-server-cloudimg-amd64.img |
4506 | + |
4507 | +.. code-block:: shell-session |
4508 | + |
4509 | + $ wget https://cloud-images.ubuntu.com/daily/server/xenial/current/xenial-server-cloudimg-amd64.img |
4510 | + |
4511 | 2. Create a snapshot image to preserve the original cloud-image |
4512 | |
4513 | -.. code-block:: bash |
4514 | +.. code-block:: shell-session |
4515 | |
4516 | $ qemu-img create -b xenial-server-cloudimg-amd64.img -f qcow2 \ |
4517 | test-cloudinit.qcow2 |
4518 | |
4519 | 3. Create a seed image with metadata using `cloud-localds` |
4520 | |
4521 | -.. code-block:: bash |
4522 | +.. code-block:: shell-session |
4523 | |
4524 | $ cat > user-data <<EOF |
4525 | #cloud-config |
4526 | @@ -108,18 +113,18 @@ To quickly analyze a KVM a cloud-init log: |
4527 | |
4528 | 4. Launch your modified VM |
4529 | |
4530 | -.. code-block:: bash |
4531 | +.. code-block:: shell-session |
4532 | |
4533 | $ kvm -m 512 -net nic -net user -redir tcp:2222::22 \ |
4534 | - -drive file=test-cloudinit.qcow2,if=virtio,format=qcow2 \ |
4535 | - -drive file=my-seed.img,if=virtio,format=raw |
4536 | + -drive file=test-cloudinit.qcow2,if=virtio,format=qcow2 \ |
4537 | + -drive file=my-seed.img,if=virtio,format=raw |
4538 | |
4539 | 5. Analyze the boot (blame, dump, show) |
4540 | |
4541 | -.. code-block:: bash |
4542 | +.. code-block:: shell-session |
4543 | |
4544 | $ ssh -p 2222 ubuntu@localhost 'cat /var/log/cloud-init.log' | \ |
4545 | - cloud-init analyze blame -i - |
4546 | + cloud-init analyze blame -i - |
4547 | |
4548 | |
4549 | Running single cloud config modules |
4550 | @@ -136,7 +141,7 @@ prevents a module from running again if it has already been run. To ensure that |
4551 | a module is run again, the desired frequency can be overridden on the |
4552 | commandline: |
4553 | |
4554 | -.. code-block:: bash |
4555 | +.. code-block:: shell-session |
4556 | |
4557 | $ sudo cloud-init single --name cc_ssh --frequency always |
4558 | ... |
4559 | diff --git a/doc/rtd/topics/modules.rst b/doc/rtd/topics/modules.rst |
4560 | index 7b14675..d9720f6 100644 |
4561 | --- a/doc/rtd/topics/modules.rst |
4562 | +++ b/doc/rtd/topics/modules.rst |
4563 | @@ -45,6 +45,7 @@ Modules |
4564 | .. automodule:: cloudinit.config.cc_seed_random |
4565 | .. automodule:: cloudinit.config.cc_set_hostname |
4566 | .. automodule:: cloudinit.config.cc_set_passwords |
4567 | +.. automodule:: cloudinit.config.cc_snap |
4568 | .. automodule:: cloudinit.config.cc_snappy |
4569 | .. automodule:: cloudinit.config.cc_snap_config |
4570 | .. automodule:: cloudinit.config.cc_spacewalk |
4571 | @@ -52,6 +53,7 @@ Modules |
4572 | .. automodule:: cloudinit.config.cc_ssh_authkey_fingerprints |
4573 | .. automodule:: cloudinit.config.cc_ssh_import_id |
4574 | .. automodule:: cloudinit.config.cc_timezone |
4575 | +.. automodule:: cloudinit.config.cc_ubuntu_advantage |
4576 | .. automodule:: cloudinit.config.cc_update_etc_hosts |
4577 | .. automodule:: cloudinit.config.cc_update_hostname |
4578 | .. automodule:: cloudinit.config.cc_users_groups |
4579 | diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst |
4580 | index 96c1cf5..1e99455 100644 |
4581 | --- a/doc/rtd/topics/network-config.rst |
4582 | +++ b/doc/rtd/topics/network-config.rst |
4583 | @@ -202,7 +202,7 @@ is helpful for examining expected output for a given input format. |
4584 | |
4585 | CLI Interface : |
4586 | |
4587 | -.. code-block:: bash |
4588 | +.. code-block:: shell-session |
4589 | |
4590 | % tools/net-convert.py --help |
4591 | usage: net-convert.py [-h] --network-data PATH --kind |
4592 | @@ -222,7 +222,7 @@ CLI Interface : |
4593 | |
4594 | Example output converting V2 to sysconfig: |
4595 | |
4596 | -.. code-block:: bash |
4597 | +.. code-block:: shell-session |
4598 | |
4599 | % tools/net-convert.py --network-data v2.yaml --kind yaml \ |
4600 | --output-kind sysconfig -d target |
4601 | diff --git a/doc/rtd/topics/tests.rst b/doc/rtd/topics/tests.rst |
4602 | index bf04bb3..cac4a6e 100644 |
4603 | --- a/doc/rtd/topics/tests.rst |
4604 | +++ b/doc/rtd/topics/tests.rst |
4605 | @@ -21,7 +21,7 @@ Overview |
4606 | In order to avoid the need for dependencies and ease the setup and |
4607 | configuration users can run the integration tests via tox: |
4608 | |
4609 | -.. code-block:: bash |
4610 | +.. code-block:: shell-session |
4611 | |
4612 | $ git clone https://git.launchpad.net/cloud-init |
4613 | $ cd cloud-init |
4614 | @@ -51,7 +51,7 @@ The first example will provide a complete end-to-end run of data |
4615 | collection and verification. There are additional examples below |
4616 | explaining how to run one or the other independently. |
4617 | |
4618 | -.. code-block:: bash |
4619 | +.. code-block:: shell-session |
4620 | |
4621 | $ git clone https://git.launchpad.net/cloud-init |
4622 | $ cd cloud-init |
4623 | @@ -93,7 +93,7 @@ If developing tests it may be necessary to see if cloud-config works as |
4624 | expected and the correct files are pulled down. In this case only a |
4625 | collect can be ran by running: |
4626 | |
4627 | -.. code-block:: bash |
4628 | +.. code-block:: shell-session |
4629 | |
4630 | $ tox -e citest -- collect -n xenial --data-dir /tmp/collection |
4631 | |
4632 | @@ -106,7 +106,7 @@ Verify |
4633 | When developing tests it is much easier to simply rerun the verify scripts |
4634 | without the more lengthy collect process. This can be done by running: |
4635 | |
4636 | -.. code-block:: bash |
4637 | +.. code-block:: shell-session |
4638 | |
4639 | $ tox -e citest -- verify --data-dir /tmp/collection |
4640 | |
4641 | @@ -133,7 +133,7 @@ cloud-init deb from or use the ``tree_run`` command using a copy of |
4642 | cloud-init located in a different directory, use the option ``--cloud-init |
4643 | /path/to/cloud-init``. |
4644 | |
4645 | -.. code-block:: bash |
4646 | +.. code-block:: shell-session |
4647 | |
4648 | $ tox -e citest -- tree_run --verbose \ |
4649 | --os-name xenial --os-name stretch \ |
4650 | @@ -331,7 +331,7 @@ Integration tests are located under the `tests/cloud_tests` directory. |
4651 | Test configurations are placed under `configs` and the test verification |
4652 | scripts under `testcases`: |
4653 | |
4654 | -.. code-block:: bash |
4655 | +.. code-block:: shell-session |
4656 | |
4657 | cloud-init$ tree -d tests/cloud_tests/ |
4658 | tests/cloud_tests/ |
4659 | @@ -362,7 +362,7 @@ The following would create a test case named ``example`` under the |
4660 | ``modules`` category with the given description, and cloud config data read |
4661 | in from ``/tmp/user_data``. |
4662 | |
4663 | -.. code-block:: bash |
4664 | +.. code-block:: shell-session |
4665 | |
4666 | $ tox -e citest -- create modules/example \ |
4667 | -d "a simple example test case" -c "$(< /tmp/user_data)" |
4668 | @@ -385,7 +385,7 @@ Development Checklist |
4669 | * Placed in the appropriate sub-folder in the test cases directory |
4670 | * Tested by running the test: |
4671 | |
4672 | - .. code-block:: bash |
4673 | + .. code-block:: shell-session |
4674 | |
4675 | $ tox -e citest -- run -verbose \ |
4676 | --os-name <release target> \ |
4677 | @@ -404,14 +404,14 @@ These configuration files are the standard that the AWS cli and other AWS |
4678 | tools utilize for interacting directly with AWS itself and are normally |
4679 | generated when running ``aws configure``: |
4680 | |
4681 | -.. code-block:: bash |
4682 | +.. code-block:: shell-session |
4683 | |
4684 | $ cat $HOME/.aws/credentials |
4685 | [default] |
4686 | aws_access_key_id = <KEY HERE> |
4687 | aws_secret_access_key = <KEY HERE> |
4688 | |
4689 | -.. code-block:: bash |
4690 | +.. code-block:: shell-session |
4691 | |
4692 | $ cat $HOME/.aws/config |
4693 | [default] |
4694 | diff --git a/packages/debian/control.in b/packages/debian/control.in |
4695 | index 265b261..46da6df 100644 |
4696 | --- a/packages/debian/control.in |
4697 | +++ b/packages/debian/control.in |
4698 | @@ -10,7 +10,8 @@ Standards-Version: 3.9.6 |
4699 | Package: cloud-init |
4700 | Architecture: all |
4701 | Depends: ${misc:Depends}, |
4702 | - ${${python}:Depends} |
4703 | + ${${python}:Depends}, |
4704 | + isc-dhcp-client |
4705 | Recommends: eatmydata, sudo, software-properties-common, gdisk |
4706 | XB-Python-Version: ${python:Versions} |
4707 | Description: Init scripts for cloud instances |
4708 | diff --git a/tests/cloud_tests/bddeb.py b/tests/cloud_tests/bddeb.py |
4709 | index a6d5069..b9cfcfa 100644 |
4710 | --- a/tests/cloud_tests/bddeb.py |
4711 | +++ b/tests/cloud_tests/bddeb.py |
4712 | @@ -16,7 +16,7 @@ pre_reqs = ['devscripts', 'equivs', 'git', 'tar'] |
4713 | |
4714 | def _out(cmd_res): |
4715 | """Get clean output from cmd result.""" |
4716 | - return cmd_res[0].strip() |
4717 | + return cmd_res[0].decode("utf-8").strip() |
4718 | |
4719 | |
4720 | def build_deb(args, instance): |
4721 | diff --git a/tests/cloud_tests/platforms/ec2/__init__.py b/tests/cloud_tests/platforms/ec2/__init__.py |
4722 | new file mode 100644 |
4723 | index 0000000..e69de29 |
4724 | --- /dev/null |
4725 | +++ b/tests/cloud_tests/platforms/ec2/__init__.py |
4726 | diff --git a/tests/cloud_tests/platforms/lxd/__init__.py b/tests/cloud_tests/platforms/lxd/__init__.py |
4727 | new file mode 100644 |
4728 | index 0000000..e69de29 |
4729 | --- /dev/null |
4730 | +++ b/tests/cloud_tests/platforms/lxd/__init__.py |
4731 | diff --git a/tests/cloud_tests/platforms/lxd/platform.py b/tests/cloud_tests/platforms/lxd/platform.py |
4732 | index 6a01692..f7251a0 100644 |
4733 | --- a/tests/cloud_tests/platforms/lxd/platform.py |
4734 | +++ b/tests/cloud_tests/platforms/lxd/platform.py |
4735 | @@ -101,8 +101,4 @@ class LXDPlatform(Platform): |
4736 | """ |
4737 | return self.client.images.get_by_alias(alias) |
4738 | |
4739 | - def destroy(self): |
4740 | - """Clean up platform data.""" |
4741 | - super(LXDPlatform, self).destroy() |
4742 | - |
4743 | # vi: ts=4 expandtab |
4744 | diff --git a/tests/cloud_tests/platforms/nocloudkvm/__init__.py b/tests/cloud_tests/platforms/nocloudkvm/__init__.py |
4745 | new file mode 100644 |
4746 | index 0000000..e69de29 |
4747 | --- /dev/null |
4748 | +++ b/tests/cloud_tests/platforms/nocloudkvm/__init__.py |
4749 | diff --git a/tests/cloud_tests/platforms/nocloudkvm/instance.py b/tests/cloud_tests/platforms/nocloudkvm/instance.py |
4750 | index 932dc0f..33ff3f2 100644 |
4751 | --- a/tests/cloud_tests/platforms/nocloudkvm/instance.py |
4752 | +++ b/tests/cloud_tests/platforms/nocloudkvm/instance.py |
4753 | @@ -109,7 +109,7 @@ class NoCloudKVMInstance(Instance): |
4754 | if self.pid: |
4755 | try: |
4756 | c_util.subp(['kill', '-9', self.pid]) |
4757 | - except util.ProcessExectuionError: |
4758 | + except c_util.ProcessExecutionError: |
4759 | pass |
4760 | |
4761 | if self.pid_file: |
4762 | diff --git a/tests/cloud_tests/platforms/nocloudkvm/platform.py b/tests/cloud_tests/platforms/nocloudkvm/platform.py |
4763 | index a7e6f5d..8593346 100644 |
4764 | --- a/tests/cloud_tests/platforms/nocloudkvm/platform.py |
4765 | +++ b/tests/cloud_tests/platforms/nocloudkvm/platform.py |
4766 | @@ -21,10 +21,6 @@ class NoCloudKVMPlatform(Platform): |
4767 | |
4768 | platform_name = 'nocloud-kvm' |
4769 | |
4770 | - def __init__(self, config): |
4771 | - """Set up platform.""" |
4772 | - super(NoCloudKVMPlatform, self).__init__(config) |
4773 | - |
4774 | def get_image(self, img_conf): |
4775 | """Get image using specified image configuration. |
4776 | |
4777 | diff --git a/tests/cloud_tests/platforms/platforms.py b/tests/cloud_tests/platforms/platforms.py |
4778 | index 1542b3b..abbfebb 100644 |
4779 | --- a/tests/cloud_tests/platforms/platforms.py |
4780 | +++ b/tests/cloud_tests/platforms/platforms.py |
4781 | @@ -2,12 +2,15 @@ |
4782 | |
4783 | """Base platform class.""" |
4784 | import os |
4785 | +import shutil |
4786 | |
4787 | from simplestreams import filters, mirrors |
4788 | from simplestreams import util as s_util |
4789 | |
4790 | from cloudinit import util as c_util |
4791 | |
4792 | +from tests.cloud_tests import util |
4793 | + |
4794 | |
4795 | class Platform(object): |
4796 | """Base class for platforms.""" |
4797 | @@ -17,7 +20,14 @@ class Platform(object): |
4798 | def __init__(self, config): |
4799 | """Set up platform.""" |
4800 | self.config = config |
4801 | - self._generate_ssh_keys(config['data_dir']) |
4802 | + self.tmpdir = util.mkdtemp() |
4803 | + if 'data_dir' in config: |
4804 | + self.data_dir = config['data_dir'] |
4805 | + else: |
4806 | + self.data_dir = os.path.join(self.tmpdir, "data_dir") |
4807 | + os.mkdir(self.data_dir) |
4808 | + |
4809 | + self._generate_ssh_keys(self.data_dir) |
4810 | |
4811 | def get_image(self, img_conf): |
4812 | """Get image using specified image configuration. |
4813 | @@ -29,7 +39,7 @@ class Platform(object): |
4814 | |
4815 | def destroy(self): |
4816 | """Clean up platform data.""" |
4817 | - pass |
4818 | + shutil.rmtree(self.tmpdir) |
4819 | |
4820 | def _generate_ssh_keys(self, data_dir): |
4821 | """Generate SSH keys to be used with image.""" |
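The Platform change above makes data_dir optional by falling back to a per-run temporary directory that destroy() later removes. A rough equivalent of that pattern using only the standard library, rather than the test suite's own helpers (illustrative only):

    import os
    import shutil
    import tempfile

    class ScratchPlatform(object):
        # Hypothetical minimal version of the fallback logic above.
        def __init__(self, config):
            self.tmpdir = tempfile.mkdtemp(prefix='cloud-test-')
            self.data_dir = config.get('data_dir')
            if self.data_dir is None:
                self.data_dir = os.path.join(self.tmpdir, 'data_dir')
                os.mkdir(self.data_dir)

        def destroy(self):
            # Everything created under tmpdir goes away with the platform.
            shutil.rmtree(self.tmpdir)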
4822 | diff --git a/tests/cloud_tests/releases.yaml b/tests/cloud_tests/releases.yaml |
4823 | index d8bc170..c7dcbe8 100644 |
4824 | --- a/tests/cloud_tests/releases.yaml |
4825 | +++ b/tests/cloud_tests/releases.yaml |
4826 | @@ -30,6 +30,9 @@ default_release_config: |
4827 | mirror_url: https://cloud-images.ubuntu.com/daily |
4828 | mirror_dir: '/srv/citest/images' |
4829 | keyring: /usr/share/keyrings/ubuntu-cloudimage-keyring.gpg |
4830 | + # The OS version formatted as Major.Minor is used to compare releases |
4831 | + version: null # Each release needs to define this, for example 16.04 |
4832 | + |
4833 | ec2: |
4834 | # Choose from: [ebs, instance-store] |
4835 | root-store: ebs |
4836 | diff --git a/tests/cloud_tests/testcases.yaml b/tests/cloud_tests/testcases.yaml |
4837 | index 8e0fb62..a3e2990 100644 |
4838 | --- a/tests/cloud_tests/testcases.yaml |
4839 | +++ b/tests/cloud_tests/testcases.yaml |
4840 | @@ -15,6 +15,9 @@ base_test_data: |
4841 | instance-id: | |
4842 | #!/bin/sh |
4843 | cat /run/cloud-init/.instance-id |
4844 | + instance-data.json: | |
4845 | + #!/bin/sh |
4846 | + cat /run/cloud-init/instance-data.json |
4847 | result.json: | |
4848 | #!/bin/sh |
4849 | cat /run/cloud-init/result.json |
4850 | diff --git a/tests/cloud_tests/testcases/__init__.py b/tests/cloud_tests/testcases/__init__.py |
4851 | index a29a092..bd548f5 100644 |
4852 | --- a/tests/cloud_tests/testcases/__init__.py |
4853 | +++ b/tests/cloud_tests/testcases/__init__.py |
4854 | @@ -7,6 +7,8 @@ import inspect |
4855 | import unittest |
4856 | from unittest.util import strclass |
4857 | |
4858 | +from cloudinit.util import read_conf |
4859 | + |
4860 | from tests.cloud_tests import config |
4861 | from tests.cloud_tests.testcases.base import CloudTestCase as base_test |
4862 | |
4863 | @@ -48,6 +50,7 @@ def get_suite(test_name, data, conf): |
4864 | def setUpClass(cls): |
4865 | cls.data = data |
4866 | cls.conf = conf |
4867 | + cls.release_conf = read_conf(config.RELEASES_CONF)['releases'] |
4868 | |
4869 | suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(tmp)) |
4870 | |
4871 | diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py |
4872 | index 20e9595..324c7c9 100644 |
4873 | --- a/tests/cloud_tests/testcases/base.py |
4874 | +++ b/tests/cloud_tests/testcases/base.py |
4875 | @@ -4,10 +4,14 @@ |
4876 | |
4877 | import crypt |
4878 | import json |
4879 | +import re |
4880 | import unittest |
4881 | |
4882 | + |
4883 | from cloudinit import util as c_util |
4884 | |
4885 | +SkipTest = unittest.SkipTest |
4886 | + |
4887 | |
4888 | class CloudTestCase(unittest.TestCase): |
4889 | """Base test class for verifiers.""" |
4890 | @@ -16,6 +20,43 @@ class CloudTestCase(unittest.TestCase): |
4891 | data = {} |
4892 | conf = None |
4893 | _cloud_config = None |
4894 | + release_conf = {} # The platform's os release configuration |
4895 | + |
4896 | + expected_warnings = () # Subclasses set to ignore expected WARN logs |
4897 | + |
4898 | + @property |
4899 | + def os_cfg(self): |
4900 | + return self.release_conf[self.os_name]['default'] |
4901 | + |
4902 | + def is_distro(self, distro_name): |
4903 | + return self.os_cfg['os'] == distro_name |
4904 | + |
4905 | + def os_version_cmp(self, cmp_version): |
4906 | + """Compare the version of the test to comparison_version. |
4907 | + |
4908 | + @param: cmp_version: Either a float or a string representing |
4909 | + a release os from releases.yaml (e.g. centos66) |
4910 | + |
4911 | + @return: -1 when version < cmp_version, 0 when version=cmp_version and |
4912 | + 1 when version > cmp_version. |
4913 | + """ |
4914 | + version = self.release_conf[self.os_name]['default']['version'] |
4915 | + if isinstance(cmp_version, str): |
4916 | + cmp_version = self.release_conf[cmp_version]['default']['version'] |
4917 | + if version < cmp_version: |
4918 | + return -1 |
4919 | + elif version == cmp_version: |
4920 | + return 0 |
4921 | + else: |
4922 | + return 1 |
4923 | + |
4924 | + @property |
4925 | + def os_name(self): |
4926 | + return self.data.get('os_name', 'UNKNOWN') |
4927 | + |
4928 | + @property |
4929 | + def platform(self): |
4930 | + return self.data.get('platform', 'UNKNOWN') |
4931 | |
4932 | @property |
4933 | def cloud_config(self): |
4934 | @@ -72,12 +113,134 @@ class CloudTestCase(unittest.TestCase): |
4935 | self.assertEqual(len(result['errors']), 0) |
4936 | |
4937 | def test_no_warnings_in_log(self): |
4938 | - """Warnings should not be found in the log.""" |
4939 | + """Unexpected warnings should not be found in the log.""" |
4940 | + warnings = [ |
4941 | + l for l in self.get_data_file('cloud-init.log').splitlines() |
4942 | + if 'WARN' in l] |
4943 | + joined_warnings = '\n'.join(warnings) |
4944 | + for expected_warning in self.expected_warnings: |
4945 | + self.assertIn( |
4946 | + expected_warning, joined_warnings, |
4947 | + msg="Did not find %s in cloud-init.log" % expected_warning) |
4948 | + # Prune expected from discovered warnings |
4949 | + warnings = [w for w in warnings if expected_warning not in w] |
4950 | + self.assertEqual( |
4951 | + [], warnings, msg="'WARN' found inside cloud-init.log") |
4952 | + |
4953 | + def test_instance_data_json_ec2(self): |
4954 | + """Validate instance-data.json content by ec2 platform. |
4955 | + |
4956 | + This content is sourced by snapd when determining snapstore endpoints. |
4957 | + We validate expected values per cloud type to ensure we don't break |
4958 | + snapd. |
4959 | + """ |
4960 | + if self.platform != 'ec2': |
4961 | + raise SkipTest( |
4962 | + 'Skipping ec2 instance-data.json on %s' % self.platform) |
4963 | + out = self.get_data_file('instance-data.json') |
4964 | + if not out: |
4965 | + if self.is_distro('ubuntu') and self.os_version_cmp('bionic') >= 0: |
4966 | + raise AssertionError( |
4967 | + 'No instance-data.json found on %s' % self.os_name) |
4968 | + raise SkipTest( |
4969 | + 'Skipping instance-data.json test.' |
4970 | + ' OS: %s not bionic or newer' % self.os_name) |
4971 | + instance_data = json.loads(out) |
4972 | + self.assertEqual( |
4973 | + ['ds/user-data'], instance_data['base64-encoded-keys']) |
4974 | + ds = instance_data.get('ds', {}) |
4975 | + macs = ds.get('network', {}).get('interfaces', {}).get('macs', {}) |
4976 | + if not macs: |
4977 | + raise AssertionError('No network data from EC2 meta-data') |
4978 | + # Check meta-data items we depend on |
4979 | + expected_net_keys = [ |
4980 | + 'public-ipv4s', 'ipv4-associations', 'local-hostname', |
4981 | + 'public-hostname'] |
4982 | + for mac, mac_data in macs.items(): |
4983 | + for key in expected_net_keys: |
4984 | + self.assertIn(key, mac_data) |
4985 | + self.assertIsNotNone( |
4986 | + ds.get('placement', {}).get('availability-zone'), |
4987 | + 'Could not determine EC2 Availability zone placement') |
4988 | + ds = instance_data.get('ds', {}) |
4989 | + v1_data = instance_data.get('v1', {}) |
4990 | + self.assertIsNotNone( |
4991 | + v1_data['availability-zone'], 'expected ec2 availability-zone') |
4992 | + self.assertEqual('aws', v1_data['cloud-name']) |
4993 | + self.assertIn('i-', v1_data['instance-id']) |
4994 | + self.assertIn('ip-', v1_data['local-hostname']) |
4995 | + self.assertIsNotNone(v1_data['region'], 'expected ec2 region') |
4996 | + |
4997 | + def test_instance_data_json_lxd(self): |
4998 | + """Validate instance-data.json content by lxd platform. |
4999 | + |
5000 | + This content is sourced by snapd when determining snapstore endpoints. |
The diff has been truncated for viewing.
PASSED: Continuous integration, rev:719729895180aec0e5e42f6ae62fa88fe8e2a523
https://jenkins.ubuntu.com/server/job/cloud-init-ci/941/
Executed test runs:
SUCCESS: Checkout
SUCCESS: Unit & Style Tests
SUCCESS: Ubuntu LTS: Build
SUCCESS: Ubuntu LTS: Integration
SUCCESS: MAAS Compatability Testing
IN_PROGRESS: Declarative: Post Actions
Click here to trigger a rebuild:
https://jenkins.ubuntu.com/server/job/cloud-init-ci/941/rebuild