Merge ~chad.smith/cloud-init:ubuntu/artful into cloud-init:ubuntu/artful
- Git
- lp:~chad.smith/cloud-init
- ubuntu/artful
- Merge into ubuntu/artful
Proposed by
Chad Smith
Status: | Merged | ||||||||||||
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Merged at revision: | 903f02e16735bfa0c745330c1be63363c0798fcf | ||||||||||||
Proposed branch: | ~chad.smith/cloud-init:ubuntu/artful | ||||||||||||
Merge into: | cloud-init:ubuntu/artful | ||||||||||||
Diff against target: |
7933 lines (+4629/-634) 103 files modified
.pylintrc (+11/-1) ChangeLog (+110/-0) cloudinit/apport.py (+3/-3) cloudinit/cloud.py (+3/-2) cloudinit/cmd/main.py (+29/-6) cloudinit/cmd/tests/test_clean.py (+2/-1) cloudinit/cmd/tests/test_main.py (+161/-0) cloudinit/cmd/tests/test_status.py (+2/-1) cloudinit/config/cc_keys_to_console.py (+1/-3) cloudinit/config/cc_puppet.py (+44/-15) cloudinit/config/cc_resizefs.py (+22/-0) cloudinit/config/cc_runcmd.py (+4/-2) cloudinit/config/cc_salt_minion.py (+65/-20) cloudinit/config/cc_set_hostname.py (+35/-6) cloudinit/config/cc_snap.py (+230/-0) cloudinit/config/cc_snap_config.py (+7/-0) cloudinit/config/cc_snappy.py (+8/-0) cloudinit/config/cc_ssh_authkey_fingerprints.py (+4/-5) cloudinit/config/cc_ubuntu_advantage.py (+173/-0) cloudinit/config/tests/test_snap.py (+490/-0) cloudinit/config/tests/test_ubuntu_advantage.py (+269/-0) cloudinit/distros/arch.py (+1/-4) cloudinit/distros/freebsd.py (+6/-0) cloudinit/distros/opensuse.py (+2/-3) cloudinit/ec2_utils.py (+2/-4) cloudinit/net/cmdline.py (+22/-2) cloudinit/net/netplan.py (+14/-21) cloudinit/net/network_state.py (+11/-1) cloudinit/settings.py (+2/-0) cloudinit/sources/DataSourceAliYun.py (+1/-1) cloudinit/sources/DataSourceAzure.py (+11/-22) cloudinit/sources/DataSourceCloudSigma.py (+1/-1) cloudinit/sources/DataSourceConfigDrive.py (+10/-0) cloudinit/sources/DataSourceGCE.py (+8/-9) cloudinit/sources/DataSourceHetzner.py (+106/-0) cloudinit/sources/DataSourceIBMCloud.py (+325/-0) cloudinit/sources/DataSourceOVF.py (+16/-5) cloudinit/sources/DataSourceOpenNebula.py (+75/-34) cloudinit/sources/DataSourceScaleway.py (+4/-4) cloudinit/sources/__init__.py (+17/-4) cloudinit/sources/helpers/hetzner.py (+26/-0) cloudinit/sources/tests/test_init.py (+97/-1) cloudinit/stages.py (+1/-2) cloudinit/subp.py (+57/-0) cloudinit/tests/helpers.py (+35/-19) cloudinit/tests/test_subp.py (+61/-0) cloudinit/tests/test_util.py (+169/-0) cloudinit/url_helper.py (+14/-10) cloudinit/util.py (+115/-24) cloudinit/version.py (+1/-1) 
config/cloud.cfg.tmpl (+9/-3) debian/changelog (+69/-3) doc/examples/cloud-config-chef.txt (+2/-2) doc/rtd/conf.py (+1/-0) doc/rtd/topics/capabilities.rst (+8/-6) doc/rtd/topics/debugging.rst (+31/-26) doc/rtd/topics/modules.rst (+2/-0) doc/rtd/topics/network-config.rst (+2/-2) doc/rtd/topics/tests.rst (+10/-10) packages/debian/control.in (+2/-1) tests/cloud_tests/bddeb.py (+1/-1) tests/cloud_tests/platforms/ec2/__init__.py (+0/-0) tests/cloud_tests/platforms/lxd/__init__.py (+0/-0) tests/cloud_tests/platforms/lxd/platform.py (+0/-4) tests/cloud_tests/platforms/nocloudkvm/__init__.py (+0/-0) tests/cloud_tests/platforms/nocloudkvm/instance.py (+1/-1) tests/cloud_tests/platforms/nocloudkvm/platform.py (+0/-4) tests/cloud_tests/platforms/platforms.py (+12/-2) tests/cloud_tests/releases.yaml (+3/-0) tests/cloud_tests/testcases.yaml (+3/-0) tests/cloud_tests/testcases/__init__.py (+3/-0) tests/cloud_tests/testcases/base.py (+168/-5) tests/cloud_tests/testcases/main/command_output_simple.py (+2/-15) tests/cloud_tests/testcases/modules/salt_minion.py (+10/-0) tests/cloud_tests/testcases/modules/salt_minion.yaml (+9/-1) tests/cloud_tests/testcases/modules/snap.py (+16/-0) tests/cloud_tests/testcases/modules/snap.yaml (+18/-0) tests/cloud_tests/testcases/modules/snappy.py (+2/-0) tests/cloud_tests/util.py (+5/-1) tests/cloud_tests/verify.py (+7/-4) tests/data/mount_parse_ext.txt (+19/-0) tests/data/mount_parse_zfs.txt (+21/-0) tests/data/zpool_status_simple.txt (+10/-0) tests/unittests/test_datasource/test_azure.py (+22/-15) tests/unittests/test_datasource/test_common.py (+4/-0) tests/unittests/test_datasource/test_gce.py (+19/-1) tests/unittests/test_datasource/test_hetzner.py (+117/-0) tests/unittests/test_datasource/test_ibmcloud.py (+262/-0) tests/unittests/test_datasource/test_opennebula.py (+177/-89) tests/unittests/test_ds_identify.py (+161/-6) tests/unittests/test_handler/test_handler_apt_source_v1.py (+2/-1) tests/unittests/test_handler/test_handler_bootcmd.py 
(+7/-12) tests/unittests/test_handler/test_handler_ntp.py (+6/-12) tests/unittests/test_handler/test_handler_resizefs.py (+60/-12) tests/unittests/test_handler/test_handler_runcmd.py (+4/-10) tests/unittests/test_handler/test_handler_set_hostname.py (+53/-4) tests/unittests/test_handler/test_schema.py (+16/-19) tests/unittests/test_net.py (+58/-81) tests/unittests/test_util.py (+135/-0) tools/ds-identify (+99/-28) tools/pipremove (+14/-0) tools/run-centos (+78/-13) tox.ini (+6/-3) |
||||||||||||
Related bugs: |
|
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Server Team CI bot | continuous-integration | Approve | |
Scott Moser | Pending | ||
Review via email: mp+342249@code.launchpad.net |
Commit message
Sync tip of cloud-init for SRU into Artful.
Also git cherry-pick the isc-dhcp-client package dependency per commit
5b630c3419c5e28
Description of the change
To post a comment you must log in.
Revision history for this message
Server Team CI bot (server-team-bot) wrote : | # |
review:
Approve
(continuous-integration)
Revision history for this message
Scott Moser (smoser) wrote : | # |
Revision history for this message
Server Team CI bot (server-team-bot) wrote : | # |
PASSED: Continuous integration, rev:903f02e1673
https:/
Executed test runs:
SUCCESS: Checkout
SUCCESS: Unit & Style Tests
SUCCESS: Ubuntu LTS: Build
SUCCESS: Ubuntu LTS: Integration
SUCCESS: MAAS Compatibility Testing
IN_PROGRESS: Declarative: Post Actions
Click here to trigger a rebuild:
https:/
review:
Approve
(continuous-integration)
There was an error fetching revisions from git servers. Please try again in a few minutes. If the problem persists, contact Launchpad support.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | diff --git a/.pylintrc b/.pylintrc | |||
2 | index 05a086d..0bdfa59 100644 | |||
3 | --- a/.pylintrc | |||
4 | +++ b/.pylintrc | |||
5 | @@ -46,7 +46,17 @@ reports=no | |||
6 | 46 | # (useful for modules/projects where namespaces are manipulated during runtime | 46 | # (useful for modules/projects where namespaces are manipulated during runtime |
7 | 47 | # and thus existing member attributes cannot be deduced by static analysis. It | 47 | # and thus existing member attributes cannot be deduced by static analysis. It |
8 | 48 | # supports qualified module names, as well as Unix pattern matching. | 48 | # supports qualified module names, as well as Unix pattern matching. |
10 | 49 | ignored-modules=six.moves,pkg_resources,httplib,http.client,paramiko,simplestreams | 49 | ignored-modules= |
11 | 50 | http.client, | ||
12 | 51 | httplib, | ||
13 | 52 | pkg_resources, | ||
14 | 53 | six.moves, | ||
15 | 54 | # cloud_tests requirements. | ||
16 | 55 | boto3, | ||
17 | 56 | botocore, | ||
18 | 57 | paramiko, | ||
19 | 58 | pylxd, | ||
20 | 59 | simplestreams | ||
21 | 50 | 60 | ||
22 | 51 | # List of class names for which member attributes should not be checked (useful | 61 | # List of class names for which member attributes should not be checked (useful |
23 | 52 | # for classes with dynamically set attributes). This supports the use of | 62 | # for classes with dynamically set attributes). This supports the use of |
24 | diff --git a/ChangeLog b/ChangeLog | |||
25 | index 31c2dcb..daa7ccf 100644 | |||
26 | --- a/ChangeLog | |||
27 | +++ b/ChangeLog | |||
28 | @@ -1,3 +1,113 @@ | |||
29 | 1 | 18.2: | ||
30 | 2 | - Hetzner: Exit early if dmi system-manufacturer is not Hetzner. | ||
31 | 3 | - Add missing dependency on isc-dhcp-client to trunk ubuntu packaging. | ||
32 | 4 | (LP: #1759307) | ||
33 | 5 | - FreeBSD: resizefs module now able to handle zfs/zpool. | ||
34 | 6 | [Dominic Schlegel] (LP: #1721243) | ||
35 | 7 | - cc_puppet: Revert regression of puppet creating ssl and ssl_cert dirs | ||
36 | 8 | - Enable IBMCloud datasource in settings.py. | ||
37 | 9 | - IBMCloud: Initial IBM Cloud datasource. | ||
38 | 10 | - tests: remove jsonschema from xenial tox environment. | ||
39 | 11 | - tests: Fix newly added schema unit tests to skip if no jsonschema. | ||
40 | 12 | - ec2: Adjust ec2 datasource after exception_cb change. | ||
41 | 13 | - Reduce AzurePreprovisioning HTTP timeouts. | ||
42 | 14 | [Douglas Jordan] (LP: #1752977) | ||
43 | 15 | - Revert the logic of exception_cb in read_url. | ||
44 | 16 | [Kurt Garloff] (LP: #1702160, #1298921) | ||
45 | 17 | - ubuntu-advantage: Add new config module to support | ||
46 | 18 | ubuntu-advantage-tools | ||
47 | 19 | - Handle global dns entries in netplan (LP: #1750884) | ||
48 | 20 | - Identify OpenTelekomCloud Xen as OpenStack DS. | ||
49 | 21 | [Kurt Garloff] (LP: #1756471) | ||
50 | 22 | - datasources: fix DataSource subclass get_hostname method signature | ||
51 | 23 | (LP: #1757176) | ||
52 | 24 | - OpenNebula: Update network to return v2 config rather than ENI. | ||
53 | 25 | [Akihiko Ota] | ||
54 | 26 | - Add Hetzner Cloud DataSource | ||
55 | 27 | - net: recognize iscsi root cases without ip= on kernel command line. | ||
56 | 28 | (LP: #1752391) | ||
57 | 29 | - tests: fix flakes warning for unused variable | ||
58 | 30 | - tests: patch leaked stderr messages from snap unit tests | ||
59 | 31 | - cc_snap: Add new module to install and configure snapd and snap | ||
60 | 32 | packages. | ||
61 | 33 | - tests: Make pylint happy and fix python2.6 uses of assertRaisesRegex. | ||
62 | 34 | - netplan: render bridge port-priority values (LP: #1735821) | ||
63 | 35 | - util: Fix subp regression. Allow specifying subp command as a string. | ||
64 | 36 | (LP: #1755965) | ||
65 | 37 | - doc: fix all warnings issued by 'tox -e doc' | ||
66 | 38 | - FreeBSD: Set hostname to FQDN. [Dominic Schlegel] (LP: #1753499) | ||
67 | 39 | - tests: fix run_tree and bddeb | ||
68 | 40 | - tests: Fix some warnings in tests that popped up with newer python. | ||
69 | 41 | - set_hostname: When present in metadata, set it before network bringup. | ||
70 | 42 | (LP: #1746455) | ||
71 | 43 | - tests: Centralize and re-use skipTest based on json schema presense. | ||
72 | 44 | - This commit fixes get_hostname on the AzureDataSource. | ||
73 | 45 | [Douglas Jordan] (LP: #1754495) | ||
74 | 46 | - shellify: raise TypeError on bad input. | ||
75 | 47 | - Make salt minion module work on FreeBSD. | ||
76 | 48 | [Dominic Schlegel] (LP: #1721503) | ||
77 | 49 | - Simplify some comparisions. [Rémy Léone] | ||
78 | 50 | - Change some list creation and population to literal. [Rémy Léone] | ||
79 | 51 | - GCE: fix reading of user-data that is not base64 encoded. (LP: #1752711) | ||
80 | 52 | - doc: fix chef install from apt packages example in RTD. | ||
81 | 53 | - Implement puppet 4 support [Romanos Skiadas] (LP: #1446804) | ||
82 | 54 | - subp: Fix subp usage with non-ascii characters when no system locale. | ||
83 | 55 | (LP: #1751051) | ||
84 | 56 | - salt: configure grains in grains file rather than in minion config. | ||
85 | 57 | [Daniel Wallace] | ||
86 | 58 | |||
87 | 59 | 18.1: | ||
88 | 60 | - OVF: Fix VMware support for 64-bit platforms. [Sankar Tanguturi] | ||
89 | 61 | - ds-identify: Fix searching for iso9660 OVF cdroms. (LP: #1749980) | ||
90 | 62 | - SUSE: Fix groups used for ownership of cloud-init.log [Robert Schweikert] | ||
91 | 63 | - ds-identify: check /writable/system-data/ for nocloud seed. | ||
92 | 64 | (LP: #1747070) | ||
93 | 65 | - tests: run nosetests in cloudinit/ directory, fix py26 fallout. | ||
94 | 66 | - tools: run-centos: git clone rather than tar. | ||
95 | 67 | - tests: add support for logs with lxd from snap and future lxd 3. | ||
96 | 68 | (LP: #1745663) | ||
97 | 69 | - EC2: Fix get_instance_id called against cached datasource pickle. | ||
98 | 70 | (LP: #1748354) | ||
99 | 71 | - cli: fix cloud-init status to report running when before result.json | ||
100 | 72 | (LP: #1747965) | ||
101 | 73 | - net: accept network-config in netplan format for renaming interfaces | ||
102 | 74 | (LP: #1709715) | ||
103 | 75 | - Fix ssh keys validation in ssh_util [Tatiana Kholkina] | ||
104 | 76 | - docs: Update RTD content for cloud-init subcommands. | ||
105 | 77 | - OVF: Extend well-known labels to include OVFENV. (LP: #1698669) | ||
106 | 78 | - Fix potential cases of uninitialized variables. (LP: #1744796) | ||
107 | 79 | - tests: Collect script output as binary, collect systemd journal, fix lxd. | ||
108 | 80 | - HACKING.rst: mention setting user name and email via git config. | ||
109 | 81 | - Azure VM Preprovisioning support. [Douglas Jordan] (LP: #1734991) | ||
110 | 82 | - tools/read-version: Fix read-version when in a git worktree. | ||
111 | 83 | - docs: Fix typos in docs and one debug message. [Florian Grignon] | ||
112 | 84 | - btrfs: support resizing if root is mounted ro. | ||
113 | 85 | [Robert Schweikert] (LP: #1734787) | ||
114 | 86 | - OpenNebula: Improve network configuration support. | ||
115 | 87 | [Akihiko Ota] (LP: #1719157, #1716397, #1736750) | ||
116 | 88 | - tests: Fix EC2 Platform to return console output as bytes. | ||
117 | 89 | - tests: Fix attempted use of /run in a test case. | ||
118 | 90 | - GCE: Improvements and changes to ssh key behavior for default user. | ||
119 | 91 | [Max Illfelder] (LP: #1670456, #1707033, #1707037, #1707039) | ||
120 | 92 | - subp: make ProcessExecutionError have expected types in stderr, stdout. | ||
121 | 93 | - tests: when querying ntp server, do not do dns resolution. | ||
122 | 94 | - Recognize uppercase vfat disk labels [James Penick] (LP: #1598783) | ||
123 | 95 | - tests: remove zesty as supported OS to test [Joshua Powers] | ||
124 | 96 | - Do not log warning on config files that represent None. (LP: #1742479) | ||
125 | 97 | - tests: Use git hash pip dependency format for pylxd. | ||
126 | 98 | - tests: add integration requirements text file [Joshua Powers] | ||
127 | 99 | - MAAS: add check_instance_id based off oauth tokens. (LP: #1712680) | ||
128 | 100 | - tests: update apt sources list test [Joshua Powers] | ||
129 | 101 | - tests: clean up image properties [Joshua Powers] | ||
130 | 102 | - tests: rename test ssh keys to avoid appearance of leaking private keys. | ||
131 | 103 | [Joshua Powers] | ||
132 | 104 | - tests: Enable AWS EC2 Integration Testing [Joshua Powers] | ||
133 | 105 | - cli: cloud-init clean handles symlinks (LP: #1741093) | ||
134 | 106 | - SUSE: Add a basic test of network config rendering. [Robert Schweikert] | ||
135 | 107 | - Azure: Only bounce network when necessary. (LP: #1722668) | ||
136 | 108 | - lint: Fix lints seen by pylint version 1.8.1. | ||
137 | 109 | - cli: Fix error in cloud-init modules --mode=init. (LP: #1736600) | ||
138 | 110 | |||
139 | 1 | 17.2: | 111 | 17.2: |
140 | 2 | - ds-identify: failure in NoCloud due to unset variable usage. | 112 | - ds-identify: failure in NoCloud due to unset variable usage. |
141 | 3 | (LP: #1737704) | 113 | (LP: #1737704) |
142 | diff --git a/cloudinit/apport.py b/cloudinit/apport.py | |||
143 | index 221f341..618b016 100644 | |||
144 | --- a/cloudinit/apport.py | |||
145 | +++ b/cloudinit/apport.py | |||
146 | @@ -14,9 +14,9 @@ except ImportError: | |||
147 | 14 | 14 | ||
148 | 15 | KNOWN_CLOUD_NAMES = [ | 15 | KNOWN_CLOUD_NAMES = [ |
149 | 16 | 'Amazon - Ec2', 'AliYun', 'AltCloud', 'Azure', 'Bigstep', 'CloudSigma', | 16 | 'Amazon - Ec2', 'AliYun', 'AltCloud', 'Azure', 'Bigstep', 'CloudSigma', |
153 | 17 | 'CloudStack', 'DigitalOcean', 'GCE - Google Compute Engine', 'MAAS', | 17 | 'CloudStack', 'DigitalOcean', 'GCE - Google Compute Engine', |
154 | 18 | 'NoCloud', 'OpenNebula', 'OpenStack', 'OVF', 'Scaleway', 'SmartOS', | 18 | 'Hetzner Cloud', 'MAAS', 'NoCloud', 'OpenNebula', 'OpenStack', 'OVF', |
155 | 19 | 'VMware', 'Other'] | 19 | 'Scaleway', 'SmartOS', 'VMware', 'Other'] |
156 | 20 | 20 | ||
157 | 21 | # Potentially clear text collected logs | 21 | # Potentially clear text collected logs |
158 | 22 | CLOUDINIT_LOG = '/var/log/cloud-init.log' | 22 | CLOUDINIT_LOG = '/var/log/cloud-init.log' |
159 | diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py | |||
160 | index ba61678..6d12c43 100644 | |||
161 | --- a/cloudinit/cloud.py | |||
162 | +++ b/cloudinit/cloud.py | |||
163 | @@ -78,8 +78,9 @@ class Cloud(object): | |||
164 | 78 | def get_locale(self): | 78 | def get_locale(self): |
165 | 79 | return self.datasource.get_locale() | 79 | return self.datasource.get_locale() |
166 | 80 | 80 | ||
169 | 81 | def get_hostname(self, fqdn=False): | 81 | def get_hostname(self, fqdn=False, metadata_only=False): |
170 | 82 | return self.datasource.get_hostname(fqdn=fqdn) | 82 | return self.datasource.get_hostname( |
171 | 83 | fqdn=fqdn, metadata_only=metadata_only) | ||
172 | 83 | 84 | ||
173 | 84 | def device_name_to_device(self, name): | 85 | def device_name_to_device(self, name): |
174 | 85 | return self.datasource.device_name_to_device(name) | 86 | return self.datasource.device_name_to_device(name) |
175 | diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py | |||
176 | index d2f1b77..3f2dbb9 100644 | |||
177 | --- a/cloudinit/cmd/main.py | |||
178 | +++ b/cloudinit/cmd/main.py | |||
179 | @@ -40,6 +40,7 @@ from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE, | |||
180 | 40 | 40 | ||
181 | 41 | from cloudinit import atomic_helper | 41 | from cloudinit import atomic_helper |
182 | 42 | 42 | ||
183 | 43 | from cloudinit.config import cc_set_hostname | ||
184 | 43 | from cloudinit.dhclient_hook import LogDhclient | 44 | from cloudinit.dhclient_hook import LogDhclient |
185 | 44 | 45 | ||
186 | 45 | 46 | ||
187 | @@ -215,12 +216,10 @@ def main_init(name, args): | |||
188 | 215 | if args.local: | 216 | if args.local: |
189 | 216 | deps = [sources.DEP_FILESYSTEM] | 217 | deps = [sources.DEP_FILESYSTEM] |
190 | 217 | 218 | ||
197 | 218 | early_logs = [] | 219 | early_logs = [attempt_cmdline_url( |
198 | 219 | early_logs.append( | 220 | path=os.path.join("%s.d" % CLOUD_CONFIG, |
199 | 220 | attempt_cmdline_url( | 221 | "91_kernel_cmdline_url.cfg"), |
200 | 221 | path=os.path.join("%s.d" % CLOUD_CONFIG, | 222 | network=not args.local)] |
195 | 222 | "91_kernel_cmdline_url.cfg"), | ||
196 | 223 | network=not args.local)) | ||
201 | 224 | 223 | ||
202 | 225 | # Cloud-init 'init' stage is broken up into the following sub-stages | 224 | # Cloud-init 'init' stage is broken up into the following sub-stages |
203 | 226 | # 1. Ensure that the init object fetches its config without errors | 225 | # 1. Ensure that the init object fetches its config without errors |
204 | @@ -354,6 +353,11 @@ def main_init(name, args): | |||
205 | 354 | LOG.debug("[%s] %s will now be targeting instance id: %s. new=%s", | 353 | LOG.debug("[%s] %s will now be targeting instance id: %s. new=%s", |
206 | 355 | mode, name, iid, init.is_new_instance()) | 354 | mode, name, iid, init.is_new_instance()) |
207 | 356 | 355 | ||
208 | 356 | if mode == sources.DSMODE_LOCAL: | ||
209 | 357 | # Before network comes up, set any configured hostname to allow | ||
210 | 358 | # dhcp clients to advertize this hostname to any DDNS services | ||
211 | 359 | # LP: #1746455. | ||
212 | 360 | _maybe_set_hostname(init, stage='local', retry_stage='network') | ||
213 | 357 | init.apply_network_config(bring_up=bool(mode != sources.DSMODE_LOCAL)) | 361 | init.apply_network_config(bring_up=bool(mode != sources.DSMODE_LOCAL)) |
214 | 358 | 362 | ||
215 | 359 | if mode == sources.DSMODE_LOCAL: | 363 | if mode == sources.DSMODE_LOCAL: |
216 | @@ -370,6 +374,7 @@ def main_init(name, args): | |||
217 | 370 | init.setup_datasource() | 374 | init.setup_datasource() |
218 | 371 | # update fully realizes user-data (pulling in #include if necessary) | 375 | # update fully realizes user-data (pulling in #include if necessary) |
219 | 372 | init.update() | 376 | init.update() |
220 | 377 | _maybe_set_hostname(init, stage='init-net', retry_stage='modules:config') | ||
221 | 373 | # Stage 7 | 378 | # Stage 7 |
222 | 374 | try: | 379 | try: |
223 | 375 | # Attempt to consume the data per instance. | 380 | # Attempt to consume the data per instance. |
224 | @@ -683,6 +688,24 @@ def status_wrapper(name, args, data_d=None, link_d=None): | |||
225 | 683 | return len(v1[mode]['errors']) | 688 | return len(v1[mode]['errors']) |
226 | 684 | 689 | ||
227 | 685 | 690 | ||
228 | 691 | def _maybe_set_hostname(init, stage, retry_stage): | ||
229 | 692 | """Call set-hostname if metadata, vendordata or userdata provides it. | ||
230 | 693 | |||
231 | 694 | @param stage: String representing current stage in which we are running. | ||
232 | 695 | @param retry_stage: String represented logs upon error setting hostname. | ||
233 | 696 | """ | ||
234 | 697 | cloud = init.cloudify() | ||
235 | 698 | (hostname, _fqdn) = util.get_hostname_fqdn( | ||
236 | 699 | init.cfg, cloud, metadata_only=True) | ||
237 | 700 | if hostname: # meta-data or user-data hostname content | ||
238 | 701 | try: | ||
239 | 702 | cc_set_hostname.handle('set-hostname', init.cfg, cloud, LOG, None) | ||
240 | 703 | except cc_set_hostname.SetHostnameError as e: | ||
241 | 704 | LOG.debug( | ||
242 | 705 | 'Failed setting hostname in %s stage. Will' | ||
243 | 706 | ' retry in %s stage. Error: %s.', stage, retry_stage, str(e)) | ||
244 | 707 | |||
245 | 708 | |||
246 | 686 | def main_features(name, args): | 709 | def main_features(name, args): |
247 | 687 | sys.stdout.write('\n'.join(sorted(version.FEATURES)) + '\n') | 710 | sys.stdout.write('\n'.join(sorted(version.FEATURES)) + '\n') |
248 | 688 | 711 | ||
249 | diff --git a/cloudinit/cmd/tests/test_clean.py b/cloudinit/cmd/tests/test_clean.py | |||
250 | index 6713af4..5a3ec3b 100644 | |||
251 | --- a/cloudinit/cmd/tests/test_clean.py | |||
252 | +++ b/cloudinit/cmd/tests/test_clean.py | |||
253 | @@ -165,10 +165,11 @@ class TestClean(CiTestCase): | |||
254 | 165 | wrap_and_call( | 165 | wrap_and_call( |
255 | 166 | 'cloudinit.cmd.clean', | 166 | 'cloudinit.cmd.clean', |
256 | 167 | {'Init': {'side_effect': self.init_class}, | 167 | {'Init': {'side_effect': self.init_class}, |
257 | 168 | 'sys.exit': {'side_effect': self.sys_exit}, | ||
258 | 168 | 'sys.argv': {'new': ['clean', '--logs']}}, | 169 | 'sys.argv': {'new': ['clean', '--logs']}}, |
259 | 169 | clean.main) | 170 | clean.main) |
260 | 170 | 171 | ||
262 | 171 | self.assertRaisesCodeEqual(0, context_manager.exception.code) | 172 | self.assertEqual(0, context_manager.exception.code) |
263 | 172 | self.assertFalse( | 173 | self.assertFalse( |
264 | 173 | os.path.exists(self.log1), 'Unexpected log {0}'.format(self.log1)) | 174 | os.path.exists(self.log1), 'Unexpected log {0}'.format(self.log1)) |
265 | 174 | 175 | ||
266 | diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py | |||
267 | 175 | new file mode 100644 | 176 | new file mode 100644 |
268 | index 0000000..dbe421c | |||
269 | --- /dev/null | |||
270 | +++ b/cloudinit/cmd/tests/test_main.py | |||
271 | @@ -0,0 +1,161 @@ | |||
272 | 1 | # This file is part of cloud-init. See LICENSE file for license information. | ||
273 | 2 | |||
274 | 3 | from collections import namedtuple | ||
275 | 4 | import copy | ||
276 | 5 | import os | ||
277 | 6 | from six import StringIO | ||
278 | 7 | |||
279 | 8 | from cloudinit.cmd import main | ||
280 | 9 | from cloudinit.util import ( | ||
281 | 10 | ensure_dir, load_file, write_file, yaml_dumps) | ||
282 | 11 | from cloudinit.tests.helpers import ( | ||
283 | 12 | FilesystemMockingTestCase, wrap_and_call) | ||
284 | 13 | |||
285 | 14 | mypaths = namedtuple('MyPaths', 'run_dir') | ||
286 | 15 | myargs = namedtuple('MyArgs', 'debug files force local reporter subcommand') | ||
287 | 16 | |||
288 | 17 | |||
289 | 18 | class TestMain(FilesystemMockingTestCase): | ||
290 | 19 | |||
291 | 20 | with_logs = True | ||
292 | 21 | |||
293 | 22 | def setUp(self): | ||
294 | 23 | super(TestMain, self).setUp() | ||
295 | 24 | self.new_root = self.tmp_dir() | ||
296 | 25 | self.cloud_dir = self.tmp_path('var/lib/cloud/', dir=self.new_root) | ||
297 | 26 | os.makedirs(self.cloud_dir) | ||
298 | 27 | self.replicateTestRoot('simple_ubuntu', self.new_root) | ||
299 | 28 | self.cfg = { | ||
300 | 29 | 'datasource_list': ['None'], | ||
301 | 30 | 'runcmd': ['ls /etc'], # test ALL_DISTROS | ||
302 | 31 | 'system_info': {'paths': {'cloud_dir': self.cloud_dir, | ||
303 | 32 | 'run_dir': self.new_root}}, | ||
304 | 33 | 'write_files': [ | ||
305 | 34 | { | ||
306 | 35 | 'path': '/etc/blah.ini', | ||
307 | 36 | 'content': 'blah', | ||
308 | 37 | 'permissions': 0o755, | ||
309 | 38 | }, | ||
310 | 39 | ], | ||
311 | 40 | 'cloud_init_modules': ['write-files', 'runcmd'], | ||
312 | 41 | } | ||
313 | 42 | cloud_cfg = yaml_dumps(self.cfg) | ||
314 | 43 | ensure_dir(os.path.join(self.new_root, 'etc', 'cloud')) | ||
315 | 44 | self.cloud_cfg_file = os.path.join( | ||
316 | 45 | self.new_root, 'etc', 'cloud', 'cloud.cfg') | ||
317 | 46 | write_file(self.cloud_cfg_file, cloud_cfg) | ||
318 | 47 | self.patchOS(self.new_root) | ||
319 | 48 | self.patchUtils(self.new_root) | ||
320 | 49 | self.stderr = StringIO() | ||
321 | 50 | self.patchStdoutAndStderr(stderr=self.stderr) | ||
322 | 51 | |||
323 | 52 | def test_main_init_run_net_stops_on_file_no_net(self): | ||
324 | 53 | """When no-net file is present, main_init does not process modules.""" | ||
325 | 54 | stop_file = os.path.join(self.cloud_dir, 'data', 'no-net') # stop file | ||
326 | 55 | write_file(stop_file, '') | ||
327 | 56 | cmdargs = myargs( | ||
328 | 57 | debug=False, files=None, force=False, local=False, reporter=None, | ||
329 | 58 | subcommand='init') | ||
330 | 59 | (item1, item2) = wrap_and_call( | ||
331 | 60 | 'cloudinit.cmd.main', | ||
332 | 61 | {'util.close_stdin': True, | ||
333 | 62 | 'netinfo.debug_info': 'my net debug info', | ||
334 | 63 | 'util.fixup_output': ('outfmt', 'errfmt')}, | ||
335 | 64 | main.main_init, 'init', cmdargs) | ||
336 | 65 | # We should not run write_files module | ||
337 | 66 | self.assertFalse( | ||
338 | 67 | os.path.exists(os.path.join(self.new_root, 'etc/blah.ini')), | ||
339 | 68 | 'Unexpected run of write_files module produced blah.ini') | ||
340 | 69 | self.assertEqual([], item2) | ||
341 | 70 | # Instancify is called | ||
342 | 71 | instance_id_path = 'var/lib/cloud/data/instance-id' | ||
343 | 72 | self.assertFalse( | ||
344 | 73 | os.path.exists(os.path.join(self.new_root, instance_id_path)), | ||
345 | 74 | 'Unexpected call to datasource.instancify produced instance-id') | ||
346 | 75 | expected_logs = [ | ||
347 | 76 | "Exiting. stop file ['{stop_file}'] existed\n".format( | ||
348 | 77 | stop_file=stop_file), | ||
349 | 78 | 'my net debug info' # netinfo.debug_info | ||
350 | 79 | ] | ||
351 | 80 | for log in expected_logs: | ||
352 | 81 | self.assertIn(log, self.stderr.getvalue()) | ||
353 | 82 | |||
354 | 83 | def test_main_init_run_net_runs_modules(self): | ||
355 | 84 | """Modules like write_files are run in 'net' mode.""" | ||
356 | 85 | cmdargs = myargs( | ||
357 | 86 | debug=False, files=None, force=False, local=False, reporter=None, | ||
358 | 87 | subcommand='init') | ||
359 | 88 | (item1, item2) = wrap_and_call( | ||
360 | 89 | 'cloudinit.cmd.main', | ||
361 | 90 | {'util.close_stdin': True, | ||
362 | 91 | 'netinfo.debug_info': 'my net debug info', | ||
363 | 92 | 'util.fixup_output': ('outfmt', 'errfmt')}, | ||
364 | 93 | main.main_init, 'init', cmdargs) | ||
365 | 94 | self.assertEqual([], item2) | ||
366 | 95 | # Instancify is called | ||
367 | 96 | instance_id_path = 'var/lib/cloud/data/instance-id' | ||
368 | 97 | self.assertEqual( | ||
369 | 98 | 'iid-datasource-none\n', | ||
370 | 99 | os.path.join(load_file( | ||
371 | 100 | os.path.join(self.new_root, instance_id_path)))) | ||
372 | 101 | # modules are run (including write_files) | ||
373 | 102 | self.assertEqual( | ||
374 | 103 | 'blah', load_file(os.path.join(self.new_root, 'etc/blah.ini'))) | ||
375 | 104 | expected_logs = [ | ||
376 | 105 | 'network config is disabled by fallback', # apply_network_config | ||
377 | 106 | 'my net debug info', # netinfo.debug_info | ||
378 | 107 | 'no previous run detected' | ||
379 | 108 | ] | ||
380 | 109 | for log in expected_logs: | ||
381 | 110 | self.assertIn(log, self.stderr.getvalue()) | ||
382 | 111 | |||
383 | 112 | def test_main_init_run_net_calls_set_hostname_when_metadata_present(self): | ||
384 | 113 | """When local-hostname metadata is present, call cc_set_hostname.""" | ||
385 | 114 | self.cfg['datasource'] = { | ||
386 | 115 | 'None': {'metadata': {'local-hostname': 'md-hostname'}}} | ||
387 | 116 | cloud_cfg = yaml_dumps(self.cfg) | ||
388 | 117 | write_file(self.cloud_cfg_file, cloud_cfg) | ||
389 | 118 | cmdargs = myargs( | ||
390 | 119 | debug=False, files=None, force=False, local=False, reporter=None, | ||
391 | 120 | subcommand='init') | ||
392 | 121 | |||
393 | 122 | def set_hostname(name, cfg, cloud, log, args): | ||
394 | 123 | self.assertEqual('set-hostname', name) | ||
395 | 124 | updated_cfg = copy.deepcopy(self.cfg) | ||
396 | 125 | updated_cfg.update( | ||
397 | 126 | {'def_log_file': '/var/log/cloud-init.log', | ||
398 | 127 | 'log_cfgs': [], | ||
399 | 128 | 'syslog_fix_perms': ['syslog:adm', 'root:adm', 'root:wheel'], | ||
400 | 129 | 'vendor_data': {'enabled': True, 'prefix': []}}) | ||
401 | 130 | updated_cfg.pop('system_info') | ||
402 | 131 | |||
403 | 132 | self.assertEqual(updated_cfg, cfg) | ||
404 | 133 | self.assertEqual(main.LOG, log) | ||
405 | 134 | self.assertIsNone(args) | ||
406 | 135 | |||
407 | 136 | (item1, item2) = wrap_and_call( | ||
408 | 137 | 'cloudinit.cmd.main', | ||
409 | 138 | {'util.close_stdin': True, | ||
410 | 139 | 'netinfo.debug_info': 'my net debug info', | ||
411 | 140 | 'cc_set_hostname.handle': {'side_effect': set_hostname}, | ||
412 | 141 | 'util.fixup_output': ('outfmt', 'errfmt')}, | ||
413 | 142 | main.main_init, 'init', cmdargs) | ||
414 | 143 | self.assertEqual([], item2) | ||
415 | 144 | # Instancify is called | ||
416 | 145 | instance_id_path = 'var/lib/cloud/data/instance-id' | ||
417 | 146 | self.assertEqual( | ||
418 | 147 | 'iid-datasource-none\n', | ||
419 | 148 | os.path.join(load_file( | ||
420 | 149 | os.path.join(self.new_root, instance_id_path)))) | ||
421 | 150 | # modules are run (including write_files) | ||
422 | 151 | self.assertEqual( | ||
423 | 152 | 'blah', load_file(os.path.join(self.new_root, 'etc/blah.ini'))) | ||
424 | 153 | expected_logs = [ | ||
425 | 154 | 'network config is disabled by fallback', # apply_network_config | ||
426 | 155 | 'my net debug info', # netinfo.debug_info | ||
427 | 156 | 'no previous run detected' | ||
428 | 157 | ] | ||
429 | 158 | for log in expected_logs: | ||
430 | 159 | self.assertIn(log, self.stderr.getvalue()) | ||
431 | 160 | |||
432 | 161 | # vi: ts=4 expandtab | ||
433 | diff --git a/cloudinit/cmd/tests/test_status.py b/cloudinit/cmd/tests/test_status.py | |||
434 | index 4a5a8c0..37a8993 100644 | |||
435 | --- a/cloudinit/cmd/tests/test_status.py | |||
436 | +++ b/cloudinit/cmd/tests/test_status.py | |||
437 | @@ -380,10 +380,11 @@ class TestStatus(CiTestCase): | |||
438 | 380 | wrap_and_call( | 380 | wrap_and_call( |
439 | 381 | 'cloudinit.cmd.status', | 381 | 'cloudinit.cmd.status', |
440 | 382 | {'sys.argv': {'new': ['status']}, | 382 | {'sys.argv': {'new': ['status']}, |
441 | 383 | 'sys.exit': {'side_effect': self.sys_exit}, | ||
442 | 383 | '_is_cloudinit_disabled': (False, ''), | 384 | '_is_cloudinit_disabled': (False, ''), |
443 | 384 | 'Init': {'side_effect': self.init_class}}, | 385 | 'Init': {'side_effect': self.init_class}}, |
444 | 385 | status.main) | 386 | status.main) |
446 | 386 | self.assertRaisesCodeEqual(0, context_manager.exception.code) | 387 | self.assertEqual(0, context_manager.exception.code) |
447 | 387 | self.assertEqual('status: running\n', m_stdout.getvalue()) | 388 | self.assertEqual('status: running\n', m_stdout.getvalue()) |
448 | 388 | 389 | ||
449 | 389 | # vi: ts=4 expandtab syntax=python | 390 | # vi: ts=4 expandtab syntax=python |
450 | diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py | |||
451 | index efedd4a..aff4010 100644 | |||
452 | --- a/cloudinit/config/cc_keys_to_console.py | |||
453 | +++ b/cloudinit/config/cc_keys_to_console.py | |||
454 | @@ -63,9 +63,7 @@ def handle(name, cfg, cloud, log, _args): | |||
455 | 63 | ["ssh-dss"]) | 63 | ["ssh-dss"]) |
456 | 64 | 64 | ||
457 | 65 | try: | 65 | try: |
461 | 66 | cmd = [helper_path] | 66 | cmd = [helper_path, ','.join(fp_blacklist), ','.join(key_blacklist)] |
459 | 67 | cmd.append(','.join(fp_blacklist)) | ||
460 | 68 | cmd.append(','.join(key_blacklist)) | ||
462 | 69 | (stdout, _stderr) = util.subp(cmd) | 67 | (stdout, _stderr) = util.subp(cmd) |
463 | 70 | util.multi_log("%s\n" % (stdout.strip()), | 68 | util.multi_log("%s\n" % (stdout.strip()), |
464 | 71 | stderr=False, console=True) | 69 | stderr=False, console=True) |
465 | diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py | |||
466 | index 28b1d56..4190a20 100644 | |||
467 | --- a/cloudinit/config/cc_puppet.py | |||
468 | +++ b/cloudinit/config/cc_puppet.py | |||
469 | @@ -21,6 +21,13 @@ under ``version``, and defaults to ``none``, which selects the latest version | |||
470 | 21 | in the repos. If the ``puppet`` config key exists in the config archive, this | 21 | in the repos. If the ``puppet`` config key exists in the config archive, this |
471 | 22 | module will attempt to start puppet even if no installation was performed. | 22 | module will attempt to start puppet even if no installation was performed. |
472 | 23 | 23 | ||
473 | 24 | The module also provides keys for configuring the new puppet 4 paths and | ||
474 | 25 | installing the puppet package from the puppetlabs repositories: | ||
475 | 26 | https://docs.puppet.com/puppet/4.2/reference/whered_it_go.html | ||
476 | 27 | The keys are ``package_name``, ``conf_file`` and ``ssl_dir``. If unset, their | ||
477 | 28 | values will default to ones that work with puppet 3.x and with distributions | ||
478 | 29 | that ship modified puppet 4.x that uses the old paths. | ||
479 | 30 | |||
480 | 24 | Puppet configuration can be specified under the ``conf`` key. The | 31 | Puppet configuration can be specified under the ``conf`` key. The |
481 | 25 | configuration is specified as a dictionary containing high-level ``<section>`` | 32 | configuration is specified as a dictionary containing high-level ``<section>`` |
482 | 26 | keys and lists of ``<key>=<value>`` pairs within each section. Each section | 33 | keys and lists of ``<key>=<value>`` pairs within each section. Each section |
483 | @@ -44,6 +51,9 @@ in pem format as a multi-line string (using the ``|`` yaml notation). | |||
484 | 44 | puppet: | 51 | puppet: |
485 | 45 | install: <true/false> | 52 | install: <true/false> |
486 | 46 | version: <version> | 53 | version: <version> |
487 | 54 | conf_file: '/etc/puppet/puppet.conf' | ||
488 | 55 | ssl_dir: '/var/lib/puppet/ssl' | ||
489 | 56 | package_name: 'puppet' | ||
490 | 47 | conf: | 57 | conf: |
491 | 48 | agent: | 58 | agent: |
492 | 49 | server: "puppetmaster.example.org" | 59 | server: "puppetmaster.example.org" |
493 | @@ -63,9 +73,17 @@ from cloudinit import helpers | |||
494 | 63 | from cloudinit import util | 73 | from cloudinit import util |
495 | 64 | 74 | ||
496 | 65 | PUPPET_CONF_PATH = '/etc/puppet/puppet.conf' | 75 | PUPPET_CONF_PATH = '/etc/puppet/puppet.conf' |
497 | 66 | PUPPET_SSL_CERT_DIR = '/var/lib/puppet/ssl/certs/' | ||
498 | 67 | PUPPET_SSL_DIR = '/var/lib/puppet/ssl' | 76 | PUPPET_SSL_DIR = '/var/lib/puppet/ssl' |
500 | 68 | PUPPET_SSL_CERT_PATH = '/var/lib/puppet/ssl/certs/ca.pem' | 77 | PUPPET_PACKAGE_NAME = 'puppet' |
501 | 78 | |||
502 | 79 | |||
503 | 80 | class PuppetConstants(object): | ||
504 | 81 | |||
505 | 82 | def __init__(self, puppet_conf_file, puppet_ssl_dir, log): | ||
506 | 83 | self.conf_path = puppet_conf_file | ||
507 | 84 | self.ssl_dir = puppet_ssl_dir | ||
508 | 85 | self.ssl_cert_dir = os.path.join(puppet_ssl_dir, "certs") | ||
509 | 86 | self.ssl_cert_path = os.path.join(self.ssl_cert_dir, "ca.pem") | ||
510 | 69 | 87 | ||
511 | 70 | 88 | ||
512 | 71 | def _autostart_puppet(log): | 89 | def _autostart_puppet(log): |
513 | @@ -92,22 +110,29 @@ def handle(name, cfg, cloud, log, _args): | |||
514 | 92 | return | 110 | return |
515 | 93 | 111 | ||
516 | 94 | puppet_cfg = cfg['puppet'] | 112 | puppet_cfg = cfg['puppet'] |
517 | 95 | |||
518 | 96 | # Start by installing the puppet package if necessary... | 113 | # Start by installing the puppet package if necessary... |
519 | 97 | install = util.get_cfg_option_bool(puppet_cfg, 'install', True) | 114 | install = util.get_cfg_option_bool(puppet_cfg, 'install', True) |
520 | 98 | version = util.get_cfg_option_str(puppet_cfg, 'version', None) | 115 | version = util.get_cfg_option_str(puppet_cfg, 'version', None) |
521 | 116 | package_name = util.get_cfg_option_str( | ||
522 | 117 | puppet_cfg, 'package_name', PUPPET_PACKAGE_NAME) | ||
523 | 118 | conf_file = util.get_cfg_option_str( | ||
524 | 119 | puppet_cfg, 'conf_file', PUPPET_CONF_PATH) | ||
525 | 120 | ssl_dir = util.get_cfg_option_str(puppet_cfg, 'ssl_dir', PUPPET_SSL_DIR) | ||
526 | 121 | |||
527 | 122 | p_constants = PuppetConstants(conf_file, ssl_dir, log) | ||
528 | 99 | if not install and version: | 123 | if not install and version: |
529 | 100 | log.warn(("Puppet install set false but version supplied," | 124 | log.warn(("Puppet install set false but version supplied," |
530 | 101 | " doing nothing.")) | 125 | " doing nothing.")) |
531 | 102 | elif install: | 126 | elif install: |
532 | 103 | log.debug(("Attempting to install puppet %s,"), | 127 | log.debug(("Attempting to install puppet %s,"), |
533 | 104 | version if version else 'latest') | 128 | version if version else 'latest') |
535 | 105 | cloud.distro.install_packages(('puppet', version)) | 129 | |
536 | 130 | cloud.distro.install_packages((package_name, version)) | ||
537 | 106 | 131 | ||
538 | 107 | # ... and then update the puppet configuration | 132 | # ... and then update the puppet configuration |
539 | 108 | if 'conf' in puppet_cfg: | 133 | if 'conf' in puppet_cfg: |
540 | 109 | # Add all sections from the conf object to puppet.conf | 134 | # Add all sections from the conf object to puppet.conf |
542 | 110 | contents = util.load_file(PUPPET_CONF_PATH) | 135 | contents = util.load_file(p_constants.conf_path) |
543 | 111 | # Create object for reading puppet.conf values | 136 | # Create object for reading puppet.conf values |
544 | 112 | puppet_config = helpers.DefaultingConfigParser() | 137 | puppet_config = helpers.DefaultingConfigParser() |
545 | 113 | # Read puppet.conf values from original file in order to be able to | 138 | # Read puppet.conf values from original file in order to be able to |
546 | @@ -115,20 +140,23 @@ def handle(name, cfg, cloud, log, _args): | |||
547 | 115 | # (TODO(harlowja) is this really needed??) | 140 | # (TODO(harlowja) is this really needed??) |
548 | 116 | cleaned_lines = [i.lstrip() for i in contents.splitlines()] | 141 | cleaned_lines = [i.lstrip() for i in contents.splitlines()] |
549 | 117 | cleaned_contents = '\n'.join(cleaned_lines) | 142 | cleaned_contents = '\n'.join(cleaned_lines) |
552 | 118 | puppet_config.readfp(StringIO(cleaned_contents), | 143 | # Move to puppet_config.read_file when dropping py2.7 |
553 | 119 | filename=PUPPET_CONF_PATH) | 144 | puppet_config.readfp( # pylint: disable=W1505 |
554 | 145 | StringIO(cleaned_contents), | ||
555 | 146 | filename=p_constants.conf_path) | ||
556 | 120 | for (cfg_name, cfg) in puppet_cfg['conf'].items(): | 147 | for (cfg_name, cfg) in puppet_cfg['conf'].items(): |
557 | 121 | # Cert configuration is a special case | 148 | # Cert configuration is a special case |
558 | 122 | # Dump the puppet master ca certificate in the correct place | 149 | # Dump the puppet master ca certificate in the correct place |
559 | 123 | if cfg_name == 'ca_cert': | 150 | if cfg_name == 'ca_cert': |
560 | 124 | # Puppet ssl sub-directory isn't created yet | 151 | # Puppet ssl sub-directory isn't created yet |
561 | 125 | # Create it with the proper permissions and ownership | 152 | # Create it with the proper permissions and ownership |
568 | 126 | util.ensure_dir(PUPPET_SSL_DIR, 0o771) | 153 | util.ensure_dir(p_constants.ssl_dir, 0o771) |
569 | 127 | util.chownbyname(PUPPET_SSL_DIR, 'puppet', 'root') | 154 | util.chownbyname(p_constants.ssl_dir, 'puppet', 'root') |
570 | 128 | util.ensure_dir(PUPPET_SSL_CERT_DIR) | 155 | util.ensure_dir(p_constants.ssl_cert_dir) |
571 | 129 | util.chownbyname(PUPPET_SSL_CERT_DIR, 'puppet', 'root') | 156 | |
572 | 130 | util.write_file(PUPPET_SSL_CERT_PATH, cfg) | 157 | util.chownbyname(p_constants.ssl_cert_dir, 'puppet', 'root') |
573 | 131 | util.chownbyname(PUPPET_SSL_CERT_PATH, 'puppet', 'root') | 158 | util.write_file(p_constants.ssl_cert_path, cfg) |
574 | 159 | util.chownbyname(p_constants.ssl_cert_path, 'puppet', 'root') | ||
575 | 132 | else: | 160 | else: |
576 | 133 | # Iterate through the config items, we'll use ConfigParser.set | 161 | # Iterate through the config items, we'll use ConfigParser.set |
577 | 134 | # to overwrite or create new items as needed | 162 | # to overwrite or create new items as needed |
578 | @@ -144,8 +172,9 @@ def handle(name, cfg, cloud, log, _args): | |||
579 | 144 | puppet_config.set(cfg_name, o, v) | 172 | puppet_config.set(cfg_name, o, v) |
580 | 145 | # We got all our config as wanted we'll rename | 173 | # We got all our config as wanted we'll rename |
581 | 146 | # the previous puppet.conf and create our new one | 174 | # the previous puppet.conf and create our new one |
584 | 147 | util.rename(PUPPET_CONF_PATH, "%s.old" % (PUPPET_CONF_PATH)) | 175 | util.rename(p_constants.conf_path, "%s.old" |
585 | 148 | util.write_file(PUPPET_CONF_PATH, puppet_config.stringify()) | 176 | % (p_constants.conf_path)) |
586 | 177 | util.write_file(p_constants.conf_path, puppet_config.stringify()) | ||
587 | 149 | 178 | ||
588 | 150 | # Set it up so it autostarts | 179 | # Set it up so it autostarts |
589 | 151 | _autostart_puppet(log) | 180 | _autostart_puppet(log) |
590 | diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py | |||
591 | index cec22bb..c8e1752 100644 | |||
592 | --- a/cloudinit/config/cc_resizefs.py | |||
593 | +++ b/cloudinit/config/cc_resizefs.py | |||
594 | @@ -84,6 +84,10 @@ def _resize_ufs(mount_point, devpth): | |||
595 | 84 | return ('growfs', devpth) | 84 | return ('growfs', devpth) |
596 | 85 | 85 | ||
597 | 86 | 86 | ||
598 | 87 | def _resize_zfs(mount_point, devpth): | ||
599 | 88 | return ('zpool', 'online', '-e', mount_point, devpth) | ||
600 | 89 | |||
601 | 90 | |||
602 | 87 | def _get_dumpfs_output(mount_point): | 91 | def _get_dumpfs_output(mount_point): |
603 | 88 | dumpfs_res, err = util.subp(['dumpfs', '-m', mount_point]) | 92 | dumpfs_res, err = util.subp(['dumpfs', '-m', mount_point]) |
604 | 89 | return dumpfs_res | 93 | return dumpfs_res |
605 | @@ -148,6 +152,7 @@ RESIZE_FS_PREFIXES_CMDS = [ | |||
606 | 148 | ('ext', _resize_ext), | 152 | ('ext', _resize_ext), |
607 | 149 | ('xfs', _resize_xfs), | 153 | ('xfs', _resize_xfs), |
608 | 150 | ('ufs', _resize_ufs), | 154 | ('ufs', _resize_ufs), |
609 | 155 | ('zfs', _resize_zfs), | ||
610 | 151 | ] | 156 | ] |
611 | 152 | 157 | ||
612 | 153 | RESIZE_FS_PRECHECK_CMDS = { | 158 | RESIZE_FS_PRECHECK_CMDS = { |
613 | @@ -188,6 +193,13 @@ def maybe_get_writable_device_path(devpath, info, log): | |||
614 | 188 | log.debug("Not attempting to resize devpath '%s': %s", devpath, info) | 193 | log.debug("Not attempting to resize devpath '%s': %s", devpath, info) |
615 | 189 | return None | 194 | return None |
616 | 190 | 195 | ||
617 | 196 | # FreeBSD zpool can also just use gpt/<label> | ||
618 | 197 | # with that in mind we can not do an os.stat on "gpt/whatever" | ||
619 | 198 | # therefore return the devpath already here. | ||
620 | 199 | if devpath.startswith('gpt/'): | ||
621 | 200 | log.debug('We have a gpt label - just go ahead') | ||
622 | 201 | return devpath | ||
623 | 202 | |||
624 | 191 | try: | 203 | try: |
625 | 192 | statret = os.stat(devpath) | 204 | statret = os.stat(devpath) |
626 | 193 | except OSError as exc: | 205 | except OSError as exc: |
627 | @@ -231,6 +243,16 @@ def handle(name, cfg, _cloud, log, args): | |||
628 | 231 | 243 | ||
629 | 232 | (devpth, fs_type, mount_point) = result | 244 | (devpth, fs_type, mount_point) = result |
630 | 233 | 245 | ||
631 | 246 | # if we have a zfs then our device path at this point | ||
632 | 247 | # is the zfs label. For example: vmzroot/ROOT/freebsd | ||
633 | 248 | # we will have to get the zpool name out of this | ||
634 | 249 | # and set the resize_what variable to the zpool | ||
635 | 250 | # so the _resize_zfs function gets the right attribute. | ||
636 | 251 | if fs_type == 'zfs': | ||
637 | 252 | zpool = devpth.split('/')[0] | ||
638 | 253 | devpth = util.get_device_info_from_zpool(zpool) | ||
639 | 254 | resize_what = zpool | ||
640 | 255 | |||
641 | 234 | info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what) | 256 | info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what) |
642 | 235 | log.debug("resize_info: %s" % info) | 257 | log.debug("resize_info: %s" % info) |
643 | 236 | 258 | ||
644 | diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py | |||
645 | index 449872f..539cbd5 100644 | |||
646 | --- a/cloudinit/config/cc_runcmd.py | |||
647 | +++ b/cloudinit/config/cc_runcmd.py | |||
648 | @@ -39,8 +39,10 @@ schema = { | |||
649 | 39 | using ``sh``. | 39 | using ``sh``. |
650 | 40 | 40 | ||
651 | 41 | .. note:: | 41 | .. note:: |
654 | 42 | all commands must be proper yaml, so you have to quote any characters | 42 | |
655 | 43 | yaml would eat (':' can be problematic)"""), | 43 | all commands must be proper yaml, so you have to quote any characters |
656 | 44 | yaml would eat (':' can be problematic) | ||
657 | 45 | """), | ||
658 | 44 | 'distros': distros, | 46 | 'distros': distros, |
659 | 45 | 'examples': [dedent("""\ | 47 | 'examples': [dedent("""\ |
660 | 46 | runcmd: | 48 | runcmd: |
661 | diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py | |||
662 | index 2b38837..d6a21d7 100644 | |||
663 | --- a/cloudinit/config/cc_salt_minion.py | |||
664 | +++ b/cloudinit/config/cc_salt_minion.py | |||
665 | @@ -12,7 +12,9 @@ key is present in the config parts, then salt minion will be installed and | |||
666 | 12 | started. Configuration for salt minion can be specified in the ``conf`` key | 12 | started. Configuration for salt minion can be specified in the ``conf`` key |
667 | 13 | under ``salt_minion``. Any conf values present there will be assigned in | 13 | under ``salt_minion``. Any conf values present there will be assigned in |
668 | 14 | ``/etc/salt/minion``. The public and private keys to use for salt minion can be | 14 | ``/etc/salt/minion``. The public and private keys to use for salt minion can be |
670 | 15 | specified with ``public_key`` and ``private_key`` respectively. | 15 | specified with ``public_key`` and ``private_key`` respectively. Optionally if |
671 | 16 | you have a custom package name, service name or config directory you can | ||
672 | 17 | specify them with ``pkg_name``, ``service_name`` and ``config_dir``. | ||
673 | 16 | 18 | ||
674 | 17 | **Internal name:** ``cc_salt_minion`` | 19 | **Internal name:** ``cc_salt_minion`` |
675 | 18 | 20 | ||
676 | @@ -23,8 +25,14 @@ specified with ``public_key`` and ``private_key`` respectively. | |||
677 | 23 | **Config keys**:: | 25 | **Config keys**:: |
678 | 24 | 26 | ||
679 | 25 | salt_minion: | 27 | salt_minion: |
680 | 28 | pkg_name: 'salt-minion' | ||
681 | 29 | service_name: 'salt-minion' | ||
682 | 30 | config_dir: '/etc/salt' | ||
683 | 26 | conf: | 31 | conf: |
684 | 27 | master: salt.example.com | 32 | master: salt.example.com |
685 | 33 | grains: | ||
686 | 34 | role: | ||
687 | 35 | - web | ||
688 | 28 | public_key: | | 36 | public_key: | |
689 | 29 | ------BEGIN PUBLIC KEY------- | 37 | ------BEGIN PUBLIC KEY------- |
690 | 30 | <key data> | 38 | <key data> |
691 | @@ -39,7 +47,34 @@ import os | |||
692 | 39 | 47 | ||
693 | 40 | from cloudinit import util | 48 | from cloudinit import util |
694 | 41 | 49 | ||
696 | 42 | # Note: see http://saltstack.org/topics/installation/ | 50 | # Note: see https://docs.saltstack.com/en/latest/topics/installation/ |
697 | 51 | # Note: see https://docs.saltstack.com/en/latest/ref/configuration/ | ||
698 | 52 | |||
699 | 53 | |||
700 | 54 | class SaltConstants(object): | ||
701 | 55 | """ | ||
702 | 56 | defines default distribution specific salt variables | ||
703 | 57 | """ | ||
704 | 58 | def __init__(self, cfg): | ||
705 | 59 | |||
706 | 60 | # constants tailored for FreeBSD | ||
707 | 61 | if util.is_FreeBSD(): | ||
708 | 62 | self.pkg_name = 'py27-salt' | ||
709 | 63 | self.srv_name = 'salt_minion' | ||
710 | 64 | self.conf_dir = '/usr/local/etc/salt' | ||
711 | 65 | # constants for any other OS | ||
712 | 66 | else: | ||
713 | 67 | self.pkg_name = 'salt-minion' | ||
714 | 68 | self.srv_name = 'salt-minion' | ||
715 | 69 | self.conf_dir = '/etc/salt' | ||
716 | 70 | |||
717 | 71 | # if there are constants given in cloud config use those | ||
718 | 72 | self.pkg_name = util.get_cfg_option_str(cfg, 'pkg_name', | ||
719 | 73 | self.pkg_name) | ||
720 | 74 | self.conf_dir = util.get_cfg_option_str(cfg, 'config_dir', | ||
721 | 75 | self.conf_dir) | ||
722 | 76 | self.srv_name = util.get_cfg_option_str(cfg, 'service_name', | ||
723 | 77 | self.srv_name) | ||
724 | 43 | 78 | ||
725 | 44 | 79 | ||
726 | 45 | def handle(name, cfg, cloud, log, _args): | 80 | def handle(name, cfg, cloud, log, _args): |
727 | @@ -49,39 +84,49 @@ def handle(name, cfg, cloud, log, _args): | |||
728 | 49 | " no 'salt_minion' key in configuration"), name) | 84 | " no 'salt_minion' key in configuration"), name) |
729 | 50 | return | 85 | return |
730 | 51 | 86 | ||
732 | 52 | salt_cfg = cfg['salt_minion'] | 87 | s_cfg = cfg['salt_minion'] |
733 | 88 | const = SaltConstants(cfg=s_cfg) | ||
734 | 53 | 89 | ||
735 | 54 | # Start by installing the salt package ... | 90 | # Start by installing the salt package ... |
737 | 55 | cloud.distro.install_packages(('salt-minion',)) | 91 | cloud.distro.install_packages(const.pkg_name) |
738 | 56 | 92 | ||
739 | 57 | # Ensure we can configure files at the right dir | 93 | # Ensure we can configure files at the right dir |
742 | 58 | config_dir = salt_cfg.get("config_dir", '/etc/salt') | 94 | util.ensure_dir(const.conf_dir) |
741 | 59 | util.ensure_dir(config_dir) | ||
743 | 60 | 95 | ||
744 | 61 | # ... and then update the salt configuration | 96 | # ... and then update the salt configuration |
749 | 62 | if 'conf' in salt_cfg: | 97 | if 'conf' in s_cfg: |
750 | 63 | # Add all sections from the conf object to /etc/salt/minion | 98 | # Add all sections from the conf object to minion config file |
751 | 64 | minion_config = os.path.join(config_dir, 'minion') | 99 | minion_config = os.path.join(const.conf_dir, 'minion') |
752 | 65 | minion_data = util.yaml_dumps(salt_cfg.get('conf')) | 100 | minion_data = util.yaml_dumps(s_cfg.get('conf')) |
753 | 66 | util.write_file(minion_config, minion_data) | 101 | util.write_file(minion_config, minion_data) |
754 | 67 | 102 | ||
755 | 103 | if 'grains' in s_cfg: | ||
756 | 104 | # add grains to /etc/salt/grains | ||
757 | 105 | grains_config = os.path.join(const.conf_dir, 'grains') | ||
758 | 106 | grains_data = util.yaml_dumps(s_cfg.get('grains')) | ||
759 | 107 | util.write_file(grains_config, grains_data) | ||
760 | 108 | |||
761 | 68 | # ... copy the key pair if specified | 109 | # ... copy the key pair if specified |
767 | 69 | if 'public_key' in salt_cfg and 'private_key' in salt_cfg: | 110 | if 'public_key' in s_cfg and 'private_key' in s_cfg: |
768 | 70 | if os.path.isdir("/etc/salt/pki/minion"): | 111 | pki_dir_default = os.path.join(const.conf_dir, "pki/minion") |
769 | 71 | pki_dir_default = "/etc/salt/pki/minion" | 112 | if not os.path.isdir(pki_dir_default): |
770 | 72 | else: | 113 | pki_dir_default = os.path.join(const.conf_dir, "pki") |
766 | 73 | pki_dir_default = "/etc/salt/pki" | ||
771 | 74 | 114 | ||
773 | 75 | pki_dir = salt_cfg.get('pki_dir', pki_dir_default) | 115 | pki_dir = s_cfg.get('pki_dir', pki_dir_default) |
774 | 76 | with util.umask(0o77): | 116 | with util.umask(0o77): |
775 | 77 | util.ensure_dir(pki_dir) | 117 | util.ensure_dir(pki_dir) |
776 | 78 | pub_name = os.path.join(pki_dir, 'minion.pub') | 118 | pub_name = os.path.join(pki_dir, 'minion.pub') |
777 | 79 | pem_name = os.path.join(pki_dir, 'minion.pem') | 119 | pem_name = os.path.join(pki_dir, 'minion.pem') |
780 | 80 | util.write_file(pub_name, salt_cfg['public_key']) | 120 | util.write_file(pub_name, s_cfg['public_key']) |
781 | 81 | util.write_file(pem_name, salt_cfg['private_key']) | 121 | util.write_file(pem_name, s_cfg['private_key']) |
782 | 122 | |||
783 | 123 | # we need to have the salt minion service enabled in rc in order to be | ||
784 | 124 | # able to start the service. this does only apply on FreeBSD servers. | ||
785 | 125 | if cloud.distro.osfamily == 'freebsd': | ||
786 | 126 | cloud.distro.updatercconf('salt_minion_enable', 'YES') | ||
787 | 82 | 127 | ||
789 | 83 | # restart salt-minion. 'service' will start even if not started. if it | 128 | # restart salt-minion. 'service' will start even if not started. if it |
790 | 84 | # was started, it needs to be restarted for config change. | 129 | # was started, it needs to be restarted for config change. |
792 | 85 | util.subp(['service', 'salt-minion', 'restart'], capture=False) | 130 | util.subp(['service', const.srv_name, 'restart'], capture=False) |
793 | 86 | 131 | ||
794 | 87 | # vi: ts=4 expandtab | 132 | # vi: ts=4 expandtab |
795 | diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py | |||
796 | index aa3dfe5..3d2b2da 100644 | |||
797 | --- a/cloudinit/config/cc_set_hostname.py | |||
798 | +++ b/cloudinit/config/cc_set_hostname.py | |||
799 | @@ -32,22 +32,51 @@ will be used. | |||
800 | 32 | hostname: <fqdn/hostname> | 32 | hostname: <fqdn/hostname> |
801 | 33 | """ | 33 | """ |
802 | 34 | 34 | ||
803 | 35 | import os | ||
804 | 36 | |||
805 | 37 | |||
806 | 38 | from cloudinit.atomic_helper import write_json | ||
807 | 35 | from cloudinit import util | 39 | from cloudinit import util |
808 | 36 | 40 | ||
809 | 37 | 41 | ||
810 | 42 | class SetHostnameError(Exception): | ||
811 | 43 | """Raised when the distro runs into an exception when setting hostname. | ||
812 | 44 | |||
813 | 45 | This may happen if we attempt to set the hostname early in cloud-init's | ||
814 | 46 | init-local timeframe as certain services may not be running yet. | ||
815 | 47 | """ | ||
816 | 48 | pass | ||
817 | 49 | |||
818 | 50 | |||
819 | 38 | def handle(name, cfg, cloud, log, _args): | 51 | def handle(name, cfg, cloud, log, _args): |
820 | 39 | if util.get_cfg_option_bool(cfg, "preserve_hostname", False): | 52 | if util.get_cfg_option_bool(cfg, "preserve_hostname", False): |
821 | 40 | log.debug(("Configuration option 'preserve_hostname' is set," | 53 | log.debug(("Configuration option 'preserve_hostname' is set," |
822 | 41 | " not setting the hostname in module %s"), name) | 54 | " not setting the hostname in module %s"), name) |
823 | 42 | return | 55 | return |
824 | 43 | |||
825 | 44 | (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) | 56 | (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) |
826 | 57 | # Check for previous successful invocation of set-hostname | ||
827 | 58 | |||
828 | 59 | # set-hostname artifact file accounts for both hostname and fqdn | ||
829 | 60 | # deltas. As such, it's format is different than cc_update_hostname's | ||
830 | 61 | # previous-hostname file which only contains the base hostname. | ||
831 | 62 | # TODO consolidate previous-hostname and set-hostname artifact files and | ||
832 | 63 | # distro._read_hostname implementation so we only validate one artifact. | ||
833 | 64 | prev_fn = os.path.join(cloud.get_cpath('data'), "set-hostname") | ||
834 | 65 | prev_hostname = {} | ||
835 | 66 | if os.path.exists(prev_fn): | ||
836 | 67 | prev_hostname = util.load_json(util.load_file(prev_fn)) | ||
837 | 68 | hostname_changed = (hostname != prev_hostname.get('hostname') or | ||
838 | 69 | fqdn != prev_hostname.get('fqdn')) | ||
839 | 70 | if not hostname_changed: | ||
840 | 71 | log.debug('No hostname changes. Skipping set-hostname') | ||
841 | 72 | return | ||
842 | 73 | log.debug("Setting the hostname to %s (%s)", fqdn, hostname) | ||
843 | 45 | try: | 74 | try: |
844 | 46 | log.debug("Setting the hostname to %s (%s)", fqdn, hostname) | ||
845 | 47 | cloud.distro.set_hostname(hostname, fqdn) | 75 | cloud.distro.set_hostname(hostname, fqdn) |
850 | 48 | except Exception: | 76 | except Exception as e: |
851 | 49 | util.logexc(log, "Failed to set the hostname to %s (%s)", fqdn, | 77 | msg = "Failed to set the hostname to %s (%s)" % (fqdn, hostname) |
852 | 50 | hostname) | 78 | util.logexc(log, msg) |
853 | 51 | raise | 79 | raise SetHostnameError("%s: %s" % (msg, e)) |
854 | 80 | write_json(prev_fn, {'hostname': hostname, 'fqdn': fqdn}) | ||
855 | 52 | 81 | ||
856 | 53 | # vi: ts=4 expandtab | 82 | # vi: ts=4 expandtab |
857 | diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py | |||
858 | 54 | new file mode 100644 | 83 | new file mode 100644 |
859 | index 0000000..34a53fd | |||
860 | --- /dev/null | |||
861 | +++ b/cloudinit/config/cc_snap.py | |||
862 | @@ -0,0 +1,230 @@ | |||
863 | 1 | # Copyright (C) 2018 Canonical Ltd. | ||
864 | 2 | # | ||
865 | 3 | # This file is part of cloud-init. See LICENSE file for license information. | ||
866 | 4 | |||
867 | 5 | """Snap: Install, configure and manage snapd and snap packages.""" | ||
868 | 6 | |||
869 | 7 | import sys | ||
870 | 8 | from textwrap import dedent | ||
871 | 9 | |||
872 | 10 | from cloudinit import log as logging | ||
873 | 11 | from cloudinit.config.schema import ( | ||
874 | 12 | get_schema_doc, validate_cloudconfig_schema) | ||
875 | 13 | from cloudinit.settings import PER_INSTANCE | ||
876 | 14 | from cloudinit.subp import prepend_base_command | ||
877 | 15 | from cloudinit import util | ||
878 | 16 | |||
879 | 17 | |||
880 | 18 | distros = ['ubuntu'] | ||
881 | 19 | frequency = PER_INSTANCE | ||
882 | 20 | |||
883 | 21 | LOG = logging.getLogger(__name__) | ||
884 | 22 | |||
885 | 23 | schema = { | ||
886 | 24 | 'id': 'cc_snap', | ||
887 | 25 | 'name': 'Snap', | ||
888 | 26 | 'title': 'Install, configure and manage snapd and snap packages', | ||
889 | 27 | 'description': dedent("""\ | ||
890 | 28 | This module provides a simple configuration namespace in cloud-init to | ||
891 | 29 | both setup snapd and install snaps. | ||
892 | 30 | |||
893 | 31 | .. note:: | ||
894 | 32 | Both ``assertions`` and ``commands`` values can be either a | ||
895 | 33 | dictionary or a list. If these configs are provided as a | ||
896 | 34 | dictionary, the keys are only used to order the execution of the | ||
897 | 35 | assertions or commands and the dictionary is merged with any | ||
898 | 36 | vendor-data snap configuration provided. If a list is provided by | ||
899 | 37 | the user instead of a dict, any vendor-data snap configuration is | ||
900 | 38 | ignored. | ||
901 | 39 | |||
902 | 40 | The ``assertions`` configuration option is a dictionary or list of | ||
903 | 41 | properly-signed snap assertions which will run before any snap | ||
904 | 42 | ``commands``. They will be added to snapd's assertion database by | ||
905 | 43 | invoking ``snap ack <aggregate_assertion_file>``. | ||
906 | 44 | |||
907 | 45 | Snap ``commands`` is a dictionary or list of individual snap | ||
908 | 46 | commands to run on the target system. These commands can be used to | ||
909 | 47 | create snap users, install snaps and provide snap configuration. | ||
910 | 48 | |||
911 | 49 | .. note:: | ||
912 | 50 | If 'side-loading' private/unpublished snaps on an instance, it is | ||
913 | 51 | best to create a snap seed directory and seed.yaml manifest in | ||
914 | 52 | **/var/lib/snapd/seed/** which snapd automatically installs on | ||
915 | 53 | startup. | ||
916 | 54 | |||
917 | 55 | **Development only**: The ``squashfuse_in_container`` boolean can be | ||
918 | 56 | set true to install squashfuse package when in a container to enable | ||
919 | 57 | snap installs. Default is false. | ||
920 | 58 | """), | ||
921 | 59 | 'distros': distros, | ||
922 | 60 | 'examples': [dedent("""\ | ||
923 | 61 | snap: | ||
924 | 62 | assertions: | ||
925 | 63 | 00: | | ||
926 | 64 | signed_assertion_blob_here | ||
927 | 65 | 02: | | ||
928 | 66 | signed_assertion_blob_here | ||
929 | 67 | commands: | ||
930 | 68 | 00: snap create-user --sudoer --known <snap-user>@mydomain.com | ||
931 | 69 | 01: snap install canonical-livepatch | ||
932 | 70 | 02: canonical-livepatch enable <AUTH_TOKEN> | ||
933 | 71 | """), dedent("""\ | ||
934 | 72 | # LXC-based containers require squashfuse before snaps can be installed | ||
935 | 73 | snap: | ||
936 | 74 | commands: | ||
937 | 75 | 00: apt-get install squashfuse -y | ||
938 | 76 | 11: snap install emoj | ||
939 | 77 | |||
940 | 78 | """), dedent("""\ | ||
941 | 79 | # Convenience: the snap command can be omitted when specifying commands | ||
942 | 80 | # as a list and 'snap' will automatically be prepended. | ||
943 | 81 | # The following commands are equivalent: | ||
944 | 82 | snap: | ||
945 | 83 | commands: | ||
946 | 84 | 00: ['install', 'vlc'] | ||
947 | 85 | 01: ['snap', 'install', 'vlc'] | ||
948 | 86 | 02: snap install vlc | ||
949 | 87 | 03: 'snap install vlc' | ||
950 | 88 | """)], | ||
951 | 89 | 'frequency': PER_INSTANCE, | ||
952 | 90 | 'type': 'object', | ||
953 | 91 | 'properties': { | ||
954 | 92 | 'snap': { | ||
955 | 93 | 'type': 'object', | ||
956 | 94 | 'properties': { | ||
957 | 95 | 'assertions': { | ||
958 | 96 | 'type': ['object', 'array'], # Array of strings or dict | ||
959 | 97 | 'items': {'type': 'string'}, | ||
960 | 98 | 'additionalItems': False, # Reject items non-string | ||
961 | 99 | 'minItems': 1, | ||
962 | 100 | 'minProperties': 1, | ||
963 | 101 | 'uniqueItems': True | ||
964 | 102 | }, | ||
965 | 103 | 'commands': { | ||
966 | 104 | 'type': ['object', 'array'], # Array of strings or dict | ||
967 | 105 | 'items': { | ||
968 | 106 | 'oneOf': [ | ||
969 | 107 | {'type': 'array', 'items': {'type': 'string'}}, | ||
970 | 108 | {'type': 'string'}] | ||
971 | 109 | }, | ||
972 | 110 | 'additionalItems': False, # Reject non-string & non-list | ||
973 | 111 | 'minItems': 1, | ||
974 | 112 | 'minProperties': 1, | ||
975 | 113 | 'uniqueItems': True | ||
976 | 114 | }, | ||
977 | 115 | 'squashfuse_in_container': { | ||
978 | 116 | 'type': 'boolean' | ||
979 | 117 | } | ||
980 | 118 | }, | ||
981 | 119 | 'additionalProperties': False, # Reject keys not in schema | ||
982 | 120 | 'required': [], | ||
983 | 121 | 'minProperties': 1 | ||
984 | 122 | } | ||
985 | 123 | } | ||
986 | 124 | } | ||
987 | 125 | |||
# TODO schema for 'assertions' and 'commands' are too permissive at the moment.
# Once python-jsonschema supports schema draft 6 add support for arbitrary
# object keys with 'patternProperties' constraint to validate string values.

__doc__ = get_schema_doc(schema)  # Supplement python help()

# Executable used for all snap operations ('ack', install commands, ...).
SNAP_CMD = "snap"
# Instance-scoped file where user-provided assertions are concatenated
# before being handed to `snap ack` by add_assertions().
ASSERTIONS_FILE = "/var/lib/cloud/instance/snapd.assertions"
996 | 134 | |||
997 | 135 | |||
def add_assertions(assertions):
    """Import list of assertions.

    Import assertions by concatenating each assertion into a
    string separated by a '\n'. Write this string to an instance file and
    then invoke `snap ack /path/to/file` and check for errors.
    If snap exits 0, then all assertions are imported.

    @param assertions: List of assertion strings, or a dict whose values
        are assertion strings. Dict values are imported in sorted-key
        order so the result is deterministic (matching run_commands).
    @raises TypeError: when assertions is neither a list nor a dict.
    """
    if not assertions:
        return
    LOG.debug('Importing user-provided snap assertions')
    if isinstance(assertions, dict):
        # Sort by key for a deterministic import order, consistent with
        # run_commands' handling of dict-provided commands.
        assertions = [asrt for _key, asrt in sorted(assertions.items())]
    elif not isinstance(assertions, list):
        raise TypeError(
            'assertion parameter was not a list or dict: {assertions}'.format(
                assertions=assertions))

    snap_cmd = [SNAP_CMD, 'ack']
    combined = "\n".join(assertions)

    for asrt in assertions:
        # Log only the first two header lines; assertion bodies are large.
        LOG.debug('Snap acking: %s', asrt.split('\n')[0:2])

    util.write_file(ASSERTIONS_FILE, combined.encode('utf-8'))
    util.subp(snap_cmd + [ASSERTIONS_FILE], capture=True)
1024 | 162 | |||
1025 | 163 | |||
def run_commands(commands):
    """Execute the commands given in the snap:commands configuration.

    Each command is run on its own; failures are collected and reported
    together once every command has been attempted.

    @param commands: A list or dict containing commands to run. Keys of a
        dict will be used to order the commands provided as dict values.
    @raises TypeError: when commands is neither a list nor a dict.
    @raises RuntimeError: when one or more of the commands failed.
    """
    if not commands:
        return
    LOG.debug('Running user-provided snap commands')
    if isinstance(commands, dict):
        # Dict keys only define execution order; values are the commands.
        commands = [cmd for _key, cmd in sorted(commands.items())]
    elif not isinstance(commands, list):
        raise TypeError(
            'commands parameter was not a list or dict: {commands}'.format(
                commands=commands))

    failures = []
    for cmd in prepend_base_command('snap', commands):
        # A string command goes through a shell; a list command does not.
        try:
            util.subp(cmd, shell=isinstance(cmd, str),
                      status_cb=sys.stderr.write)
        except util.ProcessExecutionError as err:
            failures.append(str(err))
    if failures:
        msg = 'Failures running snap commands:\n{cmd_failures}'.format(
            cmd_failures=failures)
        util.logexc(LOG, msg)
        raise RuntimeError(msg)
1060 | 198 | |||
1061 | 199 | |||
# RELEASE_BLOCKER: Once LP: #1628289 is released on xenial, drop this function.
def maybe_install_squashfuse(cloud):
    """Install squashfuse if we are in a container.

    @param cloud: Cloud object whose distro performs the package update
        and install operations.
    @raises Exception: re-raises any package update/install error after
        logging it.
    """
    if not util.is_container():
        return
    try:
        cloud.distro.update_package_sources()
    except Exception:
        # Unused 'as e' binding removed; logexc already logs the traceback.
        util.logexc(LOG, "Package update failed")
        raise
    try:
        cloud.distro.install_packages(['squashfuse'])
    except Exception:
        util.logexc(LOG, "Failed to install squashfuse")
        raise
1077 | 215 | |||
1078 | 216 | |||
def handle(name, cfg, cloud, log, args):
    """Apply the 'snap' cloud-config section, if present.

    Validates the config against the module schema, optionally installs
    squashfuse inside containers, imports any snap assertions and then
    runs the configured snap commands.
    """
    snap_section = cfg.get('snap', {})
    if not snap_section:
        LOG.debug(
            "Skipping module named %s, no 'snap' key in configuration", name)
        return

    validate_cloudconfig_schema(cfg, schema)
    # squashfuse is only needed in containers (LP: #1628289 workaround).
    if util.is_true(snap_section.get('squashfuse_in_container', False)):
        maybe_install_squashfuse(cloud)
    add_assertions(snap_section.get('assertions', []))
    run_commands(snap_section.get('commands', []))
1091 | 229 | |||
1092 | 230 | # vi: ts=4 expandtab | ||
1093 | diff --git a/cloudinit/config/cc_snap_config.py b/cloudinit/config/cc_snap_config.py | |||
1094 | index e82c081..afe297e 100644 | |||
1095 | --- a/cloudinit/config/cc_snap_config.py | |||
1096 | +++ b/cloudinit/config/cc_snap_config.py | |||
1097 | @@ -4,11 +4,15 @@ | |||
1098 | 4 | # | 4 | # |
1099 | 5 | # This file is part of cloud-init. See LICENSE file for license information. | 5 | # This file is part of cloud-init. See LICENSE file for license information. |
1100 | 6 | 6 | ||
1101 | 7 | # RELEASE_BLOCKER: Remove this deprecated module in 18.3 | ||
1102 | 7 | """ | 8 | """ |
1103 | 8 | Snap Config | 9 | Snap Config |
1104 | 9 | ----------- | 10 | ----------- |
1105 | 10 | **Summary:** snap_config modules allows configuration of snapd. | 11 | **Summary:** snap_config modules allows configuration of snapd. |
1106 | 11 | 12 | ||
1107 | 13 | **Deprecated**: Use :ref:`snap` module instead. This module will not exist | ||
1108 | 14 | in cloud-init 18.3. | ||
1109 | 15 | |||
1110 | 12 | This module uses the same ``snappy`` namespace for configuration but | 16 | This module uses the same ``snappy`` namespace for configuration but |
1111 | 13 | acts only only a subset of the configuration. | 17 | acts only only a subset of the configuration. |
1112 | 14 | 18 | ||
1113 | @@ -154,6 +158,9 @@ def handle(name, cfg, cloud, log, args): | |||
1114 | 154 | LOG.debug('No snappy config provided, skipping') | 158 | LOG.debug('No snappy config provided, skipping') |
1115 | 155 | return | 159 | return |
1116 | 156 | 160 | ||
1117 | 161 | log.warning( | ||
1118 | 162 | 'DEPRECATION: snap_config module will be dropped in 18.3 release.' | ||
1119 | 163 | ' Use snap module instead') | ||
1120 | 157 | if not(util.system_is_snappy()): | 164 | if not(util.system_is_snappy()): |
1121 | 158 | LOG.debug("%s: system not snappy", name) | 165 | LOG.debug("%s: system not snappy", name) |
1122 | 159 | return | 166 | return |
1123 | diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py | |||
1124 | index eecb817..bab80bb 100644 | |||
1125 | --- a/cloudinit/config/cc_snappy.py | |||
1126 | +++ b/cloudinit/config/cc_snappy.py | |||
1127 | @@ -1,10 +1,14 @@ | |||
1128 | 1 | # This file is part of cloud-init. See LICENSE file for license information. | 1 | # This file is part of cloud-init. See LICENSE file for license information. |
1129 | 2 | 2 | ||
1130 | 3 | # RELEASE_BLOCKER: Remove this deprecated module in 18.3 | ||
1131 | 3 | """ | 4 | """ |
1132 | 4 | Snappy | 5 | Snappy |
1133 | 5 | ------ | 6 | ------ |
1134 | 6 | **Summary:** snappy modules allows configuration of snappy. | 7 | **Summary:** snappy modules allows configuration of snappy. |
1135 | 7 | 8 | ||
1136 | 9 | **Deprecated**: Use :ref:`snap` module instead. This module will not exist | ||
1137 | 10 | in cloud-init 18.3. | ||
1138 | 11 | |||
1139 | 8 | The below example config config would install ``etcd``, and then install | 12 | The below example config config would install ``etcd``, and then install |
1140 | 9 | ``pkg2.smoser`` with a ``<config-file>`` argument where ``config-file`` has | 13 | ``pkg2.smoser`` with a ``<config-file>`` argument where ``config-file`` has |
1141 | 10 | ``config-blob`` inside it. If ``pkgname`` is installed already, then | 14 | ``config-blob`` inside it. If ``pkgname`` is installed already, then |
1142 | @@ -271,6 +275,10 @@ def handle(name, cfg, cloud, log, args): | |||
1143 | 271 | LOG.debug("%s: 'auto' mode, and system not snappy", name) | 275 | LOG.debug("%s: 'auto' mode, and system not snappy", name) |
1144 | 272 | return | 276 | return |
1145 | 273 | 277 | ||
1146 | 278 | log.warning( | ||
1147 | 279 | 'DEPRECATION: snappy module will be dropped in 18.3 release.' | ||
1148 | 280 | ' Use snap module instead') | ||
1149 | 281 | |||
1150 | 274 | set_snappy_command() | 282 | set_snappy_command() |
1151 | 275 | 283 | ||
1152 | 276 | pkg_ops = get_package_ops(packages=mycfg['packages'], | 284 | pkg_ops = get_package_ops(packages=mycfg['packages'], |
1153 | diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py | |||
1154 | index 35d8c57..98b0e66 100755 | |||
1155 | --- a/cloudinit/config/cc_ssh_authkey_fingerprints.py | |||
1156 | +++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py | |||
1157 | @@ -77,11 +77,10 @@ def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5', | |||
1158 | 77 | tbl = SimpleTable(tbl_fields) | 77 | tbl = SimpleTable(tbl_fields) |
1159 | 78 | for entry in key_entries: | 78 | for entry in key_entries: |
1160 | 79 | if _is_printable_key(entry): | 79 | if _is_printable_key(entry): |
1166 | 80 | row = [] | 80 | row = [entry.keytype or '-', |
1167 | 81 | row.append(entry.keytype or '-') | 81 | _gen_fingerprint(entry.base64, hash_meth) or '-', |
1168 | 82 | row.append(_gen_fingerprint(entry.base64, hash_meth) or '-') | 82 | entry.options or '-', |
1169 | 83 | row.append(entry.options or '-') | 83 | entry.comment or '-'] |
1165 | 84 | row.append(entry.comment or '-') | ||
1170 | 85 | tbl.add_row(row) | 84 | tbl.add_row(row) |
1171 | 86 | authtbl_s = tbl.get_string() | 85 | authtbl_s = tbl.get_string() |
1172 | 87 | authtbl_lines = authtbl_s.splitlines() | 86 | authtbl_lines = authtbl_s.splitlines() |
1173 | diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py | |||
1174 | 88 | new file mode 100644 | 87 | new file mode 100644 |
1175 | index 0000000..16b1868 | |||
1176 | --- /dev/null | |||
1177 | +++ b/cloudinit/config/cc_ubuntu_advantage.py | |||
1178 | @@ -0,0 +1,173 @@ | |||
1179 | 1 | # Copyright (C) 2018 Canonical Ltd. | ||
1180 | 2 | # | ||
1181 | 3 | # This file is part of cloud-init. See LICENSE file for license information. | ||
1182 | 4 | |||
1183 | 5 | """Ubuntu advantage: manage ubuntu-advantage offerings from Canonical.""" | ||
1184 | 6 | |||
1185 | 7 | import sys | ||
1186 | 8 | from textwrap import dedent | ||
1187 | 9 | |||
1188 | 10 | from cloudinit import log as logging | ||
1189 | 11 | from cloudinit.config.schema import ( | ||
1190 | 12 | get_schema_doc, validate_cloudconfig_schema) | ||
1191 | 13 | from cloudinit.settings import PER_INSTANCE | ||
1192 | 14 | from cloudinit.subp import prepend_base_command | ||
1193 | 15 | from cloudinit import util | ||
1194 | 16 | |||
1195 | 17 | |||
distros = ['ubuntu']
frequency = PER_INSTANCE

LOG = logging.getLogger(__name__)

# Module metadata and jsonschema used both for runtime validation of the
# 'ubuntu-advantage' cloud-config section and for generated documentation.
schema = {
    'id': 'cc_ubuntu_advantage',
    'name': 'Ubuntu Advantage',
    'title': 'Install, configure and manage ubuntu-advantage offerings',
    'description': dedent("""\
        This module provides configuration options to setup ubuntu-advantage
        subscriptions.

        .. note::
            The ``commands`` value can be either a dictionary or a list. If
            the configuration provided is a dictionary, the keys are only used
            to order the execution of the commands and the dictionary is
            merged with any vendor-data ubuntu-advantage configuration
            provided. If ``commands`` is provided as a list, any vendor-data
            ubuntu-advantage ``commands`` are ignored.

        Ubuntu-advantage ``commands`` is a dictionary or list of
        ubuntu-advantage commands to run on the deployed machine.
        These commands can be used to enable or disable subscriptions to
        various ubuntu-advantage products. See 'man ubuntu-advantage' for more
        information on supported subcommands.

        .. note::
            Each command item can be a string or list. If the item is a list,
            'ubuntu-advantage' can be omitted and it will automatically be
            inserted as part of the command.
        """),
    'distros': distros,
    'examples': [dedent("""\
        # Enable Extended Security Maintenance using your service auth token
        ubuntu-advantage:
            commands:
                00: ubuntu-advantage enable-esm <token>
        """), dedent("""\
        # Enable livepatch by providing your livepatch token
        ubuntu-advantage:
            commands:
                00: ubuntu-advantage enable-livepatch <livepatch-token>

        """), dedent("""\
        # Convenience: the ubuntu-advantage command can be omitted when
        # specifying commands as a list and 'ubuntu-advantage' will
        # automatically be prepended.
        # The following commands are equivalent
        ubuntu-advantage:
            commands:
                00: ['enable-livepatch', 'my-token']
                01: ['ubuntu-advantage', 'enable-livepatch', 'my-token']
                02: ubuntu-advantage enable-livepatch my-token
                03: 'ubuntu-advantage enable-livepatch my-token'
        """)],
    'frequency': PER_INSTANCE,
    'type': 'object',
    'properties': {
        'ubuntu-advantage': {
            'type': 'object',
            'properties': {
                'commands': {
                    'type': ['object', 'array'],  # Array of strings or dict
                    'items': {
                        'oneOf': [
                            {'type': 'array', 'items': {'type': 'string'}},
                            {'type': 'string'}]
                    },
                    'additionalItems': False,  # Reject non-string & non-list
                    'minItems': 1,
                    'minProperties': 1,
                    'uniqueItems': True
                }
            },
            'additionalProperties': False,  # Reject keys not in schema
            'required': ['commands']
        }
    }
}

# TODO schema for 'commands' is too permissive at the moment (this module
# has no 'assertions' key; that wording was a copy-paste from cc_snap).
# Once python-jsonschema supports schema draft 6 add support for arbitrary
# object keys with 'patternProperties' constraint to validate string values.

__doc__ = get_schema_doc(schema)  # Supplement python help()

# Executable prepended to every configured command by run_commands.
UA_CMD = "ubuntu-advantage"
1284 | 106 | |||
1285 | 107 | |||
def run_commands(commands):
    """Execute the commands given in ubuntu-advantage:commands config.

    Each command is run on its own; failures are collected and reported
    together once every command has been attempted.

    @param commands: A list or dict containing commands to run. Keys of a
        dict will be used to order the commands provided as dict values.
    @raises TypeError: when commands is neither a list nor a dict.
    @raises RuntimeError: when one or more of the commands failed.
    """
    if not commands:
        return
    LOG.debug('Running user-provided ubuntu-advantage commands')
    if isinstance(commands, dict):
        # Dict keys only define execution order; values are the commands.
        commands = [cmd for _key, cmd in sorted(commands.items())]
    elif not isinstance(commands, list):
        raise TypeError(
            'commands parameter was not a list or dict: {commands}'.format(
                commands=commands))

    failures = []
    for cmd in prepend_base_command('ubuntu-advantage', commands):
        # A string command goes through a shell; a list command does not.
        try:
            util.subp(cmd, shell=isinstance(cmd, str),
                      status_cb=sys.stderr.write)
        except util.ProcessExecutionError as err:
            failures.append(str(err))
    if failures:
        msg = ('Failures running ubuntu-advantage commands:\n'
               '{cmd_failures}').format(cmd_failures=failures)
        util.logexc(LOG, msg)
        raise RuntimeError(msg)
1322 | 144 | |||
1323 | 145 | |||
def maybe_install_ua_tools(cloud):
    """Install ubuntu-advantage-tools if not present.

    @param cloud: Cloud object whose distro performs the package update
        and install operations.
    @raises Exception: re-raises any package update/install error after
        logging it.
    """
    if util.which('ubuntu-advantage'):
        return  # CLI already available; nothing to install.
    try:
        cloud.distro.update_package_sources()
    except Exception:
        # Unused 'as e' binding removed; logexc already logs the traceback.
        util.logexc(LOG, "Package update failed")
        raise
    try:
        cloud.distro.install_packages(['ubuntu-advantage-tools'])
    except Exception:
        util.logexc(LOG, "Failed to install ubuntu-advantage-tools")
        raise
1338 | 160 | |||
1339 | 161 | |||
def handle(name, cfg, cloud, log, args):
    """Apply the 'ubuntu-advantage' cloud-config section, if present.

    Installs ubuntu-advantage-tools when missing, validates the provided
    config against the module schema and runs the configured commands.
    """
    ua_section = cfg.get('ubuntu-advantage')
    if ua_section is None:
        LOG.debug(
            "Skipping module named %s,"
            " no 'ubuntu-advantage' key in configuration", name)
        return

    validate_cloudconfig_schema(cfg, schema)
    maybe_install_ua_tools(cloud)
    run_commands(ua_section.get('commands', []))
1350 | 172 | |||
1351 | 173 | # vi: ts=4 expandtab | ||
1352 | diff --git a/cloudinit/config/tests/test_snap.py b/cloudinit/config/tests/test_snap.py | |||
1353 | 0 | new file mode 100644 | 174 | new file mode 100644 |
1354 | index 0000000..c5b4a9d | |||
1355 | --- /dev/null | |||
1356 | +++ b/cloudinit/config/tests/test_snap.py | |||
1357 | @@ -0,0 +1,490 @@ | |||
1358 | 1 | # This file is part of cloud-init. See LICENSE file for license information. | ||
1359 | 2 | |||
1360 | 3 | import re | ||
1361 | 4 | from six import StringIO | ||
1362 | 5 | |||
1363 | 6 | from cloudinit.config.cc_snap import ( | ||
1364 | 7 | ASSERTIONS_FILE, add_assertions, handle, maybe_install_squashfuse, | ||
1365 | 8 | run_commands, schema) | ||
1366 | 9 | from cloudinit.config.schema import validate_cloudconfig_schema | ||
1367 | 10 | from cloudinit import util | ||
1368 | 11 | from cloudinit.tests.helpers import ( | ||
1369 | 12 | CiTestCase, mock, wrap_and_call, skipUnlessJsonSchema) | ||
1370 | 13 | |||
1371 | 14 | |||
# Test fixture: a sample snapd 'system-user' assertion -- header key/value
# lines, a blank line, then an opaque base64 signature blob.
SYSTEM_USER_ASSERTION = """\
type: system-user
authority-id: LqvZQdfyfGlYvtep4W6Oj6pFXP9t1Ksp
brand-id: LqvZQdfyfGlYvtep4W6Oj6pFXP9t1Ksp
email: foo@bar.com
password: $6$E5YiAuMIPAwX58jG$miomhVNui/vf7f/3ctB/f0RWSKFxG0YXzrJ9rtJ1ikvzt
series:
- 16
since: 2016-09-10T16:34:00+03:00
until: 2017-11-10T16:34:00+03:00
username: baz
sign-key-sha3-384: RuVvnp4n52GilycjfbbTCI3_L8Y6QlIE75wxMc0KzGV3AUQqVd9GuXoj

AcLBXAQAAQoABgUCV/UU1wAKCRBKnlMoJQLkZVeLD/9/+hIeVywtzsDA3oxl+P+u9D13y9s6svP
Jd6Wnf4FTw6sq1GjBE4ZA7lrwSaRCUJ9Vcsvf2q9OGPY7mOb2TBxaDe0PbUMjrSrqllSSQwhpNI
zG+NxkkKuxsUmLzFa+k9m6cyojNbw5LFhQZBQCGlr3JYqC0tIREq/UsZxj+90TUC87lDJwkU8GF
s4CR+rejZj4itIcDcVxCSnJH6hv6j2JrJskJmvObqTnoOlcab+JXdamXqbldSP3UIhWoyVjqzkj
+to7mXgx+cCUA9+ngNCcfUG+1huGGTWXPCYkZ78HvErcRlIdeo4d3xwtz1cl/w3vYnq9og1XwsP
Yfetr3boig2qs1Y+j/LpsfYBYncgWjeDfAB9ZZaqQz/oc8n87tIPZDJHrusTlBfop8CqcM4xsKS
d+wnEY8e/F24mdSOYmS1vQCIDiRU3MKb6x138Ud6oHXFlRBbBJqMMctPqWDunWzb5QJ7YR0I39q
BrnEqv5NE0G7w6HOJ1LSPG5Hae3P4T2ea+ATgkb03RPr3KnXnzXg4TtBbW1nytdlgoNc/BafE1H
f3NThcq9gwX4xWZ2PAWnqVPYdDMyCtzW3Ck+o6sIzx+dh4gDLPHIi/6TPe/pUuMop9CBpWwez7V
v1z+1+URx6Xlq3Jq18y5pZ6fY3IDJ6km2nQPMzcm4Q=="""

# Test fixture: a sample snapd 'account-key' assertion with a body block
# and a signature block, separated by blank lines.
ACCOUNT_ASSERTION = """\
type: account-key
authority-id: canonical
revision: 2
public-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0
account-id: canonical
name: store
since: 2016-04-01T00:00:00.0Z
body-length: 717
sign-key-sha3-384: -CvQKAwRQ5h3Ffn10FILJoEZUXOv6km9FwA80-Rcj-f-6jadQ89VRswH

AcbBTQRWhcGAARAA0KKYYQWuHOrsFVi4p4l7ZzSvX7kLgJFFeFgOkzdWKBTHEnsMKjl5mefFe9j
qe8NlmJdfY7BenP7XeBtwKp700H/t9lLrZbpTNAPHXYxEWFJp5bPqIcJYBZ+29oLVLN1Tc5X482
vCiDqL8+pPYqBrK2fNlyPlNNSum9wI70rDDL4r6FVvr+osTnGejibdV8JphWX+lrSQDnRSdM8KJ
UM43vTgLGTi9W54oRhsA2OFexRfRksTrnqGoonCjqX5wO3OFSaMDzMsO2MJ/hPfLgDqw53qjzuK
Iec9OL3k5basvu2cj5u9tKwVFDsCKK2GbKUsWWpx2KTpOifmhmiAbzkTHbH9KaoMS7p0kJwhTQG
o9aJ9VMTWHJc/NCBx7eu451u6d46sBPCXS/OMUh2766fQmoRtO1OwCTxsRKG2kkjbMn54UdFULl
VfzvyghMNRKIezsEkmM8wueTqGUGZWa6CEZqZKwhe/PROxOPYzqtDH18XZknbU1n5lNb7vNfem9
2ai+3+JyFnW9UhfvpVF7gzAgdyCqNli4C6BIN43uwoS8HkykocZS/+Gv52aUQ/NZ8BKOHLw+7an
Q0o8W9ltSLZbEMxFIPSN0stiZlkXAp6DLyvh1Y4wXSynDjUondTpej2fSvSlCz/W5v5V7qA4nIc
vUvV7RjVzv17ut0AEQEAAQ==

AcLDXAQAAQoABgUCV83k9QAKCRDUpVvql9g3IBT8IACKZ7XpiBZ3W4lqbPssY6On81WmxQLtvsM
WTp6zZpl/wWOSt2vMNUk9pvcmrNq1jG9CuhDfWFLGXEjcrrmVkN3YuCOajMSPFCGrxsIBLSRt/b
nrKykdLAAzMfG8rP1d82bjFFiIieE+urQ0Kcv09Jtdvavq3JT1Tek5mFyyfhHNlQEKOzWqmRWiL
3c3VOZUs1ZD8TSlnuq/x+5T0X0YtOyGjSlVxk7UybbyMNd6MZfNaMpIG4x+mxD3KHFtBAC7O6kL
eX3i6j5nCY5UABfA3DZEAkWP4zlmdBEOvZ9t293NaDdOpzsUHRkoi0Zez/9BHQ/kwx/uNc2WqrY
inCmu16JGNeXqsyinnLl7Ghn2RwhvDMlLxF6RTx8xdx1yk6p3PBTwhZMUvuZGjUtN/AG8BmVJQ1
rsGSRkkSywvnhVJRB2sudnrMBmNS2goJbzSbmJnOlBrd2WsV0T9SgNMWZBiov3LvU4o2SmAb6b+
rYwh8H5QHcuuYJuxDjFhPswIp6Wes5T6hUicf3SWtObcDS4HSkVS4ImBjjX9YgCuFy7QdnooOWE
aPvkRw3XCVeYq0K6w9GRsk1YFErD4XmXXZjDYY650MX9v42Sz5MmphHV8jdIY5ssbadwFSe2rCQ
6UX08zy7RsIb19hTndE6ncvSNDChUR9eEnCm73eYaWTWTnq1cxdVP/s52r8uss++OYOkPWqh5nO
haRn7INjH/yZX4qXjNXlTjo0PnHH0q08vNKDwLhxS+D9du+70FeacXFyLIbcWllSbJ7DmbumGpF
yYbtj3FDDPzachFQdIG3lSt+cSUGeyfSs6wVtc3cIPka/2Urx7RprfmoWSI6+a5NcLdj0u2z8O9
HxeIgxDpg/3gT8ZIuFKePMcLDM19Fh/p0ysCsX+84B9chNWtsMSmIaE57V+959MVtsLu7SLb9gi
skrju0pQCwsu2wHMLTNd1f3PTHmrr49hxetTus07HSQUApMtAGKzQilF5zqFjbyaTd4xgQbd+PK
CjFyzQTDOcUhXpuUGt/IzlqiFfsCsmbj2K4KdSNYMlqIgZ3Azu8KvZLIhsyN7v5vNIZSPfEbjde
ClU9r0VRiJmtYBUjcSghD9LWn+yRLwOxhfQVjm0cBwIt5R/yPF/qC76yIVuWUtM5Y2/zJR1J8OF
qWchvlImHtvDzS9FQeLyzJAOjvZ2CnWp2gILgUz0WQdOk1Dq8ax7KS9BQ42zxw9EZAEPw3PEFqR
IQsRTONp+iVS8YxSmoYZjDlCgRMWUmawez/Fv5b9Fb/XkO5Eq4e+KfrpUujXItaipb+tV8h5v3t
oG3Ie3WOHrVjCLXIdYslpL1O4nadqR6Xv58pHj6k"""
1437 | 80 | |||
1438 | 81 | |||
class FakeCloud(object):
    """Minimal Cloud stand-in exposing only the .distro attribute."""
    def __init__(self, distro):
        # distro: the (typically mocked) Distro the handlers act on.
        self.distro = distro
1442 | 85 | |||
1443 | 86 | |||
class TestAddAssertions(CiTestCase):
    """Tests for cc_snap.add_assertions over list and dict inputs."""

    with_logs = True  # CiTestCase captures log output into self.logs

    def setUp(self):
        super(TestAddAssertions, self).setUp()
        self.tmp = self.tmp_dir()  # scratch dir for redirected file paths

    @mock.patch('cloudinit.config.cc_snap.util.subp')
    def test_add_assertions_on_empty_list(self, m_subp):
        """When provided with an empty list, add_assertions does nothing."""
        add_assertions([])
        self.assertEqual('', self.logs.getvalue())
        m_subp.assert_not_called()

    def test_add_assertions_on_non_list_or_dict(self):
        """When provided an invalid type, add_assertions raises an error."""
        with self.assertRaises(TypeError) as context_manager:
            add_assertions(assertions="I'm Not Valid")
        self.assertEqual(
            "assertion parameter was not a list or dict: I'm Not Valid",
            str(context_manager.exception))

    @mock.patch('cloudinit.config.cc_snap.util.subp')
    def test_add_assertions_adds_assertions_as_list(self, m_subp):
        """When provided with a list, add_assertions adds all assertions."""
        self.assertEqual(
            ASSERTIONS_FILE, '/var/lib/cloud/instance/snapd.assertions')
        # Redirect the module-level ASSERTIONS_FILE into the test tmp dir.
        assert_file = self.tmp_path('snapd.assertions', dir=self.tmp)
        assertions = [SYSTEM_USER_ASSERTION, ACCOUNT_ASSERTION]
        wrap_and_call(
            'cloudinit.config.cc_snap',
            {'ASSERTIONS_FILE': {'new': assert_file}},
            add_assertions, assertions)
        self.assertIn(
            'Importing user-provided snap assertions', self.logs.getvalue())
        # NOTE(review): 'sertions' looks like a truncated expected substring
        # (perhaps 'assertions') -- confirm the intended assertion text.
        self.assertIn(
            'sertions', self.logs.getvalue())
        self.assertEqual(
            [mock.call(['snap', 'ack', assert_file], capture=True)],
            m_subp.call_args_list)
        compare_file = self.tmp_path('comparison', dir=self.tmp)
        util.write_file(compare_file, '\n'.join(assertions).encode('utf-8'))
        self.assertEqual(
            util.load_file(compare_file), util.load_file(assert_file))

    @mock.patch('cloudinit.config.cc_snap.util.subp')
    def test_add_assertions_adds_assertions_as_dict(self, m_subp):
        """When provided with a dict, add_assertions adds all assertions."""
        self.assertEqual(
            ASSERTIONS_FILE, '/var/lib/cloud/instance/snapd.assertions')
        assert_file = self.tmp_path('snapd.assertions', dir=self.tmp)
        assertions = {'00': SYSTEM_USER_ASSERTION, '01': ACCOUNT_ASSERTION}
        wrap_and_call(
            'cloudinit.config.cc_snap',
            {'ASSERTIONS_FILE': {'new': assert_file}},
            add_assertions, assertions)
        self.assertIn(
            'Importing user-provided snap assertions', self.logs.getvalue())
        # add_assertions logs the first two header lines of each assertion.
        self.assertIn(
            "DEBUG: Snap acking: ['type: system-user', 'authority-id: Lqv",
            self.logs.getvalue())
        self.assertIn(
            "DEBUG: Snap acking: ['type: account-key', 'authority-id: canonic",
            self.logs.getvalue())
        self.assertEqual(
            [mock.call(['snap', 'ack', assert_file], capture=True)],
            m_subp.call_args_list)
        compare_file = self.tmp_path('comparison', dir=self.tmp)
        combined = '\n'.join(assertions.values())
        util.write_file(compare_file, combined.encode('utf-8'))
        self.assertEqual(
            util.load_file(compare_file), util.load_file(assert_file))
1517 | 160 | |||
1518 | 161 | |||
class TestRunCommands(CiTestCase):
    """Tests for cc_snap.run_commands over list and dict inputs."""

    with_logs = True  # CiTestCase captures log output into self.logs

    def setUp(self):
        super(TestRunCommands, self).setUp()
        self.tmp = self.tmp_dir()  # scratch dir for command output files

    @mock.patch('cloudinit.config.cc_snap.util.subp')
    def test_run_commands_on_empty_list(self, m_subp):
        """When provided with an empty list, run_commands does nothing."""
        run_commands([])
        self.assertEqual('', self.logs.getvalue())
        m_subp.assert_not_called()

    def test_run_commands_on_non_list_or_dict(self):
        """When provided an invalid type, run_commands raises an error."""
        with self.assertRaises(TypeError) as context_manager:
            run_commands(commands="I'm Not Valid")
        self.assertEqual(
            "commands parameter was not a list or dict: I'm Not Valid",
            str(context_manager.exception))

    def test_run_command_logs_commands_and_exit_codes_to_stderr(self):
        """All exit codes are logged to stderr."""
        outfile = self.tmp_path('output.log', dir=self.tmp)

        cmd1 = 'echo "HI" >> %s' % outfile
        cmd2 = 'bogus command'  # expected to fail with exit 127 via shell
        cmd3 = 'echo "MOM" >> %s' % outfile
        commands = [cmd1, cmd2, cmd3]

        mock_path = 'cloudinit.config.cc_snap.sys.stderr'
        with mock.patch(mock_path, new_callable=StringIO) as m_stderr:
            with self.assertRaises(RuntimeError) as context_manager:
                run_commands(commands=commands)

        self.assertIsNotNone(
            re.search(r'bogus: (command )?not found',
                      str(context_manager.exception)),
            msg='Expected bogus command not found')
        expected_stderr_log = '\n'.join([
            'Begin run command: {cmd}'.format(cmd=cmd1),
            'End run command: exit(0)',
            'Begin run command: {cmd}'.format(cmd=cmd2),
            'ERROR: End run command: exit(127)',
            'Begin run command: {cmd}'.format(cmd=cmd3),
            'End run command: exit(0)\n'])
        self.assertEqual(expected_stderr_log, m_stderr.getvalue())

    def test_run_command_as_lists(self):
        """When commands are specified as a list, run them in order."""
        outfile = self.tmp_path('output.log', dir=self.tmp)

        cmd1 = 'echo "HI" >> %s' % outfile
        cmd2 = 'echo "MOM" >> %s' % outfile
        commands = [cmd1, cmd2]
        mock_path = 'cloudinit.config.cc_snap.sys.stderr'
        with mock.patch(mock_path, new_callable=StringIO):
            run_commands(commands=commands)

        self.assertIn(
            'DEBUG: Running user-provided snap commands',
            self.logs.getvalue())
        # Output file content proves ordered execution of both commands.
        self.assertEqual('HI\nMOM\n', util.load_file(outfile))
        self.assertIn(
            'WARNING: Non-snap commands in snap config:', self.logs.getvalue())

    def test_run_command_dict_sorted_as_command_script(self):
        """When commands are a dict, sort them and run."""
        outfile = self.tmp_path('output.log', dir=self.tmp)
        cmd1 = 'echo "HI" >> %s' % outfile
        cmd2 = 'echo "MOM" >> %s' % outfile
        # Keys are deliberately out of order; '01' must run before '02'.
        commands = {'02': cmd1, '01': cmd2}
        mock_path = 'cloudinit.config.cc_snap.sys.stderr'
        with mock.patch(mock_path, new_callable=StringIO):
            run_commands(commands=commands)

        expected_messages = [
            'DEBUG: Running user-provided snap commands']
        for message in expected_messages:
            self.assertIn(message, self.logs.getvalue())
        self.assertEqual('MOM\nHI\n', util.load_file(outfile))
1602 | 245 | |||
1603 | 246 | |||
1604 | 247 | @skipUnlessJsonSchema() | ||
1605 | 248 | class TestSchema(CiTestCase): | ||
1606 | 249 | |||
1607 | 250 | with_logs = True | ||
1608 | 251 | |||
1609 | 252 | def test_schema_warns_on_snap_not_as_dict(self): | ||
1610 | 253 | """If the snap configuration is not a dict, emit a warning.""" | ||
1611 | 254 | validate_cloudconfig_schema({'snap': 'wrong type'}, schema) | ||
1612 | 255 | self.assertEqual( | ||
1613 | 256 | "WARNING: Invalid config:\nsnap: 'wrong type' is not of type" | ||
1614 | 257 | " 'object'\n", | ||
1615 | 258 | self.logs.getvalue()) | ||
1616 | 259 | |||
1617 | 260 | @mock.patch('cloudinit.config.cc_snap.run_commands') | ||
1618 | 261 | def test_schema_disallows_unknown_keys(self, _): | ||
1619 | 262 | """Unknown keys in the snap configuration emit warnings.""" | ||
1620 | 263 | validate_cloudconfig_schema( | ||
1621 | 264 | {'snap': {'commands': ['ls'], 'invalid-key': ''}}, schema) | ||
1622 | 265 | self.assertIn( | ||
1623 | 266 | 'WARNING: Invalid config:\nsnap: Additional properties are not' | ||
1624 | 267 | " allowed ('invalid-key' was unexpected)", | ||
1625 | 268 | self.logs.getvalue()) | ||
1626 | 269 | |||
1627 | 270 | def test_warn_schema_requires_either_commands_or_assertions(self): | ||
1628 | 271 | """Warn when snap configuration lacks both commands and assertions.""" | ||
1629 | 272 | validate_cloudconfig_schema( | ||
1630 | 273 | {'snap': {}}, schema) | ||
1631 | 274 | self.assertIn( | ||
1632 | 275 | 'WARNING: Invalid config:\nsnap: {} does not have enough' | ||
1633 | 276 | ' properties', | ||
1634 | 277 | self.logs.getvalue()) | ||
1635 | 278 | |||
1636 | 279 | @mock.patch('cloudinit.config.cc_snap.run_commands') | ||
1637 | 280 | def test_warn_schema_commands_is_not_list_or_dict(self, _): | ||
1638 | 281 | """Warn when snap:commands config is not a list or dict.""" | ||
1639 | 282 | validate_cloudconfig_schema( | ||
1640 | 283 | {'snap': {'commands': 'broken'}}, schema) | ||
1641 | 284 | self.assertEqual( | ||
1642 | 285 | "WARNING: Invalid config:\nsnap.commands: 'broken' is not of type" | ||
1643 | 286 | " 'object', 'array'\n", | ||
1644 | 287 | self.logs.getvalue()) | ||
1645 | 288 | |||
1646 | 289 | @mock.patch('cloudinit.config.cc_snap.run_commands') | ||
1647 | 290 | def test_warn_schema_when_commands_is_empty(self, _): | ||
1648 | 291 | """Emit warnings when snap:commands is an empty list or dict.""" | ||
1649 | 292 | validate_cloudconfig_schema( | ||
1650 | 293 | {'snap': {'commands': []}}, schema) | ||
1651 | 294 | validate_cloudconfig_schema( | ||
1652 | 295 | {'snap': {'commands': {}}}, schema) | ||
1653 | 296 | self.assertEqual( | ||
1654 | 297 | "WARNING: Invalid config:\nsnap.commands: [] is too short\n" | ||
1655 | 298 | "WARNING: Invalid config:\nsnap.commands: {} does not have enough" | ||
1656 | 299 | " properties\n", | ||
1657 | 300 | self.logs.getvalue()) | ||
1658 | 301 | |||
1659 | 302 | @mock.patch('cloudinit.config.cc_snap.run_commands') | ||
1660 | 303 | def test_schema_when_commands_are_list_or_dict(self, _): | ||
1661 | 304 | """No warnings when snap:commands are either a list or dict.""" | ||
1662 | 305 | validate_cloudconfig_schema( | ||
1663 | 306 | {'snap': {'commands': ['valid']}}, schema) | ||
1664 | 307 | validate_cloudconfig_schema( | ||
1665 | 308 | {'snap': {'commands': {'01': 'also valid'}}}, schema) | ||
1666 | 309 | self.assertEqual('', self.logs.getvalue()) | ||
1667 | 310 | |||
1668 | 311 | @mock.patch('cloudinit.config.cc_snap.add_assertions') | ||
1669 | 312 | def test_warn_schema_assertions_is_not_list_or_dict(self, _): | ||
1670 | 313 | """Warn when snap:assertions config is not a list or dict.""" | ||
1671 | 314 | validate_cloudconfig_schema( | ||
1672 | 315 | {'snap': {'assertions': 'broken'}}, schema) | ||
1673 | 316 | self.assertEqual( | ||
1674 | 317 | "WARNING: Invalid config:\nsnap.assertions: 'broken' is not of" | ||
1675 | 318 | " type 'object', 'array'\n", | ||
1676 | 319 | self.logs.getvalue()) | ||
1677 | 320 | |||
1678 | 321 | @mock.patch('cloudinit.config.cc_snap.add_assertions') | ||
1679 | 322 | def test_warn_schema_when_assertions_is_empty(self, _): | ||
1680 | 323 | """Emit warnings when snap:assertions is an empty list or dict.""" | ||
1681 | 324 | validate_cloudconfig_schema( | ||
1682 | 325 | {'snap': {'assertions': []}}, schema) | ||
1683 | 326 | validate_cloudconfig_schema( | ||
1684 | 327 | {'snap': {'assertions': {}}}, schema) | ||
1685 | 328 | self.assertEqual( | ||
1686 | 329 | "WARNING: Invalid config:\nsnap.assertions: [] is too short\n" | ||
1687 | 330 | "WARNING: Invalid config:\nsnap.assertions: {} does not have" | ||
1688 | 331 | " enough properties\n", | ||
1689 | 332 | self.logs.getvalue()) | ||
1690 | 333 | |||
1691 | 334 | @mock.patch('cloudinit.config.cc_snap.add_assertions') | ||
1692 | 335 | def test_schema_when_assertions_are_list_or_dict(self, _): | ||
1693 | 336 | """No warnings when snap:assertions are a list or dict.""" | ||
1694 | 337 | validate_cloudconfig_schema( | ||
1695 | 338 | {'snap': {'assertions': ['valid']}}, schema) | ||
1696 | 339 | validate_cloudconfig_schema( | ||
1697 | 340 | {'snap': {'assertions': {'01': 'also valid'}}}, schema) | ||
1698 | 341 | self.assertEqual('', self.logs.getvalue()) | ||
1699 | 342 | |||
1700 | 343 | |||
1701 | 344 | class TestHandle(CiTestCase): | ||
1702 | 345 | |||
1703 | 346 | with_logs = True | ||
1704 | 347 | |||
1705 | 348 | def setUp(self): | ||
1706 | 349 | super(TestHandle, self).setUp() | ||
1707 | 350 | self.tmp = self.tmp_dir() | ||
1708 | 351 | |||
1709 | 352 | @mock.patch('cloudinit.config.cc_snap.run_commands') | ||
1710 | 353 | @mock.patch('cloudinit.config.cc_snap.add_assertions') | ||
1711 | 354 | @mock.patch('cloudinit.config.cc_snap.validate_cloudconfig_schema') | ||
1712 | 355 | def test_handle_no_config(self, m_schema, m_add, m_run): | ||
1713 | 356 | """When no snap-related configuration is provided, nothing happens.""" | ||
1714 | 357 | cfg = {} | ||
1715 | 358 | handle('snap', cfg=cfg, cloud=None, log=self.logger, args=None) | ||
1716 | 359 | self.assertIn( | ||
1717 | 360 | "DEBUG: Skipping module named snap, no 'snap' key in config", | ||
1718 | 361 | self.logs.getvalue()) | ||
1719 | 362 | m_schema.assert_not_called() | ||
1720 | 363 | m_add.assert_not_called() | ||
1721 | 364 | m_run.assert_not_called() | ||
1722 | 365 | |||
1723 | 366 | @mock.patch('cloudinit.config.cc_snap.run_commands') | ||
1724 | 367 | @mock.patch('cloudinit.config.cc_snap.add_assertions') | ||
1725 | 368 | @mock.patch('cloudinit.config.cc_snap.maybe_install_squashfuse') | ||
1726 | 369 | def test_handle_skips_squashfuse_when_unconfigured(self, m_squash, m_add, | ||
1727 | 370 | m_run): | ||
1728 | 371 | """When squashfuse_in_container is unset, don't attempt to install.""" | ||
1729 | 372 | handle( | ||
1730 | 373 | 'snap', cfg={'snap': {}}, cloud=None, log=self.logger, args=None) | ||
1731 | 374 | handle( | ||
1732 | 375 | 'snap', cfg={'snap': {'squashfuse_in_container': None}}, | ||
1733 | 376 | cloud=None, log=self.logger, args=None) | ||
1734 | 377 | handle( | ||
1735 | 378 | 'snap', cfg={'snap': {'squashfuse_in_container': False}}, | ||
1736 | 379 | cloud=None, log=self.logger, args=None) | ||
1737 | 380 | self.assertEqual([], m_squash.call_args_list) # No calls | ||
1738 | 381 | # snap configuration missing assertions and commands will default to [] | ||
1739 | 382 | self.assertIn(mock.call([]), m_add.call_args_list) | ||
1740 | 383 | self.assertIn(mock.call([]), m_run.call_args_list) | ||
1741 | 384 | |||
1742 | 385 | @mock.patch('cloudinit.config.cc_snap.maybe_install_squashfuse') | ||
1743 | 386 | def test_handle_tries_to_install_squashfuse(self, m_squash): | ||
1744 | 387 | """If squashfuse_in_container is True, try installing squashfuse.""" | ||
1745 | 388 | cfg = {'snap': {'squashfuse_in_container': True}} | ||
1746 | 389 | mycloud = FakeCloud(None) | ||
1747 | 390 | handle('snap', cfg=cfg, cloud=mycloud, log=self.logger, args=None) | ||
1748 | 391 | self.assertEqual( | ||
1749 | 392 | [mock.call(mycloud)], m_squash.call_args_list) | ||
1750 | 393 | |||
1751 | 394 | def test_handle_runs_commands_provided(self): | ||
1752 | 395 | """If commands are specified as a list, run them.""" | ||
1753 | 396 | outfile = self.tmp_path('output.log', dir=self.tmp) | ||
1754 | 397 | |||
1755 | 398 | cfg = { | ||
1756 | 399 | 'snap': {'commands': ['echo "HI" >> %s' % outfile, | ||
1757 | 400 | 'echo "MOM" >> %s' % outfile]}} | ||
1758 | 401 | mock_path = 'cloudinit.config.cc_snap.sys.stderr' | ||
1759 | 402 | with mock.patch(mock_path, new_callable=StringIO): | ||
1760 | 403 | handle('snap', cfg=cfg, cloud=None, log=self.logger, args=None) | ||
1761 | 404 | self.assertEqual('HI\nMOM\n', util.load_file(outfile)) | ||
1762 | 405 | |||
1763 | 406 | @mock.patch('cloudinit.config.cc_snap.util.subp') | ||
1764 | 407 | def test_handle_adds_assertions(self, m_subp): | ||
1765 | 408 | """Any configured snap assertions are provided to add_assertions.""" | ||
1766 | 409 | assert_file = self.tmp_path('snapd.assertions', dir=self.tmp) | ||
1767 | 410 | compare_file = self.tmp_path('comparison', dir=self.tmp) | ||
1768 | 411 | cfg = { | ||
1769 | 412 | 'snap': {'assertions': [SYSTEM_USER_ASSERTION, ACCOUNT_ASSERTION]}} | ||
1770 | 413 | wrap_and_call( | ||
1771 | 414 | 'cloudinit.config.cc_snap', | ||
1772 | 415 | {'ASSERTIONS_FILE': {'new': assert_file}}, | ||
1773 | 416 | handle, 'snap', cfg=cfg, cloud=None, log=self.logger, args=None) | ||
1774 | 417 | content = '\n'.join(cfg['snap']['assertions']) | ||
1775 | 418 | util.write_file(compare_file, content.encode('utf-8')) | ||
1776 | 419 | self.assertEqual( | ||
1777 | 420 | util.load_file(compare_file), util.load_file(assert_file)) | ||
1778 | 421 | |||
1779 | 422 | @mock.patch('cloudinit.config.cc_snap.util.subp') | ||
1780 | 423 | @skipUnlessJsonSchema() | ||
1781 | 424 | def test_handle_validates_schema(self, m_subp): | ||
1782 | 425 | """Any provided configuration is run through validate_cloudconfig_schema.""" | ||
1783 | 426 | assert_file = self.tmp_path('snapd.assertions', dir=self.tmp) | ||
1784 | 427 | cfg = {'snap': {'invalid': ''}} # Generates schema warning | ||
1785 | 428 | wrap_and_call( | ||
1786 | 429 | 'cloudinit.config.cc_snap', | ||
1787 | 430 | {'ASSERTIONS_FILE': {'new': assert_file}}, | ||
1788 | 431 | handle, 'snap', cfg=cfg, cloud=None, log=self.logger, args=None) | ||
1789 | 432 | self.assertEqual( | ||
1790 | 433 | "WARNING: Invalid config:\nsnap: Additional properties are not" | ||
1791 | 434 | " allowed ('invalid' was unexpected)\n", | ||
1792 | 435 | self.logs.getvalue()) | ||
1793 | 436 | |||
1794 | 437 | |||
1795 | 438 | class TestMaybeInstallSquashFuse(CiTestCase): | ||
1796 | 439 | |||
1797 | 440 | with_logs = True | ||
1798 | 441 | |||
1799 | 442 | def setUp(self): | ||
1800 | 443 | super(TestMaybeInstallSquashFuse, self).setUp() | ||
1801 | 444 | self.tmp = self.tmp_dir() | ||
1802 | 445 | |||
1803 | 446 | @mock.patch('cloudinit.config.cc_snap.util.is_container') | ||
1804 | 447 | def test_maybe_install_squashfuse_skips_non_containers(self, m_container): | ||
1805 | 448 | """maybe_install_squashfuse does nothing when not on a container.""" | ||
1806 | 449 | m_container.return_value = False | ||
1807 | 450 | maybe_install_squashfuse(cloud=FakeCloud(None)) | ||
1808 | 451 | self.assertEqual([mock.call()], m_container.call_args_list) | ||
1809 | 452 | self.assertEqual('', self.logs.getvalue()) | ||
1810 | 453 | |||
1811 | 454 | @mock.patch('cloudinit.config.cc_snap.util.is_container') | ||
1812 | 455 | def test_maybe_install_squashfuse_raises_install_errors(self, m_container): | ||
1813 | 456 | """maybe_install_squashfuse logs and raises package install errors.""" | ||
1814 | 457 | m_container.return_value = True | ||
1815 | 458 | distro = mock.MagicMock() | ||
1816 | 459 | distro.install_packages.side_effect = RuntimeError( | ||
1817 | 460 | 'Some install error') | ||
1818 | 461 | with self.assertRaises(RuntimeError) as context_manager: | ||
1819 | 462 | maybe_install_squashfuse(cloud=FakeCloud(distro)) | ||
1820 | 463 | self.assertEqual('Some install error', str(context_manager.exception)) | ||
1821 | 464 | self.assertIn('Failed to install squashfuse\nTraceback', self.logs.getvalue()) | ||
1822 | 465 | |||
1823 | 466 | @mock.patch('cloudinit.config.cc_snap.util.is_container') | ||
1824 | 467 | def test_maybe_install_squashfuse_raises_update_errors(self, m_container): | ||
1825 | 468 | """maybe_install_squashfuse logs and raises package update errors.""" | ||
1826 | 469 | m_container.return_value = True | ||
1827 | 470 | distro = mock.MagicMock() | ||
1828 | 471 | distro.update_package_sources.side_effect = RuntimeError( | ||
1829 | 472 | 'Some apt error') | ||
1830 | 473 | with self.assertRaises(RuntimeError) as context_manager: | ||
1831 | 474 | maybe_install_squashfuse(cloud=FakeCloud(distro)) | ||
1832 | 475 | self.assertEqual('Some apt error', str(context_manager.exception)) | ||
1833 | 476 | self.assertIn('Package update failed\nTraceback', self.logs.getvalue()) | ||
1834 | 477 | |||
1835 | 478 | @mock.patch('cloudinit.config.cc_snap.util.is_container') | ||
1836 | 479 | def test_maybe_install_squashfuse_happy_path(self, m_container): | ||
1837 | 480 | """maybe_install_squashfuse installs squashfuse in a container.""" | ||
1838 | 481 | m_container.return_value = True | ||
1839 | 482 | distro = mock.MagicMock() # No errors raised | ||
1840 | 483 | maybe_install_squashfuse(cloud=FakeCloud(distro)) | ||
1841 | 484 | self.assertEqual( | ||
1842 | 485 | [mock.call()], distro.update_package_sources.call_args_list) | ||
1843 | 486 | self.assertEqual( | ||
1844 | 487 | [mock.call(['squashfuse'])], | ||
1845 | 488 | distro.install_packages.call_args_list) | ||
1846 | 489 | |||
1847 | 490 | # vi: ts=4 expandtab | ||
1848 | diff --git a/cloudinit/config/tests/test_ubuntu_advantage.py b/cloudinit/config/tests/test_ubuntu_advantage.py | |||
1849 | 0 | new file mode 100644 | 491 | new file mode 100644 |
1850 | index 0000000..f2a59fa | |||
1851 | --- /dev/null | |||
1852 | +++ b/cloudinit/config/tests/test_ubuntu_advantage.py | |||
1853 | @@ -0,0 +1,269 @@ | |||
1854 | 1 | # This file is part of cloud-init. See LICENSE file for license information. | ||
1855 | 2 | |||
1856 | 3 | import re | ||
1857 | 4 | from six import StringIO | ||
1858 | 5 | |||
1859 | 6 | from cloudinit.config.cc_ubuntu_advantage import ( | ||
1860 | 7 | handle, maybe_install_ua_tools, run_commands, schema) | ||
1861 | 8 | from cloudinit.config.schema import validate_cloudconfig_schema | ||
1862 | 9 | from cloudinit import util | ||
1863 | 10 | from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJsonSchema | ||
1864 | 11 | |||
1865 | 12 | |||
1866 | 13 | # Module path used in mocks | ||
1867 | 14 | MPATH = 'cloudinit.config.cc_ubuntu_advantage' | ||
1868 | 15 | |||
1869 | 16 | |||
1870 | 17 | class FakeCloud(object): | ||
1871 | 18 | def __init__(self, distro): | ||
1872 | 19 | self.distro = distro | ||
1873 | 20 | |||
1874 | 21 | |||
1875 | 22 | class TestRunCommands(CiTestCase): | ||
1876 | 23 | |||
1877 | 24 | with_logs = True | ||
1878 | 25 | |||
1879 | 26 | def setUp(self): | ||
1880 | 27 | super(TestRunCommands, self).setUp() | ||
1881 | 28 | self.tmp = self.tmp_dir() | ||
1882 | 29 | |||
1883 | 30 | @mock.patch('%s.util.subp' % MPATH) | ||
1884 | 31 | def test_run_commands_on_empty_list(self, m_subp): | ||
1885 | 32 | """When provided with an empty list, run_commands does nothing.""" | ||
1886 | 33 | run_commands([]) | ||
1887 | 34 | self.assertEqual('', self.logs.getvalue()) | ||
1888 | 35 | m_subp.assert_not_called() | ||
1889 | 36 | |||
1890 | 37 | def test_run_commands_on_non_list_or_dict(self): | ||
1891 | 38 | """When provided an invalid type, run_commands raises an error.""" | ||
1892 | 39 | with self.assertRaises(TypeError) as context_manager: | ||
1893 | 40 | run_commands(commands="I'm Not Valid") | ||
1894 | 41 | self.assertEqual( | ||
1895 | 42 | "commands parameter was not a list or dict: I'm Not Valid", | ||
1896 | 43 | str(context_manager.exception)) | ||
1897 | 44 | |||
1898 | 45 | def test_run_command_logs_commands_and_exit_codes_to_stderr(self): | ||
1899 | 46 | """All exit codes are logged to stderr.""" | ||
1900 | 47 | outfile = self.tmp_path('output.log', dir=self.tmp) | ||
1901 | 48 | |||
1902 | 49 | cmd1 = 'echo "HI" >> %s' % outfile | ||
1903 | 50 | cmd2 = 'bogus command' | ||
1904 | 51 | cmd3 = 'echo "MOM" >> %s' % outfile | ||
1905 | 52 | commands = [cmd1, cmd2, cmd3] | ||
1906 | 53 | |||
1907 | 54 | mock_path = '%s.sys.stderr' % MPATH | ||
1908 | 55 | with mock.patch(mock_path, new_callable=StringIO) as m_stderr: | ||
1909 | 56 | with self.assertRaises(RuntimeError) as context_manager: | ||
1910 | 57 | run_commands(commands=commands) | ||
1911 | 58 | |||
1912 | 59 | self.assertIsNotNone( | ||
1913 | 60 | re.search(r'bogus: (command )?not found', | ||
1914 | 61 | str(context_manager.exception)), | ||
1915 | 62 | msg='Expected bogus command not found') | ||
1916 | 63 | expected_stderr_log = '\n'.join([ | ||
1917 | 64 | 'Begin run command: {cmd}'.format(cmd=cmd1), | ||
1918 | 65 | 'End run command: exit(0)', | ||
1919 | 66 | 'Begin run command: {cmd}'.format(cmd=cmd2), | ||
1920 | 67 | 'ERROR: End run command: exit(127)', | ||
1921 | 68 | 'Begin run command: {cmd}'.format(cmd=cmd3), | ||
1922 | 69 | 'End run command: exit(0)\n']) | ||
1923 | 70 | self.assertEqual(expected_stderr_log, m_stderr.getvalue()) | ||
1924 | 71 | |||
1925 | 72 | def test_run_command_as_lists(self): | ||
1926 | 73 | """When commands are specified as a list, run them in order.""" | ||
1927 | 74 | outfile = self.tmp_path('output.log', dir=self.tmp) | ||
1928 | 75 | |||
1929 | 76 | cmd1 = 'echo "HI" >> %s' % outfile | ||
1930 | 77 | cmd2 = 'echo "MOM" >> %s' % outfile | ||
1931 | 78 | commands = [cmd1, cmd2] | ||
1932 | 79 | with mock.patch('%s.sys.stderr' % MPATH, new_callable=StringIO): | ||
1933 | 80 | run_commands(commands=commands) | ||
1934 | 81 | |||
1935 | 82 | self.assertIn( | ||
1936 | 83 | 'DEBUG: Running user-provided ubuntu-advantage commands', | ||
1937 | 84 | self.logs.getvalue()) | ||
1938 | 85 | self.assertEqual('HI\nMOM\n', util.load_file(outfile)) | ||
1939 | 86 | self.assertIn( | ||
1940 | 87 | 'WARNING: Non-ubuntu-advantage commands in ubuntu-advantage' | ||
1941 | 88 | ' config:', | ||
1942 | 89 | self.logs.getvalue()) | ||
1943 | 90 | |||
1944 | 91 | def test_run_command_dict_sorted_as_command_script(self): | ||
1945 | 92 | """When commands are a dict, sort them and run.""" | ||
1946 | 93 | outfile = self.tmp_path('output.log', dir=self.tmp) | ||
1947 | 94 | cmd1 = 'echo "HI" >> %s' % outfile | ||
1948 | 95 | cmd2 = 'echo "MOM" >> %s' % outfile | ||
1949 | 96 | commands = {'02': cmd1, '01': cmd2} | ||
1950 | 97 | with mock.patch('%s.sys.stderr' % MPATH, new_callable=StringIO): | ||
1951 | 98 | run_commands(commands=commands) | ||
1952 | 99 | |||
1953 | 100 | expected_messages = [ | ||
1954 | 101 | 'DEBUG: Running user-provided ubuntu-advantage commands'] | ||
1955 | 102 | for message in expected_messages: | ||
1956 | 103 | self.assertIn(message, self.logs.getvalue()) | ||
1957 | 104 | self.assertEqual('MOM\nHI\n', util.load_file(outfile)) | ||
1958 | 105 | |||
1959 | 106 | |||
1960 | 107 | @skipUnlessJsonSchema() | ||
1961 | 108 | class TestSchema(CiTestCase): | ||
1962 | 109 | |||
1963 | 110 | with_logs = True | ||
1964 | 111 | |||
1965 | 112 | def test_schema_warns_on_ubuntu_advantage_not_as_dict(self): | ||
1966 | 113 | """If ubuntu-advantage configuration is not a dict, emit a warning.""" | ||
1967 | 114 | validate_cloudconfig_schema({'ubuntu-advantage': 'wrong type'}, schema) | ||
1968 | 115 | self.assertEqual( | ||
1969 | 116 | "WARNING: Invalid config:\nubuntu-advantage: 'wrong type' is not" | ||
1970 | 117 | " of type 'object'\n", | ||
1971 | 118 | self.logs.getvalue()) | ||
1972 | 119 | |||
1973 | 120 | @mock.patch('%s.run_commands' % MPATH) | ||
1974 | 121 | def test_schema_disallows_unknown_keys(self, _): | ||
1975 | 122 | """Unknown keys in ubuntu-advantage configuration emit warnings.""" | ||
1976 | 123 | validate_cloudconfig_schema( | ||
1977 | 124 | {'ubuntu-advantage': {'commands': ['ls'], 'invalid-key': ''}}, | ||
1978 | 125 | schema) | ||
1979 | 126 | self.assertIn( | ||
1980 | 127 | 'WARNING: Invalid config:\nubuntu-advantage: Additional properties' | ||
1981 | 128 | " are not allowed ('invalid-key' was unexpected)", | ||
1982 | 129 | self.logs.getvalue()) | ||
1983 | 130 | |||
1984 | 131 | def test_warn_schema_requires_commands(self): | ||
1985 | 132 | """Warn when ubuntu-advantage configuration lacks commands.""" | ||
1986 | 133 | validate_cloudconfig_schema( | ||
1987 | 134 | {'ubuntu-advantage': {}}, schema) | ||
1988 | 135 | self.assertEqual( | ||
1989 | 136 | "WARNING: Invalid config:\nubuntu-advantage: 'commands' is a" | ||
1990 | 137 | " required property\n", | ||
1991 | 138 | self.logs.getvalue()) | ||
1992 | 139 | |||
1993 | 140 | @mock.patch('%s.run_commands' % MPATH) | ||
1994 | 141 | def test_warn_schema_commands_is_not_list_or_dict(self, _): | ||
1995 | 142 | """Warn when ubuntu-advantage:commands config is not a list or dict.""" | ||
1996 | 143 | validate_cloudconfig_schema( | ||
1997 | 144 | {'ubuntu-advantage': {'commands': 'broken'}}, schema) | ||
1998 | 145 | self.assertEqual( | ||
1999 | 146 | "WARNING: Invalid config:\nubuntu-advantage.commands: 'broken' is" | ||
2000 | 147 | " not of type 'object', 'array'\n", | ||
2001 | 148 | self.logs.getvalue()) | ||
2002 | 149 | |||
2003 | 150 | @mock.patch('%s.run_commands' % MPATH) | ||
2004 | 151 | def test_warn_schema_when_commands_is_empty(self, _): | ||
2005 | 152 | """Emit warnings when ubuntu-advantage:commands is empty.""" | ||
2006 | 153 | validate_cloudconfig_schema( | ||
2007 | 154 | {'ubuntu-advantage': {'commands': []}}, schema) | ||
2008 | 155 | validate_cloudconfig_schema( | ||
2009 | 156 | {'ubuntu-advantage': {'commands': {}}}, schema) | ||
2010 | 157 | self.assertEqual( | ||
2011 | 158 | "WARNING: Invalid config:\nubuntu-advantage.commands: [] is too" | ||
2012 | 159 | " short\nWARNING: Invalid config:\nubuntu-advantage.commands: {}" | ||
2013 | 160 | " does not have enough properties\n", | ||
2014 | 161 | self.logs.getvalue()) | ||
2015 | 162 | |||
2016 | 163 | @mock.patch('%s.run_commands' % MPATH) | ||
2017 | 164 | def test_schema_when_commands_are_list_or_dict(self, _): | ||
2018 | 165 | """No warnings when ubuntu-advantage:commands are a list or dict.""" | ||
2019 | 166 | validate_cloudconfig_schema( | ||
2020 | 167 | {'ubuntu-advantage': {'commands': ['valid']}}, schema) | ||
2021 | 168 | validate_cloudconfig_schema( | ||
2022 | 169 | {'ubuntu-advantage': {'commands': {'01': 'also valid'}}}, schema) | ||
2023 | 170 | self.assertEqual('', self.logs.getvalue()) | ||
2024 | 171 | |||
2025 | 172 | |||
2026 | 173 | class TestHandle(CiTestCase): | ||
2027 | 174 | |||
2028 | 175 | with_logs = True | ||
2029 | 176 | |||
2030 | 177 | def setUp(self): | ||
2031 | 178 | super(TestHandle, self).setUp() | ||
2032 | 179 | self.tmp = self.tmp_dir() | ||
2033 | 180 | |||
2034 | 181 | @mock.patch('%s.run_commands' % MPATH) | ||
2035 | 182 | @mock.patch('%s.validate_cloudconfig_schema' % MPATH) | ||
2036 | 183 | def test_handle_no_config(self, m_schema, m_run): | ||
2037 | 184 | """When no ua-related configuration is provided, nothing happens.""" | ||
2038 | 185 | cfg = {} | ||
2039 | 186 | handle('ua-test', cfg=cfg, cloud=None, log=self.logger, args=None) | ||
2040 | 187 | self.assertIn( | ||
2041 | 188 | "DEBUG: Skipping module named ua-test, no 'ubuntu-advantage' key" | ||
2042 | 189 | " in config", | ||
2043 | 190 | self.logs.getvalue()) | ||
2044 | 191 | m_schema.assert_not_called() | ||
2045 | 192 | m_run.assert_not_called() | ||
2046 | 193 | |||
2047 | 194 | @mock.patch('%s.maybe_install_ua_tools' % MPATH) | ||
2048 | 195 | def test_handle_tries_to_install_ubuntu_advantage_tools(self, m_install): | ||
2049 | 196 | """If ubuntu_advantage is provided, try installing ua-tools package.""" | ||
2050 | 197 | cfg = {'ubuntu-advantage': {}} | ||
2051 | 198 | mycloud = FakeCloud(None) | ||
2052 | 199 | handle('nomatter', cfg=cfg, cloud=mycloud, log=self.logger, args=None) | ||
2053 | 200 | m_install.assert_called_once_with(mycloud) | ||
2054 | 201 | |||
2055 | 202 | @mock.patch('%s.maybe_install_ua_tools' % MPATH) | ||
2056 | 203 | def test_handle_runs_commands_provided(self, m_install): | ||
2057 | 204 | """When commands are specified as a list, run them.""" | ||
2058 | 205 | outfile = self.tmp_path('output.log', dir=self.tmp) | ||
2059 | 206 | |||
2060 | 207 | cfg = { | ||
2061 | 208 | 'ubuntu-advantage': {'commands': ['echo "HI" >> %s' % outfile, | ||
2062 | 209 | 'echo "MOM" >> %s' % outfile]}} | ||
2063 | 210 | mock_path = '%s.sys.stderr' % MPATH | ||
2064 | 211 | with mock.patch(mock_path, new_callable=StringIO): | ||
2065 | 212 | handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) | ||
2066 | 213 | self.assertEqual('HI\nMOM\n', util.load_file(outfile)) | ||
2067 | 214 | |||
2068 | 215 | |||
2069 | 216 | class TestMaybeInstallUATools(CiTestCase): | ||
2070 | 217 | |||
2071 | 218 | with_logs = True | ||
2072 | 219 | |||
2073 | 220 | def setUp(self): | ||
2074 | 221 | super(TestMaybeInstallUATools, self).setUp() | ||
2075 | 222 | self.tmp = self.tmp_dir() | ||
2076 | 223 | |||
2077 | 224 | @mock.patch('%s.util.which' % MPATH) | ||
2078 | 225 | def test_maybe_install_ua_tools_noop_when_ua_tools_present(self, m_which): | ||
2079 | 226 | """Do nothing if ubuntu-advantage-tools already exists.""" | ||
2080 | 227 | m_which.return_value = '/usr/bin/ubuntu-advantage' # already installed | ||
2081 | 228 | distro = mock.MagicMock() | ||
2082 | 229 | distro.update_package_sources.side_effect = RuntimeError( | ||
2083 | 230 | 'Some apt error') | ||
2084 | 231 | maybe_install_ua_tools(cloud=FakeCloud(distro)) # No RuntimeError | ||
2085 | 232 | |||
2086 | 233 | @mock.patch('%s.util.which' % MPATH) | ||
2087 | 234 | def test_maybe_install_ua_tools_raises_update_errors(self, m_which): | ||
2088 | 235 | """maybe_install_ua_tools logs and raises apt update errors.""" | ||
2089 | 236 | m_which.return_value = None | ||
2090 | 237 | distro = mock.MagicMock() | ||
2091 | 238 | distro.update_package_sources.side_effect = RuntimeError( | ||
2092 | 239 | 'Some apt error') | ||
2093 | 240 | with self.assertRaises(RuntimeError) as context_manager: | ||
2094 | 241 | maybe_install_ua_tools(cloud=FakeCloud(distro)) | ||
2095 | 242 | self.assertEqual('Some apt error', str(context_manager.exception)) | ||
2096 | 243 | self.assertIn('Package update failed\nTraceback', self.logs.getvalue()) | ||
2097 | 244 | |||
2098 | 245 | @mock.patch('%s.util.which' % MPATH) | ||
2099 | 246 | def test_maybe_install_ua_raises_install_errors(self, m_which): | ||
2100 | 247 | """maybe_install_ua_tools logs and raises package install errors.""" | ||
2101 | 248 | m_which.return_value = None | ||
2102 | 249 | distro = mock.MagicMock() | ||
2103 | 250 | distro.update_package_sources.return_value = None | ||
2104 | 251 | distro.install_packages.side_effect = RuntimeError( | ||
2105 | 252 | 'Some install error') | ||
2106 | 253 | with self.assertRaises(RuntimeError) as context_manager: | ||
2107 | 254 | maybe_install_ua_tools(cloud=FakeCloud(distro)) | ||
2108 | 255 | self.assertEqual('Some install error', str(context_manager.exception)) | ||
2109 | 256 | self.assertIn( | ||
2110 | 257 | 'Failed to install ubuntu-advantage-tools\n', self.logs.getvalue()) | ||
2111 | 258 | |||
2112 | 259 | @mock.patch('%s.util.which' % MPATH) | ||
2113 | 260 | def test_maybe_install_ua_tools_happy_path(self, m_which): | ||
2114 | 261 | """maybe_install_ua_tools installs ubuntu-advantage-tools.""" | ||
2115 | 262 | m_which.return_value = None | ||
2116 | 263 | distro = mock.MagicMock() # No errors raised | ||
2117 | 264 | maybe_install_ua_tools(cloud=FakeCloud(distro)) | ||
2118 | 265 | distro.update_package_sources.assert_called_once_with() | ||
2119 | 266 | distro.install_packages.assert_called_once_with( | ||
2120 | 267 | ['ubuntu-advantage-tools']) | ||
2121 | 268 | |||
2122 | 269 | # vi: ts=4 expandtab | ||
2123 | diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py | |||
2124 | index f87a343..b814c8b 100644 | |||
2125 | --- a/cloudinit/distros/arch.py | |||
2126 | +++ b/cloudinit/distros/arch.py | |||
2127 | @@ -129,11 +129,8 @@ class Distro(distros.Distro): | |||
2128 | 129 | if pkgs is None: | 129 | if pkgs is None: |
2129 | 130 | pkgs = [] | 130 | pkgs = [] |
2130 | 131 | 131 | ||
2132 | 132 | cmd = ['pacman'] | 132 | cmd = ['pacman', "-Sy", "--quiet", "--noconfirm"] |
2133 | 133 | # Redirect output | 133 | # Redirect output |
2134 | 134 | cmd.append("-Sy") | ||
2135 | 135 | cmd.append("--quiet") | ||
2136 | 136 | cmd.append("--noconfirm") | ||
2137 | 137 | 134 | ||
2138 | 138 | if args and isinstance(args, str): | 135 | if args and isinstance(args, str): |
2139 | 139 | cmd.append(args) | 136 | cmd.append(args) |
2140 | diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py | |||
2141 | index aa468bc..754d3df 100644 | |||
2142 | --- a/cloudinit/distros/freebsd.py | |||
2143 | +++ b/cloudinit/distros/freebsd.py | |||
2144 | @@ -132,6 +132,12 @@ class Distro(distros.Distro): | |||
2145 | 132 | LOG.debug("Using network interface %s", bsddev) | 132 | LOG.debug("Using network interface %s", bsddev) |
2146 | 133 | return bsddev | 133 | return bsddev |
2147 | 134 | 134 | ||
2148 | 135 | def _select_hostname(self, hostname, fqdn): | ||
2149 | 136 | # Should be FQDN if available. See rc.conf(5) in FreeBSD | ||
2150 | 137 | if fqdn: | ||
2151 | 138 | return fqdn | ||
2152 | 139 | return hostname | ||
2153 | 140 | |||
2154 | 135 | def _read_system_hostname(self): | 141 | def _read_system_hostname(self): |
2155 | 136 | sys_hostname = self._read_hostname(filename=None) | 142 | sys_hostname = self._read_hostname(filename=None) |
2156 | 137 | return ('rc.conf', sys_hostname) | 143 | return ('rc.conf', sys_hostname) |
2157 | diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py | |||
2158 | index a219e9f..162dfa0 100644 | |||
2159 | --- a/cloudinit/distros/opensuse.py | |||
2160 | +++ b/cloudinit/distros/opensuse.py | |||
2161 | @@ -67,11 +67,10 @@ class Distro(distros.Distro): | |||
2162 | 67 | if pkgs is None: | 67 | if pkgs is None: |
2163 | 68 | pkgs = [] | 68 | pkgs = [] |
2164 | 69 | 69 | ||
2165 | 70 | cmd = ['zypper'] | ||
2166 | 71 | # No user interaction possible, enable non-interactive mode | 70 | # No user interaction possible, enable non-interactive mode |
2168 | 72 | cmd.append('--non-interactive') | 71 | cmd = ['zypper', '--non-interactive'] |
2169 | 73 | 72 | ||
2171 | 74 | # Comand is the operation, such as install | 73 | # Command is the operation, such as install |
2172 | 75 | if command == 'upgrade': | 74 | if command == 'upgrade': |
2173 | 76 | command = 'update' | 75 | command = 'update' |
2174 | 77 | cmd.append(command) | 76 | cmd.append(command) |
2175 | diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py | |||
2176 | index d6c61e4..dc3f0fc 100644 | |||
2177 | --- a/cloudinit/ec2_utils.py | |||
2178 | +++ b/cloudinit/ec2_utils.py | |||
2179 | @@ -135,10 +135,8 @@ class MetadataMaterializer(object): | |||
2180 | 135 | 135 | ||
2181 | 136 | 136 | ||
2182 | 137 | def _skip_retry_on_codes(status_codes, _request_args, cause): | 137 | def _skip_retry_on_codes(status_codes, _request_args, cause): |
2187 | 138 | """Returns if a request should retry based on a given set of codes that | 138 | """Returns False if cause.code is in status_codes.""" |
2188 | 139 | case retrying to be stopped/skipped. | 139 | return cause.code not in status_codes |
2185 | 140 | """ | ||
2186 | 141 | return cause.code in status_codes | ||
2189 | 142 | 140 | ||
2190 | 143 | 141 | ||
2191 | 144 | def get_instance_userdata(api_version='latest', | 142 | def get_instance_userdata(api_version='latest', |
2192 | diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py | |||
2193 | index 7b2cc9d..9e9fe0f 100755 | |||
2194 | --- a/cloudinit/net/cmdline.py | |||
2195 | +++ b/cloudinit/net/cmdline.py | |||
2196 | @@ -9,12 +9,15 @@ import base64 | |||
2197 | 9 | import glob | 9 | import glob |
2198 | 10 | import gzip | 10 | import gzip |
2199 | 11 | import io | 11 | import io |
2200 | 12 | import os | ||
2201 | 12 | 13 | ||
2202 | 13 | from . import get_devicelist | 14 | from . import get_devicelist |
2203 | 14 | from . import read_sys_net_safe | 15 | from . import read_sys_net_safe |
2204 | 15 | 16 | ||
2205 | 16 | from cloudinit import util | 17 | from cloudinit import util |
2206 | 17 | 18 | ||
2207 | 19 | _OPEN_ISCSI_INTERFACE_FILE = "/run/initramfs/open-iscsi.interface" | ||
2208 | 20 | |||
2209 | 18 | 21 | ||
2210 | 19 | def _klibc_to_config_entry(content, mac_addrs=None): | 22 | def _klibc_to_config_entry(content, mac_addrs=None): |
2211 | 20 | """Convert a klibc written shell content file to a 'config' entry | 23 | """Convert a klibc written shell content file to a 'config' entry |
2212 | @@ -103,9 +106,13 @@ def _klibc_to_config_entry(content, mac_addrs=None): | |||
2213 | 103 | return name, iface | 106 | return name, iface |
2214 | 104 | 107 | ||
2215 | 105 | 108 | ||
2216 | 109 | def _get_klibc_net_cfg_files(): | ||
2217 | 110 | return glob.glob('/run/net-*.conf') + glob.glob('/run/net6-*.conf') | ||
2218 | 111 | |||
2219 | 112 | |||
2220 | 106 | def config_from_klibc_net_cfg(files=None, mac_addrs=None): | 113 | def config_from_klibc_net_cfg(files=None, mac_addrs=None): |
2221 | 107 | if files is None: | 114 | if files is None: |
2223 | 108 | files = glob.glob('/run/net-*.conf') + glob.glob('/run/net6-*.conf') | 115 | files = _get_klibc_net_cfg_files() |
2224 | 109 | 116 | ||
2225 | 110 | entries = [] | 117 | entries = [] |
2226 | 111 | names = {} | 118 | names = {} |
2227 | @@ -160,10 +167,23 @@ def _b64dgz(b64str, gzipped="try"): | |||
2228 | 160 | return _decomp_gzip(blob, strict=gzipped != "try") | 167 | return _decomp_gzip(blob, strict=gzipped != "try") |
2229 | 161 | 168 | ||
2230 | 162 | 169 | ||
2231 | 170 | def _is_initramfs_netconfig(files, cmdline): | ||
2232 | 171 | if files: | ||
2233 | 172 | if 'ip=' in cmdline or 'ip6=' in cmdline: | ||
2234 | 173 | return True | ||
2235 | 174 | if os.path.exists(_OPEN_ISCSI_INTERFACE_FILE): | ||
2236 | 175 | # iBft can configure networking without ip= | ||
2237 | 176 | return True | ||
2238 | 177 | return False | ||
2239 | 178 | |||
2240 | 179 | |||
2241 | 163 | def read_kernel_cmdline_config(files=None, mac_addrs=None, cmdline=None): | 180 | def read_kernel_cmdline_config(files=None, mac_addrs=None, cmdline=None): |
2242 | 164 | if cmdline is None: | 181 | if cmdline is None: |
2243 | 165 | cmdline = util.get_cmdline() | 182 | cmdline = util.get_cmdline() |
2244 | 166 | 183 | ||
2245 | 184 | if files is None: | ||
2246 | 185 | files = _get_klibc_net_cfg_files() | ||
2247 | 186 | |||
2248 | 167 | if 'network-config=' in cmdline: | 187 | if 'network-config=' in cmdline: |
2249 | 168 | data64 = None | 188 | data64 = None |
2250 | 169 | for tok in cmdline.split(): | 189 | for tok in cmdline.split(): |
2251 | @@ -172,7 +192,7 @@ def read_kernel_cmdline_config(files=None, mac_addrs=None, cmdline=None): | |||
2252 | 172 | if data64: | 192 | if data64: |
2253 | 173 | return util.load_yaml(_b64dgz(data64)) | 193 | return util.load_yaml(_b64dgz(data64)) |
2254 | 174 | 194 | ||
2256 | 175 | if 'ip=' not in cmdline and 'ip6=' not in cmdline: | 195 | if not _is_initramfs_netconfig(files, cmdline): |
2257 | 176 | return None | 196 | return None |
2258 | 177 | 197 | ||
2259 | 178 | if mac_addrs is None: | 198 | if mac_addrs is None: |
2260 | diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py | |||
2261 | index d3788af..6344348 100644 | |||
2262 | --- a/cloudinit/net/netplan.py | |||
2263 | +++ b/cloudinit/net/netplan.py | |||
2264 | @@ -311,12 +311,12 @@ class Renderer(renderer.Renderer): | |||
2265 | 311 | if newname is None: | 311 | if newname is None: |
2266 | 312 | continue | 312 | continue |
2267 | 313 | br_config.update({newname: value}) | 313 | br_config.update({newname: value}) |
2270 | 314 | if newname == 'path-cost': | 314 | if newname in ['path-cost', 'port-priority']: |
2271 | 315 | # <interface> <cost> -> <interface>: int(<cost>) | 315 | # <interface> <value> -> <interface>: int(<value>) |
2272 | 316 | newvalue = {} | 316 | newvalue = {} |
2276 | 317 | for costval in value: | 317 | for val in value: |
2277 | 318 | (port, cost) = costval.split() | 318 | (port, portval) = val.split() |
2278 | 319 | newvalue[port] = int(cost) | 319 | newvalue[port] = int(portval) |
2279 | 320 | br_config.update({newname: newvalue}) | 320 | br_config.update({newname: newvalue}) |
2280 | 321 | 321 | ||
2281 | 322 | if len(br_config) > 0: | 322 | if len(br_config) > 0: |
2282 | @@ -336,22 +336,15 @@ class Renderer(renderer.Renderer): | |||
2283 | 336 | _extract_addresses(ifcfg, vlan) | 336 | _extract_addresses(ifcfg, vlan) |
2284 | 337 | vlans.update({ifname: vlan}) | 337 | vlans.update({ifname: vlan}) |
2285 | 338 | 338 | ||
2302 | 339 | # inject global nameserver values under each physical interface | 339 | # inject global nameserver values under each all interface which |
2303 | 340 | if nameservers: | 340 | # has addresses and do not already have a DNS configuration |
2304 | 341 | for _eth, cfg in ethernets.items(): | 341 | if nameservers or searchdomains: |
2305 | 342 | nscfg = cfg.get('nameservers', {}) | 342 | nscfg = {'addresses': nameservers, 'search': searchdomains} |
2306 | 343 | addresses = nscfg.get('addresses', []) | 343 | for section in [ethernets, wifis, bonds, bridges, vlans]: |
2307 | 344 | addresses += nameservers | 344 | for _name, cfg in section.items(): |
2308 | 345 | nscfg.update({'addresses': addresses}) | 345 | if 'nameservers' in cfg or 'addresses' not in cfg: |
2309 | 346 | cfg.update({'nameservers': nscfg}) | 346 | continue |
2310 | 347 | 347 | cfg.update({'nameservers': nscfg}) | |
2295 | 348 | if searchdomains: | ||
2296 | 349 | for _eth, cfg in ethernets.items(): | ||
2297 | 350 | nscfg = cfg.get('nameservers', {}) | ||
2298 | 351 | search = nscfg.get('search', []) | ||
2299 | 352 | search += searchdomains | ||
2300 | 353 | nscfg.update({'search': search}) | ||
2301 | 354 | cfg.update({'nameservers': nscfg}) | ||
2311 | 355 | 348 | ||
2312 | 356 | # workaround yaml dictionary key sorting when dumping | 349 | # workaround yaml dictionary key sorting when dumping |
2313 | 357 | def _render_section(name, section): | 350 | def _render_section(name, section): |
2314 | diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py | |||
2315 | index fe667d8..6d63e5c 100644 | |||
2316 | --- a/cloudinit/net/network_state.py | |||
2317 | +++ b/cloudinit/net/network_state.py | |||
2318 | @@ -47,7 +47,7 @@ NET_CONFIG_TO_V2 = { | |||
2319 | 47 | 'bridge_maxage': 'max-age', | 47 | 'bridge_maxage': 'max-age', |
2320 | 48 | 'bridge_maxwait': None, | 48 | 'bridge_maxwait': None, |
2321 | 49 | 'bridge_pathcost': 'path-cost', | 49 | 'bridge_pathcost': 'path-cost', |
2323 | 50 | 'bridge_portprio': None, | 50 | 'bridge_portprio': 'port-priority', |
2324 | 51 | 'bridge_stp': 'stp', | 51 | 'bridge_stp': 'stp', |
2325 | 52 | 'bridge_waitport': None}} | 52 | 'bridge_waitport': None}} |
2326 | 53 | 53 | ||
2327 | @@ -708,6 +708,7 @@ class NetworkStateInterpreter(object): | |||
2328 | 708 | 708 | ||
2329 | 709 | gateway4 = None | 709 | gateway4 = None |
2330 | 710 | gateway6 = None | 710 | gateway6 = None |
2331 | 711 | nameservers = {} | ||
2332 | 711 | for address in cfg.get('addresses', []): | 712 | for address in cfg.get('addresses', []): |
2333 | 712 | subnet = { | 713 | subnet = { |
2334 | 713 | 'type': 'static', | 714 | 'type': 'static', |
2335 | @@ -723,6 +724,15 @@ class NetworkStateInterpreter(object): | |||
2336 | 723 | gateway4 = cfg.get('gateway4') | 724 | gateway4 = cfg.get('gateway4') |
2337 | 724 | subnet.update({'gateway': gateway4}) | 725 | subnet.update({'gateway': gateway4}) |
2338 | 725 | 726 | ||
2339 | 727 | if 'nameservers' in cfg and not nameservers: | ||
2340 | 728 | addresses = cfg.get('nameservers').get('addresses') | ||
2341 | 729 | if addresses: | ||
2342 | 730 | nameservers['dns_nameservers'] = addresses | ||
2343 | 731 | search = cfg.get('nameservers').get('search') | ||
2344 | 732 | if search: | ||
2345 | 733 | nameservers['dns_search'] = search | ||
2346 | 734 | subnet.update(nameservers) | ||
2347 | 735 | |||
2348 | 726 | subnets.append(subnet) | 736 | subnets.append(subnet) |
2349 | 727 | 737 | ||
2350 | 728 | routes = [] | 738 | routes = [] |
2351 | diff --git a/cloudinit/settings.py b/cloudinit/settings.py | |||
2352 | index c120498..dde5749 100644 | |||
2353 | --- a/cloudinit/settings.py | |||
2354 | +++ b/cloudinit/settings.py | |||
2355 | @@ -36,6 +36,8 @@ CFG_BUILTIN = { | |||
2356 | 36 | 'SmartOS', | 36 | 'SmartOS', |
2357 | 37 | 'Bigstep', | 37 | 'Bigstep', |
2358 | 38 | 'Scaleway', | 38 | 'Scaleway', |
2359 | 39 | 'Hetzner', | ||
2360 | 40 | 'IBMCloud', | ||
2361 | 39 | # At the end to act as a 'catch' when none of the above work... | 41 | # At the end to act as a 'catch' when none of the above work... |
2362 | 40 | 'None', | 42 | 'None', |
2363 | 41 | ], | 43 | ], |
2364 | diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py | |||
2365 | index 7ac8288..22279d0 100644 | |||
2366 | --- a/cloudinit/sources/DataSourceAliYun.py | |||
2367 | +++ b/cloudinit/sources/DataSourceAliYun.py | |||
2368 | @@ -22,7 +22,7 @@ class DataSourceAliYun(EC2.DataSourceEc2): | |||
2369 | 22 | super(DataSourceAliYun, self).__init__(sys_cfg, distro, paths) | 22 | super(DataSourceAliYun, self).__init__(sys_cfg, distro, paths) |
2370 | 23 | self.seed_dir = os.path.join(paths.seed_dir, "AliYun") | 23 | self.seed_dir = os.path.join(paths.seed_dir, "AliYun") |
2371 | 24 | 24 | ||
2373 | 25 | def get_hostname(self, fqdn=False, _resolve_ip=False): | 25 | def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): |
2374 | 26 | return self.metadata.get('hostname', 'localhost.localdomain') | 26 | return self.metadata.get('hostname', 'localhost.localdomain') |
2375 | 27 | 27 | ||
2376 | 28 | def get_public_ssh_keys(self): | 28 | def get_public_ssh_keys(self): |
2377 | diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py | |||
2378 | index 4bcbf3a..0ee622e 100644 | |||
2379 | --- a/cloudinit/sources/DataSourceAzure.py | |||
2380 | +++ b/cloudinit/sources/DataSourceAzure.py | |||
2381 | @@ -20,7 +20,7 @@ from cloudinit import net | |||
2382 | 20 | from cloudinit.net.dhcp import EphemeralDHCPv4 | 20 | from cloudinit.net.dhcp import EphemeralDHCPv4 |
2383 | 21 | from cloudinit import sources | 21 | from cloudinit import sources |
2384 | 22 | from cloudinit.sources.helpers.azure import get_metadata_from_fabric | 22 | from cloudinit.sources.helpers.azure import get_metadata_from_fabric |
2386 | 23 | from cloudinit.url_helper import readurl, wait_for_url, UrlError | 23 | from cloudinit.url_helper import readurl, UrlError |
2387 | 24 | from cloudinit import util | 24 | from cloudinit import util |
2388 | 25 | 25 | ||
2389 | 26 | LOG = logging.getLogger(__name__) | 26 | LOG = logging.getLogger(__name__) |
2390 | @@ -49,7 +49,6 @@ DEFAULT_FS = 'ext4' | |||
2391 | 49 | AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77' | 49 | AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77' |
2392 | 50 | REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds" | 50 | REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds" |
2393 | 51 | IMDS_URL = "http://169.254.169.254/metadata/reprovisiondata" | 51 | IMDS_URL = "http://169.254.169.254/metadata/reprovisiondata" |
2394 | 52 | IMDS_RETRIES = 5 | ||
2395 | 53 | 52 | ||
2396 | 54 | 53 | ||
2397 | 55 | def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid): | 54 | def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid): |
2398 | @@ -223,6 +222,8 @@ DEF_PASSWD_REDACTION = 'REDACTED' | |||
2399 | 223 | 222 | ||
2400 | 224 | 223 | ||
2401 | 225 | def get_hostname(hostname_command='hostname'): | 224 | def get_hostname(hostname_command='hostname'): |
2402 | 225 | if not isinstance(hostname_command, (list, tuple)): | ||
2403 | 226 | hostname_command = (hostname_command,) | ||
2404 | 226 | return util.subp(hostname_command, capture=True)[0].strip() | 227 | return util.subp(hostname_command, capture=True)[0].strip() |
2405 | 227 | 228 | ||
2406 | 228 | 229 | ||
2407 | @@ -449,36 +450,24 @@ class DataSourceAzure(sources.DataSource): | |||
2408 | 449 | headers = {"Metadata": "true"} | 450 | headers = {"Metadata": "true"} |
2409 | 450 | LOG.debug("Start polling IMDS") | 451 | LOG.debug("Start polling IMDS") |
2410 | 451 | 452 | ||
2415 | 452 | def sleep_cb(response, loop_n): | 453 | def exc_cb(msg, exception): |
2412 | 453 | return 1 | ||
2413 | 454 | |||
2414 | 455 | def exception_cb(msg, exception): | ||
2416 | 456 | if isinstance(exception, UrlError) and exception.code == 404: | 454 | if isinstance(exception, UrlError) and exception.code == 404: |
2421 | 457 | return | 455 | return True |
2418 | 458 | LOG.warning("Exception during polling. Will try DHCP.", | ||
2419 | 459 | exc_info=True) | ||
2420 | 460 | |||
2422 | 461 | # If we get an exception while trying to call IMDS, we | 456 | # If we get an exception while trying to call IMDS, we |
2423 | 462 | # call DHCP and setup the ephemeral network to acquire the new IP. | 457 | # call DHCP and setup the ephemeral network to acquire the new IP. |
2425 | 463 | raise exception | 458 | return False |
2426 | 464 | 459 | ||
2427 | 465 | need_report = report_ready | 460 | need_report = report_ready |
2429 | 466 | for i in range(IMDS_RETRIES): | 461 | while True: |
2430 | 467 | try: | 462 | try: |
2431 | 468 | with EphemeralDHCPv4() as lease: | 463 | with EphemeralDHCPv4() as lease: |
2432 | 469 | if need_report: | 464 | if need_report: |
2433 | 470 | self._report_ready(lease=lease) | 465 | self._report_ready(lease=lease) |
2434 | 471 | need_report = False | 466 | need_report = False |
2445 | 472 | wait_for_url([url], max_wait=None, timeout=60, | 467 | return readurl(url, timeout=1, headers=headers, |
2446 | 473 | status_cb=LOG.info, | 468 | exception_cb=exc_cb, infinite=True).contents |
2447 | 474 | headers_cb=lambda url: headers, sleep_time=1, | 469 | except UrlError: |
2448 | 475 | exception_cb=exception_cb, | 470 | pass |
2439 | 476 | sleep_time_cb=sleep_cb) | ||
2440 | 477 | return str(readurl(url, headers=headers)) | ||
2441 | 478 | except Exception: | ||
2442 | 479 | LOG.debug("Exception during polling-retrying dhcp" + | ||
2443 | 480 | " %d more time(s).", (IMDS_RETRIES - i), | ||
2444 | 481 | exc_info=True) | ||
2449 | 482 | 471 | ||
2450 | 483 | def _report_ready(self, lease): | 472 | def _report_ready(self, lease): |
2451 | 484 | """Tells the fabric provisioning has completed | 473 | """Tells the fabric provisioning has completed |
2452 | diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py | |||
2453 | index 4eaad47..c816f34 100644 | |||
2454 | --- a/cloudinit/sources/DataSourceCloudSigma.py | |||
2455 | +++ b/cloudinit/sources/DataSourceCloudSigma.py | |||
2456 | @@ -84,7 +84,7 @@ class DataSourceCloudSigma(sources.DataSource): | |||
2457 | 84 | 84 | ||
2458 | 85 | return True | 85 | return True |
2459 | 86 | 86 | ||
2461 | 87 | def get_hostname(self, fqdn=False, resolve_ip=False): | 87 | def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): |
2462 | 88 | """ | 88 | """ |
2463 | 89 | Cleans up and uses the server's name if the latter is set. Otherwise | 89 | Cleans up and uses the server's name if the latter is set. Otherwise |
2464 | 90 | the first part from uuid is being used. | 90 | the first part from uuid is being used. |
2465 | diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py | |||
2466 | index b8db626..c7b5fe5 100644 | |||
2467 | --- a/cloudinit/sources/DataSourceConfigDrive.py | |||
2468 | +++ b/cloudinit/sources/DataSourceConfigDrive.py | |||
2469 | @@ -14,6 +14,7 @@ from cloudinit import util | |||
2470 | 14 | 14 | ||
2471 | 15 | from cloudinit.net import eni | 15 | from cloudinit.net import eni |
2472 | 16 | 16 | ||
2473 | 17 | from cloudinit.sources.DataSourceIBMCloud import get_ibm_platform | ||
2474 | 17 | from cloudinit.sources.helpers import openstack | 18 | from cloudinit.sources.helpers import openstack |
2475 | 18 | 19 | ||
2476 | 19 | LOG = logging.getLogger(__name__) | 20 | LOG = logging.getLogger(__name__) |
2477 | @@ -255,6 +256,15 @@ def find_candidate_devs(probe_optical=True): | |||
2478 | 255 | # an unpartitioned block device (ex sda, not sda1) | 256 | # an unpartitioned block device (ex sda, not sda1) |
2479 | 256 | devices = [d for d in candidates | 257 | devices = [d for d in candidates |
2480 | 257 | if d in by_label or not util.is_partition(d)] | 258 | if d in by_label or not util.is_partition(d)] |
2481 | 259 | |||
2482 | 260 | if devices: | ||
2483 | 261 | # IBMCloud uses config-2 label, but limited to a single UUID. | ||
2484 | 262 | ibm_platform, ibm_path = get_ibm_platform() | ||
2485 | 263 | if ibm_path in devices: | ||
2486 | 264 | devices.remove(ibm_path) | ||
2487 | 265 | LOG.debug("IBMCloud device '%s' (%s) removed from candidate list", | ||
2488 | 266 | ibm_path, ibm_platform) | ||
2489 | 267 | |||
2490 | 258 | return devices | 268 | return devices |
2491 | 259 | 269 | ||
2492 | 260 | 270 | ||
2493 | diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py | |||
2494 | index 2da34a9..d816262 100644 | |||
2495 | --- a/cloudinit/sources/DataSourceGCE.py | |||
2496 | +++ b/cloudinit/sources/DataSourceGCE.py | |||
2497 | @@ -90,7 +90,7 @@ class DataSourceGCE(sources.DataSource): | |||
2498 | 90 | public_keys_data = self.metadata['public-keys-data'] | 90 | public_keys_data = self.metadata['public-keys-data'] |
2499 | 91 | return _parse_public_keys(public_keys_data, self.default_user) | 91 | return _parse_public_keys(public_keys_data, self.default_user) |
2500 | 92 | 92 | ||
2502 | 93 | def get_hostname(self, fqdn=False, resolve_ip=False): | 93 | def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): |
2503 | 94 | # GCE has long FDQN's and has asked for short hostnames. | 94 | # GCE has long FDQN's and has asked for short hostnames. |
2504 | 95 | return self.metadata['local-hostname'].split('.')[0] | 95 | return self.metadata['local-hostname'].split('.')[0] |
2505 | 96 | 96 | ||
2506 | @@ -213,16 +213,15 @@ def read_md(address=None, platform_check=True): | |||
2507 | 213 | if md['availability-zone']: | 213 | if md['availability-zone']: |
2508 | 214 | md['availability-zone'] = md['availability-zone'].split('/')[-1] | 214 | md['availability-zone'] = md['availability-zone'].split('/')[-1] |
2509 | 215 | 215 | ||
2512 | 216 | encoding = instance_data.get('user-data-encoding') | 216 | if 'user-data' in instance_data: |
2513 | 217 | if encoding: | 217 | # instance_data was json, so values are all utf-8 strings. |
2514 | 218 | ud = instance_data['user-data'].encode("utf-8") | ||
2515 | 219 | encoding = instance_data.get('user-data-encoding') | ||
2516 | 218 | if encoding == 'base64': | 220 | if encoding == 'base64': |
2519 | 219 | md['user-data'] = b64decode(instance_data.get('user-data')) | 221 | ud = b64decode(ud) |
2520 | 220 | else: | 222 | elif encoding: |
2521 | 221 | LOG.warning('unknown user-data-encoding: %s, ignoring', encoding) | 223 | LOG.warning('unknown user-data-encoding: %s, ignoring', encoding) |
2526 | 222 | 224 | ret['user-data'] = ud | |
2523 | 223 | if 'user-data' in md: | ||
2524 | 224 | ret['user-data'] = md['user-data'] | ||
2525 | 225 | del md['user-data'] | ||
2527 | 226 | 225 | ||
2528 | 227 | ret['meta-data'] = md | 226 | ret['meta-data'] = md |
2529 | 228 | ret['success'] = True | 227 | ret['success'] = True |
2530 | diff --git a/cloudinit/sources/DataSourceHetzner.py b/cloudinit/sources/DataSourceHetzner.py | |||
2531 | 229 | new file mode 100644 | 228 | new file mode 100644 |
2532 | index 0000000..5c75b65 | |||
2533 | --- /dev/null | |||
2534 | +++ b/cloudinit/sources/DataSourceHetzner.py | |||
2535 | @@ -0,0 +1,106 @@ | |||
2536 | 1 | # Author: Jonas Keidel <jonas.keidel@hetzner.com> | ||
2537 | 2 | # Author: Markus Schade <markus.schade@hetzner.com> | ||
2538 | 3 | # | ||
2539 | 4 | # This file is part of cloud-init. See LICENSE file for license information. | ||
2540 | 5 | # | ||
2541 | 6 | """Hetzner Cloud API Documentation. | ||
2542 | 7 | https://docs.hetzner.cloud/""" | ||
2543 | 8 | |||
2544 | 9 | from cloudinit import log as logging | ||
2545 | 10 | from cloudinit import net as cloudnet | ||
2546 | 11 | from cloudinit import sources | ||
2547 | 12 | from cloudinit import util | ||
2548 | 13 | |||
2549 | 14 | import cloudinit.sources.helpers.hetzner as hc_helper | ||
2550 | 15 | |||
2551 | 16 | LOG = logging.getLogger(__name__) | ||
2552 | 17 | |||
2553 | 18 | BASE_URL_V1 = 'http://169.254.169.254/hetzner/v1' | ||
2554 | 19 | |||
2555 | 20 | BUILTIN_DS_CONFIG = { | ||
2556 | 21 | 'metadata_url': BASE_URL_V1 + '/metadata', | ||
2557 | 22 | 'userdata_url': BASE_URL_V1 + '/userdata', | ||
2558 | 23 | } | ||
2559 | 24 | |||
2560 | 25 | MD_RETRIES = 60 | ||
2561 | 26 | MD_TIMEOUT = 2 | ||
2562 | 27 | MD_WAIT_RETRY = 2 | ||
2563 | 28 | |||
2564 | 29 | |||
2565 | 30 | class DataSourceHetzner(sources.DataSource): | ||
2566 | 31 | def __init__(self, sys_cfg, distro, paths): | ||
2567 | 32 | sources.DataSource.__init__(self, sys_cfg, distro, paths) | ||
2568 | 33 | self.distro = distro | ||
2569 | 34 | self.metadata = dict() | ||
2570 | 35 | self.ds_cfg = util.mergemanydict([ | ||
2571 | 36 | util.get_cfg_by_path(sys_cfg, ["datasource", "Hetzner"], {}), | ||
2572 | 37 | BUILTIN_DS_CONFIG]) | ||
2573 | 38 | self.metadata_address = self.ds_cfg['metadata_url'] | ||
2574 | 39 | self.userdata_address = self.ds_cfg['userdata_url'] | ||
2575 | 40 | self.retries = self.ds_cfg.get('retries', MD_RETRIES) | ||
2576 | 41 | self.timeout = self.ds_cfg.get('timeout', MD_TIMEOUT) | ||
2577 | 42 | self.wait_retry = self.ds_cfg.get('wait_retry', MD_WAIT_RETRY) | ||
2578 | 43 | self._network_config = None | ||
2579 | 44 | self.dsmode = sources.DSMODE_NETWORK | ||
2580 | 45 | |||
2581 | 46 | def get_data(self): | ||
2582 | 47 | if not on_hetzner(): | ||
2583 | 48 | return False | ||
2584 | 49 | nic = cloudnet.find_fallback_nic() | ||
2585 | 50 | with cloudnet.EphemeralIPv4Network(nic, "169.254.0.1", 16, | ||
2586 | 51 | "169.254.255.255"): | ||
2587 | 52 | md = hc_helper.read_metadata( | ||
2588 | 53 | self.metadata_address, timeout=self.timeout, | ||
2589 | 54 | sec_between=self.wait_retry, retries=self.retries) | ||
2590 | 55 | ud = hc_helper.read_userdata( | ||
2591 | 56 | self.userdata_address, timeout=self.timeout, | ||
2592 | 57 | sec_between=self.wait_retry, retries=self.retries) | ||
2593 | 58 | |||
2594 | 59 | self.userdata_raw = ud | ||
2595 | 60 | self.metadata_full = md | ||
2596 | 61 | |||
2597 | 62 | """hostname is name provided by user at launch. The API enforces | ||
2598 | 63 | it is a valid hostname, but it is not guaranteed to be resolvable | ||
2599 | 64 | in dns or fully qualified.""" | ||
2600 | 65 | self.metadata['instance-id'] = md['instance-id'] | ||
2601 | 66 | self.metadata['local-hostname'] = md['hostname'] | ||
2602 | 67 | self.metadata['network-config'] = md.get('network-config', None) | ||
2603 | 68 | self.metadata['public-keys'] = md.get('public-keys', None) | ||
2604 | 69 | self.vendordata_raw = md.get("vendor_data", None) | ||
2605 | 70 | |||
2606 | 71 | return True | ||
2607 | 72 | |||
2608 | 73 | @property | ||
2609 | 74 | def network_config(self): | ||
2610 | 75 | """Configure the networking. This needs to be done each boot, since | ||
2611 | 76 | the IP information may have changed due to snapshot and/or | ||
2612 | 77 | migration. | ||
2613 | 78 | """ | ||
2614 | 79 | |||
2615 | 80 | if self._network_config: | ||
2616 | 81 | return self._network_config | ||
2617 | 82 | |||
2618 | 83 | _net_config = self.metadata['network-config'] | ||
2619 | 84 | if not _net_config: | ||
2620 | 85 | raise Exception("Unable to get meta-data from server....") | ||
2621 | 86 | |||
2622 | 87 | self._network_config = _net_config | ||
2623 | 88 | |||
2624 | 89 | return self._network_config | ||
2625 | 90 | |||
2626 | 91 | |||
2627 | 92 | def on_hetzner(): | ||
2628 | 93 | return util.read_dmi_data('system-manufacturer') == "Hetzner" | ||
2629 | 94 | |||
2630 | 95 | |||
2631 | 96 | # Used to match classes to dependencies | ||
2632 | 97 | datasources = [ | ||
2633 | 98 | (DataSourceHetzner, (sources.DEP_FILESYSTEM, )), | ||
2634 | 99 | ] | ||
2635 | 100 | |||
2636 | 101 | |||
2637 | 102 | # Return a list of data sources that match this set of dependencies | ||
2638 | 103 | def get_datasource_list(depends): | ||
2639 | 104 | return sources.list_from_depends(depends, datasources) | ||
2640 | 105 | |||
2641 | 106 | # vi: ts=4 expandtab | ||
2642 | diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py | |||
2643 | 0 | new file mode 100644 | 107 | new file mode 100644 |
2644 | index 0000000..02b3d56 | |||
2645 | --- /dev/null | |||
2646 | +++ b/cloudinit/sources/DataSourceIBMCloud.py | |||
2647 | @@ -0,0 +1,325 @@ | |||
2648 | 1 | # This file is part of cloud-init. See LICENSE file for license information. | ||
2649 | 2 | """Datasource for IBMCloud. | ||
2650 | 3 | |||
2651 | 4 | IBMCloud is also know as SoftLayer or BlueMix. | ||
2652 | 5 | IBMCloud hypervisor is xen (2018-03-10). | ||
2653 | 6 | |||
2654 | 7 | There are 2 different api exposed launch methods. | ||
2655 | 8 | * template: This is the legacy method of launching instances. | ||
2656 | 9 | When booting from an image template, the system boots first into | ||
2657 | 10 | a "provisioning" mode. There, host <-> guest mechanisms are utilized | ||
2658 | 11 | to execute code in the guest and provision it. | ||
2659 | 12 | |||
2660 | 13 | Cloud-init will disable itself when it detects that it is in the | ||
2661 | 14 | provisioning mode. It detects this by the presence of | ||
2662 | 15 | a file '/root/provisioningConfiguration.cfg'. | ||
2663 | 16 | |||
2664 | 17 | When provided with user-data, the "first boot" will contain a | ||
2665 | 18 | ConfigDrive-like disk labeled with 'METADATA'. If there is no user-data | ||
2666 | 19 | provided, then there is no data-source. | ||
2667 | 20 | |||
2668 | 21 | Cloud-init never does any network configuration in this mode. | ||
2669 | 22 | |||
2670 | 23 | * os_code: Essentially "launch by OS Code" (Operating System Code). | ||
2671 | 24 | This is a more modern approach. There is no specific "provisioning" boot. | ||
2672 | 25 | Instead, cloud-init does all the customization. With or without | ||
2673 | 26 | user-data provided, an OpenStack ConfigDrive like disk is attached. | ||
2674 | 27 | |||
2675 | 28 | Only disks with label 'config-2' and UUID '9796-932E' are considered. | ||
2676 | 29 | This is to avoid this datasource claiming ConfigDrive. This does | ||
2677 | 30 | mean that 1 in 8^16 (~4 billion) Xen ConfigDrive systems will be | ||
2678 | 31 | incorrectly identified as IBMCloud. | ||
2679 | 32 | |||
2680 | 33 | TODO: | ||
2681 | 34 | * is uuid (/sys/hypervisor/uuid) stable for life of an instance? | ||
2682 | 35 | it seems it is not the same as data's uuid in the os_code case | ||
2683 | 36 | but is in the template case. | ||
2684 | 37 | |||
2685 | 38 | """ | ||
2686 | 39 | import base64 | ||
2687 | 40 | import json | ||
2688 | 41 | import os | ||
2689 | 42 | |||
2690 | 43 | from cloudinit import log as logging | ||
2691 | 44 | from cloudinit import sources | ||
2692 | 45 | from cloudinit.sources.helpers import openstack | ||
2693 | 46 | from cloudinit import util | ||
2694 | 47 | |||
2695 | 48 | LOG = logging.getLogger(__name__) | ||
2696 | 49 | |||
2697 | 50 | IBM_CONFIG_UUID = "9796-932E" | ||
2698 | 51 | |||
2699 | 52 | |||
2700 | 53 | class Platforms(object): | ||
2701 | 54 | TEMPLATE_LIVE_METADATA = "Template/Live/Metadata" | ||
2702 | 55 | TEMPLATE_LIVE_NODATA = "UNABLE TO BE IDENTIFIED." | ||
2703 | 56 | TEMPLATE_PROVISIONING_METADATA = "Template/Provisioning/Metadata" | ||
2704 | 57 | TEMPLATE_PROVISIONING_NODATA = "Template/Provisioning/No-Metadata" | ||
2705 | 58 | OS_CODE = "OS-Code/Live" | ||
2706 | 59 | |||
2707 | 60 | |||
2708 | 61 | PROVISIONING = ( | ||
2709 | 62 | Platforms.TEMPLATE_PROVISIONING_METADATA, | ||
2710 | 63 | Platforms.TEMPLATE_PROVISIONING_NODATA) | ||
2711 | 64 | |||
2712 | 65 | |||
2713 | 66 | class DataSourceIBMCloud(sources.DataSource): | ||
2714 | 67 | |||
2715 | 68 | dsname = 'IBMCloud' | ||
2716 | 69 | system_uuid = None | ||
2717 | 70 | |||
2718 | 71 | def __init__(self, sys_cfg, distro, paths): | ||
2719 | 72 | super(DataSourceIBMCloud, self).__init__(sys_cfg, distro, paths) | ||
2720 | 73 | self.source = None | ||
2721 | 74 | self._network_config = None | ||
2722 | 75 | self.network_json = None | ||
2723 | 76 | self.platform = None | ||
2724 | 77 | |||
2725 | 78 | def __str__(self): | ||
2726 | 79 | root = super(DataSourceIBMCloud, self).__str__() | ||
2727 | 80 | mstr = "%s [%s %s]" % (root, self.platform, self.source) | ||
2728 | 81 | return mstr | ||
2729 | 82 | |||
2730 | 83 | def _get_data(self): | ||
2731 | 84 | results = read_md() | ||
2732 | 85 | if results is None: | ||
2733 | 86 | return False | ||
2734 | 87 | |||
2735 | 88 | self.source = results['source'] | ||
2736 | 89 | self.platform = results['platform'] | ||
2737 | 90 | self.metadata = results['metadata'] | ||
2738 | 91 | self.userdata_raw = results.get('userdata') | ||
2739 | 92 | self.network_json = results.get('networkdata') | ||
2740 | 93 | vd = results.get('vendordata') | ||
2741 | 94 | self.vendordata_pure = vd | ||
2742 | 95 | self.system_uuid = results['system-uuid'] | ||
2743 | 96 | try: | ||
2744 | 97 | self.vendordata_raw = sources.convert_vendordata(vd) | ||
2745 | 98 | except ValueError as e: | ||
2746 | 99 | LOG.warning("Invalid content in vendor-data: %s", e) | ||
2747 | 100 | self.vendordata_raw = None | ||
2748 | 101 | |||
2749 | 102 | return True | ||
2750 | 103 | |||
2751 | 104 | def check_instance_id(self, sys_cfg): | ||
2752 | 105 | """quickly (local check only) if self.instance_id is still valid | ||
2753 | 106 | |||
2754 | 107 | in Template mode, the system uuid (/sys/hypervisor/uuid) is the | ||
2755 | 108 | same as found in the METADATA disk. But that is not true in OS_CODE | ||
2756 | 109 | mode. So we read the system_uuid and keep that for later compare.""" | ||
2757 | 110 | if self.system_uuid is None: | ||
2758 | 111 | return False | ||
2759 | 112 | return self.system_uuid == _read_system_uuid() | ||
2760 | 113 | |||
2761 | 114 | @property | ||
2762 | 115 | def network_config(self): | ||
2763 | 116 | if self.platform != Platforms.OS_CODE: | ||
2764 | 117 | # If deployed from template, an agent in the provisioning | ||
2765 | 118 | # environment handles networking configuration. Not cloud-init. | ||
2766 | 119 | return {'config': 'disabled', 'version': 1} | ||
2767 | 120 | if self._network_config is None: | ||
2768 | 121 | if self.network_json is not None: | ||
2769 | 122 | LOG.debug("network config provided via network_json") | ||
2770 | 123 | self._network_config = openstack.convert_net_json( | ||
2771 | 124 | self.network_json, known_macs=None) | ||
2772 | 125 | else: | ||
2773 | 126 | LOG.debug("no network configuration available.") | ||
2774 | 127 | return self._network_config | ||
2775 | 128 | |||
2776 | 129 | |||
2777 | 130 | def _read_system_uuid(): | ||
2778 | 131 | uuid_path = "/sys/hypervisor/uuid" | ||
2779 | 132 | if not os.path.isfile(uuid_path): | ||
2780 | 133 | return None | ||
2781 | 134 | return util.load_file(uuid_path).strip().lower() | ||
2782 | 135 | |||
2783 | 136 | |||
2784 | 137 | def _is_xen(): | ||
2785 | 138 | return os.path.exists("/proc/xen") | ||
2786 | 139 | |||
2787 | 140 | |||
2788 | 141 | def _is_ibm_provisioning(): | ||
2789 | 142 | return os.path.exists("/root/provisioningConfiguration.cfg") | ||
2790 | 143 | |||
2791 | 144 | |||
def get_ibm_platform():
    """Return a tuple (Platform, path)

    If this is Not IBM cloud, then the return value is (None, None).
    An instance in provisioning mode is considered running on IBM cloud.

    @return: (platform, path) where platform is a Platforms constant or
        None, and path is the device carrying the metadata or None.
    """
    label_mdata = "METADATA"
    label_cfg2 = "CONFIG-2"
    not_found = (None, None)

    # IBM Cloud instances run under Xen; anything else cannot match.
    if not _is_xen():
        return not_found

    # fslabels contains only the first entry with a given label.
    fslabels = {}
    try:
        devs = util.blkid()
    except util.ProcessExecutionError as e:
        LOG.warning("Failed to run blkid: %s", e)
        # FIX: return the named sentinel instead of a duplicate literal
        # tuple, keeping all "not found" exits consistent.
        return not_found

    for dev in sorted(devs.keys()):
        data = devs[dev]
        label = data.get("LABEL", "").upper()
        uuid = data.get("UUID", "").upper()
        if label not in (label_mdata, label_cfg2):
            continue
        if label in fslabels:
            LOG.warning("Duplicate fslabel '%s'. existing=%s current=%s",
                        label, fslabels[label], data)
            continue
        # A CONFIG-2 disk only counts when it carries IBM_CONFIG_UUID —
        # presumably to distinguish it from other CONFIG-2 labeled disks
        # (TODO confirm against config-drive behavior).
        if label == label_cfg2 and uuid != IBM_CONFIG_UUID:
            LOG.debug("Skipping %s with LABEL=%s due to uuid != %s: %s",
                      dev, label, uuid, data)
            continue
        fslabels[label] = data

    metadata_path = fslabels.get(label_mdata, {}).get('DEVNAME')
    cfg2_path = fslabels.get(label_cfg2, {}).get('DEVNAME')

    # CONFIG-2 (OS_CODE) wins over METADATA; provisioning state then
    # decides which Template platform applies.
    if cfg2_path:
        return (Platforms.OS_CODE, cfg2_path)
    elif metadata_path:
        if _is_ibm_provisioning():
            return (Platforms.TEMPLATE_PROVISIONING_METADATA, metadata_path)
        else:
            return (Platforms.TEMPLATE_LIVE_METADATA, metadata_path)
    elif _is_ibm_provisioning():
        return (Platforms.TEMPLATE_PROVISIONING_NODATA, None)
    return not_found
2841 | 194 | |||
2842 | 195 | |||
def read_md():
    """Read data from IBM Cloud.

    @return: None if not running on IBM Cloud.
             dictionary with guaranteed fields: metadata, version
             and optional fields: userdata, vendordata, networkdata.
             Also includes the system uuid from /sys/hypervisor/uuid."""
    platform, path = get_ibm_platform()
    if platform is None:
        LOG.debug("This is not an IBMCloud platform.")
        return None
    if platform in PROVISIONING:
        LOG.debug("Cloud-init is disabled during provisioning: %s.",
                  platform)
        return None

    ret = {'platform': platform, 'source': path,
           'system-uuid': _read_system_uuid()}

    # A directory can be read directly; a block device must be mounted
    # first via mount_cb.
    try:
        if os.path.isdir(path):
            results = metadata_from_dir(path)
        else:
            results = util.mount_cb(path, metadata_from_dir)
    except BrokenMetadata as e:
        raise RuntimeError(
            "Failed reading IBM config disk (platform=%s path=%s): %s" %
            (platform, path, e))

    ret.update(results)
    return ret
2874 | 227 | |||
2875 | 228 | |||
class BrokenMetadata(IOError):
    """Raised when a metadata file is present but cannot be decoded."""
    pass
2878 | 231 | |||
2879 | 232 | |||
def metadata_from_dir(source_dir):
    """Walk source_dir extracting standardized metadata.

    Certain metadata keys are renamed to present a standardized set of
    metadata keys.

    This function has a lot in common with ConfigDriveReader.read_v2 but
    there are a number of inconsistencies, such as key renames and only
    presenting a 'latest' version, which make it an unlikely candidate to
    share code.

    @param source_dir: directory (mounted config disk) to read from.
    @return: Dict containing translated metadata, userdata, vendordata,
        networkdata as present.
    @raises BrokenMetadata: when meta_data.json is missing or a present
        file fails to decode.
    """

    def opath(fname):
        # All files live under the OpenStack-style 'openstack/latest' dir.
        return os.path.join("openstack", "latest", fname)

    def load_json_bytes(blob):
        # blob is raw bytes; decode as UTF-8 then parse json.
        return json.loads(blob.decode('utf-8'))

    files = [
        # tuples of (results_name, path, translator)
        ('metadata_raw', opath('meta_data.json'), load_json_bytes),
        ('userdata', opath('user_data'), None),
        ('vendordata', opath('vendor_data.json'), load_json_bytes),
        ('networkdata', opath('network_data.json'), load_json_bytes),
    ]

    results = {}
    for (name, path, transl) in files:
        fpath = os.path.join(source_dir, path)
        raw = None
        try:
            raw = util.load_file(fpath, decode=False)
        except IOError as e:
            # Each file is optional; a missing one is only logged.
            LOG.debug("Failed reading path '%s': %s", fpath, e)

        if raw is None or transl is None:
            data = raw
        else:
            try:
                data = transl(raw)
            except Exception as e:
                raise BrokenMetadata("Failed decoding %s: %s" % (path, e))

        results[name] = data

    # meta_data.json is the only file required to be present.
    if results.get('metadata_raw') is None:
        raise BrokenMetadata(
            "%s missing required file 'meta_data.json'" % source_dir)

    results['metadata'] = {}

    md_raw = results['metadata_raw']
    md = results['metadata']
    if 'random_seed' in md_raw:
        try:
            md['random_seed'] = base64.b64decode(md_raw['random_seed'])
        except (ValueError, TypeError) as e:
            raise BrokenMetadata(
                "Badly formatted metadata random_seed entry: %s" % e)

    # Rename upstream keys to the names cloud-init expects.
    renames = (
        ('public_keys', 'public-keys'), ('hostname', 'local-hostname'),
        ('uuid', 'instance-id'))
    for mdname, newname in renames:
        if mdname in md_raw:
            md[newname] = md_raw[mdname]

    return results
2951 | 304 | |||
2952 | 305 | |||
# Used to match classes to dependencies.
# DataSourceIBMCloud only requires the filesystem (no network) to probe.
datasources = [
    (DataSourceIBMCloud, (sources.DEP_FILESYSTEM,)),
]
2957 | 310 | |||
2958 | 311 | |||
def get_datasource_list(depends):
    """Return the datasources in this module matching *depends*."""
    return sources.list_from_depends(depends, datasources)
2962 | 315 | |||
2963 | 316 | |||
if __name__ == "__main__":
    import argparse

    # No options defined yet; argparse still provides --help and rejects
    # unexpected arguments.
    argparse.ArgumentParser(
        description='Query IBM Cloud Metadata').parse_args()
    print(util.json_dumps(read_md()))
2971 | 324 | |||
2972 | 325 | # vi: ts=4 expandtab | ||
2973 | diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py | |||
2974 | index 6e62f98..dc914a7 100644 | |||
2975 | --- a/cloudinit/sources/DataSourceOVF.py | |||
2976 | +++ b/cloudinit/sources/DataSourceOVF.py | |||
2977 | @@ -95,11 +95,20 @@ class DataSourceOVF(sources.DataSource): | |||
2978 | 95 | "VMware Customization support") | 95 | "VMware Customization support") |
2979 | 96 | elif not util.get_cfg_option_bool( | 96 | elif not util.get_cfg_option_bool( |
2980 | 97 | self.sys_cfg, "disable_vmware_customization", True): | 97 | self.sys_cfg, "disable_vmware_customization", True): |
2986 | 98 | deployPkgPluginPath = search_file("/usr/lib/vmware-tools", | 98 | |
2987 | 99 | "libdeployPkgPlugin.so") | 99 | search_paths = ( |
2988 | 100 | if not deployPkgPluginPath: | 100 | "/usr/lib/vmware-tools", "/usr/lib64/vmware-tools", |
2989 | 101 | deployPkgPluginPath = search_file("/usr/lib/open-vm-tools", | 101 | "/usr/lib/open-vm-tools", "/usr/lib64/open-vm-tools") |
2990 | 102 | "libdeployPkgPlugin.so") | 102 | |
2991 | 103 | plugin = "libdeployPkgPlugin.so" | ||
2992 | 104 | deployPkgPluginPath = None | ||
2993 | 105 | for path in search_paths: | ||
2994 | 106 | deployPkgPluginPath = search_file(path, plugin) | ||
2995 | 107 | if deployPkgPluginPath: | ||
2996 | 108 | LOG.debug("Found the customization plugin at %s", | ||
2997 | 109 | deployPkgPluginPath) | ||
2998 | 110 | break | ||
2999 | 111 | |||
3000 | 103 | if deployPkgPluginPath: | 112 | if deployPkgPluginPath: |
3001 | 104 | # When the VM is powered on, the "VMware Tools" daemon | 113 | # When the VM is powered on, the "VMware Tools" daemon |
3002 | 105 | # copies the customization specification file to | 114 | # copies the customization specification file to |
3003 | @@ -111,6 +120,8 @@ class DataSourceOVF(sources.DataSource): | |||
3004 | 111 | msg="waiting for configuration file", | 120 | msg="waiting for configuration file", |
3005 | 112 | func=wait_for_imc_cfg_file, | 121 | func=wait_for_imc_cfg_file, |
3006 | 113 | args=("cust.cfg", max_wait)) | 122 | args=("cust.cfg", max_wait)) |
3007 | 123 | else: | ||
3008 | 124 | LOG.debug("Did not find the customization plugin.") | ||
3009 | 114 | 125 | ||
3010 | 115 | if vmwareImcConfigFilePath: | 126 | if vmwareImcConfigFilePath: |
3011 | 116 | LOG.debug("Found VMware Customization Config File at %s", | 127 | LOG.debug("Found VMware Customization Config File at %s", |
3012 | diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py | |||
3013 | index ce47b6b..d4a4111 100644 | |||
3014 | --- a/cloudinit/sources/DataSourceOpenNebula.py | |||
3015 | +++ b/cloudinit/sources/DataSourceOpenNebula.py | |||
3016 | @@ -20,7 +20,6 @@ import string | |||
3017 | 20 | 20 | ||
3018 | 21 | from cloudinit import log as logging | 21 | from cloudinit import log as logging |
3019 | 22 | from cloudinit import net | 22 | from cloudinit import net |
3020 | 23 | from cloudinit.net import eni | ||
3021 | 24 | from cloudinit import sources | 23 | from cloudinit import sources |
3022 | 25 | from cloudinit import util | 24 | from cloudinit import util |
3023 | 26 | 25 | ||
3024 | @@ -91,19 +90,19 @@ class DataSourceOpenNebula(sources.DataSource): | |||
3025 | 91 | return False | 90 | return False |
3026 | 92 | 91 | ||
3027 | 93 | self.seed = seed | 92 | self.seed = seed |
3029 | 94 | self.network_eni = results.get('network-interfaces') | 93 | self.network = results.get('network-interfaces') |
3030 | 95 | self.metadata = md | 94 | self.metadata = md |
3031 | 96 | self.userdata_raw = results.get('userdata') | 95 | self.userdata_raw = results.get('userdata') |
3032 | 97 | return True | 96 | return True |
3033 | 98 | 97 | ||
3034 | 99 | @property | 98 | @property |
3035 | 100 | def network_config(self): | 99 | def network_config(self): |
3038 | 101 | if self.network_eni is not None: | 100 | if self.network is not None: |
3039 | 102 | return eni.convert_eni_data(self.network_eni) | 101 | return self.network |
3040 | 103 | else: | 102 | else: |
3041 | 104 | return None | 103 | return None |
3042 | 105 | 104 | ||
3044 | 106 | def get_hostname(self, fqdn=False, resolve_ip=None): | 105 | def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): |
3045 | 107 | if resolve_ip is None: | 106 | if resolve_ip is None: |
3046 | 108 | if self.dsmode == sources.DSMODE_NETWORK: | 107 | if self.dsmode == sources.DSMODE_NETWORK: |
3047 | 109 | resolve_ip = True | 108 | resolve_ip = True |
3048 | @@ -143,18 +142,42 @@ class OpenNebulaNetwork(object): | |||
3049 | 143 | def mac2network(self, mac): | 142 | def mac2network(self, mac): |
3050 | 144 | return self.mac2ip(mac).rpartition(".")[0] + ".0" | 143 | return self.mac2ip(mac).rpartition(".")[0] + ".0" |
3051 | 145 | 144 | ||
3054 | 146 | def get_dns(self, dev): | 145 | def get_nameservers(self, dev): |
3055 | 147 | return self.get_field(dev, "dns", "").split() | 146 | nameservers = {} |
3056 | 147 | dns = self.get_field(dev, "dns", "").split() | ||
3057 | 148 | dns.extend(self.context.get('DNS', "").split()) | ||
3058 | 149 | if dns: | ||
3059 | 150 | nameservers['addresses'] = dns | ||
3060 | 151 | search_domain = self.get_field(dev, "search_domain", "").split() | ||
3061 | 152 | if search_domain: | ||
3062 | 153 | nameservers['search'] = search_domain | ||
3063 | 154 | return nameservers | ||
3064 | 148 | 155 | ||
3067 | 149 | def get_domain(self, dev): | 156 | def get_mtu(self, dev): |
3068 | 150 | return self.get_field(dev, "domain") | 157 | return self.get_field(dev, "mtu") |
3069 | 151 | 158 | ||
3070 | 152 | def get_ip(self, dev, mac): | 159 | def get_ip(self, dev, mac): |
3071 | 153 | return self.get_field(dev, "ip", self.mac2ip(mac)) | 160 | return self.get_field(dev, "ip", self.mac2ip(mac)) |
3072 | 154 | 161 | ||
3073 | 162 | def get_ip6(self, dev): | ||
3074 | 163 | addresses6 = [] | ||
3075 | 164 | ip6 = self.get_field(dev, "ip6") | ||
3076 | 165 | if ip6: | ||
3077 | 166 | addresses6.append(ip6) | ||
3078 | 167 | ip6_ula = self.get_field(dev, "ip6_ula") | ||
3079 | 168 | if ip6_ula: | ||
3080 | 169 | addresses6.append(ip6_ula) | ||
3081 | 170 | return addresses6 | ||
3082 | 171 | |||
3083 | 172 | def get_ip6_prefix(self, dev): | ||
3084 | 173 | return self.get_field(dev, "ip6_prefix_length", "64") | ||
3085 | 174 | |||
3086 | 155 | def get_gateway(self, dev): | 175 | def get_gateway(self, dev): |
3087 | 156 | return self.get_field(dev, "gateway") | 176 | return self.get_field(dev, "gateway") |
3088 | 157 | 177 | ||
3089 | 178 | def get_gateway6(self, dev): | ||
3090 | 179 | return self.get_field(dev, "gateway6") | ||
3091 | 180 | |||
3092 | 158 | def get_mask(self, dev): | 181 | def get_mask(self, dev): |
3093 | 159 | return self.get_field(dev, "mask", "255.255.255.0") | 182 | return self.get_field(dev, "mask", "255.255.255.0") |
3094 | 160 | 183 | ||
3095 | @@ -171,13 +194,11 @@ class OpenNebulaNetwork(object): | |||
3096 | 171 | return default if val in (None, "") else val | 194 | return default if val in (None, "") else val |
3097 | 172 | 195 | ||
3098 | 173 | def gen_conf(self): | 196 | def gen_conf(self): |
3105 | 174 | global_dns = self.context.get('DNS', "").split() | 197 | netconf = {} |
3106 | 175 | 198 | netconf['version'] = 2 | |
3107 | 176 | conf = [] | 199 | netconf['ethernets'] = {} |
3102 | 177 | conf.append('auto lo') | ||
3103 | 178 | conf.append('iface lo inet loopback') | ||
3104 | 179 | conf.append('') | ||
3108 | 180 | 200 | ||
3109 | 201 | ethernets = {} | ||
3110 | 181 | for mac, dev in self.ifaces.items(): | 202 | for mac, dev in self.ifaces.items(): |
3111 | 182 | mac = mac.lower() | 203 | mac = mac.lower() |
3112 | 183 | 204 | ||
3113 | @@ -185,29 +206,49 @@ class OpenNebulaNetwork(object): | |||
3114 | 185 | # dev stores the current system name. | 206 | # dev stores the current system name. |
3115 | 186 | c_dev = self.context_devname.get(mac, dev) | 207 | c_dev = self.context_devname.get(mac, dev) |
3116 | 187 | 208 | ||
3123 | 188 | conf.append('auto ' + dev) | 209 | devconf = {} |
3124 | 189 | conf.append('iface ' + dev + ' inet static') | 210 | |
3125 | 190 | conf.append(' #hwaddress %s' % mac) | 211 | # Set MAC address |
3126 | 191 | conf.append(' address ' + self.get_ip(c_dev, mac)) | 212 | devconf['match'] = {'macaddress': mac} |
3121 | 192 | conf.append(' network ' + self.get_network(c_dev, mac)) | ||
3122 | 193 | conf.append(' netmask ' + self.get_mask(c_dev)) | ||
3127 | 194 | 213 | ||
3128 | 214 | # Set IPv4 address | ||
3129 | 215 | devconf['addresses'] = [] | ||
3130 | 216 | mask = self.get_mask(c_dev) | ||
3131 | 217 | prefix = str(net.mask_to_net_prefix(mask)) | ||
3132 | 218 | devconf['addresses'].append( | ||
3133 | 219 | self.get_ip(c_dev, mac) + '/' + prefix) | ||
3134 | 220 | |||
3135 | 221 | # Set IPv6 Global and ULA address | ||
3136 | 222 | addresses6 = self.get_ip6(c_dev) | ||
3137 | 223 | if addresses6: | ||
3138 | 224 | prefix6 = self.get_ip6_prefix(c_dev) | ||
3139 | 225 | devconf['addresses'].extend( | ||
3140 | 226 | [i + '/' + prefix6 for i in addresses6]) | ||
3141 | 227 | |||
3142 | 228 | # Set IPv4 default gateway | ||
3143 | 195 | gateway = self.get_gateway(c_dev) | 229 | gateway = self.get_gateway(c_dev) |
3144 | 196 | if gateway: | 230 | if gateway: |
3146 | 197 | conf.append(' gateway ' + gateway) | 231 | devconf['gateway4'] = gateway |
3147 | 232 | |||
3148 | 233 | # Set IPv6 default gateway | ||
3149 | 234 | gateway6 = self.get_gateway6(c_dev) | ||
3150 | 235 | if gateway: | ||
3151 | 236 | devconf['gateway6'] = gateway6 | ||
3152 | 198 | 237 | ||
3156 | 199 | domain = self.get_domain(c_dev) | 238 | # Set DNS servers and search domains |
3157 | 200 | if domain: | 239 | nameservers = self.get_nameservers(c_dev) |
3158 | 201 | conf.append(' dns-search ' + domain) | 240 | if nameservers: |
3159 | 241 | devconf['nameservers'] = nameservers | ||
3160 | 202 | 242 | ||
3165 | 203 | # add global DNS servers to all interfaces | 243 | # Set MTU size |
3166 | 204 | dns = self.get_dns(c_dev) | 244 | mtu = self.get_mtu(c_dev) |
3167 | 205 | if global_dns or dns: | 245 | if mtu: |
3168 | 206 | conf.append(' dns-nameservers ' + ' '.join(global_dns + dns)) | 246 | devconf['mtu'] = mtu |
3169 | 207 | 247 | ||
3171 | 208 | conf.append('') | 248 | ethernets[dev] = devconf |
3172 | 209 | 249 | ||
3174 | 210 | return "\n".join(conf) | 250 | netconf['ethernets'] = ethernets |
3175 | 251 | return(netconf) | ||
3176 | 211 | 252 | ||
3177 | 212 | 253 | ||
3178 | 213 | def find_candidate_devs(): | 254 | def find_candidate_devs(): |
3179 | @@ -393,10 +434,10 @@ def read_context_disk_dir(source_dir, asuser=None): | |||
3180 | 393 | except TypeError: | 434 | except TypeError: |
3181 | 394 | LOG.warning("Failed base64 decoding of userdata") | 435 | LOG.warning("Failed base64 decoding of userdata") |
3182 | 395 | 436 | ||
3184 | 396 | # generate static /etc/network/interfaces | 437 | # generate Network Configuration v2 |
3185 | 397 | # only if there are any required context variables | 438 | # only if there are any required context variables |
3188 | 398 | # http://opennebula.org/documentation:rel3.8:cong#network_configuration | 439 | # http://docs.opennebula.org/5.4/operation/references/template.html#context-section |
3189 | 399 | ipaddr_keys = [k for k in context if re.match(r'^ETH\d+_IP$', k)] | 440 | ipaddr_keys = [k for k in context if re.match(r'^ETH\d+_IP.*$', k)] |
3190 | 400 | if ipaddr_keys: | 441 | if ipaddr_keys: |
3191 | 401 | onet = OpenNebulaNetwork(context) | 442 | onet = OpenNebulaNetwork(context) |
3192 | 402 | results['network-interfaces'] = onet.gen_conf() | 443 | results['network-interfaces'] = onet.gen_conf() |
3193 | diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py | |||
3194 | index b0b19c9..e2502b0 100644 | |||
3195 | --- a/cloudinit/sources/DataSourceScaleway.py | |||
3196 | +++ b/cloudinit/sources/DataSourceScaleway.py | |||
3197 | @@ -113,9 +113,9 @@ def query_data_api_once(api_address, timeout, requests_session): | |||
3198 | 113 | retries=0, | 113 | retries=0, |
3199 | 114 | session=requests_session, | 114 | session=requests_session, |
3200 | 115 | # If the error is a HTTP/404 or a ConnectionError, go into raise | 115 | # If the error is a HTTP/404 or a ConnectionError, go into raise |
3204 | 116 | # block below. | 116 | # block below and don't bother retrying. |
3205 | 117 | exception_cb=lambda _, exc: exc.code == 404 or ( | 117 | exception_cb=lambda _, exc: exc.code != 404 and ( |
3206 | 118 | isinstance(exc.cause, requests.exceptions.ConnectionError) | 118 | not isinstance(exc.cause, requests.exceptions.ConnectionError) |
3207 | 119 | ) | 119 | ) |
3208 | 120 | ) | 120 | ) |
3209 | 121 | return util.decode_binary(resp.contents) | 121 | return util.decode_binary(resp.contents) |
3210 | @@ -215,7 +215,7 @@ class DataSourceScaleway(sources.DataSource): | |||
3211 | 215 | def get_public_ssh_keys(self): | 215 | def get_public_ssh_keys(self): |
3212 | 216 | return [key['key'] for key in self.metadata['ssh_public_keys']] | 216 | return [key['key'] for key in self.metadata['ssh_public_keys']] |
3213 | 217 | 217 | ||
3215 | 218 | def get_hostname(self, fqdn=False, resolve_ip=False): | 218 | def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): |
3216 | 219 | return self.metadata['hostname'] | 219 | return self.metadata['hostname'] |
3217 | 220 | 220 | ||
3218 | 221 | @property | 221 | @property |
3219 | diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py | |||
3220 | index a05ca2f..df0b374 100644 | |||
3221 | --- a/cloudinit/sources/__init__.py | |||
3222 | +++ b/cloudinit/sources/__init__.py | |||
3223 | @@ -276,21 +276,34 @@ class DataSource(object): | |||
3224 | 276 | return "iid-datasource" | 276 | return "iid-datasource" |
3225 | 277 | return str(self.metadata['instance-id']) | 277 | return str(self.metadata['instance-id']) |
3226 | 278 | 278 | ||
3228 | 279 | def get_hostname(self, fqdn=False, resolve_ip=False): | 279 | def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): |
3229 | 280 | """Get hostname or fqdn from the datasource. Look it up if desired. | ||
3230 | 281 | |||
3231 | 282 | @param fqdn: Boolean, set True to return hostname with domain. | ||
3232 | 283 | @param resolve_ip: Boolean, set True to attempt to resolve an ipv4 | ||
3233 | 284 | address provided in local-hostname meta-data. | ||
3234 | 285 | @param metadata_only: Boolean, set True to avoid looking up hostname | ||
3235 | 286 | if meta-data doesn't have local-hostname present. | ||
3236 | 287 | |||
3237 | 288 | @return: hostname or qualified hostname. Optionally return None when | ||
3238 | 289 | metadata_only is True and local-hostname data is not available. | ||
3239 | 290 | """ | ||
3240 | 280 | defdomain = "localdomain" | 291 | defdomain = "localdomain" |
3241 | 281 | defhost = "localhost" | 292 | defhost = "localhost" |
3242 | 282 | domain = defdomain | 293 | domain = defdomain |
3243 | 283 | 294 | ||
3244 | 284 | if not self.metadata or 'local-hostname' not in self.metadata: | 295 | if not self.metadata or 'local-hostname' not in self.metadata: |
3245 | 296 | if metadata_only: | ||
3246 | 297 | return None | ||
3247 | 285 | # this is somewhat questionable really. | 298 | # this is somewhat questionable really. |
3248 | 286 | # the cloud datasource was asked for a hostname | 299 | # the cloud datasource was asked for a hostname |
3249 | 287 | # and didn't have one. raising error might be more appropriate | 300 | # and didn't have one. raising error might be more appropriate |
3250 | 288 | # but instead, basically look up the existing hostname | 301 | # but instead, basically look up the existing hostname |
3251 | 289 | toks = [] | 302 | toks = [] |
3252 | 290 | hostname = util.get_hostname() | 303 | hostname = util.get_hostname() |
3256 | 291 | fqdn = util.get_fqdn_from_hosts(hostname) | 304 | hosts_fqdn = util.get_fqdn_from_hosts(hostname) |
3257 | 292 | if fqdn and fqdn.find(".") > 0: | 305 | if hosts_fqdn and hosts_fqdn.find(".") > 0: |
3258 | 293 | toks = str(fqdn).split(".") | 306 | toks = str(hosts_fqdn).split(".") |
3259 | 294 | elif hostname and hostname.find(".") > 0: | 307 | elif hostname and hostname.find(".") > 0: |
3260 | 295 | toks = str(hostname).split(".") | 308 | toks = str(hostname).split(".") |
3261 | 296 | elif hostname: | 309 | elif hostname: |
3262 | diff --git a/cloudinit/sources/helpers/hetzner.py b/cloudinit/sources/helpers/hetzner.py | |||
3263 | 297 | new file mode 100644 | 310 | new file mode 100644 |
3264 | index 0000000..2554530 | |||
3265 | --- /dev/null | |||
3266 | +++ b/cloudinit/sources/helpers/hetzner.py | |||
3267 | @@ -0,0 +1,26 @@ | |||
3268 | 1 | # Author: Jonas Keidel <jonas.keidel@hetzner.com> | ||
3269 | 2 | # Author: Markus Schade <markus.schade@hetzner.com> | ||
3270 | 3 | # | ||
3271 | 4 | # This file is part of cloud-init. See LICENSE file for license information. | ||
3272 | 5 | |||
3273 | 6 | from cloudinit import log as logging | ||
3274 | 7 | from cloudinit import url_helper | ||
3275 | 8 | from cloudinit import util | ||
3276 | 9 | |||
3277 | 10 | LOG = logging.getLogger(__name__) | ||
3278 | 11 | |||
3279 | 12 | |||
def read_metadata(url, timeout=2, sec_between=2, retries=30):
    """Fetch and YAML-parse instance metadata from *url*."""
    response = url_helper.readurl(
        url, timeout=timeout, sec_between=sec_between, retries=retries)
    if response.ok():
        return util.load_yaml(response.contents.decode())
    raise RuntimeError("unable to read metadata at %s" % url)
3286 | 19 | |||
3287 | 20 | |||
def read_userdata(url, timeout=2, sec_between=2, retries=30):
    """Fetch raw user-data bytes from *url*."""
    response = url_helper.readurl(
        url, timeout=timeout, sec_between=sec_between, retries=retries)
    if response.ok():
        return response.contents
    raise RuntimeError("unable to read userdata at %s" % url)
3294 | diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py | |||
3295 | index af15115..e7fda22 100644 | |||
3296 | --- a/cloudinit/sources/tests/test_init.py | |||
3297 | +++ b/cloudinit/sources/tests/test_init.py | |||
3298 | @@ -1,13 +1,15 @@ | |||
3299 | 1 | # This file is part of cloud-init. See LICENSE file for license information. | 1 | # This file is part of cloud-init. See LICENSE file for license information. |
3300 | 2 | 2 | ||
3301 | 3 | import inspect | ||
3302 | 3 | import os | 4 | import os |
3303 | 4 | import six | 5 | import six |
3304 | 5 | import stat | 6 | import stat |
3305 | 6 | 7 | ||
3306 | 7 | from cloudinit.helpers import Paths | 8 | from cloudinit.helpers import Paths |
3307 | 9 | from cloudinit import importer | ||
3308 | 8 | from cloudinit.sources import ( | 10 | from cloudinit.sources import ( |
3309 | 9 | INSTANCE_JSON_FILE, DataSource) | 11 | INSTANCE_JSON_FILE, DataSource) |
3311 | 10 | from cloudinit.tests.helpers import CiTestCase, skipIf | 12 | from cloudinit.tests.helpers import CiTestCase, skipIf, mock |
3312 | 11 | from cloudinit.user_data import UserDataProcessor | 13 | from cloudinit.user_data import UserDataProcessor |
3313 | 12 | from cloudinit import util | 14 | from cloudinit import util |
3314 | 13 | 15 | ||
3315 | @@ -108,6 +110,74 @@ class TestDataSource(CiTestCase): | |||
3316 | 108 | self.assertEqual('userdata_raw', datasource.userdata_raw) | 110 | self.assertEqual('userdata_raw', datasource.userdata_raw) |
3317 | 109 | self.assertEqual('vendordata_raw', datasource.vendordata_raw) | 111 | self.assertEqual('vendordata_raw', datasource.vendordata_raw) |
3318 | 110 | 112 | ||
3319 | 113 | def test_get_hostname_strips_local_hostname_without_domain(self): | ||
3320 | 114 | """Datasource.get_hostname strips metadata local-hostname of domain.""" | ||
3321 | 115 | tmp = self.tmp_dir() | ||
3322 | 116 | datasource = DataSourceTestSubclassNet( | ||
3323 | 117 | self.sys_cfg, self.distro, Paths({'run_dir': tmp})) | ||
3324 | 118 | self.assertTrue(datasource.get_data()) | ||
3325 | 119 | self.assertEqual( | ||
3326 | 120 | 'test-subclass-hostname', datasource.metadata['local-hostname']) | ||
3327 | 121 | self.assertEqual('test-subclass-hostname', datasource.get_hostname()) | ||
3328 | 122 | datasource.metadata['local-hostname'] = 'hostname.my.domain.com' | ||
3329 | 123 | self.assertEqual('hostname', datasource.get_hostname()) | ||
3330 | 124 | |||
3331 | 125 | def test_get_hostname_with_fqdn_returns_local_hostname_with_domain(self): | ||
3332 | 126 | """Datasource.get_hostname with fqdn set gets qualified hostname.""" | ||
3333 | 127 | tmp = self.tmp_dir() | ||
3334 | 128 | datasource = DataSourceTestSubclassNet( | ||
3335 | 129 | self.sys_cfg, self.distro, Paths({'run_dir': tmp})) | ||
3336 | 130 | self.assertTrue(datasource.get_data()) | ||
3337 | 131 | datasource.metadata['local-hostname'] = 'hostname.my.domain.com' | ||
3338 | 132 | self.assertEqual( | ||
3339 | 133 | 'hostname.my.domain.com', datasource.get_hostname(fqdn=True)) | ||
3340 | 134 | |||
3341 | 135 | def test_get_hostname_without_metadata_uses_system_hostname(self): | ||
3342 | 136 | """Datasource.gethostname runs util.get_hostname when no metadata.""" | ||
3343 | 137 | tmp = self.tmp_dir() | ||
3344 | 138 | datasource = DataSourceTestSubclassNet( | ||
3345 | 139 | self.sys_cfg, self.distro, Paths({'run_dir': tmp})) | ||
3346 | 140 | self.assertEqual({}, datasource.metadata) | ||
3347 | 141 | mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts' | ||
3348 | 142 | with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost: | ||
3349 | 143 | with mock.patch(mock_fqdn) as m_fqdn: | ||
3350 | 144 | m_gethost.return_value = 'systemhostname.domain.com' | ||
3351 | 145 | m_fqdn.return_value = None # No matching fqdn in /etc/hosts | ||
3352 | 146 | self.assertEqual('systemhostname', datasource.get_hostname()) | ||
3353 | 147 | self.assertEqual( | ||
3354 | 148 | 'systemhostname.domain.com', | ||
3355 | 149 | datasource.get_hostname(fqdn=True)) | ||
3356 | 150 | |||
3357 | 151 | def test_get_hostname_without_metadata_returns_none(self): | ||
3358 | 152 | """Datasource.gethostname returns None when metadata_only and no MD.""" | ||
3359 | 153 | tmp = self.tmp_dir() | ||
3360 | 154 | datasource = DataSourceTestSubclassNet( | ||
3361 | 155 | self.sys_cfg, self.distro, Paths({'run_dir': tmp})) | ||
3362 | 156 | self.assertEqual({}, datasource.metadata) | ||
3363 | 157 | mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts' | ||
3364 | 158 | with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost: | ||
3365 | 159 | with mock.patch(mock_fqdn) as m_fqdn: | ||
3366 | 160 | self.assertIsNone(datasource.get_hostname(metadata_only=True)) | ||
3367 | 161 | self.assertIsNone( | ||
3368 | 162 | datasource.get_hostname(fqdn=True, metadata_only=True)) | ||
3369 | 163 | self.assertEqual([], m_gethost.call_args_list) | ||
3370 | 164 | self.assertEqual([], m_fqdn.call_args_list) | ||
3371 | 165 | |||
3372 | 166 | def test_get_hostname_without_metadata_prefers_etc_hosts(self): | ||
3373 | 167 | """Datasource.gethostname prefers /etc/hosts to util.get_hostname.""" | ||
3374 | 168 | tmp = self.tmp_dir() | ||
3375 | 169 | datasource = DataSourceTestSubclassNet( | ||
3376 | 170 | self.sys_cfg, self.distro, Paths({'run_dir': tmp})) | ||
3377 | 171 | self.assertEqual({}, datasource.metadata) | ||
3378 | 172 | mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts' | ||
3379 | 173 | with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost: | ||
3380 | 174 | with mock.patch(mock_fqdn) as m_fqdn: | ||
3381 | 175 | m_gethost.return_value = 'systemhostname.domain.com' | ||
3382 | 176 | m_fqdn.return_value = 'fqdnhostname.domain.com' | ||
3383 | 177 | self.assertEqual('fqdnhostname', datasource.get_hostname()) | ||
3384 | 178 | self.assertEqual('fqdnhostname.domain.com', | ||
3385 | 179 | datasource.get_hostname(fqdn=True)) | ||
3386 | 180 | |||
3387 | 111 | def test_get_data_write_json_instance_data(self): | 181 | def test_get_data_write_json_instance_data(self): |
3388 | 112 | """get_data writes INSTANCE_JSON_FILE to run_dir as readonly root.""" | 182 | """get_data writes INSTANCE_JSON_FILE to run_dir as readonly root.""" |
3389 | 113 | tmp = self.tmp_dir() | 183 | tmp = self.tmp_dir() |
3390 | @@ -200,3 +270,29 @@ class TestDataSource(CiTestCase): | |||
3391 | 200 | "WARNING: Error persisting instance-data.json: 'utf8' codec can't" | 270 | "WARNING: Error persisting instance-data.json: 'utf8' codec can't" |
3392 | 201 | " decode byte 0xaa in position 2: invalid start byte", | 271 | " decode byte 0xaa in position 2: invalid start byte", |
3393 | 202 | self.logs.getvalue()) | 272 | self.logs.getvalue()) |
3394 | 273 | |||
3395 | 274 | def test_get_hostname_subclass_support(self): | ||
3396 | 275 | """Validate get_hostname signature on all subclasses of DataSource.""" | ||
3397 | 276 | # Use inspect.getfullargspec when we drop py2.6 and py2.7 | ||
3398 | 277 | get_args = inspect.getargspec # pylint: disable=W1505 | ||
3399 | 278 | base_args = get_args(DataSource.get_hostname) # pylint: disable=W1505 | ||
3400 | 279 | # Import all DataSource subclasses so we can inspect them. | ||
3401 | 280 | modules = util.find_modules(os.path.dirname(os.path.dirname(__file__))) | ||
3402 | 281 | for loc, name in modules.items(): | ||
3403 | 282 | mod_locs, _ = importer.find_module(name, ['cloudinit.sources'], []) | ||
3404 | 283 | if mod_locs: | ||
3405 | 284 | importer.import_module(mod_locs[0]) | ||
3406 | 285 | for child in DataSource.__subclasses__(): | ||
3407 | 286 | if 'Test' in child.dsname: | ||
3408 | 287 | continue | ||
3409 | 288 | self.assertEqual( | ||
3410 | 289 | base_args, | ||
3411 | 290 | get_args(child.get_hostname), # pylint: disable=W1505 | ||
3412 | 291 | '%s does not implement DataSource.get_hostname params' | ||
3413 | 292 | % child) | ||
3414 | 293 | for grandchild in child.__subclasses__(): | ||
3415 | 294 | self.assertEqual( | ||
3416 | 295 | base_args, | ||
3417 | 296 | get_args(grandchild.get_hostname), # pylint: disable=W1505 | ||
3418 | 297 | '%s does not implement DataSource.get_hostname params' | ||
3419 | 298 | % grandchild) | ||
3420 | diff --git a/cloudinit/stages.py b/cloudinit/stages.py | |||
3421 | index d045268..bc4ebc8 100644 | |||
3422 | --- a/cloudinit/stages.py | |||
3423 | +++ b/cloudinit/stages.py | |||
3424 | @@ -132,8 +132,7 @@ class Init(object): | |||
3425 | 132 | return initial_dirs | 132 | return initial_dirs |
3426 | 133 | 133 | ||
3427 | 134 | def purge_cache(self, rm_instance_lnk=False): | 134 | def purge_cache(self, rm_instance_lnk=False): |
3430 | 135 | rm_list = [] | 135 | rm_list = [self.paths.boot_finished] |
3429 | 136 | rm_list.append(self.paths.boot_finished) | ||
3431 | 137 | if rm_instance_lnk: | 136 | if rm_instance_lnk: |
3432 | 138 | rm_list.append(self.paths.instance_link) | 137 | rm_list.append(self.paths.instance_link) |
3433 | 139 | for f in rm_list: | 138 | for f in rm_list: |
3434 | diff --git a/cloudinit/subp.py b/cloudinit/subp.py | |||
3435 | 140 | new file mode 100644 | 139 | new file mode 100644 |
3436 | index 0000000..0ad0930 | |||
3437 | --- /dev/null | |||
3438 | +++ b/cloudinit/subp.py | |||
3439 | @@ -0,0 +1,57 @@ | |||
3440 | 1 | # This file is part of cloud-init. See LICENSE file for license information. | ||
3441 | 2 | """Common utility functions for interacting with subprocess.""" | ||
3442 | 3 | |||
3443 | 4 | # TODO move subp shellify and runparts related functions out of util.py | ||
3444 | 5 | |||
3445 | 6 | import logging | ||
3446 | 7 | |||
3447 | 8 | LOG = logging.getLogger(__name__) | ||
3448 | 9 | |||
3449 | 10 | |||
3450 | 11 | def prepend_base_command(base_command, commands): | ||
3451 | 12 | """Ensure user-provided commands start with base_command; warn otherwise. | ||
3452 | 13 | |||
3453 | 14 | Each command is either a list or string. Perform the following: | ||
3454 | 15 | - If the command is a list, pop the first element if it is None | ||
3455 | 16 | - If the command is a list, insert base_command as the first element if | ||
3456 | 17 | not present. | ||
3457 | 18 | - When the command is a string not starting with 'base-command', warn. | ||
3458 | 19 | |||
3459 | 20 | Allow flexibility to provide non-base-command environment/config setup if | ||
3460 | 21 | needed. | ||
3461 | 22 | |||
3462 | 23 | @commands: List of commands. Each command element is a list or string. | ||
3463 | 24 | |||
3464 | 25 | @return: List of 'fixed up' commands. | ||
3465 | 26 | @raise: TypeError on invalid config item type. | ||
3466 | 27 | """ | ||
3467 | 28 | warnings = [] | ||
3468 | 29 | errors = [] | ||
3469 | 30 | fixed_commands = [] | ||
3470 | 31 | for command in commands: | ||
3471 | 32 | if isinstance(command, list): | ||
3472 | 33 | if command[0] is None: # Avoid warnings by specifying None | ||
3473 | 34 | command = command[1:] | ||
3474 | 35 | elif command[0] != base_command: # Automatically prepend | ||
3475 | 36 | command.insert(0, base_command) | ||
3476 | 37 | elif isinstance(command, str): | ||
3477 | 38 | if not command.startswith('%s ' % base_command): | ||
3478 | 39 | warnings.append(command) | ||
3479 | 40 | else: | ||
3480 | 41 | errors.append(str(command)) | ||
3481 | 42 | continue | ||
3482 | 43 | fixed_commands.append(command) | ||
3483 | 44 | |||
3484 | 45 | if warnings: | ||
3485 | 46 | LOG.warning( | ||
3486 | 47 | 'Non-%s commands in %s config:\n%s', | ||
3487 | 48 | base_command, base_command, '\n'.join(warnings)) | ||
3488 | 49 | if errors: | ||
3489 | 50 | raise TypeError( | ||
3490 | 51 | 'Invalid {name} config.' | ||
3491 | 52 | ' These commands are not a string or list:\n{errors}'.format( | ||
3492 | 53 | name=base_command, errors='\n'.join(errors))) | ||
3493 | 54 | return fixed_commands | ||
3494 | 55 | |||
3495 | 56 | |||
3496 | 57 | # vi: ts=4 expandtab | ||
3497 | diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py | |||
3498 | index 0080c72..999b1d7 100644 | |||
3499 | --- a/cloudinit/tests/helpers.py | |||
3500 | +++ b/cloudinit/tests/helpers.py | |||
3501 | @@ -173,17 +173,15 @@ class CiTestCase(TestCase): | |||
3502 | 173 | dir = self.tmp_dir() | 173 | dir = self.tmp_dir() |
3503 | 174 | return os.path.normpath(os.path.abspath(os.path.join(dir, path))) | 174 | return os.path.normpath(os.path.abspath(os.path.join(dir, path))) |
3504 | 175 | 175 | ||
3516 | 176 | def assertRaisesCodeEqual(self, expected, found): | 176 | def sys_exit(self, code): |
3517 | 177 | """Handle centos6 having different context manager for assertRaises. | 177 | """Provide a wrapper around sys.exit for python 2.6 |
3518 | 178 | with assertRaises(Exception) as e: | 178 | |
3519 | 179 | raise Exception("BOO") | 179 | In 2.6, this code would produce 'cm.exception' with value int(2) |
3520 | 180 | 180 | rather than the SystemExit that was raised by sys.exit(2). | |
3521 | 181 | centos6 will have e.exception as an integer. | 181 | with assertRaises(SystemExit) as cm: |
3522 | 182 | anything nwere will have it as something with a '.code'""" | 182 | sys.exit(2) |
3523 | 183 | if isinstance(found, int): | 183 | """ |
3524 | 184 | self.assertEqual(expected, found) | 184 | raise SystemExit(code) |
3514 | 185 | else: | ||
3515 | 186 | self.assertEqual(expected, found.code) | ||
3525 | 187 | 185 | ||
3526 | 188 | 186 | ||
3527 | 189 | class ResourceUsingTestCase(CiTestCase): | 187 | class ResourceUsingTestCase(CiTestCase): |
3528 | @@ -285,10 +283,15 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): | |||
3529 | 285 | def patchOS(self, new_root): | 283 | def patchOS(self, new_root): |
3530 | 286 | patch_funcs = { | 284 | patch_funcs = { |
3531 | 287 | os.path: [('isfile', 1), ('exists', 1), | 285 | os.path: [('isfile', 1), ('exists', 1), |
3533 | 288 | ('islink', 1), ('isdir', 1)], | 286 | ('islink', 1), ('isdir', 1), ('lexists', 1)], |
3534 | 289 | os: [('listdir', 1), ('mkdir', 1), | 287 | os: [('listdir', 1), ('mkdir', 1), |
3536 | 290 | ('lstat', 1), ('symlink', 2)], | 288 | ('lstat', 1), ('symlink', 2)] |
3537 | 291 | } | 289 | } |
3538 | 290 | |||
3539 | 291 | if hasattr(os, 'scandir'): | ||
3540 | 292 | # py27 does not have scandir | ||
3541 | 293 | patch_funcs[os].append(('scandir', 1)) | ||
3542 | 294 | |||
3543 | 292 | for (mod, funcs) in patch_funcs.items(): | 295 | for (mod, funcs) in patch_funcs.items(): |
3544 | 293 | for f, nargs in funcs: | 296 | for f, nargs in funcs: |
3545 | 294 | func = getattr(mod, f) | 297 | func = getattr(mod, f) |
3546 | @@ -411,6 +414,19 @@ except AttributeError: | |||
3547 | 411 | return decorator | 414 | return decorator |
3548 | 412 | 415 | ||
3549 | 413 | 416 | ||
3550 | 417 | try: | ||
3551 | 418 | import jsonschema | ||
3552 | 419 | assert jsonschema # avoid pyflakes error F401: import unused | ||
3553 | 420 | _missing_jsonschema_dep = False | ||
3554 | 421 | except ImportError: | ||
3555 | 422 | _missing_jsonschema_dep = True | ||
3556 | 423 | |||
3557 | 424 | |||
3558 | 425 | def skipUnlessJsonSchema(): | ||
3559 | 426 | return skipIf( | ||
3560 | 427 | _missing_jsonschema_dep, "No python-jsonschema dependency present.") | ||
3561 | 428 | |||
3562 | 429 | |||
3563 | 414 | # older versions of mock do not have the useful 'assert_not_called' | 430 | # older versions of mock do not have the useful 'assert_not_called' |
3564 | 415 | if not hasattr(mock.Mock, 'assert_not_called'): | 431 | if not hasattr(mock.Mock, 'assert_not_called'): |
3565 | 416 | def __mock_assert_not_called(mmock): | 432 | def __mock_assert_not_called(mmock): |
3566 | @@ -422,12 +438,12 @@ if not hasattr(mock.Mock, 'assert_not_called'): | |||
3567 | 422 | mock.Mock.assert_not_called = __mock_assert_not_called | 438 | mock.Mock.assert_not_called = __mock_assert_not_called |
3568 | 423 | 439 | ||
3569 | 424 | 440 | ||
3573 | 425 | # older unittest2.TestCase (centos6) do not have assertRaisesRegex | 441 | # older unittest2.TestCase (centos6) have only the now-deprecated |
3574 | 426 | # And setting assertRaisesRegex to assertRaisesRegexp causes | 442 | # assertRaisesRegexp. Simple assignment makes pylint complain, about |
3575 | 427 | # https://github.com/PyCQA/pylint/issues/1653 . So the workaround. | 443 | # users of assertRaisesRegex so we use getattr to trick it. |
3576 | 444 | # https://github.com/PyCQA/pylint/issues/1946 | ||
3577 | 428 | if not hasattr(unittest2.TestCase, 'assertRaisesRegex'): | 445 | if not hasattr(unittest2.TestCase, 'assertRaisesRegex'): |
3581 | 429 | def _tricky(*args, **kwargs): | 446 | unittest2.TestCase.assertRaisesRegex = ( |
3582 | 430 | return unittest2.TestCase.assertRaisesRegexp | 447 | getattr(unittest2.TestCase, 'assertRaisesRegexp')) |
3580 | 431 | unittest2.TestCase.assertRaisesRegex = _tricky | ||
3583 | 432 | 448 | ||
3584 | 433 | # vi: ts=4 expandtab | 449 | # vi: ts=4 expandtab |
3585 | diff --git a/cloudinit/tests/test_subp.py b/cloudinit/tests/test_subp.py | |||
3586 | 434 | new file mode 100644 | 450 | new file mode 100644 |
3587 | index 0000000..448097d | |||
3588 | --- /dev/null | |||
3589 | +++ b/cloudinit/tests/test_subp.py | |||
3590 | @@ -0,0 +1,61 @@ | |||
3591 | 1 | # This file is part of cloud-init. See LICENSE file for license information. | ||
3592 | 2 | |||
3593 | 3 | """Tests for cloudinit.subp utility functions""" | ||
3594 | 4 | |||
3595 | 5 | from cloudinit import subp | ||
3596 | 6 | from cloudinit.tests.helpers import CiTestCase | ||
3597 | 7 | |||
3598 | 8 | |||
3599 | 9 | class TestPrependBaseCommands(CiTestCase): | ||
3600 | 10 | |||
3601 | 11 | with_logs = True | ||
3602 | 12 | |||
3603 | 13 | def test_prepend_base_command_errors_on_neither_string_nor_list(self): | ||
3604 | 14 | """Raise an error for each command which is not a string or list.""" | ||
3605 | 15 | orig_commands = ['ls', 1, {'not': 'gonna work'}, ['basecmd', 'list']] | ||
3606 | 16 | with self.assertRaises(TypeError) as context_manager: | ||
3607 | 17 | subp.prepend_base_command( | ||
3608 | 18 | base_command='basecmd', commands=orig_commands) | ||
3609 | 19 | self.assertEqual( | ||
3610 | 20 | "Invalid basecmd config. These commands are not a string or" | ||
3611 | 21 | " list:\n1\n{'not': 'gonna work'}", | ||
3612 | 22 | str(context_manager.exception)) | ||
3613 | 23 | |||
3614 | 24 | def test_prepend_base_command_warns_on_non_base_string_commands(self): | ||
3615 | 25 | """Warn on each non-base for commands of type string.""" | ||
3616 | 26 | orig_commands = [ | ||
3617 | 27 | 'ls', 'basecmd list', 'touch /blah', 'basecmd install x'] | ||
3618 | 28 | fixed_commands = subp.prepend_base_command( | ||
3619 | 29 | base_command='basecmd', commands=orig_commands) | ||
3620 | 30 | self.assertEqual( | ||
3621 | 31 | 'WARNING: Non-basecmd commands in basecmd config:\n' | ||
3622 | 32 | 'ls\ntouch /blah\n', | ||
3623 | 33 | self.logs.getvalue()) | ||
3624 | 34 | self.assertEqual(orig_commands, fixed_commands) | ||
3625 | 35 | |||
3626 | 36 | def test_prepend_base_command_prepends_on_non_base_list_commands(self): | ||
3627 | 37 | """Prepend 'basecmd' for each non-basecmd command of type list.""" | ||
3628 | 38 | orig_commands = [['ls'], ['basecmd', 'list'], ['basecmda', '/blah'], | ||
3629 | 39 | ['basecmd', 'install', 'x']] | ||
3630 | 40 | expected = [['basecmd', 'ls'], ['basecmd', 'list'], | ||
3631 | 41 | ['basecmd', 'basecmda', '/blah'], | ||
3632 | 42 | ['basecmd', 'install', 'x']] | ||
3633 | 43 | fixed_commands = subp.prepend_base_command( | ||
3634 | 44 | base_command='basecmd', commands=orig_commands) | ||
3635 | 45 | self.assertEqual('', self.logs.getvalue()) | ||
3636 | 46 | self.assertEqual(expected, fixed_commands) | ||
3637 | 47 | |||
3638 | 48 | def test_prepend_base_command_removes_first_item_when_none(self): | ||
3639 | 49 | """Remove the first element of a non-basecmd when it is None.""" | ||
3640 | 50 | orig_commands = [[None, 'ls'], ['basecmd', 'list'], | ||
3641 | 51 | [None, 'touch', '/blah'], | ||
3642 | 52 | ['basecmd', 'install', 'x']] | ||
3643 | 53 | expected = [['ls'], ['basecmd', 'list'], | ||
3644 | 54 | ['touch', '/blah'], | ||
3645 | 55 | ['basecmd', 'install', 'x']] | ||
3646 | 56 | fixed_commands = subp.prepend_base_command( | ||
3647 | 57 | base_command='basecmd', commands=orig_commands) | ||
3648 | 58 | self.assertEqual('', self.logs.getvalue()) | ||
3649 | 59 | self.assertEqual(expected, fixed_commands) | ||
3650 | 60 | |||
3651 | 61 | # vi: ts=4 expandtab | ||
3652 | diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py | |||
3653 | index ba6bf69..3f37dbb 100644 | |||
3654 | --- a/cloudinit/tests/test_util.py | |||
3655 | +++ b/cloudinit/tests/test_util.py | |||
3656 | @@ -3,6 +3,7 @@ | |||
3657 | 3 | """Tests for cloudinit.util""" | 3 | """Tests for cloudinit.util""" |
3658 | 4 | 4 | ||
3659 | 5 | import logging | 5 | import logging |
3660 | 6 | from textwrap import dedent | ||
3661 | 6 | 7 | ||
3662 | 7 | import cloudinit.util as util | 8 | import cloudinit.util as util |
3663 | 8 | 9 | ||
3664 | @@ -16,6 +17,25 @@ MOUNT_INFO = [ | |||
3665 | 16 | ] | 17 | ] |
3666 | 17 | 18 | ||
3667 | 18 | 19 | ||
3668 | 20 | class FakeCloud(object): | ||
3669 | 21 | |||
3670 | 22 | def __init__(self, hostname, fqdn): | ||
3671 | 23 | self.hostname = hostname | ||
3672 | 24 | self.fqdn = fqdn | ||
3673 | 25 | self.calls = [] | ||
3674 | 26 | |||
3675 | 27 | def get_hostname(self, fqdn=None, metadata_only=None): | ||
3676 | 28 | myargs = {} | ||
3677 | 29 | if fqdn is not None: | ||
3678 | 30 | myargs['fqdn'] = fqdn | ||
3679 | 31 | if metadata_only is not None: | ||
3680 | 32 | myargs['metadata_only'] = metadata_only | ||
3681 | 33 | self.calls.append(myargs) | ||
3682 | 34 | if fqdn: | ||
3683 | 35 | return self.fqdn | ||
3684 | 36 | return self.hostname | ||
3685 | 37 | |||
3686 | 38 | |||
3687 | 19 | class TestUtil(CiTestCase): | 39 | class TestUtil(CiTestCase): |
3688 | 20 | 40 | ||
3689 | 21 | def test_parse_mount_info_no_opts_no_arg(self): | 41 | def test_parse_mount_info_no_opts_no_arg(self): |
3690 | @@ -44,3 +64,152 @@ class TestUtil(CiTestCase): | |||
3691 | 44 | m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'ro,relatime') | 64 | m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'ro,relatime') |
3692 | 45 | is_rw = util.mount_is_read_write('/') | 65 | is_rw = util.mount_is_read_write('/') |
3693 | 46 | self.assertEqual(is_rw, False) | 66 | self.assertEqual(is_rw, False) |
3694 | 67 | |||
3695 | 68 | |||
3696 | 69 | class TestShellify(CiTestCase): | ||
3697 | 70 | |||
3698 | 71 | def test_input_dict_raises_type_error(self): | ||
3699 | 72 | self.assertRaisesRegex( | ||
3700 | 73 | TypeError, 'Input.*was.*dict.*xpected', | ||
3701 | 74 | util.shellify, {'mykey': 'myval'}) | ||
3702 | 75 | |||
3703 | 76 | def test_input_str_raises_type_error(self): | ||
3704 | 77 | self.assertRaisesRegex( | ||
3705 | 78 | TypeError, 'Input.*was.*str.*xpected', util.shellify, "foobar") | ||
3706 | 79 | |||
3707 | 80 | def test_value_with_int_raises_type_error(self): | ||
3708 | 81 | self.assertRaisesRegex( | ||
3709 | 82 | TypeError, 'shellify.*int', util.shellify, ["foo", 1]) | ||
3710 | 83 | |||
3711 | 84 | def test_supports_strings_and_lists(self): | ||
3712 | 85 | self.assertEqual( | ||
3713 | 86 | '\n'.join(["#!/bin/sh", "echo hi mom", "'echo' 'hi dad'", | ||
3714 | 87 | "'echo' 'hi' 'sis'", ""]), | ||
3715 | 88 | util.shellify(["echo hi mom", ["echo", "hi dad"], | ||
3716 | 89 | ('echo', 'hi', 'sis')])) | ||
3717 | 90 | |||
3718 | 91 | |||
3719 | 92 | class TestGetHostnameFqdn(CiTestCase): | ||
3720 | 93 | |||
3721 | 94 | def test_get_hostname_fqdn_from_only_cfg_fqdn(self): | ||
3722 | 95 | """When cfg only has the fqdn key, derive hostname and fqdn from it.""" | ||
3723 | 96 | hostname, fqdn = util.get_hostname_fqdn( | ||
3724 | 97 | cfg={'fqdn': 'myhost.domain.com'}, cloud=None) | ||
3725 | 98 | self.assertEqual('myhost', hostname) | ||
3726 | 99 | self.assertEqual('myhost.domain.com', fqdn) | ||
3727 | 100 | |||
3728 | 101 | def test_get_hostname_fqdn_from_cfg_fqdn_and_hostname(self): | ||
3729 | 102 | """When cfg has both fqdn and hostname keys, return them.""" | ||
3730 | 103 | hostname, fqdn = util.get_hostname_fqdn( | ||
3731 | 104 | cfg={'fqdn': 'myhost.domain.com', 'hostname': 'other'}, cloud=None) | ||
3732 | 105 | self.assertEqual('other', hostname) | ||
3733 | 106 | self.assertEqual('myhost.domain.com', fqdn) | ||
3734 | 107 | |||
3735 | 108 | def test_get_hostname_fqdn_from_cfg_hostname_with_domain(self): | ||
3736 | 109 | """When cfg has only hostname key which represents a fqdn, use that.""" | ||
3737 | 110 | hostname, fqdn = util.get_hostname_fqdn( | ||
3738 | 111 | cfg={'hostname': 'myhost.domain.com'}, cloud=None) | ||
3739 | 112 | self.assertEqual('myhost', hostname) | ||
3740 | 113 | self.assertEqual('myhost.domain.com', fqdn) | ||
3741 | 114 | |||
3742 | 115 | def test_get_hostname_fqdn_from_cfg_hostname_without_domain(self): | ||
3743 | 116 | """When cfg has a hostname without a '.' query cloud.get_hostname.""" | ||
3744 | 117 | mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com') | ||
3745 | 118 | hostname, fqdn = util.get_hostname_fqdn( | ||
3746 | 119 | cfg={'hostname': 'myhost'}, cloud=mycloud) | ||
3747 | 120 | self.assertEqual('myhost', hostname) | ||
3748 | 121 | self.assertEqual('cloudhost.mycloud.com', fqdn) | ||
3749 | 122 | self.assertEqual( | ||
3750 | 123 | [{'fqdn': True, 'metadata_only': False}], mycloud.calls) | ||
3751 | 124 | |||
3752 | 125 | def test_get_hostname_fqdn_from_without_fqdn_or_hostname(self): | ||
3753 | 126 | """When cfg has neither hostname nor fqdn cloud.get_hostname.""" | ||
3754 | 127 | mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com') | ||
3755 | 128 | hostname, fqdn = util.get_hostname_fqdn(cfg={}, cloud=mycloud) | ||
3756 | 129 | self.assertEqual('cloudhost', hostname) | ||
3757 | 130 | self.assertEqual('cloudhost.mycloud.com', fqdn) | ||
3758 | 131 | self.assertEqual( | ||
3759 | 132 | [{'fqdn': True, 'metadata_only': False}, | ||
3760 | 133 | {'metadata_only': False}], mycloud.calls) | ||
3761 | 134 | |||
3762 | 135 | def test_get_hostname_fqdn_from_passes_metadata_only_to_cloud(self): | ||
3763 | 136 | """Calls to cloud.get_hostname pass the metadata_only parameter.""" | ||
3764 | 137 | mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com') | ||
3765 | 138 | hostname, fqdn = util.get_hostname_fqdn( | ||
3766 | 139 | cfg={}, cloud=mycloud, metadata_only=True) | ||
3767 | 140 | self.assertEqual( | ||
3768 | 141 | [{'fqdn': True, 'metadata_only': True}, | ||
3769 | 142 | {'metadata_only': True}], mycloud.calls) | ||
3770 | 143 | |||
3771 | 144 | |||
3772 | 145 | class TestBlkid(CiTestCase): | ||
3773 | 146 | ids = { | ||
3774 | 147 | "id01": "1111-1111", | ||
3775 | 148 | "id02": "22222222-2222", | ||
3776 | 149 | "id03": "33333333-3333", | ||
3777 | 150 | "id04": "44444444-4444", | ||
3778 | 151 | "id05": "55555555-5555-5555-5555-555555555555", | ||
3779 | 152 | "id06": "66666666-6666-6666-6666-666666666666", | ||
3780 | 153 | "id07": "52894610484658920398", | ||
3781 | 154 | "id08": "86753098675309867530", | ||
3782 | 155 | "id09": "99999999-9999-9999-9999-999999999999", | ||
3783 | 156 | } | ||
3784 | 157 | |||
3785 | 158 | blkid_out = dedent("""\ | ||
3786 | 159 | /dev/loop0: TYPE="squashfs" | ||
3787 | 160 | /dev/loop1: TYPE="squashfs" | ||
3788 | 161 | /dev/loop2: TYPE="squashfs" | ||
3789 | 162 | /dev/loop3: TYPE="squashfs" | ||
3790 | 163 | /dev/sda1: UUID="{id01}" TYPE="vfat" PARTUUID="{id02}" | ||
3791 | 164 | /dev/sda2: UUID="{id03}" TYPE="ext4" PARTUUID="{id04}" | ||
3792 | 165 | /dev/sda3: UUID="{id05}" TYPE="ext4" PARTUUID="{id06}" | ||
3793 | 166 | /dev/sda4: LABEL="default" UUID="{id07}" UUID_SUB="{id08}" """ | ||
3794 | 167 | """TYPE="zfs_member" PARTUUID="{id09}" | ||
3795 | 168 | /dev/loop4: TYPE="squashfs" | ||
3796 | 169 | """) | ||
3797 | 170 | |||
3798 | 171 | maxDiff = None | ||
3799 | 172 | |||
3800 | 173 | def _get_expected(self): | ||
3801 | 174 | return ({ | ||
3802 | 175 | "/dev/loop0": {"DEVNAME": "/dev/loop0", "TYPE": "squashfs"}, | ||
3803 | 176 | "/dev/loop1": {"DEVNAME": "/dev/loop1", "TYPE": "squashfs"}, | ||
3804 | 177 | "/dev/loop2": {"DEVNAME": "/dev/loop2", "TYPE": "squashfs"}, | ||
3805 | 178 | "/dev/loop3": {"DEVNAME": "/dev/loop3", "TYPE": "squashfs"}, | ||
3806 | 179 | "/dev/loop4": {"DEVNAME": "/dev/loop4", "TYPE": "squashfs"}, | ||
3807 | 180 | "/dev/sda1": {"DEVNAME": "/dev/sda1", "TYPE": "vfat", | ||
3808 | 181 | "UUID": self.ids["id01"], | ||
3809 | 182 | "PARTUUID": self.ids["id02"]}, | ||
3810 | 183 | "/dev/sda2": {"DEVNAME": "/dev/sda2", "TYPE": "ext4", | ||
3811 | 184 | "UUID": self.ids["id03"], | ||
3812 | 185 | "PARTUUID": self.ids["id04"]}, | ||
3813 | 186 | "/dev/sda3": {"DEVNAME": "/dev/sda3", "TYPE": "ext4", | ||
3814 | 187 | "UUID": self.ids["id05"], | ||
3815 | 188 | "PARTUUID": self.ids["id06"]}, | ||
3816 | 189 | "/dev/sda4": {"DEVNAME": "/dev/sda4", "TYPE": "zfs_member", | ||
3817 | 190 | "LABEL": "default", | ||
3818 | 191 | "UUID": self.ids["id07"], | ||
3819 | 192 | "UUID_SUB": self.ids["id08"], | ||
3820 | 193 | "PARTUUID": self.ids["id09"]}, | ||
3821 | 194 | }) | ||
3822 | 195 | |||
3823 | 196 | @mock.patch("cloudinit.util.subp") | ||
3824 | 197 | def test_functional_blkid(self, m_subp): | ||
3825 | 198 | m_subp.return_value = ( | ||
3826 | 199 | self.blkid_out.format(**self.ids), "") | ||
3827 | 200 | self.assertEqual(self._get_expected(), util.blkid()) | ||
3828 | 201 | m_subp.assert_called_with(["blkid", "-o", "full"], capture=True, | ||
3829 | 202 | decode="replace") | ||
3830 | 203 | |||
3831 | 204 | @mock.patch("cloudinit.util.subp") | ||
3832 | 205 | def test_blkid_no_cache_uses_no_cache(self, m_subp): | ||
3833 | 206 | """blkid should turn off cache if disable_cache is true.""" | ||
3834 | 207 | m_subp.return_value = ( | ||
3835 | 208 | self.blkid_out.format(**self.ids), "") | ||
3836 | 209 | self.assertEqual(self._get_expected(), | ||
3837 | 210 | util.blkid(disable_cache=True)) | ||
3838 | 211 | m_subp.assert_called_with(["blkid", "-o", "full", "-c", "/dev/null"], | ||
3839 | 212 | capture=True, decode="replace") | ||
3840 | 213 | |||
3841 | 214 | |||
3842 | 215 | # vi: ts=4 expandtab | ||
3843 | diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py | |||
3844 | index 0a5be0b..03a573a 100644 | |||
3845 | --- a/cloudinit/url_helper.py | |||
3846 | +++ b/cloudinit/url_helper.py | |||
3847 | @@ -16,7 +16,7 @@ import time | |||
3848 | 16 | 16 | ||
3849 | 17 | from email.utils import parsedate | 17 | from email.utils import parsedate |
3850 | 18 | from functools import partial | 18 | from functools import partial |
3852 | 19 | 19 | from itertools import count | |
3853 | 20 | from requests import exceptions | 20 | from requests import exceptions |
3854 | 21 | 21 | ||
3855 | 22 | from six.moves.urllib.parse import ( | 22 | from six.moves.urllib.parse import ( |
3856 | @@ -47,7 +47,7 @@ try: | |||
3857 | 47 | _REQ_VER = LooseVersion(_REQ.version) # pylint: disable=no-member | 47 | _REQ_VER = LooseVersion(_REQ.version) # pylint: disable=no-member |
3858 | 48 | if _REQ_VER >= LooseVersion('0.8.8'): | 48 | if _REQ_VER >= LooseVersion('0.8.8'): |
3859 | 49 | SSL_ENABLED = True | 49 | SSL_ENABLED = True |
3861 | 50 | if _REQ_VER >= LooseVersion('0.7.0') and _REQ_VER < LooseVersion('1.0.0'): | 50 | if LooseVersion('0.7.0') <= _REQ_VER < LooseVersion('1.0.0'): |
3862 | 51 | CONFIG_ENABLED = True | 51 | CONFIG_ENABLED = True |
3863 | 52 | except ImportError: | 52 | except ImportError: |
3864 | 53 | pass | 53 | pass |
3865 | @@ -121,7 +121,7 @@ class UrlResponse(object): | |||
3866 | 121 | upper = 300 | 121 | upper = 300 |
3867 | 122 | if redirects_ok: | 122 | if redirects_ok: |
3868 | 123 | upper = 400 | 123 | upper = 400 |
3870 | 124 | if self.code >= 200 and self.code < upper: | 124 | if 200 <= self.code < upper: |
3871 | 125 | return True | 125 | return True |
3872 | 126 | else: | 126 | else: |
3873 | 127 | return False | 127 | return False |
3874 | @@ -172,7 +172,7 @@ def _get_ssl_args(url, ssl_details): | |||
3875 | 172 | def readurl(url, data=None, timeout=None, retries=0, sec_between=1, | 172 | def readurl(url, data=None, timeout=None, retries=0, sec_between=1, |
3876 | 173 | headers=None, headers_cb=None, ssl_details=None, | 173 | headers=None, headers_cb=None, ssl_details=None, |
3877 | 174 | check_status=True, allow_redirects=True, exception_cb=None, | 174 | check_status=True, allow_redirects=True, exception_cb=None, |
3879 | 175 | session=None): | 175 | session=None, infinite=False): |
3880 | 176 | url = _cleanurl(url) | 176 | url = _cleanurl(url) |
3881 | 177 | req_args = { | 177 | req_args = { |
3882 | 178 | 'url': url, | 178 | 'url': url, |
3883 | @@ -220,7 +220,8 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, | |||
3884 | 220 | excps = [] | 220 | excps = [] |
3885 | 221 | # Handle retrying ourselves since the built-in support | 221 | # Handle retrying ourselves since the built-in support |
3886 | 222 | # doesn't handle sleeping between tries... | 222 | # doesn't handle sleeping between tries... |
3888 | 223 | for i in range(0, manual_tries): | 223 | # Infinitely retry if infinite is True |
3889 | 224 | for i in count() if infinite else range(0, manual_tries): | ||
3890 | 224 | req_args['headers'] = headers_cb(url) | 225 | req_args['headers'] = headers_cb(url) |
3891 | 225 | filtered_req_args = {} | 226 | filtered_req_args = {} |
3892 | 226 | for (k, v) in req_args.items(): | 227 | for (k, v) in req_args.items(): |
3893 | @@ -229,7 +230,8 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, | |||
3894 | 229 | filtered_req_args[k] = v | 230 | filtered_req_args[k] = v |
3895 | 230 | try: | 231 | try: |
3896 | 231 | LOG.debug("[%s/%s] open '%s' with %s configuration", i, | 232 | LOG.debug("[%s/%s] open '%s' with %s configuration", i, |
3898 | 232 | manual_tries, url, filtered_req_args) | 233 | "infinite" if infinite else manual_tries, url, |
3899 | 234 | filtered_req_args) | ||
3900 | 233 | 235 | ||
3901 | 234 | if session is None: | 236 | if session is None: |
3902 | 235 | session = requests.Session() | 237 | session = requests.Session() |
3903 | @@ -258,11 +260,13 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, | |||
3904 | 258 | # ssl exceptions are not going to get fixed by waiting a | 260 | # ssl exceptions are not going to get fixed by waiting a |
3905 | 259 | # few seconds | 261 | # few seconds |
3906 | 260 | break | 262 | break |
3910 | 261 | if exception_cb and exception_cb(req_args.copy(), excps[-1]): | 263 | if exception_cb and not exception_cb(req_args.copy(), excps[-1]): |
3911 | 262 | # if an exception callback was given it should return None | 264 | # if an exception callback was given, it should return True |
3912 | 263 | # a true-ish value means to break and re-raise the exception | 265 | # to continue retrying and False to break and re-raise the |
3913 | 266 | # exception | ||
3914 | 264 | break | 267 | break |
3916 | 265 | if i + 1 < manual_tries and sec_between > 0: | 268 | if (infinite and sec_between > 0) or \ |
3917 | 269 | (i + 1 < manual_tries and sec_between > 0): | ||
3918 | 266 | LOG.debug("Please wait %s seconds while we wait to try again", | 270 | LOG.debug("Please wait %s seconds while we wait to try again", |
3919 | 267 | sec_between) | 271 | sec_between) |
3920 | 268 | time.sleep(sec_between) | 272 | time.sleep(sec_between) |
3921 | diff --git a/cloudinit/util.py b/cloudinit/util.py | |||
3922 | index 338fb97..0ab2c48 100644 | |||
3923 | --- a/cloudinit/util.py | |||
3924 | +++ b/cloudinit/util.py | |||
3925 | @@ -546,7 +546,7 @@ def is_ipv4(instr): | |||
3926 | 546 | return False | 546 | return False |
3927 | 547 | 547 | ||
3928 | 548 | try: | 548 | try: |
3930 | 549 | toks = [x for x in toks if int(x) < 256 and int(x) >= 0] | 549 | toks = [x for x in toks if 0 <= int(x) < 256] |
3931 | 550 | except Exception: | 550 | except Exception: |
3932 | 551 | return False | 551 | return False |
3933 | 552 | 552 | ||
3934 | @@ -716,8 +716,7 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None): | |||
3935 | 716 | def make_url(scheme, host, port=None, | 716 | def make_url(scheme, host, port=None, |
3936 | 717 | path='', params='', query='', fragment=''): | 717 | path='', params='', query='', fragment=''): |
3937 | 718 | 718 | ||
3940 | 719 | pieces = [] | 719 | pieces = [scheme or ''] |
3939 | 720 | pieces.append(scheme or '') | ||
3941 | 721 | 720 | ||
3942 | 722 | netloc = '' | 721 | netloc = '' |
3943 | 723 | if host: | 722 | if host: |
3944 | @@ -1026,9 +1025,16 @@ def dos2unix(contents): | |||
3945 | 1026 | return contents.replace('\r\n', '\n') | 1025 | return contents.replace('\r\n', '\n') |
3946 | 1027 | 1026 | ||
3947 | 1028 | 1027 | ||
3951 | 1029 | def get_hostname_fqdn(cfg, cloud): | 1028 | def get_hostname_fqdn(cfg, cloud, metadata_only=False): |
3952 | 1030 | # return the hostname and fqdn from 'cfg'. If not found in cfg, | 1029 | """Get hostname and fqdn from config if present and fallback to cloud. |
3953 | 1031 | # then fall back to data from cloud | 1030 | |
3954 | 1031 | @param cfg: Dictionary of merged user-data configuration (from init.cfg). | ||
3955 | 1032 | @param cloud: Cloud instance from init.cloudify(). | ||
3956 | 1033 | @param metadata_only: Boolean, set True to only query cloud meta-data, | ||
3957 | 1034 | returning None if not present in meta-data. | ||
3958 | 1035 | @return: a Tuple of strings <hostname>, <fqdn>. Values can be none when | ||
3959 | 1036 | metadata_only is True and no cfg or metadata provides hostname info. | ||
3960 | 1037 | """ | ||
3961 | 1032 | if "fqdn" in cfg: | 1038 | if "fqdn" in cfg: |
3962 | 1033 | # user specified a fqdn. Default hostname then is based off that | 1039 | # user specified a fqdn. Default hostname then is based off that |
3963 | 1034 | fqdn = cfg['fqdn'] | 1040 | fqdn = cfg['fqdn'] |
3964 | @@ -1042,11 +1048,11 @@ def get_hostname_fqdn(cfg, cloud): | |||
3965 | 1042 | else: | 1048 | else: |
3966 | 1043 | # no fqdn set, get fqdn from cloud. | 1049 | # no fqdn set, get fqdn from cloud. |
3967 | 1044 | # get hostname from cfg if available otherwise cloud | 1050 | # get hostname from cfg if available otherwise cloud |
3969 | 1045 | fqdn = cloud.get_hostname(fqdn=True) | 1051 | fqdn = cloud.get_hostname(fqdn=True, metadata_only=metadata_only) |
3970 | 1046 | if "hostname" in cfg: | 1052 | if "hostname" in cfg: |
3971 | 1047 | hostname = cfg['hostname'] | 1053 | hostname = cfg['hostname'] |
3972 | 1048 | else: | 1054 | else: |
3974 | 1049 | hostname = cloud.get_hostname() | 1055 | hostname = cloud.get_hostname(metadata_only=metadata_only) |
3975 | 1050 | return (hostname, fqdn) | 1056 | return (hostname, fqdn) |
3976 | 1051 | 1057 | ||
3977 | 1052 | 1058 | ||
3978 | @@ -1231,6 +1237,37 @@ def find_devs_with(criteria=None, oformat='device', | |||
3979 | 1231 | return entries | 1237 | return entries |
3980 | 1232 | 1238 | ||
3981 | 1233 | 1239 | ||
3982 | 1240 | def blkid(devs=None, disable_cache=False): | ||
3983 | 1241 | """Get all device tags details from blkid. | ||
3984 | 1242 | |||
3985 | 1243 | @param devs: Optional list of device paths you wish to query. | ||
3986 | 1244 | @param disable_cache: Bool, set True to start with clean cache. | ||
3987 | 1245 | |||
3988 | 1246 | @return: Dict of key value pairs of info for the device. | ||
3989 | 1247 | """ | ||
3990 | 1248 | if devs is None: | ||
3991 | 1249 | devs = [] | ||
3992 | 1250 | else: | ||
3993 | 1251 | devs = list(devs) | ||
3994 | 1252 | |||
3995 | 1253 | cmd = ['blkid', '-o', 'full'] | ||
3996 | 1254 | if disable_cache: | ||
3997 | 1255 | cmd.extend(['-c', '/dev/null']) | ||
3998 | 1256 | cmd.extend(devs) | ||
3999 | 1257 | |||
4000 | 1258 | # we have to decode with 'replace' as shelx.split (called by | ||
4001 | 1259 | # load_shell_content) can't take bytes. So this is potentially | ||
4002 | 1260 | # lossy of non-utf-8 chars in blkid output. | ||
4003 | 1261 | out, _ = subp(cmd, capture=True, decode="replace") | ||
4004 | 1262 | ret = {} | ||
4005 | 1263 | for line in out.splitlines(): | ||
4006 | 1264 | dev, _, data = line.partition(":") | ||
4007 | 1265 | ret[dev] = load_shell_content(data) | ||
4008 | 1266 | ret[dev]["DEVNAME"] = dev | ||
4009 | 1267 | |||
4010 | 1268 | return ret | ||
4011 | 1269 | |||
4012 | 1270 | |||
4013 | 1234 | def peek_file(fname, max_bytes): | 1271 | def peek_file(fname, max_bytes): |
4014 | 1235 | LOG.debug("Peeking at %s (max_bytes=%s)", fname, max_bytes) | 1272 | LOG.debug("Peeking at %s (max_bytes=%s)", fname, max_bytes) |
4015 | 1236 | with open(fname, 'rb') as ifh: | 1273 | with open(fname, 'rb') as ifh: |
4016 | @@ -1746,7 +1783,7 @@ def chmod(path, mode): | |||
4017 | 1746 | def write_file(filename, content, mode=0o644, omode="wb", copy_mode=False): | 1783 | def write_file(filename, content, mode=0o644, omode="wb", copy_mode=False): |
4018 | 1747 | """ | 1784 | """ |
4019 | 1748 | Writes a file with the given content and sets the file mode as specified. | 1785 | Writes a file with the given content and sets the file mode as specified. |
4021 | 1749 | Resotres the SELinux context if possible. | 1786 | Restores the SELinux context if possible. |
4022 | 1750 | 1787 | ||
4023 | 1751 | @param filename: The full path of the file to write. | 1788 | @param filename: The full path of the file to write. |
4024 | 1752 | @param content: The content to write to the file. | 1789 | @param content: The content to write to the file. |
4025 | @@ -1821,7 +1858,8 @@ def subp_blob_in_tempfile(blob, *args, **kwargs): | |||
4026 | 1821 | 1858 | ||
4027 | 1822 | 1859 | ||
4028 | 1823 | def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, | 1860 | def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, |
4030 | 1824 | logstring=False, decode="replace", target=None, update_env=None): | 1861 | logstring=False, decode="replace", target=None, update_env=None, |
4031 | 1862 | status_cb=None): | ||
4032 | 1825 | 1863 | ||
4033 | 1826 | # not supported in cloud-init (yet), for now kept in the call signature | 1864 | # not supported in cloud-init (yet), for now kept in the call signature |
4034 | 1827 | # to ease maintaining code shared between cloud-init and curtin | 1865 | # to ease maintaining code shared between cloud-init and curtin |
4035 | @@ -1842,6 +1880,9 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, | |||
4036 | 1842 | if target_path(target) != "/": | 1880 | if target_path(target) != "/": |
4037 | 1843 | args = ['chroot', target] + list(args) | 1881 | args = ['chroot', target] + list(args) |
4038 | 1844 | 1882 | ||
4039 | 1883 | if status_cb: | ||
4040 | 1884 | command = ' '.join(args) if isinstance(args, list) else args | ||
4041 | 1885 | status_cb('Begin run command: {command}\n'.format(command=command)) | ||
4042 | 1845 | if not logstring: | 1886 | if not logstring: |
4043 | 1846 | LOG.debug(("Running command %s with allowed return codes %s" | 1887 | LOG.debug(("Running command %s with allowed return codes %s" |
4044 | 1847 | " (shell=%s, capture=%s)"), args, rcs, shell, capture) | 1888 | " (shell=%s, capture=%s)"), args, rcs, shell, capture) |
4045 | @@ -1865,12 +1906,25 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, | |||
4046 | 1865 | if not isinstance(data, bytes): | 1906 | if not isinstance(data, bytes): |
4047 | 1866 | data = data.encode() | 1907 | data = data.encode() |
4048 | 1867 | 1908 | ||
4049 | 1909 | # Popen converts entries in the arguments array from non-bytes to bytes. | ||
4050 | 1910 | # When locale is unset it may use ascii for that encoding which can | ||
4051 | 1911 | # cause UnicodeDecodeErrors. (LP: #1751051) | ||
4052 | 1912 | if isinstance(args, six.binary_type): | ||
4053 | 1913 | bytes_args = args | ||
4054 | 1914 | elif isinstance(args, six.string_types): | ||
4055 | 1915 | bytes_args = args.encode("utf-8") | ||
4056 | 1916 | else: | ||
4057 | 1917 | bytes_args = [ | ||
4058 | 1918 | x if isinstance(x, six.binary_type) else x.encode("utf-8") | ||
4059 | 1919 | for x in args] | ||
4060 | 1868 | try: | 1920 | try: |
4062 | 1869 | sp = subprocess.Popen(args, stdout=stdout, | 1921 | sp = subprocess.Popen(bytes_args, stdout=stdout, |
4063 | 1870 | stderr=stderr, stdin=stdin, | 1922 | stderr=stderr, stdin=stdin, |
4064 | 1871 | env=env, shell=shell) | 1923 | env=env, shell=shell) |
4065 | 1872 | (out, err) = sp.communicate(data) | 1924 | (out, err) = sp.communicate(data) |
4066 | 1873 | except OSError as e: | 1925 | except OSError as e: |
4067 | 1926 | if status_cb: | ||
4068 | 1927 | status_cb('ERROR: End run command: invalid command provided\n') | ||
4069 | 1874 | raise ProcessExecutionError( | 1928 | raise ProcessExecutionError( |
4070 | 1875 | cmd=args, reason=e, errno=e.errno, | 1929 | cmd=args, reason=e, errno=e.errno, |
4071 | 1876 | stdout="-" if decode else b"-", | 1930 | stdout="-" if decode else b"-", |
4072 | @@ -1895,9 +1949,14 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, | |||
4073 | 1895 | 1949 | ||
4074 | 1896 | rc = sp.returncode | 1950 | rc = sp.returncode |
4075 | 1897 | if rc not in rcs: | 1951 | if rc not in rcs: |
4076 | 1952 | if status_cb: | ||
4077 | 1953 | status_cb( | ||
4078 | 1954 | 'ERROR: End run command: exit({code})\n'.format(code=rc)) | ||
4079 | 1898 | raise ProcessExecutionError(stdout=out, stderr=err, | 1955 | raise ProcessExecutionError(stdout=out, stderr=err, |
4080 | 1899 | exit_code=rc, | 1956 | exit_code=rc, |
4081 | 1900 | cmd=args) | 1957 | cmd=args) |
4082 | 1958 | if status_cb: | ||
4083 | 1959 | status_cb('End run command: exit({code})\n'.format(code=rc)) | ||
4084 | 1901 | return (out, err) | 1960 | return (out, err) |
4085 | 1902 | 1961 | ||
4086 | 1903 | 1962 | ||
4087 | @@ -1918,6 +1977,11 @@ def abs_join(*paths): | |||
4088 | 1918 | # if it is an array, shell protect it (with single ticks) | 1977 | # if it is an array, shell protect it (with single ticks) |
4089 | 1919 | # if it is a string, do nothing | 1978 | # if it is a string, do nothing |
4090 | 1920 | def shellify(cmdlist, add_header=True): | 1979 | def shellify(cmdlist, add_header=True): |
4091 | 1980 | if not isinstance(cmdlist, (tuple, list)): | ||
4092 | 1981 | raise TypeError( | ||
4093 | 1982 | "Input to shellify was type '%s'. Expected list or tuple." % | ||
4094 | 1983 | (type_utils.obj_name(cmdlist))) | ||
4095 | 1984 | |||
4096 | 1921 | content = '' | 1985 | content = '' |
4097 | 1922 | if add_header: | 1986 | if add_header: |
4098 | 1923 | content += "#!/bin/sh\n" | 1987 | content += "#!/bin/sh\n" |
4099 | @@ -1926,7 +1990,7 @@ def shellify(cmdlist, add_header=True): | |||
4100 | 1926 | for args in cmdlist: | 1990 | for args in cmdlist: |
4101 | 1927 | # If the item is a list, wrap all items in single tick. | 1991 | # If the item is a list, wrap all items in single tick. |
4102 | 1928 | # If its not, then just write it directly. | 1992 | # If its not, then just write it directly. |
4104 | 1929 | if isinstance(args, list): | 1993 | if isinstance(args, (list, tuple)): |
4105 | 1930 | fixed = [] | 1994 | fixed = [] |
4106 | 1931 | for f in args: | 1995 | for f in args: |
4107 | 1932 | fixed.append("'%s'" % (six.text_type(f).replace("'", escaped))) | 1996 | fixed.append("'%s'" % (six.text_type(f).replace("'", escaped))) |
4108 | @@ -1936,9 +2000,10 @@ def shellify(cmdlist, add_header=True): | |||
4109 | 1936 | content = "%s%s\n" % (content, args) | 2000 | content = "%s%s\n" % (content, args) |
4110 | 1937 | cmds_made += 1 | 2001 | cmds_made += 1 |
4111 | 1938 | else: | 2002 | else: |
4115 | 1939 | raise RuntimeError(("Unable to shellify type %s" | 2003 | raise TypeError( |
4116 | 1940 | " which is not a list or string") | 2004 | "Unable to shellify type '%s'. Expected list, string, tuple. " |
4117 | 1941 | % (type_utils.obj_name(args))) | 2005 | "Got: %s" % (type_utils.obj_name(args), args)) |
4118 | 2006 | |||
4119 | 1942 | LOG.debug("Shellified %s commands.", cmds_made) | 2007 | LOG.debug("Shellified %s commands.", cmds_made) |
4120 | 1943 | return content | 2008 | return content |
4121 | 1944 | 2009 | ||
4122 | @@ -2169,7 +2234,7 @@ def get_path_dev_freebsd(path, mnt_list): | |||
4123 | 2169 | return path_found | 2234 | return path_found |
4124 | 2170 | 2235 | ||
4125 | 2171 | 2236 | ||
4127 | 2172 | def get_mount_info_freebsd(path, log=LOG): | 2237 | def get_mount_info_freebsd(path): |
4128 | 2173 | (result, err) = subp(['mount', '-p', path], rcs=[0, 1]) | 2238 | (result, err) = subp(['mount', '-p', path], rcs=[0, 1]) |
4129 | 2174 | if len(err): | 2239 | if len(err): |
4130 | 2175 | # find a path if the input is not a mounting point | 2240 | # find a path if the input is not a mounting point |
4131 | @@ -2183,23 +2248,49 @@ def get_mount_info_freebsd(path, log=LOG): | |||
4132 | 2183 | return "/dev/" + label_part, ret[2], ret[1] | 2248 | return "/dev/" + label_part, ret[2], ret[1] |
4133 | 2184 | 2249 | ||
4134 | 2185 | 2250 | ||
4135 | 2251 | def get_device_info_from_zpool(zpool): | ||
4136 | 2252 | (zpoolstatus, err) = subp(['zpool', 'status', zpool]) | ||
4137 | 2253 | if len(err): | ||
4138 | 2254 | return None | ||
4139 | 2255 | r = r'.*(ONLINE).*' | ||
4140 | 2256 | for line in zpoolstatus.split("\n"): | ||
4141 | 2257 | if re.search(r, line) and zpool not in line and "state" not in line: | ||
4142 | 2258 | disk = line.split()[0] | ||
4143 | 2259 | LOG.debug('found zpool "%s" on disk %s', zpool, disk) | ||
4144 | 2260 | return disk | ||
4145 | 2261 | |||
4146 | 2262 | |||
4147 | 2186 | def parse_mount(path): | 2263 | def parse_mount(path): |
4149 | 2187 | (mountoutput, _err) = subp("mount") | 2264 | (mountoutput, _err) = subp(['mount']) |
4150 | 2188 | mount_locs = mountoutput.splitlines() | 2265 | mount_locs = mountoutput.splitlines() |
4151 | 2266 | # there are 2 types of mount outputs we have to parse therefore | ||
4152 | 2267 | # the regex is a bit complex. to better understand this regex see: | ||
4153 | 2268 | # https://regex101.com/r/2F6c1k/1 | ||
4154 | 2269 | # https://regex101.com/r/T2en7a/1 | ||
4155 | 2270 | regex = r'^(/dev/[\S]+|.*zroot\S*?) on (/[\S]*) ' + \ | ||
4156 | 2271 | '(?=(?:type)[\s]+([\S]+)|\(([^,]*))' | ||
4157 | 2189 | for line in mount_locs: | 2272 | for line in mount_locs: |
4159 | 2190 | m = re.search(r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$', line) | 2273 | m = re.search(regex, line) |
4160 | 2191 | if not m: | 2274 | if not m: |
4161 | 2192 | continue | 2275 | continue |
4162 | 2276 | devpth = m.group(1) | ||
4163 | 2277 | mount_point = m.group(2) | ||
4164 | 2278 | # above regex will either fill the fs_type in group(3) | ||
4165 | 2279 | # or group(4) depending on the format we have. | ||
4166 | 2280 | fs_type = m.group(3) | ||
4167 | 2281 | if fs_type is None: | ||
4168 | 2282 | fs_type = m.group(4) | ||
4169 | 2283 | LOG.debug('found line in mount -> devpth: %s, mount_point: %s, ' | ||
4170 | 2284 | 'fs_type: %s', devpth, mount_point, fs_type) | ||
4171 | 2193 | # check whether the dev refers to a label on FreeBSD | 2285 | # check whether the dev refers to a label on FreeBSD |
4172 | 2194 | # for example, if dev is '/dev/label/rootfs', we should | 2286 | # for example, if dev is '/dev/label/rootfs', we should |
4173 | 2195 | # continue finding the real device like '/dev/da0'. | 2287 | # continue finding the real device like '/dev/da0'. |
4176 | 2196 | devm = re.search('^(/dev/.+)p([0-9])$', m.group(1)) | 2288 | # this is only valid for non zfs file systems as a zpool |
4177 | 2197 | if (not devm and is_FreeBSD()): | 2289 | # can have gpt labels as disk. |
4178 | 2290 | devm = re.search('^(/dev/.+)p([0-9])$', devpth) | ||
4179 | 2291 | if not devm and is_FreeBSD() and fs_type != 'zfs': | ||
4180 | 2198 | return get_mount_info_freebsd(path) | 2292 | return get_mount_info_freebsd(path) |
4185 | 2199 | devpth = m.group(1) | 2293 | elif mount_point == path: |
4182 | 2200 | mount_point = m.group(2) | ||
4183 | 2201 | fs_type = m.group(3) | ||
4184 | 2202 | if mount_point == path: | ||
4186 | 2203 | return devpth, fs_type, mount_point | 2294 | return devpth, fs_type, mount_point |
4187 | 2204 | return None | 2295 | return None |
4188 | 2205 | 2296 | ||
4189 | diff --git a/cloudinit/version.py b/cloudinit/version.py | |||
4190 | index be6262d..ccd0f84 100644 | |||
4191 | --- a/cloudinit/version.py | |||
4192 | +++ b/cloudinit/version.py | |||
4193 | @@ -4,7 +4,7 @@ | |||
4194 | 4 | # | 4 | # |
4195 | 5 | # This file is part of cloud-init. See LICENSE file for license information. | 5 | # This file is part of cloud-init. See LICENSE file for license information. |
4196 | 6 | 6 | ||
4198 | 7 | __VERSION__ = "17.2" | 7 | __VERSION__ = "18.2" |
4199 | 8 | 8 | ||
4200 | 9 | FEATURES = [ | 9 | FEATURES = [ |
4201 | 10 | # supports network config version 1 | 10 | # supports network config version 1 |
4202 | diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl | |||
4203 | index 32de9c9..3129d4e 100644 | |||
4204 | --- a/config/cloud.cfg.tmpl | |||
4205 | +++ b/config/cloud.cfg.tmpl | |||
4206 | @@ -4,6 +4,8 @@ | |||
4207 | 4 | 4 | ||
4208 | 5 | {% if variant in ["freebsd"] %} | 5 | {% if variant in ["freebsd"] %} |
4209 | 6 | syslog_fix_perms: root:wheel | 6 | syslog_fix_perms: root:wheel |
4210 | 7 | {% elif variant in ["suse"] %} | ||
4211 | 8 | syslog_fix_perms: root:root | ||
4212 | 7 | {% endif %} | 9 | {% endif %} |
4213 | 8 | # A set of users which may be applied and/or used by various modules | 10 | # A set of users which may be applied and/or used by various modules |
4214 | 9 | # when a 'default' entry is found it will reference the 'default_user' | 11 | # when a 'default' entry is found it will reference the 'default_user' |
4215 | @@ -70,7 +72,8 @@ cloud_config_modules: | |||
4216 | 70 | # Emit the cloud config ready event | 72 | # Emit the cloud config ready event |
4217 | 71 | # this can be used by upstart jobs for 'start on cloud-config'. | 73 | # this can be used by upstart jobs for 'start on cloud-config'. |
4218 | 72 | - emit_upstart | 74 | - emit_upstart |
4220 | 73 | - snap_config | 75 | - snap |
4221 | 76 | - snap_config # DEPRECATED- Drop in version 18.2 | ||
4222 | 74 | {% endif %} | 77 | {% endif %} |
4223 | 75 | - ssh-import-id | 78 | - ssh-import-id |
4224 | 76 | - locale | 79 | - locale |
4225 | @@ -84,6 +87,9 @@ cloud_config_modules: | |||
4226 | 84 | - apt-pipelining | 87 | - apt-pipelining |
4227 | 85 | - apt-configure | 88 | - apt-configure |
4228 | 86 | {% endif %} | 89 | {% endif %} |
4229 | 90 | {% if variant in ["ubuntu"] %} | ||
4230 | 91 | - ubuntu-advantage | ||
4231 | 92 | {% endif %} | ||
4232 | 87 | {% if variant in ["suse"] %} | 93 | {% if variant in ["suse"] %} |
4233 | 88 | - zypper-add-repo | 94 | - zypper-add-repo |
4234 | 89 | {% endif %} | 95 | {% endif %} |
4235 | @@ -100,7 +106,7 @@ cloud_config_modules: | |||
4236 | 100 | # The modules that run in the 'final' stage | 106 | # The modules that run in the 'final' stage |
4237 | 101 | cloud_final_modules: | 107 | cloud_final_modules: |
4238 | 102 | {% if variant in ["ubuntu", "unknown", "debian"] %} | 108 | {% if variant in ["ubuntu", "unknown", "debian"] %} |
4240 | 103 | - snappy | 109 | - snappy # DEPRECATED- Drop in version 18.2 |
4241 | 104 | {% endif %} | 110 | {% endif %} |
4242 | 105 | - package-update-upgrade-install | 111 | - package-update-upgrade-install |
4243 | 106 | {% if variant in ["ubuntu", "unknown", "debian"] %} | 112 | {% if variant in ["ubuntu", "unknown", "debian"] %} |
4244 | @@ -111,9 +117,9 @@ cloud_final_modules: | |||
4245 | 111 | {% if variant not in ["freebsd"] %} | 117 | {% if variant not in ["freebsd"] %} |
4246 | 112 | - puppet | 118 | - puppet |
4247 | 113 | - chef | 119 | - chef |
4248 | 114 | - salt-minion | ||
4249 | 115 | - mcollective | 120 | - mcollective |
4250 | 116 | {% endif %} | 121 | {% endif %} |
4251 | 122 | - salt-minion | ||
4252 | 117 | - rightscale_userdata | 123 | - rightscale_userdata |
4253 | 118 | - scripts-vendor | 124 | - scripts-vendor |
4254 | 119 | - scripts-per-once | 125 | - scripts-per-once |
4255 | diff --git a/debian/changelog b/debian/changelog | |||
4256 | index a319c5e..c7fc4fc 100644 | |||
4257 | --- a/debian/changelog | |||
4258 | +++ b/debian/changelog | |||
4259 | @@ -1,10 +1,76 @@ | |||
4261 | 1 | cloud-init (17.2-35-gf576b2a2-0ubuntu1~17.10.3) UNRELEASED; urgency=medium | 1 | cloud-init (18.2-0ubuntu1~17.10.1) artful-proposed; urgency=medium |
4262 | 2 | 2 | ||
4263 | 3 | * Drop the following cherry picks in debian/patches. They are now | 3 | * Drop the following cherry picks in debian/patches. They are now |
4264 | 4 | incorporated now incorporated in the upstream source: | 4 | incorporated now incorporated in the upstream source: |
4265 | 5 | + cpick-40e7738-GCE-fix-reading-of-user-data-that-is-not-base64-encoded | 5 | + cpick-40e7738-GCE-fix-reading-of-user-data-that-is-not-base64-encoded |
4268 | 6 | 6 | * New upstream snapshot. (LP: #1759406) | |
4269 | 7 | -- Scott Moser <smoser@ubuntu.com> Wed, 14 Mar 2018 15:43:25 -0400 | 7 | - release 18.2 (LP: #1759318) |
4270 | 8 | - Hetzner: Exit early if dmi system-manufacturer is not Hetzner. | ||
4271 | 9 | - Add missing dependency on isc-dhcp-client to trunk ubuntu packaging. | ||
4272 | 10 | (LP: #1759307) | ||
4273 | 11 | - FreeBSD: resizefs module now able to handle zfs/zpool. | ||
4274 | 12 | [Dominic Schlegel] (LP: #1721243) | ||
4275 | 13 | - cc_puppet: Revert regression of puppet creating ssl and ssl_cert dirs | ||
4276 | 14 | - Enable IBMCloud datasource in settings.py. | ||
4277 | 15 | - IBMCloud: Initial IBM Cloud datasource. | ||
4278 | 16 | - tests: remove jsonschema from xenial tox environment. | ||
4279 | 17 | - tests: Fix newly added schema unit tests to skip if no jsonschema. | ||
4280 | 18 | - ec2: Adjust ec2 datasource after exception_cb change. | ||
4281 | 19 | - Reduce AzurePreprovisioning HTTP timeouts. | ||
4282 | 20 | [Douglas Jordan] (LP: #1752977) | ||
4283 | 21 | - Revert the logic of exception_cb in read_url. | ||
4284 | 22 | [Kurt Garloff] (LP: #1702160, #1298921) | ||
4285 | 23 | - ubuntu-advantage: Add new config module to support | ||
4286 | 24 | ubuntu-advantage-tools | ||
4287 | 25 | - Handle global dns entries in netplan [Ryan Harper] (LP: #1750884) | ||
4288 | 26 | - Identify OpenTelekomCloud Xen as OpenStack DS. | ||
4289 | 27 | [Kurt Garloff] (LP: #1756471) | ||
4290 | 28 | - datasources: fix DataSource subclass get_hostname method signature | ||
4291 | 29 | (LP: #1757176) | ||
4292 | 30 | - OpenNebula: Update network to return v2 config rather than ENI. | ||
4293 | 31 | [Akihiko Ota] | ||
4294 | 32 | - Add Hetzner Cloud DataSource | ||
4295 | 33 | - net: recognize iscsi root cases without ip= on kernel command line. | ||
4296 | 34 | (LP: #1752391) | ||
4297 | 35 | - tests: fix flakes warning for unused variable | ||
4298 | 36 | - tests: patch leaked stderr messages from snap unit tests | ||
4299 | 37 | - cc_snap: Add new module to install and configure snapd and snap | ||
4300 | 38 | packages. | ||
4301 | 39 | - tests: Make pylint happy and fix python2.6 uses of assertRaisesRegex. | ||
4302 | 40 | - netplan: render bridge port-priority values (LP: #1735821) | ||
4303 | 41 | - util: Fix subp regression. Allow specifying subp command as a string. | ||
4304 | 42 | (LP: #1755965) | ||
4305 | 43 | - doc: fix all warnings issued by 'tox -e doc' | ||
4306 | 44 | - FreeBSD: Set hostname to FQDN. [Dominic Schlegel] (LP: #1753499) | ||
4307 | 45 | - tests: fix run_tree and bddeb | ||
4308 | 46 | - tests: Fix some warnings in tests that popped up with newer python. | ||
4309 | 47 | - set_hostname: When present in metadata, set it before network bringup. | ||
4310 | 48 | (LP: #1746455) | ||
4311 | 49 | - tests: Centralize and re-use skipTest based on json schema presense. | ||
4312 | 50 | - This commit fixes get_hostname on the AzureDataSource. | ||
4313 | 51 | [Douglas Jordan] (LP: #1754495) | ||
4314 | 52 | - shellify: raise TypeError on bad input. | ||
4315 | 53 | - Make salt minion module work on FreeBSD. | ||
4316 | 54 | [Dominic Schlegel] (LP: #1721503) | ||
4317 | 55 | - Simplify some comparisions. [Rémy Léone] | ||
4318 | 56 | - Change some list creation and population to literal. [Rémy Léone] | ||
4319 | 57 | - GCE: fix reading of user-data that is not base64 encoded. (LP: #1752711) | ||
4320 | 58 | - doc: fix chef install from apt packages example in RTD. | ||
4321 | 59 | - Implement puppet 4 support [Romanos Skiadas] (LP: #1446804) | ||
4322 | 60 | - subp: Fix subp usage with non-ascii characters when no system locale. | ||
4323 | 61 | (LP: #1751051) | ||
4324 | 62 | - salt: configure grains in grains file rather than in minion config. | ||
4325 | 63 | [Daniel Wallace] | ||
4326 | 64 | - release 18.1 (LP: #1751145) | ||
4327 | 65 | - OVF: Fix VMware support for 64-bit platforms. [Sankar Tanguturi] | ||
4328 | 66 | - ds-identify: Fix searching for iso9660 OVF cdroms. (LP: #1749980) | ||
4329 | 67 | - SUSE: Fix groups used for ownership of cloud-init.log [Robert Schweikert] | ||
4330 | 68 | - ds-identify: check /writable/system-data/ for nocloud seed. | ||
4331 | 69 | (LP: #1747070) | ||
4332 | 70 | - tests: run nosetests in cloudinit/ directory, fix py26 fallout. | ||
4333 | 71 | - tools: run-centos: git clone rather than tar. | ||
4334 | 72 | |||
4335 | 73 | -- Chad Smith <chad.smith@canonical.com> Tue, 27 Mar 2018 20:21:42 -0600 | ||
4336 | 8 | 74 | ||
4337 | 9 | cloud-init (17.2-35-gf576b2a2-0ubuntu1~17.10.2) artful-proposed; urgency=medium | 75 | cloud-init (17.2-35-gf576b2a2-0ubuntu1~17.10.2) artful-proposed; urgency=medium |
4338 | 10 | 76 | ||
4339 | diff --git a/doc/examples/cloud-config-chef.txt b/doc/examples/cloud-config-chef.txt | |||
4340 | index 58d5fdc..defc5a5 100644 | |||
4341 | --- a/doc/examples/cloud-config-chef.txt | |||
4342 | +++ b/doc/examples/cloud-config-chef.txt | |||
4343 | @@ -12,8 +12,8 @@ | |||
4344 | 12 | 12 | ||
4345 | 13 | # Key from https://packages.chef.io/chef.asc | 13 | # Key from https://packages.chef.io/chef.asc |
4346 | 14 | apt: | 14 | apt: |
4349 | 15 | source1: | 15 | sources: |
4350 | 16 | source: "deb http://packages.chef.io/repos/apt/stable $RELEASE main" | 16 | source1: "deb http://packages.chef.io/repos/apt/stable $RELEASE main" |
4351 | 17 | key: | | 17 | key: | |
4352 | 18 | -----BEGIN PGP PUBLIC KEY BLOCK----- | 18 | -----BEGIN PGP PUBLIC KEY BLOCK----- |
4353 | 19 | Version: GnuPG v1.4.12 (Darwin) | 19 | Version: GnuPG v1.4.12 (Darwin) |
4354 | diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py | |||
4355 | index 0ea3b6b..50eb05c 100644 | |||
4356 | --- a/doc/rtd/conf.py | |||
4357 | +++ b/doc/rtd/conf.py | |||
4358 | @@ -29,6 +29,7 @@ project = 'Cloud-Init' | |||
4359 | 29 | extensions = [ | 29 | extensions = [ |
4360 | 30 | 'sphinx.ext.intersphinx', | 30 | 'sphinx.ext.intersphinx', |
4361 | 31 | 'sphinx.ext.autodoc', | 31 | 'sphinx.ext.autodoc', |
4362 | 32 | 'sphinx.ext.autosectionlabel', | ||
4363 | 32 | 'sphinx.ext.viewcode', | 33 | 'sphinx.ext.viewcode', |
4364 | 33 | ] | 34 | ] |
4365 | 34 | 35 | ||
4366 | diff --git a/doc/rtd/topics/capabilities.rst b/doc/rtd/topics/capabilities.rst | |||
4367 | index ae3a0c7..3e2c9e3 100644 | |||
4368 | --- a/doc/rtd/topics/capabilities.rst | |||
4369 | +++ b/doc/rtd/topics/capabilities.rst | |||
4370 | @@ -44,13 +44,14 @@ Currently defined feature names include: | |||
4371 | 44 | CLI Interface | 44 | CLI Interface |
4372 | 45 | ============= | 45 | ============= |
4373 | 46 | 46 | ||
4376 | 47 | The command line documentation is accessible on any cloud-init | 47 | The command line documentation is accessible on any cloud-init installed |
4377 | 48 | installed system: | 48 | system: |
4378 | 49 | 49 | ||
4380 | 50 | .. code-block:: bash | 50 | .. code-block:: shell-session |
4381 | 51 | 51 | ||
4382 | 52 | % cloud-init --help | 52 | % cloud-init --help |
4383 | 53 | usage: cloud-init [-h] [--version] [--file FILES] | 53 | usage: cloud-init [-h] [--version] [--file FILES] |
4384 | 54 | |||
4385 | 54 | [--debug] [--force] | 55 | [--debug] [--force] |
4386 | 55 | {init,modules,single,dhclient-hook,features,analyze,devel,collect-logs,clean,status} | 56 | {init,modules,single,dhclient-hook,features,analyze,devel,collect-logs,clean,status} |
4387 | 56 | ... | 57 | ... |
4388 | @@ -88,7 +89,7 @@ Print out each feature supported. If cloud-init does not have the | |||
4389 | 88 | features subcommand, it also does not support any features described in | 89 | features subcommand, it also does not support any features described in |
4390 | 89 | this document. | 90 | this document. |
4391 | 90 | 91 | ||
4393 | 91 | .. code-block:: bash | 92 | .. code-block:: shell-session |
4394 | 92 | 93 | ||
4395 | 93 | % cloud-init features | 94 | % cloud-init features |
4396 | 94 | NETWORK_CONFIG_V1 | 95 | NETWORK_CONFIG_V1 |
4397 | @@ -100,10 +101,11 @@ cloud-init status | |||
4398 | 100 | ----------------- | 101 | ----------------- |
4399 | 101 | Report whether cloud-init is running, done, disabled or errored. Exits | 102 | Report whether cloud-init is running, done, disabled or errored. Exits |
4400 | 102 | non-zero if an error is detected in cloud-init. | 103 | non-zero if an error is detected in cloud-init. |
4401 | 104 | |||
4402 | 103 | * **--long**: Detailed status information. | 105 | * **--long**: Detailed status information. |
4403 | 104 | * **--wait**: Block until cloud-init completes. | 106 | * **--wait**: Block until cloud-init completes. |
4404 | 105 | 107 | ||
4406 | 106 | .. code-block:: bash | 108 | .. code-block:: shell-session |
4407 | 107 | 109 | ||
4408 | 108 | % cloud-init status --long | 110 | % cloud-init status --long |
4409 | 109 | status: done | 111 | status: done |
4410 | @@ -214,7 +216,7 @@ of once-per-instance: | |||
4411 | 214 | * **--frequency**: Optionally override the declared module frequency | 216 | * **--frequency**: Optionally override the declared module frequency |
4412 | 215 | with one of (always|once-per-instance|once) | 217 | with one of (always|once-per-instance|once) |
4413 | 216 | 218 | ||
4415 | 217 | .. code-block:: bash | 219 | .. code-block:: shell-session |
4416 | 218 | 220 | ||
4417 | 219 | % cloud-init single --name set_hostname --frequency always | 221 | % cloud-init single --name set_hostname --frequency always |
4418 | 220 | 222 | ||
4419 | diff --git a/doc/rtd/topics/debugging.rst b/doc/rtd/topics/debugging.rst | |||
4420 | index c2b47ed..cacc8a2 100644 | |||
4421 | --- a/doc/rtd/topics/debugging.rst | |||
4422 | +++ b/doc/rtd/topics/debugging.rst | |||
4423 | @@ -1,6 +1,6 @@ | |||
4425 | 1 | ********************** | 1 | ******************************** |
4426 | 2 | Testing and debugging cloud-init | 2 | Testing and debugging cloud-init |
4428 | 3 | ********************** | 3 | ******************************** |
4429 | 4 | 4 | ||
4430 | 5 | Overview | 5 | Overview |
4431 | 6 | ======== | 6 | ======== |
4432 | @@ -10,7 +10,7 @@ deployed instances. | |||
4433 | 10 | .. _boot_time_analysis: | 10 | .. _boot_time_analysis: |
4434 | 11 | 11 | ||
4435 | 12 | Boot Time Analysis - cloud-init analyze | 12 | Boot Time Analysis - cloud-init analyze |
4437 | 13 | ====================================== | 13 | ======================================= |
4438 | 14 | Occasionally instances don't appear as performant as we would like and | 14 | Occasionally instances don't appear as performant as we would like and |
4439 | 15 | cloud-init packages a simple facility to inspect what operations took | 15 | cloud-init packages a simple facility to inspect what operations took |
4440 | 16 | cloud-init the longest during boot and setup. | 16 | cloud-init the longest during boot and setup. |
4441 | @@ -22,9 +22,9 @@ determine the long-pole in cloud-init configuration and setup. These | |||
4442 | 22 | subcommands default to reading /var/log/cloud-init.log. | 22 | subcommands default to reading /var/log/cloud-init.log. |
4443 | 23 | 23 | ||
4444 | 24 | * ``analyze show`` Parse and organize cloud-init.log events by stage and | 24 | * ``analyze show`` Parse and organize cloud-init.log events by stage and |
4446 | 25 | include each sub-stage granularity with time delta reports. | 25 | include each sub-stage granularity with time delta reports. |
4447 | 26 | 26 | ||
4449 | 27 | .. code-block:: bash | 27 | .. code-block:: shell-session |
4450 | 28 | 28 | ||
4451 | 29 | $ cloud-init analyze show -i my-cloud-init.log | 29 | $ cloud-init analyze show -i my-cloud-init.log |
4452 | 30 | -- Boot Record 01 -- | 30 | -- Boot Record 01 -- |
4453 | @@ -41,9 +41,9 @@ include each sub-stage granularity with time delta reports. | |||
4454 | 41 | 41 | ||
4455 | 42 | 42 | ||
4456 | 43 | * ``analyze dump`` Parse cloud-init.log into event records and return a list of | 43 | * ``analyze dump`` Parse cloud-init.log into event records and return a list of |
4458 | 44 | dictionaries that can be consumed for other reporting needs. | 44 | dictionaries that can be consumed for other reporting needs. |
4459 | 45 | 45 | ||
4461 | 46 | .. code-block:: bash | 46 | .. code-block:: shell-session |
4462 | 47 | 47 | ||
4463 | 48 | $ cloud-init analyze blame -i my-cloud-init.log | 48 | $ cloud-init analyze blame -i my-cloud-init.log |
4464 | 49 | [ | 49 | [ |
4465 | @@ -56,10 +56,10 @@ dictionaries that can be consumed for other reporting needs. | |||
4466 | 56 | },... | 56 | },... |
4467 | 57 | 57 | ||
4468 | 58 | * ``analyze blame`` Parse cloud-init.log into event records and sort them based | 58 | * ``analyze blame`` Parse cloud-init.log into event records and sort them based |
4471 | 59 | on highest time cost for quick assessment of areas of cloud-init that may need | 59 | on highest time cost for quick assessment of areas of cloud-init that may |
4472 | 60 | improvement. | 60 | need improvement. |
4473 | 61 | 61 | ||
4475 | 62 | .. code-block:: bash | 62 | .. code-block:: shell-session |
4476 | 63 | 63 | ||
4477 | 64 | $ cloud-init analyze blame -i my-cloud-init.log | 64 | $ cloud-init analyze blame -i my-cloud-init.log |
4478 | 65 | -- Boot Record 11 -- | 65 | -- Boot Record 11 -- |
4479 | @@ -73,31 +73,36 @@ Analyze quickstart - LXC | |||
4480 | 73 | --------------------------- | 73 | --------------------------- |
4481 | 74 | To quickly obtain a cloud-init log try using lxc on any ubuntu system: | 74 | To quickly obtain a cloud-init log try using lxc on any ubuntu system: |
4482 | 75 | 75 | ||
4484 | 76 | .. code-block:: bash | 76 | .. code-block:: shell-session |
4485 | 77 | |||
4486 | 78 | $ lxc init ubuntu-daily:xenial x1 | ||
4487 | 79 | $ lxc start x1 | ||
4488 | 80 | $ # Take lxc's cloud-init.log and pipe it to the analyzer | ||
4489 | 81 | $ lxc file pull x1/var/log/cloud-init.log - | cloud-init analyze dump -i - | ||
4490 | 82 | $ lxc file pull x1/var/log/cloud-init.log - | \ | ||
4491 | 83 | python3 -m cloudinit.analyze dump -i - | ||
4492 | 77 | 84 | ||
4493 | 78 | $ lxc init ubuntu-daily:xenial x1 | ||
4494 | 79 | $ lxc start x1 | ||
4495 | 80 | # Take lxc's cloud-init.log and pipe it to the analyzer | ||
4496 | 81 | $ lxc file pull x1/var/log/cloud-init.log - | cloud-init analyze dump -i - | ||
4497 | 82 | $ lxc file pull x1/var/log/cloud-init.log - | \ | ||
4498 | 83 | python3 -m cloudinit.analyze dump -i - | ||
4499 | 84 | 85 | ||
4500 | 85 | Analyze quickstart - KVM | 86 | Analyze quickstart - KVM |
4501 | 86 | --------------------------- | 87 | --------------------------- |
4502 | 87 | To quickly analyze a KVM a cloud-init log: | 88 | To quickly analyze a KVM a cloud-init log: |
4503 | 88 | 89 | ||
4504 | 89 | 1. Download the current cloud image | 90 | 1. Download the current cloud image |
4506 | 90 | wget https://cloud-images.ubuntu.com/daily/server/xenial/current/xenial-server-cloudimg-amd64.img | 91 | |
4507 | 92 | .. code-block:: shell-session | ||
4508 | 93 | |||
4509 | 94 | $ wget https://cloud-images.ubuntu.com/daily/server/xenial/current/xenial-server-cloudimg-amd64.img | ||
4510 | 95 | |||
4511 | 91 | 2. Create a snapshot image to preserve the original cloud-image | 96 | 2. Create a snapshot image to preserve the original cloud-image |
4512 | 92 | 97 | ||
4514 | 93 | .. code-block:: bash | 98 | .. code-block:: shell-session |
4515 | 94 | 99 | ||
4516 | 95 | $ qemu-img create -b xenial-server-cloudimg-amd64.img -f qcow2 \ | 100 | $ qemu-img create -b xenial-server-cloudimg-amd64.img -f qcow2 \ |
4517 | 96 | test-cloudinit.qcow2 | 101 | test-cloudinit.qcow2 |
4518 | 97 | 102 | ||
4519 | 98 | 3. Create a seed image with metadata using `cloud-localds` | 103 | 3. Create a seed image with metadata using `cloud-localds` |
4520 | 99 | 104 | ||
4522 | 100 | .. code-block:: bash | 105 | .. code-block:: shell-session |
4523 | 101 | 106 | ||
4524 | 102 | $ cat > user-data <<EOF | 107 | $ cat > user-data <<EOF |
4525 | 103 | #cloud-config | 108 | #cloud-config |
4526 | @@ -108,18 +113,18 @@ To quickly analyze a KVM a cloud-init log: | |||
4527 | 108 | 113 | ||
4528 | 109 | 4. Launch your modified VM | 114 | 4. Launch your modified VM |
4529 | 110 | 115 | ||
4531 | 111 | .. code-block:: bash | 116 | .. code-block:: shell-session |
4532 | 112 | 117 | ||
4533 | 113 | $ kvm -m 512 -net nic -net user -redir tcp:2222::22 \ | 118 | $ kvm -m 512 -net nic -net user -redir tcp:2222::22 \ |
4536 | 114 | -drive file=test-cloudinit.qcow2,if=virtio,format=qcow2 \ | 119 | -drive file=test-cloudinit.qcow2,if=virtio,format=qcow2 \ |
4537 | 115 | -drive file=my-seed.img,if=virtio,format=raw | 120 | -drive file=my-seed.img,if=virtio,format=raw |
4538 | 116 | 121 | ||
4539 | 117 | 5. Analyze the boot (blame, dump, show) | 122 | 5. Analyze the boot (blame, dump, show) |
4540 | 118 | 123 | ||
4542 | 119 | .. code-block:: bash | 124 | .. code-block:: shell-session |
4543 | 120 | 125 | ||
4544 | 121 | $ ssh -p 2222 ubuntu@localhost 'cat /var/log/cloud-init.log' | \ | 126 | $ ssh -p 2222 ubuntu@localhost 'cat /var/log/cloud-init.log' | \ |
4546 | 122 | cloud-init analyze blame -i - | 127 | cloud-init analyze blame -i - |
4547 | 123 | 128 | ||
4548 | 124 | 129 | ||
4549 | 125 | Running single cloud config modules | 130 | Running single cloud config modules |
4550 | @@ -136,7 +141,7 @@ prevents a module from running again if it has already been run. To ensure that | |||
4551 | 136 | a module is run again, the desired frequency can be overridden on the | 141 | a module is run again, the desired frequency can be overridden on the |
4552 | 137 | commandline: | 142 | commandline: |
4553 | 138 | 143 | ||
4555 | 139 | .. code-block:: bash | 144 | .. code-block:: shell-session |
4556 | 140 | 145 | ||
4557 | 141 | $ sudo cloud-init single --name cc_ssh --frequency always | 146 | $ sudo cloud-init single --name cc_ssh --frequency always |
4558 | 142 | ... | 147 | ... |
4559 | diff --git a/doc/rtd/topics/modules.rst b/doc/rtd/topics/modules.rst | |||
4560 | index 7b14675..d9720f6 100644 | |||
4561 | --- a/doc/rtd/topics/modules.rst | |||
4562 | +++ b/doc/rtd/topics/modules.rst | |||
4563 | @@ -45,6 +45,7 @@ Modules | |||
4564 | 45 | .. automodule:: cloudinit.config.cc_seed_random | 45 | .. automodule:: cloudinit.config.cc_seed_random |
4565 | 46 | .. automodule:: cloudinit.config.cc_set_hostname | 46 | .. automodule:: cloudinit.config.cc_set_hostname |
4566 | 47 | .. automodule:: cloudinit.config.cc_set_passwords | 47 | .. automodule:: cloudinit.config.cc_set_passwords |
4567 | 48 | .. automodule:: cloudinit.config.cc_snap | ||
4568 | 48 | .. automodule:: cloudinit.config.cc_snappy | 49 | .. automodule:: cloudinit.config.cc_snappy |
4569 | 49 | .. automodule:: cloudinit.config.cc_snap_config | 50 | .. automodule:: cloudinit.config.cc_snap_config |
4570 | 50 | .. automodule:: cloudinit.config.cc_spacewalk | 51 | .. automodule:: cloudinit.config.cc_spacewalk |
4571 | @@ -52,6 +53,7 @@ Modules | |||
4572 | 52 | .. automodule:: cloudinit.config.cc_ssh_authkey_fingerprints | 53 | .. automodule:: cloudinit.config.cc_ssh_authkey_fingerprints |
4573 | 53 | .. automodule:: cloudinit.config.cc_ssh_import_id | 54 | .. automodule:: cloudinit.config.cc_ssh_import_id |
4574 | 54 | .. automodule:: cloudinit.config.cc_timezone | 55 | .. automodule:: cloudinit.config.cc_timezone |
4575 | 56 | .. automodule:: cloudinit.config.cc_ubuntu_advantage | ||
4576 | 55 | .. automodule:: cloudinit.config.cc_update_etc_hosts | 57 | .. automodule:: cloudinit.config.cc_update_etc_hosts |
4577 | 56 | .. automodule:: cloudinit.config.cc_update_hostname | 58 | .. automodule:: cloudinit.config.cc_update_hostname |
4578 | 57 | .. automodule:: cloudinit.config.cc_users_groups | 59 | .. automodule:: cloudinit.config.cc_users_groups |
4579 | diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst | |||
4580 | index 96c1cf5..1e99455 100644 | |||
4581 | --- a/doc/rtd/topics/network-config.rst | |||
4582 | +++ b/doc/rtd/topics/network-config.rst | |||
4583 | @@ -202,7 +202,7 @@ is helpful for examining expected output for a given input format. | |||
4584 | 202 | 202 | ||
4585 | 203 | CLI Interface : | 203 | CLI Interface : |
4586 | 204 | 204 | ||
4588 | 205 | .. code-block:: bash | 205 | .. code-block:: shell-session |
4589 | 206 | 206 | ||
4590 | 207 | % tools/net-convert.py --help | 207 | % tools/net-convert.py --help |
4591 | 208 | usage: net-convert.py [-h] --network-data PATH --kind | 208 | usage: net-convert.py [-h] --network-data PATH --kind |
4592 | @@ -222,7 +222,7 @@ CLI Interface : | |||
4593 | 222 | 222 | ||
4594 | 223 | Example output converting V2 to sysconfig: | 223 | Example output converting V2 to sysconfig: |
4595 | 224 | 224 | ||
4597 | 225 | .. code-block:: bash | 225 | .. code-block:: shell-session |
4598 | 226 | 226 | ||
4599 | 227 | % tools/net-convert.py --network-data v2.yaml --kind yaml \ | 227 | % tools/net-convert.py --network-data v2.yaml --kind yaml \ |
4600 | 228 | --output-kind sysconfig -d target | 228 | --output-kind sysconfig -d target |
4601 | diff --git a/doc/rtd/topics/tests.rst b/doc/rtd/topics/tests.rst | |||
4602 | index bf04bb3..cac4a6e 100644 | |||
4603 | --- a/doc/rtd/topics/tests.rst | |||
4604 | +++ b/doc/rtd/topics/tests.rst | |||
4605 | @@ -21,7 +21,7 @@ Overview | |||
4606 | 21 | In order to avoid the need for dependencies and ease the setup and | 21 | In order to avoid the need for dependencies and ease the setup and |
4607 | 22 | configuration users can run the integration tests via tox: | 22 | configuration users can run the integration tests via tox: |
4608 | 23 | 23 | ||
4610 | 24 | .. code-block:: bash | 24 | .. code-block:: shell-session |
4611 | 25 | 25 | ||
4612 | 26 | $ git clone https://git.launchpad.net/cloud-init | 26 | $ git clone https://git.launchpad.net/cloud-init |
4613 | 27 | $ cd cloud-init | 27 | $ cd cloud-init |
4614 | @@ -51,7 +51,7 @@ The first example will provide a complete end-to-end run of data | |||
4615 | 51 | collection and verification. There are additional examples below | 51 | collection and verification. There are additional examples below |
4616 | 52 | explaining how to run one or the other independently. | 52 | explaining how to run one or the other independently. |
4617 | 53 | 53 | ||
4619 | 54 | .. code-block:: bash | 54 | .. code-block:: shell-session |
4620 | 55 | 55 | ||
4621 | 56 | $ git clone https://git.launchpad.net/cloud-init | 56 | $ git clone https://git.launchpad.net/cloud-init |
4622 | 57 | $ cd cloud-init | 57 | $ cd cloud-init |
4623 | @@ -93,7 +93,7 @@ If developing tests it may be necessary to see if cloud-config works as | |||
4624 | 93 | expected and the correct files are pulled down. In this case only a | 93 | expected and the correct files are pulled down. In this case only a |
4625 | 94 | collect can be ran by running: | 94 | collect can be ran by running: |
4626 | 95 | 95 | ||
4628 | 96 | .. code-block:: bash | 96 | .. code-block:: shell-session |
4629 | 97 | 97 | ||
4630 | 98 | $ tox -e citest -- collect -n xenial --data-dir /tmp/collection | 98 | $ tox -e citest -- collect -n xenial --data-dir /tmp/collection |
4631 | 99 | 99 | ||
4632 | @@ -106,7 +106,7 @@ Verify | |||
4633 | 106 | When developing tests it is much easier to simply rerun the verify scripts | 106 | When developing tests it is much easier to simply rerun the verify scripts |
4634 | 107 | without the more lengthy collect process. This can be done by running: | 107 | without the more lengthy collect process. This can be done by running: |
4635 | 108 | 108 | ||
4637 | 109 | .. code-block:: bash | 109 | .. code-block:: shell-session |
4638 | 110 | 110 | ||
4639 | 111 | $ tox -e citest -- verify --data-dir /tmp/collection | 111 | $ tox -e citest -- verify --data-dir /tmp/collection |
4640 | 112 | 112 | ||
4641 | @@ -133,7 +133,7 @@ cloud-init deb from or use the ``tree_run`` command using a copy of | |||
4642 | 133 | cloud-init located in a different directory, use the option ``--cloud-init | 133 | cloud-init located in a different directory, use the option ``--cloud-init |
4643 | 134 | /path/to/cloud-init``. | 134 | /path/to/cloud-init``. |
4644 | 135 | 135 | ||
4646 | 136 | .. code-block:: bash | 136 | .. code-block:: shell-session |
4647 | 137 | 137 | ||
4648 | 138 | $ tox -e citest -- tree_run --verbose \ | 138 | $ tox -e citest -- tree_run --verbose \ |
4649 | 139 | --os-name xenial --os-name stretch \ | 139 | --os-name xenial --os-name stretch \ |
4650 | @@ -331,7 +331,7 @@ Integration tests are located under the `tests/cloud_tests` directory. | |||
4651 | 331 | Test configurations are placed under `configs` and the test verification | 331 | Test configurations are placed under `configs` and the test verification |
4652 | 332 | scripts under `testcases`: | 332 | scripts under `testcases`: |
4653 | 333 | 333 | ||
4655 | 334 | .. code-block:: bash | 334 | .. code-block:: shell-session |
4656 | 335 | 335 | ||
4657 | 336 | cloud-init$ tree -d tests/cloud_tests/ | 336 | cloud-init$ tree -d tests/cloud_tests/ |
4658 | 337 | tests/cloud_tests/ | 337 | tests/cloud_tests/ |
4659 | @@ -362,7 +362,7 @@ The following would create a test case named ``example`` under the | |||
4660 | 362 | ``modules`` category with the given description, and cloud config data read | 362 | ``modules`` category with the given description, and cloud config data read |
4661 | 363 | in from ``/tmp/user_data``. | 363 | in from ``/tmp/user_data``. |
4662 | 364 | 364 | ||
4664 | 365 | .. code-block:: bash | 365 | .. code-block:: shell-session |
4665 | 366 | 366 | ||
4666 | 367 | $ tox -e citest -- create modules/example \ | 367 | $ tox -e citest -- create modules/example \ |
4667 | 368 | -d "a simple example test case" -c "$(< /tmp/user_data)" | 368 | -d "a simple example test case" -c "$(< /tmp/user_data)" |
4668 | @@ -385,7 +385,7 @@ Development Checklist | |||
4669 | 385 | * Placed in the appropriate sub-folder in the test cases directory | 385 | * Placed in the appropriate sub-folder in the test cases directory |
4670 | 386 | * Tested by running the test: | 386 | * Tested by running the test: |
4671 | 387 | 387 | ||
4673 | 388 | .. code-block:: bash | 388 | .. code-block:: shell-session |
4674 | 389 | 389 | ||
4675 | 390 | $ tox -e citest -- run -verbose \ | 390 | $ tox -e citest -- run -verbose \ |
4676 | 391 | --os-name <release target> \ | 391 | --os-name <release target> \ |
4677 | @@ -404,14 +404,14 @@ These configuration files are the standard that the AWS cli and other AWS | |||
4678 | 404 | tools utilize for interacting directly with AWS itself and are normally | 404 | tools utilize for interacting directly with AWS itself and are normally |
4679 | 405 | generated when running ``aws configure``: | 405 | generated when running ``aws configure``: |
4680 | 406 | 406 | ||
4682 | 407 | .. code-block:: bash | 407 | .. code-block:: shell-session |
4683 | 408 | 408 | ||
4684 | 409 | $ cat $HOME/.aws/credentials | 409 | $ cat $HOME/.aws/credentials |
4685 | 410 | [default] | 410 | [default] |
4686 | 411 | aws_access_key_id = <KEY HERE> | 411 | aws_access_key_id = <KEY HERE> |
4687 | 412 | aws_secret_access_key = <KEY HERE> | 412 | aws_secret_access_key = <KEY HERE> |
4688 | 413 | 413 | ||
4690 | 414 | .. code-block:: bash | 414 | .. code-block:: shell-session |
4691 | 415 | 415 | ||
4692 | 416 | $ cat $HOME/.aws/config | 416 | $ cat $HOME/.aws/config |
4693 | 417 | [default] | 417 | [default] |
4694 | diff --git a/packages/debian/control.in b/packages/debian/control.in | |||
4695 | index 265b261..46da6df 100644 | |||
4696 | --- a/packages/debian/control.in | |||
4697 | +++ b/packages/debian/control.in | |||
4698 | @@ -10,7 +10,8 @@ Standards-Version: 3.9.6 | |||
4699 | 10 | Package: cloud-init | 10 | Package: cloud-init |
4700 | 11 | Architecture: all | 11 | Architecture: all |
4701 | 12 | Depends: ${misc:Depends}, | 12 | Depends: ${misc:Depends}, |
4703 | 13 | ${${python}:Depends} | 13 | ${${python}:Depends}, |
4704 | 14 | isc-dhcp-client | ||
4705 | 14 | Recommends: eatmydata, sudo, software-properties-common, gdisk | 15 | Recommends: eatmydata, sudo, software-properties-common, gdisk |
4706 | 15 | XB-Python-Version: ${python:Versions} | 16 | XB-Python-Version: ${python:Versions} |
4707 | 16 | Description: Init scripts for cloud instances | 17 | Description: Init scripts for cloud instances |
4708 | diff --git a/tests/cloud_tests/bddeb.py b/tests/cloud_tests/bddeb.py | |||
4709 | index a6d5069..b9cfcfa 100644 | |||
4710 | --- a/tests/cloud_tests/bddeb.py | |||
4711 | +++ b/tests/cloud_tests/bddeb.py | |||
4712 | @@ -16,7 +16,7 @@ pre_reqs = ['devscripts', 'equivs', 'git', 'tar'] | |||
4713 | 16 | 16 | ||
4714 | 17 | def _out(cmd_res): | 17 | def _out(cmd_res): |
4715 | 18 | """Get clean output from cmd result.""" | 18 | """Get clean output from cmd result.""" |
4717 | 19 | return cmd_res[0].strip() | 19 | return cmd_res[0].decode("utf-8").strip() |
4718 | 20 | 20 | ||
4719 | 21 | 21 | ||
4720 | 22 | def build_deb(args, instance): | 22 | def build_deb(args, instance): |
4721 | diff --git a/tests/cloud_tests/platforms/ec2/__init__.py b/tests/cloud_tests/platforms/ec2/__init__.py | |||
4722 | 23 | new file mode 100644 | 23 | new file mode 100644 |
4723 | index 0000000..e69de29 | |||
4724 | --- /dev/null | |||
4725 | +++ b/tests/cloud_tests/platforms/ec2/__init__.py | |||
4726 | diff --git a/tests/cloud_tests/platforms/lxd/__init__.py b/tests/cloud_tests/platforms/lxd/__init__.py | |||
4727 | 24 | new file mode 100644 | 24 | new file mode 100644 |
4728 | index 0000000..e69de29 | |||
4729 | --- /dev/null | |||
4730 | +++ b/tests/cloud_tests/platforms/lxd/__init__.py | |||
4731 | diff --git a/tests/cloud_tests/platforms/lxd/platform.py b/tests/cloud_tests/platforms/lxd/platform.py | |||
4732 | index 6a01692..f7251a0 100644 | |||
4733 | --- a/tests/cloud_tests/platforms/lxd/platform.py | |||
4734 | +++ b/tests/cloud_tests/platforms/lxd/platform.py | |||
4735 | @@ -101,8 +101,4 @@ class LXDPlatform(Platform): | |||
4736 | 101 | """ | 101 | """ |
4737 | 102 | return self.client.images.get_by_alias(alias) | 102 | return self.client.images.get_by_alias(alias) |
4738 | 103 | 103 | ||
4739 | 104 | def destroy(self): | ||
4740 | 105 | """Clean up platform data.""" | ||
4741 | 106 | super(LXDPlatform, self).destroy() | ||
4742 | 107 | |||
4743 | 108 | # vi: ts=4 expandtab | 104 | # vi: ts=4 expandtab |
4744 | diff --git a/tests/cloud_tests/platforms/nocloudkvm/__init__.py b/tests/cloud_tests/platforms/nocloudkvm/__init__.py | |||
4745 | 109 | new file mode 100644 | 105 | new file mode 100644 |
4746 | index 0000000..e69de29 | |||
4747 | --- /dev/null | |||
4748 | +++ b/tests/cloud_tests/platforms/nocloudkvm/__init__.py | |||
4749 | diff --git a/tests/cloud_tests/platforms/nocloudkvm/instance.py b/tests/cloud_tests/platforms/nocloudkvm/instance.py | |||
4750 | index 932dc0f..33ff3f2 100644 | |||
4751 | --- a/tests/cloud_tests/platforms/nocloudkvm/instance.py | |||
4752 | +++ b/tests/cloud_tests/platforms/nocloudkvm/instance.py | |||
4753 | @@ -109,7 +109,7 @@ class NoCloudKVMInstance(Instance): | |||
4754 | 109 | if self.pid: | 109 | if self.pid: |
4755 | 110 | try: | 110 | try: |
4756 | 111 | c_util.subp(['kill', '-9', self.pid]) | 111 | c_util.subp(['kill', '-9', self.pid]) |
4758 | 112 | except util.ProcessExectuionError: | 112 | except c_util.ProcessExecutionError: |
4759 | 113 | pass | 113 | pass |
4760 | 114 | 114 | ||
4761 | 115 | if self.pid_file: | 115 | if self.pid_file: |
4762 | diff --git a/tests/cloud_tests/platforms/nocloudkvm/platform.py b/tests/cloud_tests/platforms/nocloudkvm/platform.py | |||
4763 | index a7e6f5d..8593346 100644 | |||
4764 | --- a/tests/cloud_tests/platforms/nocloudkvm/platform.py | |||
4765 | +++ b/tests/cloud_tests/platforms/nocloudkvm/platform.py | |||
4766 | @@ -21,10 +21,6 @@ class NoCloudKVMPlatform(Platform): | |||
4767 | 21 | 21 | ||
4768 | 22 | platform_name = 'nocloud-kvm' | 22 | platform_name = 'nocloud-kvm' |
4769 | 23 | 23 | ||
4770 | 24 | def __init__(self, config): | ||
4771 | 25 | """Set up platform.""" | ||
4772 | 26 | super(NoCloudKVMPlatform, self).__init__(config) | ||
4773 | 27 | |||
4774 | 28 | def get_image(self, img_conf): | 24 | def get_image(self, img_conf): |
4775 | 29 | """Get image using specified image configuration. | 25 | """Get image using specified image configuration. |
4776 | 30 | 26 | ||
4777 | diff --git a/tests/cloud_tests/platforms/platforms.py b/tests/cloud_tests/platforms/platforms.py | |||
4778 | index 1542b3b..abbfebb 100644 | |||
4779 | --- a/tests/cloud_tests/platforms/platforms.py | |||
4780 | +++ b/tests/cloud_tests/platforms/platforms.py | |||
4781 | @@ -2,12 +2,15 @@ | |||
4782 | 2 | 2 | ||
4783 | 3 | """Base platform class.""" | 3 | """Base platform class.""" |
4784 | 4 | import os | 4 | import os |
4785 | 5 | import shutil | ||
4786 | 5 | 6 | ||
4787 | 6 | from simplestreams import filters, mirrors | 7 | from simplestreams import filters, mirrors |
4788 | 7 | from simplestreams import util as s_util | 8 | from simplestreams import util as s_util |
4789 | 8 | 9 | ||
4790 | 9 | from cloudinit import util as c_util | 10 | from cloudinit import util as c_util |
4791 | 10 | 11 | ||
4792 | 12 | from tests.cloud_tests import util | ||
4793 | 13 | |||
4794 | 11 | 14 | ||
4795 | 12 | class Platform(object): | 15 | class Platform(object): |
4796 | 13 | """Base class for platforms.""" | 16 | """Base class for platforms.""" |
4797 | @@ -17,7 +20,14 @@ class Platform(object): | |||
4798 | 17 | def __init__(self, config): | 20 | def __init__(self, config): |
4799 | 18 | """Set up platform.""" | 21 | """Set up platform.""" |
4800 | 19 | self.config = config | 22 | self.config = config |
4802 | 20 | self._generate_ssh_keys(config['data_dir']) | 23 | self.tmpdir = util.mkdtemp() |
4803 | 24 | if 'data_dir' in config: | ||
4804 | 25 | self.data_dir = config['data_dir'] | ||
4805 | 26 | else: | ||
4806 | 27 | self.data_dir = os.path.join(self.tmpdir, "data_dir") | ||
4807 | 28 | os.mkdir(self.data_dir) | ||
4808 | 29 | |||
4809 | 30 | self._generate_ssh_keys(self.data_dir) | ||
4810 | 21 | 31 | ||
4811 | 22 | def get_image(self, img_conf): | 32 | def get_image(self, img_conf): |
4812 | 23 | """Get image using specified image configuration. | 33 | """Get image using specified image configuration. |
4813 | @@ -29,7 +39,7 @@ class Platform(object): | |||
4814 | 29 | 39 | ||
4815 | 30 | def destroy(self): | 40 | def destroy(self): |
4816 | 31 | """Clean up platform data.""" | 41 | """Clean up platform data.""" |
4818 | 32 | pass | 42 | shutil.rmtree(self.tmpdir) |
4819 | 33 | 43 | ||
4820 | 34 | def _generate_ssh_keys(self, data_dir): | 44 | def _generate_ssh_keys(self, data_dir): |
4821 | 35 | """Generate SSH keys to be used with image.""" | 45 | """Generate SSH keys to be used with image.""" |
4822 | diff --git a/tests/cloud_tests/releases.yaml b/tests/cloud_tests/releases.yaml | |||
4823 | index d8bc170..c7dcbe8 100644 | |||
4824 | --- a/tests/cloud_tests/releases.yaml | |||
4825 | +++ b/tests/cloud_tests/releases.yaml | |||
4826 | @@ -30,6 +30,9 @@ default_release_config: | |||
4827 | 30 | mirror_url: https://cloud-images.ubuntu.com/daily | 30 | mirror_url: https://cloud-images.ubuntu.com/daily |
4828 | 31 | mirror_dir: '/srv/citest/images' | 31 | mirror_dir: '/srv/citest/images' |
4829 | 32 | keyring: /usr/share/keyrings/ubuntu-cloudimage-keyring.gpg | 32 | keyring: /usr/share/keyrings/ubuntu-cloudimage-keyring.gpg |
4830 | 33 | # The OS version formatted as Major.Minor is used to compare releases | ||
4831 | 34 | version: null # Each release needs to define this, for example 16.04 | ||
4832 | 35 | |||
4833 | 33 | ec2: | 36 | ec2: |
4834 | 34 | # Choose from: [ebs, instance-store] | 37 | # Choose from: [ebs, instance-store] |
4835 | 35 | root-store: ebs | 38 | root-store: ebs |
4836 | diff --git a/tests/cloud_tests/testcases.yaml b/tests/cloud_tests/testcases.yaml | |||
4837 | index 8e0fb62..a3e2990 100644 | |||
4838 | --- a/tests/cloud_tests/testcases.yaml | |||
4839 | +++ b/tests/cloud_tests/testcases.yaml | |||
4840 | @@ -15,6 +15,9 @@ base_test_data: | |||
4841 | 15 | instance-id: | | 15 | instance-id: | |
4842 | 16 | #!/bin/sh | 16 | #!/bin/sh |
4843 | 17 | cat /run/cloud-init/.instance-id | 17 | cat /run/cloud-init/.instance-id |
4844 | 18 | instance-data.json: | | ||
4845 | 19 | #!/bin/sh | ||
4846 | 20 | cat /run/cloud-init/instance-data.json | ||
4847 | 18 | result.json: | | 21 | result.json: | |
4848 | 19 | #!/bin/sh | 22 | #!/bin/sh |
4849 | 20 | cat /run/cloud-init/result.json | 23 | cat /run/cloud-init/result.json |
4850 | diff --git a/tests/cloud_tests/testcases/__init__.py b/tests/cloud_tests/testcases/__init__.py | |||
4851 | index a29a092..bd548f5 100644 | |||
4852 | --- a/tests/cloud_tests/testcases/__init__.py | |||
4853 | +++ b/tests/cloud_tests/testcases/__init__.py | |||
4854 | @@ -7,6 +7,8 @@ import inspect | |||
4855 | 7 | import unittest | 7 | import unittest |
4856 | 8 | from unittest.util import strclass | 8 | from unittest.util import strclass |
4857 | 9 | 9 | ||
4858 | 10 | from cloudinit.util import read_conf | ||
4859 | 11 | |||
4860 | 10 | from tests.cloud_tests import config | 12 | from tests.cloud_tests import config |
4861 | 11 | from tests.cloud_tests.testcases.base import CloudTestCase as base_test | 13 | from tests.cloud_tests.testcases.base import CloudTestCase as base_test |
4862 | 12 | 14 | ||
4863 | @@ -48,6 +50,7 @@ def get_suite(test_name, data, conf): | |||
4864 | 48 | def setUpClass(cls): | 50 | def setUpClass(cls): |
4865 | 49 | cls.data = data | 51 | cls.data = data |
4866 | 50 | cls.conf = conf | 52 | cls.conf = conf |
4867 | 53 | cls.release_conf = read_conf(config.RELEASES_CONF)['releases'] | ||
4868 | 51 | 54 | ||
4869 | 52 | suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(tmp)) | 55 | suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(tmp)) |
4870 | 53 | 56 | ||
4871 | diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py | |||
4872 | index 20e9595..324c7c9 100644 | |||
4873 | --- a/tests/cloud_tests/testcases/base.py | |||
4874 | +++ b/tests/cloud_tests/testcases/base.py | |||
4875 | @@ -4,10 +4,14 @@ | |||
4876 | 4 | 4 | ||
4877 | 5 | import crypt | 5 | import crypt |
4878 | 6 | import json | 6 | import json |
4879 | 7 | import re | ||
4880 | 7 | import unittest | 8 | import unittest |
4881 | 8 | 9 | ||
4882 | 10 | |||
4883 | 9 | from cloudinit import util as c_util | 11 | from cloudinit import util as c_util |
4884 | 10 | 12 | ||
4885 | 13 | SkipTest = unittest.SkipTest | ||
4886 | 14 | |||
4887 | 11 | 15 | ||
4888 | 12 | class CloudTestCase(unittest.TestCase): | 16 | class CloudTestCase(unittest.TestCase): |
4889 | 13 | """Base test class for verifiers.""" | 17 | """Base test class for verifiers.""" |
4890 | @@ -16,6 +20,43 @@ class CloudTestCase(unittest.TestCase): | |||
4891 | 16 | data = {} | 20 | data = {} |
4892 | 17 | conf = None | 21 | conf = None |
4893 | 18 | _cloud_config = None | 22 | _cloud_config = None |
4894 | 23 | release_conf = {} # The platform's os release configuration | ||
4895 | 24 | |||
4896 | 25 | expected_warnings = () # Subclasses set to ignore expected WARN logs | ||
4897 | 26 | |||
4898 | 27 | @property | ||
4899 | 28 | def os_cfg(self): | ||
4900 | 29 | return self.release_conf[self.os_name]['default'] | ||
4901 | 30 | |||
4902 | 31 | def is_distro(self, distro_name): | ||
4903 | 32 | return self.os_cfg['os'] == distro_name | ||
4904 | 33 | |||
4905 | 34 | def os_version_cmp(self, cmp_version): | ||
4906 | 35 | """Compare the version of the test to comparison_version. | ||
4907 | 36 | |||
4908 | 37 | @param: cmp_version: Either a float or a string representing | ||
4909 | 38 | a release os from releases.yaml (e.g. centos66) | ||
4910 | 39 | |||
4911 | 40 | @return: -1 when version < cmp_version, 0 when version=cmp_version and | ||
4912 | 41 | 1 when version > cmp_version. | ||
4913 | 42 | """ | ||
4914 | 43 | version = self.release_conf[self.os_name]['default']['version'] | ||
4915 | 44 | if isinstance(cmp_version, str): | ||
4916 | 45 | cmp_version = self.release_conf[cmp_version]['default']['version'] | ||
4917 | 46 | if version < cmp_version: | ||
4918 | 47 | return -1 | ||
4919 | 48 | elif version == cmp_version: | ||
4920 | 49 | return 0 | ||
4921 | 50 | else: | ||
4922 | 51 | return 1 | ||
4923 | 52 | |||
4924 | 53 | @property | ||
4925 | 54 | def os_name(self): | ||
4926 | 55 | return self.data.get('os_name', 'UNKNOWN') | ||
4927 | 56 | |||
4928 | 57 | @property | ||
4929 | 58 | def platform(self): | ||
4930 | 59 | return self.data.get('platform', 'UNKNOWN') | ||
4931 | 19 | 60 | ||
4932 | 20 | @property | 61 | @property |
4933 | 21 | def cloud_config(self): | 62 | def cloud_config(self): |
4934 | @@ -72,12 +113,134 @@ class CloudTestCase(unittest.TestCase): | |||
4935 | 72 | self.assertEqual(len(result['errors']), 0) | 113 | self.assertEqual(len(result['errors']), 0) |
4936 | 73 | 114 | ||
4937 | 74 | def test_no_warnings_in_log(self): | 115 | def test_no_warnings_in_log(self): |
4939 | 75 | """Warnings should not be found in the log.""" | 116 | """Unexpected warnings should not be found in the log.""" |
4940 | 117 | warnings = [ | ||
4941 | 118 | l for l in self.get_data_file('cloud-init.log').splitlines() | ||
4942 | 119 | if 'WARN' in l] | ||
4943 | 120 | joined_warnings = '\n'.join(warnings) | ||
4944 | 121 | for expected_warning in self.expected_warnings: | ||
4945 | 122 | self.assertIn( | ||
4946 | 123 | expected_warning, joined_warnings, | ||
4947 | 124 | msg="Did not find %s in cloud-init.log" % expected_warning) | ||
4948 | 125 | # Prune expected from discovered warnings | ||
4949 | 126 | warnings = [w for w in warnings if expected_warning not in w] | ||
4950 | 127 | self.assertEqual( | ||
4951 | 128 | [], warnings, msg="'WARN' found inside cloud-init.log") | ||
4952 | 129 | |||
4953 | 130 | def test_instance_data_json_ec2(self): | ||
4954 | 131 | """Validate instance-data.json content by ec2 platform. | ||
4955 | 132 | |||
4956 | 133 | This content is sourced by snapd when determining snapstore endpoints. | ||
4957 | 134 | We validate expected values per cloud type to ensure we don't break | ||
4958 | 135 | snapd. | ||
4959 | 136 | """ | ||
4960 | 137 | if self.platform != 'ec2': | ||
4961 | 138 | raise SkipTest( | ||
4962 | 139 | 'Skipping ec2 instance-data.json on %s' % self.platform) | ||
4963 | 140 | out = self.get_data_file('instance-data.json') | ||
4964 | 141 | if not out: | ||
4965 | 142 | if self.is_distro('ubuntu') and self.os_version_cmp('bionic') >= 0: | ||
4966 | 143 | raise AssertionError( | ||
4967 | 144 | 'No instance-data.json found on %s' % self.os_name) | ||
4968 | 145 | raise SkipTest( | ||
4969 | 146 | 'Skipping instance-data.json test.' | ||
4970 | 147 | ' OS: %s not bionic or newer' % self.os_name) | ||
4971 | 148 | instance_data = json.loads(out) | ||
4972 | 149 | self.assertEqual( | ||
4973 | 150 | ['ds/user-data'], instance_data['base64-encoded-keys']) | ||
4974 | 151 | ds = instance_data.get('ds', {}) | ||
4975 | 152 | macs = ds.get('network', {}).get('interfaces', {}).get('macs', {}) | ||
4976 | 153 | if not macs: | ||
4977 | 154 | raise AssertionError('No network data from EC2 meta-data') | ||
4978 | 155 | # Check meta-data items we depend on | ||
4979 | 156 | expected_net_keys = [ | ||
4980 | 157 | 'public-ipv4s', 'ipv4-associations', 'local-hostname', | ||
4981 | 158 | 'public-hostname'] | ||
4982 | 159 | for mac, mac_data in macs.items(): | ||
4983 | 160 | for key in expected_net_keys: | ||
4984 | 161 | self.assertIn(key, mac_data) | ||
4985 | 162 | self.assertIsNotNone( | ||
4986 | 163 | ds.get('placement', {}).get('availability-zone'), | ||
4987 | 164 | 'Could not determine EC2 Availability zone placement') | ||
4988 | 165 | ds = instance_data.get('ds', {}) | ||
4989 | 166 | v1_data = instance_data.get('v1', {}) | ||
4990 | 167 | self.assertIsNotNone( | ||
4991 | 168 | v1_data['availability-zone'], 'expected ec2 availability-zone') | ||
4992 | 169 | self.assertEqual('aws', v1_data['cloud-name']) | ||
4993 | 170 | self.assertIn('i-', v1_data['instance-id']) | ||
4994 | 171 | self.assertIn('ip-', v1_data['local-hostname']) | ||
4995 | 172 | self.assertIsNotNone(v1_data['region'], 'expected ec2 region') | ||
4996 | 173 | |||
4997 | 174 | def test_instance_data_json_lxd(self): | ||
4998 | 175 | """Validate instance-data.json content by lxd platform. | ||
4999 | 176 | |||
5000 | 177 | This content is sourced by snapd when determining snapstore endpoints. |
The diff has been truncated for viewing.
PASSED: Continuous integration, rev:719729895180aec0e5e42f6ae62fa88fe8e2a523
https://jenkins.ubuntu.com/server/job/cloud-init-ci/941/
Executed test runs:
SUCCESS: Checkout
SUCCESS: Unit & Style Tests
SUCCESS: Ubuntu LTS: Build
SUCCESS: Ubuntu LTS: Integration
SUCCESS: MAAS Compatibility Testing
IN_PROGRESS: Declarative: Post Actions
Click here to trigger a rebuild:
https://jenkins.ubuntu.com/server/job/cloud-init-ci/941/rebuild