Merge ~smoser/cloud-init:bug-lp-1645644-ntp into ~raharper/cloud-init:bug-lp-1645644-ntp
Proposed by Scott Moser on 2017-03-09
| Status: | Merged |
|---|---|
| Merge reported by: | Scott Moser |
| Merged at revision: | 2195736c1e3116ebe3c52b3995fd3e39b9dd0e71 |
| Proposed branch: | ~smoser/cloud-init:bug-lp-1645644-ntp |
| Merge into: | ~raharper/cloud-init:bug-lp-1645644-ntp |
| Diff against target: | 4168 lines (+2525/-393), 47 files modified |
| Related bugs: | |

Files modified: Makefile (+9/-5), cloudinit/cmd/main.py (+150/-16), cloudinit/config/cc_set_hostname.py (+1/-1), cloudinit/config/cc_set_passwords.py (+20/-2), cloudinit/distros/parsers/resolv_conf.py (+7/-4), cloudinit/distros/rhel.py (+12/-7), cloudinit/ec2_utils.py (+4/-1), cloudinit/helpers.py (+2/-0), cloudinit/net/eni.py (+19/-14), cloudinit/net/sysconfig.py (+28/-13), cloudinit/settings.py (+2/-0), cloudinit/sources/DataSourceAliYun.py (+4/-0), cloudinit/sources/DataSourceEc2.py (+145/-2), cloudinit/sources/DataSourceOVF.py (+33/-4), cloudinit/sources/DataSourceOpenStack.py (+12/-3), cloudinit/sources/helpers/vmware/imc/config_nic.py (+6/-18), cloudinit/ssh_util.py (+3/-0), cloudinit/stages.py (+14/-1), cloudinit/util.py (+0/-44), cloudinit/warnings.py (+139/-0), doc/examples/cloud-config.txt (+1/-1), doc/rtd/topics/datasources/altcloud.rst (+2/-2), doc/rtd/topics/datasources/openstack.rst (+35/-1), doc/rtd/topics/format.rst (+7/-6), packages/debian/rules.in (+2/-0), setup.py (+2/-1), systemd/cloud-init-generator (+37/-2), tests/unittests/helpers.py (+26/-48), tests/unittests/test__init__.py (+59/-33), tests/unittests/test_atomic_helper.py (+2/-2), tests/unittests/test_data.py (+37/-16), tests/unittests/test_datasource/test_gce.py (+3/-1), tests/unittests/test_datasource/test_openstack.py (+5/-6), tests/unittests/test_distros/test_resolv.py (+1/-1), tests/unittests/test_distros/test_user_data_normalize.py (+0/-0), tests/unittests/test_ec2_util.py (+47/-2), tests/unittests/test_net.py (+233/-45), tests/unittests/test_sshutil.py (+23/-1), tools/Z99-cloud-locale-test.sh (+74/-74), tools/Z99-cloudinit-warnings.sh (+30/-0), tools/ds-identify (+1252/-0), tools/make-mime.py (+1/-1), tools/make-tarball (+1/-1), tools/mock-meta.py (+25/-12), tools/read-version (+1/-1), tools/validate-yaml.py (+1/-1), tox.ini (+8/-0)
| Reviewer | Review Type | Date Requested | Status |
|---|---|---|---|
| Ryan Harper | | 2017-03-09 | Pending |
Ryan Harper (raharper) wrote:
Preview Diff
| 1 | diff --git a/Makefile b/Makefile |
| 2 | index 5d35dcc..5940ed7 100644 |
| 3 | --- a/Makefile |
| 4 | +++ b/Makefile |
| 5 | @@ -27,13 +27,16 @@ ifeq ($(distro),) |
| 6 | distro = redhat |
| 7 | endif |
| 8 | |
| 9 | -READ_VERSION=$(shell $(PYVER) $(CWD)/tools/read-version) |
| 10 | +READ_VERSION=$(shell $(PYVER) $(CWD)/tools/read-version || \ |
| 11 | + echo read-version-failed) |
| 12 | CODE_VERSION=$(shell $(PYVER) -c "from cloudinit import version; print(version.version_string())") |
| 13 | |
| 14 | |
| 15 | all: check |
| 16 | |
| 17 | -check: check_version pep8 $(pyflakes) test $(yaml) |
| 18 | +check: check_version test $(yaml) |
| 19 | + |
| 20 | +style-check: pep8 $(pyflakes) |
| 21 | |
| 22 | pep8: |
| 23 | @$(CWD)/tools/run-pep8 |
| 24 | @@ -62,8 +65,8 @@ test: $(unittests) |
| 25 | |
| 26 | check_version: |
| 27 | @if [ "$(READ_VERSION)" != "$(CODE_VERSION)" ]; then \ |
| 28 | - echo "Error: read-version version $(READ_VERSION)" \ |
| 29 | - "not equal to code version $(CODE_VERSION)"; exit 2; \ |
| 30 | + echo "Error: read-version version '$(READ_VERSION)'" \ |
| 31 | + "not equal to code version '$(CODE_VERSION)'"; exit 2; \ |
| 32 | else true; fi |
| 33 | |
| 34 | clean_pyc: |
| 35 | @@ -73,7 +76,7 @@ clean: clean_pyc |
| 36 | rm -rf /var/log/cloud-init.log /var/lib/cloud/ |
| 37 | |
| 38 | yaml: |
| 39 | - @$(CWD)/tools/validate-yaml.py $(YAML_FILES) |
| 40 | + @$(PYVER) $(CWD)/tools/validate-yaml.py $(YAML_FILES) |
| 41 | |
| 42 | rpm: |
| 43 | ./packages/brpm --distro $(distro) |
| 44 | @@ -83,3 +86,4 @@ deb: |
| 45 | |
| 46 | .PHONY: test pyflakes pyflakes3 clean pep8 rpm deb yaml check_version |
| 47 | .PHONY: pip-test-requirements pip-requirements clean_pyc unittest unittest3 |
| 48 | +.PHONY: style-check |
| 49 | diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py |
| 50 | index c83496c..6ff4e1c 100644 |
| 51 | --- a/cloudinit/cmd/main.py |
| 52 | +++ b/cloudinit/cmd/main.py |
| 53 | @@ -26,8 +26,10 @@ from cloudinit import signal_handler |
| 54 | from cloudinit import sources |
| 55 | from cloudinit import stages |
| 56 | from cloudinit import templater |
| 57 | +from cloudinit import url_helper |
| 58 | from cloudinit import util |
| 59 | from cloudinit import version |
| 60 | +from cloudinit import warnings |
| 61 | |
| 62 | from cloudinit import reporting |
| 63 | from cloudinit.reporting import events |
| 64 | @@ -129,23 +131,104 @@ def apply_reporting_cfg(cfg): |
| 65 | reporting.update_configuration(cfg.get('reporting')) |
| 66 | |
| 67 | |
| 68 | +def parse_cmdline_url(cmdline, names=('cloud-config-url', 'url')): |
| 69 | + data = util.keyval_str_to_dict(cmdline) |
| 70 | + for key in names: |
| 71 | + if key in data: |
| 72 | + return key, data[key] |
| 73 | + raise KeyError("No keys (%s) found in string '%s'" % |
| 74 | + (cmdline, names)) |
| 75 | + |
| 76 | + |
| 77 | +def attempt_cmdline_url(path, network=True, cmdline=None): |
| 78 | + """Write data from url referenced in command line to path. |
| 79 | + |
| 80 | + path: a file to write content to if downloaded. |
| 81 | + network: should network access be assumed. |
| 82 | + cmdline: the cmdline to parse for cloud-config-url. |
| 83 | + |
| 84 | + This is used in MAAS datasource, in "ephemeral" (read-only root) |
| 85 | + environment where the instance netboots to iscsi ro root. |
| 86 | + and the entity that controls the pxe config has to configure |
| 87 | + the maas datasource. |
| 88 | + |
| 89 | + An attempt is made on network urls even in local datasource |
| 90 | + for case of network set up in initramfs. |
| 91 | + |
| 92 | + Return value is a tuple of a logger function (logging.DEBUG) |
| 93 | + and a message indicating what happened. |
| 94 | + """ |
| 95 | + |
| 96 | + if cmdline is None: |
| 97 | + cmdline = util.get_cmdline() |
| 98 | + |
| 99 | + try: |
| 100 | + cmdline_name, url = parse_cmdline_url(cmdline) |
| 101 | + except KeyError: |
| 102 | + return (logging.DEBUG, "No kernel command line url found.") |
| 103 | + |
| 104 | + path_is_local = url.startswith("file://") or url.startswith("/") |
| 105 | + |
| 106 | + if path_is_local and os.path.exists(path): |
| 107 | + if network: |
| 108 | + m = ("file '%s' existed, possibly from local stage download" |
| 109 | + " of command line url '%s'. Not re-writing." % (path, url)) |
| 110 | + level = logging.INFO |
| 111 | + if path_is_local: |
| 112 | + level = logging.DEBUG |
| 113 | + else: |
| 114 | + m = ("file '%s' existed, possibly from previous boot download" |
| 115 | + " of command line url '%s'. Not re-writing." % (path, url)) |
| 116 | + level = logging.WARN |
| 117 | + |
| 118 | + return (level, m) |
| 119 | + |
| 120 | + kwargs = {'url': url, 'timeout': 10, 'retries': 2} |
| 121 | + if network or path_is_local: |
| 122 | + level = logging.WARN |
| 123 | + kwargs['sec_between'] = 1 |
| 124 | + else: |
| 125 | + level = logging.DEBUG |
| 126 | + kwargs['sec_between'] = .1 |
| 127 | + |
| 128 | + data = None |
| 129 | + header = b'#cloud-config' |
| 130 | + try: |
| 131 | + resp = util.read_file_or_url(**kwargs) |
| 132 | + if resp.ok(): |
| 133 | + data = resp.contents |
| 134 | + if not resp.contents.startswith(header): |
| 135 | + if cmdline_name == 'cloud-config-url': |
| 136 | + level = logging.WARN |
| 137 | + else: |
| 138 | + level = logging.INFO |
| 139 | + return ( |
| 140 | + level, |
| 141 | + "contents of '%s' did not start with %s" % (url, header)) |
| 142 | + else: |
| 143 | + return (level, |
| 144 | + "url '%s' returned code %s. Ignoring." % (url, resp.code)) |
| 145 | + |
| 146 | + except url_helper.UrlError as e: |
| 147 | + return (level, "retrieving url '%s' failed: %s" % (url, e)) |
| 148 | + |
| 149 | + util.write_file(path, data, mode=0o600) |
| 150 | + return (logging.INFO, |
| 151 | + "wrote cloud-config data from %s='%s' to %s" % |
| 152 | + (cmdline_name, url, path)) |
| 153 | + |
| 154 | + |
| 155 | def main_init(name, args): |
| 156 | deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK] |
| 157 | if args.local: |
| 158 | deps = [sources.DEP_FILESYSTEM] |
| 159 | |
| 160 | - if not args.local: |
| 161 | - # See doc/kernel-cmdline.txt |
| 162 | - # |
| 163 | - # This is used in maas datasource, in "ephemeral" (read-only root) |
| 164 | - # environment where the instance netboots to iscsi ro root. |
| 165 | - # and the entity that controls the pxe config has to configure |
| 166 | - # the maas datasource. |
| 167 | - # |
| 168 | - # Could be used elsewhere, only works on network based (not local). |
| 169 | - root_name = "%s.d" % (CLOUD_CONFIG) |
| 170 | - target_fn = os.path.join(root_name, "91_kernel_cmdline_url.cfg") |
| 171 | - util.read_write_cmdline_url(target_fn) |
| 172 | + early_logs = [] |
| 173 | + early_logs.append( |
| 174 | + attempt_cmdline_url( |
| 175 | + path=os.path.join("%s.d" % CLOUD_CONFIG, |
| 176 | + "91_kernel_cmdline_url.cfg"), |
| 177 | + network=not args.local)) |
| 178 | |
| 179 | # Cloud-init 'init' stage is broken up into the following sub-stages |
| 180 | # 1. Ensure that the init object fetches its config without errors |
| 181 | @@ -171,12 +254,14 @@ def main_init(name, args): |
| 182 | outfmt = None |
| 183 | errfmt = None |
| 184 | try: |
| 185 | - LOG.debug("Closing stdin") |
| 186 | + early_logs.append((logging.DEBUG, "Closing stdin.")) |
| 187 | util.close_stdin() |
| 188 | (outfmt, errfmt) = util.fixup_output(init.cfg, name) |
| 189 | except Exception: |
| 190 | - util.logexc(LOG, "Failed to setup output redirection!") |
| 191 | - print_exc("Failed to setup output redirection!") |
| 192 | + msg = "Failed to setup output redirection!" |
| 193 | + util.logexc(LOG, msg) |
| 194 | + print_exc(msg) |
| 195 | + early_logs.append((logging.WARN, msg)) |
| 196 | if args.debug: |
| 197 | # Reset so that all the debug handlers are closed out |
| 198 | LOG.debug(("Logging being reset, this logger may no" |
| 199 | @@ -190,6 +275,10 @@ def main_init(name, args): |
| 200 | # been redirected and log now configured. |
| 201 | welcome(name, msg=w_msg) |
| 202 | |
| 203 | + # re-play early log messages before logging was setup |
| 204 | + for lvl, msg in early_logs: |
| 205 | + LOG.log(lvl, msg) |
| 206 | + |
| 207 | # Stage 3 |
| 208 | try: |
| 209 | init.initialize() |
| 210 | @@ -224,8 +313,15 @@ def main_init(name, args): |
| 211 | " would allow us to stop early.") |
| 212 | else: |
| 213 | existing = "check" |
| 214 | - if util.get_cfg_option_bool(init.cfg, 'manual_cache_clean', False): |
| 215 | + mcfg = util.get_cfg_option_bool(init.cfg, 'manual_cache_clean', False) |
| 216 | + if mcfg: |
| 217 | + LOG.debug("manual cache clean set from config") |
| 218 | existing = "trust" |
| 219 | + else: |
| 220 | + mfile = path_helper.get_ipath_cur("manual_clean_marker") |
| 221 | + if os.path.exists(mfile): |
| 222 | + LOG.debug("manual cache clean found from marker: %s", mfile) |
| 223 | + existing = "trust" |
| 224 | |
| 225 | init.purge_cache() |
| 226 | # Delete the non-net file as well |
| 227 | @@ -318,10 +414,48 @@ def main_init(name, args): |
| 228 | # give the activated datasource a chance to adjust |
| 229 | init.activate_datasource() |
| 230 | |
| 231 | + di_report_warn(datasource=init.datasource, cfg=init.cfg) |
| 232 | + |
| 233 | # Stage 10 |
| 234 | return (init.datasource, run_module_section(mods, name, name)) |
| 235 | |
| 236 | |
| 237 | +def di_report_warn(datasource, cfg): |
| 238 | + if 'di_report' not in cfg: |
| 239 | + LOG.debug("no di_report found in config.") |
| 240 | + return |
| 241 | + |
| 242 | + dicfg = cfg.get('di_report', {}) |
| 243 | + if not isinstance(dicfg, dict): |
| 244 | + LOG.warn("di_report config not a dictionary: %s", dicfg) |
| 245 | + return |
| 246 | + |
| 247 | + dslist = dicfg.get('datasource_list') |
| 248 | + if dslist is None: |
| 249 | + LOG.warn("no 'datasource_list' found in di_report.") |
| 250 | + return |
| 251 | + elif not isinstance(dslist, list): |
| 252 | + LOG.warn("di_report/datasource_list not a list: %s", dslist) |
| 253 | + return |
| 254 | + |
| 255 | + # ds.__module__ is like cloudinit.sources.DataSourceName |
| 256 | + # where Name is the thing that shows up in datasource_list. |
| 257 | + modname = datasource.__module__.rpartition(".")[2] |
| 258 | + if modname.startswith(sources.DS_PREFIX): |
| 259 | + modname = modname[len(sources.DS_PREFIX):] |
| 260 | + else: |
| 261 | + LOG.warn("Datasource '%s' came from unexpected module '%s'.", |
| 262 | + datasource, modname) |
| 263 | + |
| 264 | + if modname in dslist: |
| 265 | + LOG.debug("used datasource '%s' from '%s' was in di_report's list: %s", |
| 266 | + datasource, modname, dslist) |
| 267 | + return |
| 268 | + |
| 269 | + warnings.show_warning('dsid_missing_source', cfg, |
| 270 | + source=modname, dslist=str(dslist)) |
| 271 | + |
| 272 | + |
| 273 | def main_modules(action_name, args): |
| 274 | name = args.mode |
| 275 | # Cloud-init 'modules' stages are broken up into the following sub-stages |
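
As a reading aid for the new kernel command line handling above: the parsing that `attempt_cmdline_url` relies on can be sketched standalone. This is a minimal approximation of the diff's `parse_cmdline_url` (a plain `key=value` split stands in for `util.keyval_str_to_dict`), not the cloud-init implementation itself:

```python
def parse_cmdline_url(cmdline, names=('cloud-config-url', 'url')):
    # plain key=value split; the real code uses util.keyval_str_to_dict()
    data = dict(tok.split('=', 1) for tok in cmdline.split() if '=' in tok)
    for key in names:
        if key in data:
            return key, data[key]
    raise KeyError("none of %s found in cmdline '%s'" % (names, cmdline))


cmdline = "root=/dev/vda1 ro cloud-config-url=http://10.0.0.1/my-config"
print(parse_cmdline_url(cmdline))
# -> ('cloud-config-url', 'http://10.0.0.1/my-config')
```

The returned key name is what decides the log level later: a failed `cloud-config-url` download warns, while a plain `url` key only logs at INFO.
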
| 276 | diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py |
| 277 | index e42799f..aa3dfe5 100644 |
| 278 | --- a/cloudinit/config/cc_set_hostname.py |
| 279 | +++ b/cloudinit/config/cc_set_hostname.py |
| 280 | @@ -27,7 +27,7 @@ will be used. |
| 281 | |
| 282 | **Config keys**:: |
| 283 | |
| 284 | - perserve_hostname: <true/false> |
| 285 | + preserve_hostname: <true/false> |
| 286 | fqdn: <fqdn> |
| 287 | hostname: <fqdn/hostname> |
| 288 | """ |
| 289 | diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py |
| 290 | index cf1f59e..fa343a7 100755 |
| 291 | --- a/cloudinit/config/cc_set_passwords.py |
| 292 | +++ b/cloudinit/config/cc_set_passwords.py |
| 293 | @@ -45,6 +45,16 @@ enabled, disabled, or left to system defaults using ``ssh_pwauth``. |
| 294 | expire: <true/false> |
| 295 | |
| 296 | chpasswd: |
| 297 | + list: | |
| 298 | + user1:password1 |
| 299 | + user2:Random |
| 300 | + user3:password3 |
| 301 | + user4:R |
| 302 | + |
| 303 | + ## |
| 304 | + # or as yaml list |
| 305 | + ## |
| 306 | + chpasswd: |
| 307 | list: |
| 308 | - user1:password1 |
| 309 | - user2:Random |
| 310 | @@ -79,7 +89,15 @@ def handle(_name, cfg, cloud, log, args): |
| 311 | |
| 312 | if 'chpasswd' in cfg: |
| 313 | chfg = cfg['chpasswd'] |
| 314 | - plist = util.get_cfg_option_str(chfg, 'list', plist) |
| 315 | + if isinstance(chfg['list'], list): |
| 316 | + log.debug("Handling input for chpasswd as list.") |
| 317 | + plist = util.get_cfg_option_list(chfg, 'list', plist) |
| 318 | + else: |
| 319 | + log.debug("Handling input for chpasswd as multiline string.") |
| 320 | + plist = util.get_cfg_option_str(chfg, 'list', plist) |
| 321 | + if plist: |
| 322 | + plist = plist.splitlines() |
| 323 | + |
| 324 | expire = util.get_cfg_option_bool(chfg, 'expire', expire) |
| 325 | |
| 326 | if not plist and password: |
| 327 | @@ -95,7 +113,7 @@ def handle(_name, cfg, cloud, log, args): |
| 328 | plist_in = [] |
| 329 | randlist = [] |
| 330 | users = [] |
| 331 | - for line in plist.splitlines(): |
| 332 | + for line in plist: |
| 333 | u, p = line.split(':', 1) |
| 334 | if p == "R" or p == "RANDOM": |
| 335 | p = rand_user_password() |
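
The `cc_set_passwords` change above accepts `chpasswd: list` either as a multi-line string or as a YAML list, and both forms should yield the same sequence of `user:password` entries. A minimal sketch of that normalization (illustration only; `normalize_chpasswd_list` is not a cloud-init function):

```python
def normalize_chpasswd_list(value):
    # YAML list form passes through; multi-line string form is split
    if isinstance(value, list):
        return list(value)
    return value.splitlines()


multiline = "user1:password1\nuser2:RANDOM"
as_list = ["user1:password1", "user2:RANDOM"]

for plist in (multiline, as_list):
    for entry in normalize_chpasswd_list(plist):
        user, _, password = entry.partition(':')
        print(user, password)
```
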
| 336 | diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py |
| 337 | index ff6ee30..d1f8a04 100644 |
| 338 | --- a/cloudinit/distros/parsers/resolv_conf.py |
| 339 | +++ b/cloudinit/distros/parsers/resolv_conf.py |
| 340 | @@ -6,9 +6,11 @@ |
| 341 | |
| 342 | from six import StringIO |
| 343 | |
| 344 | +from cloudinit.distros.parsers import chop_comment |
| 345 | +from cloudinit import log as logging |
| 346 | from cloudinit import util |
| 347 | |
| 348 | -from cloudinit.distros.parsers import chop_comment |
| 349 | +LOG = logging.getLogger(__name__) |
| 350 | |
| 351 | |
| 352 | # See: man resolv.conf |
| 353 | @@ -79,9 +81,10 @@ class ResolvConf(object): |
| 354 | if len(new_ns) == len(current_ns): |
| 355 | return current_ns |
| 356 | if len(current_ns) >= 3: |
| 357 | - # Hard restriction on only 3 name servers |
| 358 | - raise ValueError(("Adding %r would go beyond the " |
| 359 | - "'3' maximum name servers") % (ns)) |
| 360 | + LOG.warn("ignoring nameserver %r: adding would " |
| 361 | + "exceed the maximum of " |
| 362 | + "'3' name servers (see resolv.conf(5))" % (ns)) |
| 363 | + return current_ns[:3] |
| 364 | self._remove_option('nameserver') |
| 365 | for n in new_ns: |
| 366 | self._contents.append(('option', ['nameserver', n, ''])) |
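
The `resolv_conf.py` change replaces the hard `ValueError` with a warning when a fourth nameserver would be added, keeping only the first three. A standalone sketch of the new behaviour (the `add_nameserver` helper here is hypothetical, not the parser's API):

```python
import logging

LOG = logging.getLogger(__name__)
MAX_NAMESERVERS = 3  # the resolv.conf(5) limit


def add_nameserver(current, ns):
    if ns in current:
        return current
    if len(current) >= MAX_NAMESERVERS:
        LOG.warning("ignoring nameserver %r: adding would exceed the "
                    "maximum of %d name servers", ns, MAX_NAMESERVERS)
        return current[:MAX_NAMESERVERS]
    return current + [ns]


servers = ["10.0.0.1", "10.0.0.2", "10.0.0.3"]
print(add_nameserver(servers, "10.0.0.4"))  # unchanged; a warning is logged
```
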
| 367 | diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py |
| 368 | index aa55838..7498c63 100644 |
| 369 | --- a/cloudinit/distros/rhel.py |
| 370 | +++ b/cloudinit/distros/rhel.py |
| 371 | @@ -190,13 +190,18 @@ class Distro(distros.Distro): |
| 372 | if pkgs is None: |
| 373 | pkgs = [] |
| 374 | |
| 375 | - cmd = ['yum'] |
| 376 | - # If enabled, then yum will be tolerant of errors on the command line |
| 377 | - # with regard to packages. |
| 378 | - # For example: if you request to install foo, bar and baz and baz is |
| 379 | - # installed; yum won't error out complaining that baz is already |
| 380 | - # installed. |
| 381 | - cmd.append("-t") |
| 382 | + if util.which('dnf'): |
| 383 | + LOG.debug('Using DNF for package management') |
| 384 | + cmd = ['dnf'] |
| 385 | + else: |
| 386 | + LOG.debug('Using YUM for package management') |
| 387 | + # the '-t' argument makes yum tolerant of errors on the command |
| 388 | + # line with regard to packages. |
| 389 | + # |
| 390 | + # For example: if you request to install foo, bar and baz and baz |
| 391 | + # is installed; yum won't error out complaining that baz is already |
| 392 | + # installed. |
| 393 | + cmd = ['yum', '-t'] |
| 394 | # Determines whether or not yum prompts for confirmation |
| 395 | # of critical actions. We don't want to prompt... |
| 396 | cmd.append("-y") |
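
The `rhel.py` change selects dnf when it is installed and otherwise falls back to yum with its error-tolerant `-t` flag. A small sketch of that selection, using `shutil.which` in place of cloud-init's `util.which`:

```python
from shutil import which


def base_package_command():
    if which('dnf'):
        cmd = ['dnf']
    else:
        # '-t' makes yum tolerant of already-installed packages on the
        # command line instead of erroring out
        cmd = ['yum', '-t']
    cmd.append('-y')  # never prompt for confirmation
    return cmd


print(base_package_command())  # e.g. ['dnf', '-y'] or ['yum', '-t', '-y']
```
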
| 397 | diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py |
| 398 | index c656ef1..1369154 100644 |
| 399 | --- a/cloudinit/ec2_utils.py |
| 400 | +++ b/cloudinit/ec2_utils.py |
| 401 | @@ -28,7 +28,7 @@ class MetadataLeafDecoder(object): |
| 402 | |
| 403 | def __call__(self, field, blob): |
| 404 | if not blob: |
| 405 | - return blob |
| 406 | + return '' |
| 407 | try: |
| 408 | blob = util.decode_binary(blob) |
| 409 | except UnicodeDecodeError: |
| 410 | @@ -82,6 +82,9 @@ class MetadataMaterializer(object): |
| 411 | field_name = get_name(field) |
| 412 | if not field or not field_name: |
| 413 | continue |
| 414 | + # Don't materialize credentials |
| 415 | + if field_name == 'security-credentials': |
| 416 | + continue |
| 417 | if has_children(field): |
| 418 | if field_name not in children: |
| 419 | children.append(field_name) |
| 420 | diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py |
| 421 | index 4528fb0..7435d58 100644 |
| 422 | --- a/cloudinit/helpers.py |
| 423 | +++ b/cloudinit/helpers.py |
| 424 | @@ -339,6 +339,8 @@ class Paths(object): |
| 425 | "vendordata_raw": "vendor-data.txt", |
| 426 | "vendordata": "vendor-data.txt.i", |
| 427 | "instance_id": ".instance-id", |
| 428 | + "manual_clean_marker": "manual-clean", |
| 429 | + "warnings": "warnings", |
| 430 | } |
| 431 | # Set when a datasource becomes active |
| 432 | self.datasource = ds |
| 433 | diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py |
| 434 | index b06ffac..5b249f1 100644 |
| 435 | --- a/cloudinit/net/eni.py |
| 436 | +++ b/cloudinit/net/eni.py |
| 437 | @@ -90,8 +90,6 @@ def _iface_add_attrs(iface, index): |
| 438 | |
| 439 | def _iface_start_entry(iface, index, render_hwaddress=False): |
| 440 | fullname = iface['name'] |
| 441 | - if index != 0: |
| 442 | - fullname += ":%s" % index |
| 443 | |
| 444 | control = iface['control'] |
| 445 | if control == "auto": |
| 446 | @@ -113,6 +111,16 @@ def _iface_start_entry(iface, index, render_hwaddress=False): |
| 447 | return lines |
| 448 | |
| 449 | |
| 450 | +def _subnet_is_ipv6(subnet): |
| 451 | + # 'static6' or 'dhcp6' |
| 452 | + if subnet['type'].endswith('6'): |
| 453 | + # This is a request for DHCPv6. |
| 454 | + return True |
| 455 | + elif subnet['type'] == 'static' and ":" in subnet['address']: |
| 456 | + return True |
| 457 | + return False |
| 458 | + |
| 459 | + |
| 460 | def _parse_deb_config_data(ifaces, contents, src_dir, src_path): |
| 461 | """Parses the file contents, placing result into ifaces. |
| 462 | |
| 463 | @@ -354,21 +362,23 @@ class Renderer(renderer.Renderer): |
| 464 | sections = [] |
| 465 | subnets = iface.get('subnets', {}) |
| 466 | if subnets: |
| 467 | - for index, subnet in zip(range(0, len(subnets)), subnets): |
| 468 | + for index, subnet in enumerate(subnets): |
| 469 | iface['index'] = index |
| 470 | iface['mode'] = subnet['type'] |
| 471 | iface['control'] = subnet.get('control', 'auto') |
| 472 | subnet_inet = 'inet' |
| 473 | - if iface['mode'].endswith('6'): |
| 474 | - # This is a request for DHCPv6. |
| 475 | - subnet_inet += '6' |
| 476 | - elif iface['mode'] == 'static' and ":" in subnet['address']: |
| 477 | - # This is a static IPv6 address. |
| 478 | + if _subnet_is_ipv6(subnet): |
| 479 | subnet_inet += '6' |
| 480 | iface['inet'] = subnet_inet |
| 481 | - if iface['mode'].startswith('dhcp'): |
| 482 | + if subnet['type'].startswith('dhcp'): |
| 483 | iface['mode'] = 'dhcp' |
| 484 | |
| 485 | + # do not emit multiple 'auto $IFACE' lines as older (precise) |
| 486 | + # ifupdown complains |
| 487 | + if True in ["auto %s" % (iface['name']) in line |
| 488 | + for line in sections]: |
| 489 | + iface['control'] = 'alias' |
| 490 | + |
| 491 | lines = list( |
| 492 | _iface_start_entry( |
| 493 | iface, index, render_hwaddress=render_hwaddress) + |
| 494 | @@ -378,11 +388,6 @@ class Renderer(renderer.Renderer): |
| 495 | for route in subnet.get('routes', []): |
| 496 | lines.extend(self._render_route(route, indent=" ")) |
| 497 | |
| 498 | - if len(subnets) > 1 and index == 0: |
| 499 | - tmpl = " post-up ifup %s:%s\n" |
| 500 | - for i in range(1, len(subnets)): |
| 501 | - lines.append(tmpl % (iface['name'], i)) |
| 502 | - |
| 503 | sections.append(lines) |
| 504 | else: |
| 505 | # ifenslave docs say to auto the slave devices |
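
The IPv6 test factored out into `_subnet_is_ipv6()` above, reproduced as a standalone sketch with a few example subnets (the `.get('address', '')` default is an addition for safety in this sketch):

```python
def subnet_is_ipv6(subnet):
    if subnet['type'].endswith('6'):
        # 'dhcp6' or 'static6'
        return True
    if subnet['type'] == 'static' and ':' in subnet.get('address', ''):
        return True
    return False


print(subnet_is_ipv6({'type': 'dhcp6'}))                                # True
print(subnet_is_ipv6({'type': 'static', 'address': '2001:db8::1/64'}))  # True
print(subnet_is_ipv6({'type': 'static', 'address': '192.0.2.10/24'}))   # False
```
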
| 506 | diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py |
| 507 | index 9be7407..06de660 100644 |
| 508 | --- a/cloudinit/net/sysconfig.py |
| 509 | +++ b/cloudinit/net/sysconfig.py |
| 510 | @@ -87,7 +87,8 @@ class Route(ConfigMap): |
| 511 | def __init__(self, route_name, base_sysconf_dir): |
| 512 | super(Route, self).__init__() |
| 513 | self.last_idx = 1 |
| 514 | - self.has_set_default = False |
| 515 | + self.has_set_default_ipv4 = False |
| 516 | + self.has_set_default_ipv6 = False |
| 517 | self._route_name = route_name |
| 518 | self._base_sysconf_dir = base_sysconf_dir |
| 519 | |
| 520 | @@ -95,7 +96,8 @@ class Route(ConfigMap): |
| 521 | r = Route(self._route_name, self._base_sysconf_dir) |
| 522 | r._conf = self._conf.copy() |
| 523 | r.last_idx = self.last_idx |
| 524 | - r.has_set_default = self.has_set_default |
| 525 | + r.has_set_default_ipv4 = self.has_set_default_ipv4 |
| 526 | + r.has_set_default_ipv6 = self.has_set_default_ipv6 |
| 527 | return r |
| 528 | |
| 529 | @property |
| 530 | @@ -119,10 +121,10 @@ class NetInterface(ConfigMap): |
| 531 | super(NetInterface, self).__init__() |
| 532 | self.children = [] |
| 533 | self.routes = Route(iface_name, base_sysconf_dir) |
| 534 | - self._kind = kind |
| 535 | + self.kind = kind |
| 536 | + |
| 537 | self._iface_name = iface_name |
| 538 | self._conf['DEVICE'] = iface_name |
| 539 | - self._conf['TYPE'] = self.iface_types[kind] |
| 540 | self._base_sysconf_dir = base_sysconf_dir |
| 541 | |
| 542 | @property |
| 543 | @@ -140,6 +142,8 @@ class NetInterface(ConfigMap): |
| 544 | |
| 545 | @kind.setter |
| 546 | def kind(self, kind): |
| 547 | + if kind not in self.iface_types: |
| 548 | + raise ValueError(kind) |
| 549 | self._kind = kind |
| 550 | self._conf['TYPE'] = self.iface_types[kind] |
| 551 | |
| 552 | @@ -173,7 +177,7 @@ class Renderer(renderer.Renderer): |
| 553 | ('BOOTPROTO', 'none'), |
| 554 | ]) |
| 555 | |
| 556 | - # If these keys exist, then there values will be used to form |
| 557 | + # If these keys exist, then their values will be used to form |
| 558 | # a BONDING_OPTS grouping; otherwise no grouping will be set. |
| 559 | bond_tpl_opts = tuple([ |
| 560 | ('bond_mode', "mode=%s"), |
| 561 | @@ -199,6 +203,7 @@ class Renderer(renderer.Renderer): |
| 562 | def _render_iface_shared(cls, iface, iface_cfg): |
| 563 | for k, v in cls.iface_defaults: |
| 564 | iface_cfg[k] = v |
| 565 | + |
| 566 | for (old_key, new_key) in [('mac_address', 'HWADDR'), ('mtu', 'MTU')]: |
| 567 | old_value = iface.get(old_key) |
| 568 | if old_value is not None: |
| 569 | @@ -227,10 +232,20 @@ class Renderer(renderer.Renderer): |
| 570 | if 'netmask' in subnet: |
| 571 | iface_cfg['NETMASK'] = subnet['netmask'] |
| 572 | for route in subnet.get('routes', []): |
| 573 | + if subnet.get('ipv6'): |
| 574 | + gw_cfg = 'IPV6_DEFAULTGW' |
| 575 | + else: |
| 576 | + gw_cfg = 'GATEWAY' |
| 577 | + |
| 578 | if _is_default_route(route): |
| 579 | - if route_cfg.has_set_default: |
| 580 | - raise ValueError("Duplicate declaration of default" |
| 581 | - " route found for interface '%s'" |
| 582 | + if ( |
| 583 | + (subnet.get('ipv4') and |
| 584 | + route_cfg.has_set_default_ipv4) or |
| 585 | + (subnet.get('ipv6') and |
| 586 | + route_cfg.has_set_default_ipv6) |
| 587 | + ): |
| 588 | + raise ValueError("Duplicate declaration of default " |
| 589 | + "route found for interface '%s'" |
| 590 | % (iface_cfg.name)) |
| 591 | # NOTE(harlowja): ipv6 and ipv4 default gateways |
| 592 | gw_key = 'GATEWAY0' |
| 593 | @@ -242,7 +257,7 @@ class Renderer(renderer.Renderer): |
| 594 | # also provided the default route? |
| 595 | iface_cfg['DEFROUTE'] = True |
| 596 | if 'gateway' in route: |
| 597 | - iface_cfg['GATEWAY'] = route['gateway'] |
| 598 | + iface_cfg[gw_cfg] = route['gateway'] |
| 599 | route_cfg.has_set_default = True |
| 600 | else: |
| 601 | gw_key = 'GATEWAY%s' % route_cfg.last_idx |
| 602 | @@ -282,12 +297,12 @@ class Renderer(renderer.Renderer): |
| 603 | if len(iface_subnets) == 1: |
| 604 | cls._render_subnet(iface_cfg, route_cfg, iface_subnets[0]) |
| 605 | elif len(iface_subnets) > 1: |
| 606 | - for i, iface_subnet in enumerate(iface_subnets, |
| 607 | - start=len(iface.children)): |
| 608 | + for i, isubnet in enumerate(iface_subnets, |
| 609 | + start=len(iface_cfg.children)): |
| 610 | iface_sub_cfg = iface_cfg.copy() |
| 611 | iface_sub_cfg.name = "%s:%s" % (iface_name, i) |
| 612 | - iface.children.append(iface_sub_cfg) |
| 613 | - cls._render_subnet(iface_sub_cfg, route_cfg, iface_subnet) |
| 614 | + iface_cfg.children.append(iface_sub_cfg) |
| 615 | + cls._render_subnet(iface_sub_cfg, route_cfg, isubnet) |
| 616 | |
| 617 | @classmethod |
| 618 | def _render_bond_interfaces(cls, network_state, iface_contents): |
| 619 | diff --git a/cloudinit/settings.py b/cloudinit/settings.py |
| 620 | index b1fdd31..692ff5e 100644 |
| 621 | --- a/cloudinit/settings.py |
| 622 | +++ b/cloudinit/settings.py |
| 623 | @@ -14,6 +14,8 @@ CFG_ENV_NAME = "CLOUD_CFG" |
| 624 | # This is expected to be a yaml formatted file |
| 625 | CLOUD_CONFIG = '/etc/cloud/cloud.cfg' |
| 626 | |
| 627 | +RUN_CLOUD_CONFIG = '/run/cloud-init/cloud.cfg' |
| 628 | + |
| 629 | # What u get if no config is provided |
| 630 | CFG_BUILTIN = { |
| 631 | 'datasource_list': [ |
| 632 | diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py |
| 633 | index 2d00255..9debe94 100644 |
| 634 | --- a/cloudinit/sources/DataSourceAliYun.py |
| 635 | +++ b/cloudinit/sources/DataSourceAliYun.py |
| 636 | @@ -22,6 +22,10 @@ class DataSourceAliYun(EC2.DataSourceEc2): |
| 637 | def get_public_ssh_keys(self): |
| 638 | return parse_public_keys(self.metadata.get('public-keys', {})) |
| 639 | |
| 640 | + @property |
| 641 | + def cloud_platform(self): |
| 642 | + return EC2.Platforms.ALIYUN |
| 643 | + |
| 644 | |
| 645 | def parse_public_keys(public_keys): |
| 646 | keys = [] |
| 647 | diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py |
| 648 | index c657fd0..6f01a13 100644 |
| 649 | --- a/cloudinit/sources/DataSourceEc2.py |
| 650 | +++ b/cloudinit/sources/DataSourceEc2.py |
| 651 | @@ -16,18 +16,31 @@ from cloudinit import log as logging |
| 652 | from cloudinit import sources |
| 653 | from cloudinit import url_helper as uhelp |
| 654 | from cloudinit import util |
| 655 | +from cloudinit import warnings |
| 656 | |
| 657 | LOG = logging.getLogger(__name__) |
| 658 | |
| 659 | # Which version we are requesting of the ec2 metadata apis |
| 660 | DEF_MD_VERSION = '2009-04-04' |
| 661 | |
| 662 | +STRICT_ID_PATH = ("datasource", "Ec2", "strict_id") |
| 663 | +STRICT_ID_DEFAULT = "warn" |
| 664 | + |
| 665 | + |
| 666 | +class Platforms(object): |
| 667 | + ALIYUN = "AliYun" |
| 668 | + AWS = "AWS" |
| 669 | + BRIGHTBOX = "Brightbox" |
| 670 | + SEEDED = "Seeded" |
| 671 | + UNKNOWN = "Unknown" |
| 672 | + |
| 673 | |
| 674 | class DataSourceEc2(sources.DataSource): |
| 675 | # Default metadata urls that will be used if none are provided |
| 676 | # They will be checked for 'resolveability' and some of the |
| 677 | # following may be discarded if they do not resolve |
| 678 | metadata_urls = ["http://169.254.169.254", "http://instance-data.:8773"] |
| 679 | + _cloud_platform = None |
| 680 | |
| 681 | def __init__(self, sys_cfg, distro, paths): |
| 682 | sources.DataSource.__init__(self, sys_cfg, distro, paths) |
| 683 | @@ -41,8 +54,18 @@ class DataSourceEc2(sources.DataSource): |
| 684 | self.userdata_raw = seed_ret['user-data'] |
| 685 | self.metadata = seed_ret['meta-data'] |
| 686 | LOG.debug("Using seeded ec2 data from %s", self.seed_dir) |
| 687 | + self._cloud_platform = Platforms.SEEDED |
| 688 | return True |
| 689 | |
| 690 | + strict_mode, _sleep = read_strict_mode( |
| 691 | + util.get_cfg_by_path(self.sys_cfg, STRICT_ID_PATH, |
| 692 | + STRICT_ID_DEFAULT), ("warn", None)) |
| 693 | + |
| 694 | + LOG.debug("strict_mode: %s, cloud_platform=%s", |
| 695 | + strict_mode, self.cloud_platform) |
| 696 | + if strict_mode == "true" and self.cloud_platform == Platforms.UNKNOWN: |
| 697 | + return False |
| 698 | + |
| 699 | try: |
| 700 | if not self.wait_for_metadata_service(): |
| 701 | return False |
| 702 | @@ -51,8 +74,8 @@ class DataSourceEc2(sources.DataSource): |
| 703 | ec2.get_instance_userdata(self.api_ver, self.metadata_address) |
| 704 | self.metadata = ec2.get_instance_metadata(self.api_ver, |
| 705 | self.metadata_address) |
| 706 | - LOG.debug("Crawl of metadata service took %s seconds", |
| 707 | - int(time.time() - start_time)) |
| 708 | + LOG.debug("Crawl of metadata service took %.3f seconds", |
| 709 | + time.time() - start_time) |
| 710 | return True |
| 711 | except Exception: |
| 712 | util.logexc(LOG, "Failed reading from metadata address %s", |
| 713 | @@ -190,6 +213,126 @@ class DataSourceEc2(sources.DataSource): |
| 714 | return az[:-1] |
| 715 | return None |
| 716 | |
| 717 | + @property |
| 718 | + def cloud_platform(self): |
| 719 | + if self._cloud_platform is None: |
| 720 | + self._cloud_platform = identify_platform() |
| 721 | + return self._cloud_platform |
| 722 | + |
| 723 | + def activate(self, cfg, is_new_instance): |
| 724 | + if not is_new_instance: |
| 725 | + return |
| 726 | + if self.cloud_platform == Platforms.UNKNOWN: |
| 727 | + warn_if_necessary( |
| 728 | + util.get_cfg_by_path(cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT), |
| 729 | + cfg) |
| 730 | + |
| 731 | + |
| 732 | +def read_strict_mode(cfgval, default): |
| 733 | + try: |
| 734 | + return parse_strict_mode(cfgval) |
| 735 | + except ValueError as e: |
| 736 | + LOG.warn(e) |
| 737 | + return default |
| 738 | + |
| 739 | + |
| 740 | +def parse_strict_mode(cfgval): |
| 741 | + # given a mode like: |
| 742 | + # true, false, warn,[sleep] |
| 743 | + # return tuple with string mode (true|false|warn) and sleep. |
| 744 | + if cfgval is True: |
| 745 | + return 'true', None |
| 746 | + if cfgval is False: |
| 747 | + return 'false', None |
| 748 | + |
| 749 | + if not cfgval: |
| 750 | + return 'warn', 0 |
| 751 | + |
| 752 | + mode, _, sleep = cfgval.partition(",") |
| 753 | + if mode not in ('true', 'false', 'warn'): |
| 754 | + raise ValueError( |
| 755 | + "Invalid mode '%s' in strict_id setting '%s': " |
| 756 | + "Expected one of 'true', 'false', 'warn'." % (mode, cfgval)) |
| 757 | + |
| 758 | + if sleep: |
| 759 | + try: |
| 760 | + sleep = int(sleep) |
| 761 | + except ValueError: |
| 762 | + raise ValueError("Invalid sleep '%s' in strict_id setting '%s': " |
| 763 | + "not an integer" % (sleep, cfgval)) |
| 764 | + else: |
| 765 | + sleep = None |
| 766 | + |
| 767 | + return mode, sleep |
| 768 | + |
| 769 | + |
| 770 | +def warn_if_necessary(cfgval, cfg): |
| 771 | + try: |
| 772 | + mode, sleep = parse_strict_mode(cfgval) |
| 773 | + except ValueError as e: |
| 774 | + LOG.warn(e) |
| 775 | + return |
| 776 | + |
| 777 | + if mode == "false": |
| 778 | + return |
| 779 | + |
| 780 | + warnings.show_warning('non_ec2_md', cfg, mode=True, sleep=sleep) |
| 781 | + |
| 782 | + |
| 783 | +def identify_aws(data): |
| 784 | + # data is a dictionary returned by _collect_platform_data. |
| 785 | + if (data['uuid'].startswith('ec2') and |
| 786 | + (data['uuid_source'] == 'hypervisor' or |
| 787 | + data['uuid'] == data['serial'])): |
| 788 | + return Platforms.AWS |
| 789 | + |
| 790 | + return None |
| 791 | + |
| 792 | + |
| 793 | +def identify_brightbox(data): |
| 794 | + if data['serial'].endswith('brightbox.com'): |
| 795 | + return Platforms.BRIGHTBOX |
| 796 | + |
| 797 | + |
| 798 | +def identify_platform(): |
| 799 | + # identify the platform and return an entry in Platforms. |
| 800 | + data = _collect_platform_data() |
| 801 | + checks = (identify_aws, identify_brightbox, lambda x: Platforms.UNKNOWN) |
| 802 | + for checker in checks: |
| 803 | + try: |
| 804 | + result = checker(data) |
| 805 | + if result: |
| 806 | + return result |
| 807 | + except Exception as e: |
| 808 | + LOG.warn("calling %s with %s raised exception: %s", |
| 809 | + checker, data, e) |
| 810 | + |
| 811 | + |
| 812 | +def _collect_platform_data(): |
| 813 | + # returns a dictionary with all lower case values: |
| 814 | + # uuid: system-uuid from dmi or /sys/hypervisor |
| 815 | + # uuid_source: 'hypervisor' (/sys/hypervisor/uuid) or 'dmi' |
| 816 | + # serial: dmi 'system-serial-number' (/sys/.../product_serial) |
| 817 | + data = {} |
| 818 | + try: |
| 819 | + uuid = util.load_file("/sys/hypervisor/uuid").strip() |
| 820 | + data['uuid_source'] = 'hypervisor' |
| 821 | + except Exception: |
| 822 | + uuid = util.read_dmi_data('system-uuid') |
| 823 | + data['uuid_source'] = 'dmi' |
| 824 | + |
| 825 | + if uuid is None: |
| 826 | + uuid = '' |
| 827 | + data['uuid'] = uuid.lower() |
| 828 | + |
| 829 | + serial = util.read_dmi_data('system-serial-number') |
| 830 | + if serial is None: |
| 831 | + serial = '' |
| 832 | + |
| 833 | + data['serial'] = serial.lower() |
| 834 | + |
| 835 | + return data |
| 836 | + |
| 837 | |
| 838 | # Used to match classes to dependencies |
| 839 | datasources = [ |
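
The `strict_id` handling added to `DataSourceEc2` parses values such as `true`, `false`, `warn`, or `warn,<sleep>`. A condensed, standalone version of the diff's `parse_strict_mode`, with example inputs:

```python
def parse_strict_mode(cfgval):
    # accepted forms: True, False, 'true', 'false', 'warn', 'warn,<sleep>'
    if cfgval is True:
        return 'true', None
    if cfgval is False:
        return 'false', None
    if not cfgval:
        return 'warn', 0
    mode, _, sleep = cfgval.partition(',')
    if mode not in ('true', 'false', 'warn'):
        raise ValueError("Invalid mode '%s' in strict_id setting '%s'"
                         % (mode, cfgval))
    return mode, int(sleep) if sleep else None


print(parse_strict_mode("warn,30"))  # ('warn', 30)
print(parse_strict_mode(True))       # ('true', None)
```
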
| 840 | diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py |
| 841 | index 78928c7..d70784a 100644 |
| 842 | --- a/cloudinit/sources/DataSourceOVF.py |
| 843 | +++ b/cloudinit/sources/DataSourceOVF.py |
| 844 | @@ -48,6 +48,7 @@ class DataSourceOVF(sources.DataSource): |
| 845 | self.environment = None |
| 846 | self.cfg = {} |
| 847 | self.supported_seed_starts = ("/", "file://") |
| 848 | + self.vmware_customization_supported = True |
| 849 | |
| 850 | def __str__(self): |
| 851 | root = sources.DataSource.__str__(self) |
| 852 | @@ -78,7 +79,10 @@ class DataSourceOVF(sources.DataSource): |
| 853 | found.append(seed) |
| 854 | elif system_type and 'vmware' in system_type.lower(): |
| 855 | LOG.debug("VMware Virtualization Platform found") |
| 856 | - if not util.get_cfg_option_bool( |
| 857 | + if not self.vmware_customization_supported: |
| 858 | + LOG.debug("Skipping the check for " |
| 859 | + "VMware Customization support") |
| 860 | + elif not util.get_cfg_option_bool( |
| 861 | self.sys_cfg, "disable_vmware_customization", True): |
| 862 | deployPkgPluginPath = search_file("/usr/lib/vmware-tools", |
| 863 | "libdeployPkgPlugin.so") |
| 864 | @@ -90,17 +94,18 @@ class DataSourceOVF(sources.DataSource): |
| 865 | # copies the customization specification file to |
| 866 | # /var/run/vmware-imc directory. cloud-init code needs |
| 867 | # to search for the file in that directory. |
| 868 | + max_wait = get_max_wait_from_cfg(self.ds_cfg) |
| 869 | vmwareImcConfigFilePath = util.log_time( |
| 870 | logfunc=LOG.debug, |
| 871 | msg="waiting for configuration file", |
| 872 | func=wait_for_imc_cfg_file, |
| 873 | - args=("/var/run/vmware-imc", "cust.cfg")) |
| 874 | + args=("/var/run/vmware-imc", "cust.cfg", max_wait)) |
| 875 | |
| 876 | if vmwareImcConfigFilePath: |
| 877 | - LOG.debug("Found VMware DeployPkg Config File at %s" % |
| 878 | + LOG.debug("Found VMware Customization Config File at %s", |
| 879 | vmwareImcConfigFilePath) |
| 880 | else: |
| 881 | - LOG.debug("Did not find VMware DeployPkg Config File Path") |
| 882 | + LOG.debug("Did not find VMware Customization Config File") |
| 883 | else: |
| 884 | LOG.debug("Customization for VMware platform is disabled.") |
| 885 | |
| 886 | @@ -206,6 +211,29 @@ class DataSourceOVFNet(DataSourceOVF): |
| 887 | DataSourceOVF.__init__(self, sys_cfg, distro, paths) |
| 888 | self.seed_dir = os.path.join(paths.seed_dir, 'ovf-net') |
| 889 | self.supported_seed_starts = ("http://", "https://", "ftp://") |
| 890 | + self.vmware_customization_supported = False |
| 891 | + |
| 892 | + |
| 893 | +def get_max_wait_from_cfg(cfg): |
| 894 | + default_max_wait = 90 |
| 895 | + max_wait_cfg_option = 'vmware_cust_file_max_wait' |
| 896 | + max_wait = default_max_wait |
| 897 | + |
| 898 | + if not cfg: |
| 899 | + return max_wait |
| 900 | + |
| 901 | + try: |
| 902 | + max_wait = int(cfg.get(max_wait_cfg_option, default_max_wait)) |
| 903 | + except ValueError: |
| 904 | + LOG.warn("Failed to get '%s', using %s", |
| 905 | + max_wait_cfg_option, default_max_wait) |
| 906 | + |
| 907 | + if max_wait <= 0: |
| 908 | + LOG.warn("Invalid value '%s' for '%s', using '%s' instead", |
| 909 | + max_wait, max_wait_cfg_option, default_max_wait) |
| 910 | + max_wait = default_max_wait |
| 911 | + |
| 912 | + return max_wait |
| 913 | |
| 914 | |
| 915 | def wait_for_imc_cfg_file(dirpath, filename, maxwait=180, naplen=5): |
| 916 | @@ -215,6 +243,7 @@ def wait_for_imc_cfg_file(dirpath, filename, maxwait=180, naplen=5): |
| 917 | fileFullPath = search_file(dirpath, filename) |
| 918 | if fileFullPath: |
| 919 | return fileFullPath |
| 920 | + LOG.debug("Waiting for VMware Customization Config File") |
| 921 | time.sleep(naplen) |
| 922 | waited += naplen |
| 923 | return None |
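
The OVF change bounds the wait for the VMware customization file by the `vmware_cust_file_max_wait` datasource option (default 90 seconds), falling back to the default on bad or non-positive values. A condensed sketch of `get_max_wait_from_cfg` from the diff:

```python
DEFAULT_MAX_WAIT = 90  # seconds


def get_max_wait_from_cfg(cfg, option='vmware_cust_file_max_wait'):
    if not cfg:
        return DEFAULT_MAX_WAIT
    try:
        max_wait = int(cfg.get(option, DEFAULT_MAX_WAIT))
    except ValueError:
        return DEFAULT_MAX_WAIT
    return max_wait if max_wait > 0 else DEFAULT_MAX_WAIT


print(get_max_wait_from_cfg({'vmware_cust_file_max_wait': '30'}))  # 30
print(get_max_wait_from_cfg({'vmware_cust_file_max_wait': '-5'}))  # 90 (default)
print(get_max_wait_from_cfg({}))                                   # 90 (default)
```
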
| 924 | diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py |
| 925 | index 2a58f1c..e1ea21f 100644 |
| 926 | --- a/cloudinit/sources/DataSourceOpenStack.py |
| 927 | +++ b/cloudinit/sources/DataSourceOpenStack.py |
| 928 | @@ -45,6 +45,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): |
| 929 | # max_wait < 0 indicates do not wait |
| 930 | max_wait = -1 |
| 931 | timeout = 10 |
| 932 | + retries = 5 |
| 933 | |
| 934 | try: |
| 935 | max_wait = int(self.ds_cfg.get("max_wait", max_wait)) |
| 936 | @@ -55,7 +56,13 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): |
| 937 | timeout = max(0, int(self.ds_cfg.get("timeout", timeout))) |
| 938 | except Exception: |
| 939 | util.logexc(LOG, "Failed to get timeout, using %s", timeout) |
| 940 | - return (max_wait, timeout) |
| 941 | + |
| 942 | + try: |
| 943 | + retries = int(self.ds_cfg.get("retries", retries)) |
| 944 | + except Exception: |
| 945 | + util.logexc(LOG, "Failed to get max wait. using %s", retries) |
| 946 | + |
| 947 | + return (max_wait, timeout, retries) |
| 948 | |
| 949 | def wait_for_metadata_service(self): |
| 950 | urls = self.ds_cfg.get("metadata_urls", [DEF_MD_URL]) |
| 951 | @@ -76,7 +83,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): |
| 952 | md_urls.append(md_url) |
| 953 | url2base[md_url] = url |
| 954 | |
| 955 | - (max_wait, timeout) = self._get_url_settings() |
| 956 | + (max_wait, timeout, retries) = self._get_url_settings() |
| 957 | start_time = time.time() |
| 958 | avail_url = url_helper.wait_for_url(urls=md_urls, max_wait=max_wait, |
| 959 | timeout=timeout) |
| 960 | @@ -89,13 +96,15 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): |
| 961 | self.metadata_address = url2base.get(avail_url) |
| 962 | return bool(avail_url) |
| 963 | |
| 964 | - def get_data(self, retries=5, timeout=5): |
| 965 | + def get_data(self): |
| 966 | try: |
| 967 | if not self.wait_for_metadata_service(): |
| 968 | return False |
| 969 | except IOError: |
| 970 | return False |
| 971 | |
| 972 | + (max_wait, timeout, retries) = self._get_url_settings() |
| 973 | + |
| 974 | try: |
| 975 | results = util.log_time(LOG.debug, |
| 976 | 'Crawl of openstack metadata service', |
| 977 | diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py |
| 978 | index d5a7c34..67ac21d 100644 |
| 979 | --- a/cloudinit/sources/helpers/vmware/imc/config_nic.py |
| 980 | +++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py |
| 981 | @@ -101,7 +101,11 @@ class NicConfigurator(object): |
| 982 | return lines |
| 983 | |
| 984 | # Static Ipv4 |
| 985 | - v4 = nic.staticIpv4 |
| 986 | + addrs = nic.staticIpv4 |
| 987 | + if not addrs: |
| 988 | + return lines |
| 989 | + |
| 990 | + v4 = addrs[0] |
| 991 | if v4.ip: |
| 992 | lines.append(' address %s' % v4.ip) |
| 993 | if v4.netmask: |
| 994 | @@ -197,22 +201,6 @@ class NicConfigurator(object): |
| 995 | util.subp(["pkill", "dhclient"], rcs=[0, 1]) |
| 996 | util.subp(["rm", "-f", "/var/lib/dhcp/*"]) |
| 997 | |
| 998 | - def if_down_up(self): |
| 999 | - names = [] |
| 1000 | - for nic in self.nics: |
| 1001 | - name = self.mac2Name.get(nic.mac.lower()) |
| 1002 | - names.append(name) |
| 1003 | - |
| 1004 | - for name in names: |
| 1005 | - logger.info('Bring down interface %s' % name) |
| 1006 | - util.subp(["ifdown", "%s" % name]) |
| 1007 | - |
| 1008 | - self.clear_dhcp() |
| 1009 | - |
| 1010 | - for name in names: |
| 1011 | - logger.info('Bring up interface %s' % name) |
| 1012 | - util.subp(["ifup", "%s" % name]) |
| 1013 | - |
| 1014 | def configure(self): |
| 1015 | """ |
| 1016 | Configure the /etc/network/intefaces |
| 1017 | @@ -232,6 +220,6 @@ class NicConfigurator(object): |
| 1018 | for line in lines: |
| 1019 | fp.write('%s\n' % line) |
| 1020 | |
| 1021 | - self.if_down_up() |
| 1022 | + self.clear_dhcp() |
| 1023 | |
| 1024 | # vi: ts=4 expandtab |
| 1025 | diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py |
| 1026 | index be8a49e..b95b956 100644 |
| 1027 | --- a/cloudinit/ssh_util.py |
| 1028 | +++ b/cloudinit/ssh_util.py |
| 1029 | @@ -22,8 +22,11 @@ DEF_SSHD_CFG = "/etc/ssh/sshd_config" |
| 1030 | VALID_KEY_TYPES = ( |
| 1031 | "dsa", |
| 1032 | "ecdsa", |
| 1033 | + "ecdsa-sha2-nistp256", |
| 1034 | "ecdsa-sha2-nistp256-cert-v01@openssh.com", |
| 1035 | + "ecdsa-sha2-nistp384", |
| 1036 | "ecdsa-sha2-nistp384-cert-v01@openssh.com", |
| 1037 | + "ecdsa-sha2-nistp521", |
| 1038 | "ecdsa-sha2-nistp521-cert-v01@openssh.com", |
| 1039 | "ed25519", |
| 1040 | "rsa", |
| 1041 | diff --git a/cloudinit/stages.py b/cloudinit/stages.py |
| 1042 | index b0552dd..5bed903 100644 |
| 1043 | --- a/cloudinit/stages.py |
| 1044 | +++ b/cloudinit/stages.py |
| 1045 | @@ -11,7 +11,8 @@ import sys |
| 1046 | import six |
| 1047 | from six.moves import cPickle as pickle |
| 1048 | |
| 1049 | -from cloudinit.settings import (PER_INSTANCE, FREQUENCIES, CLOUD_CONFIG) |
| 1050 | +from cloudinit.settings import ( |
| 1051 | + FREQUENCIES, CLOUD_CONFIG, PER_INSTANCE, RUN_CLOUD_CONFIG) |
| 1052 | |
| 1053 | from cloudinit import handlers |
| 1054 | |
| 1055 | @@ -188,6 +189,12 @@ class Init(object): |
| 1056 | def _write_to_cache(self): |
| 1057 | if self.datasource is NULL_DATA_SOURCE: |
| 1058 | return False |
| 1059 | + if util.get_cfg_option_bool(self.cfg, 'manual_cache_clean', False): |
| 1060 | + # The empty file in instance/ dir indicates manual cleaning, |
| 1061 | + # and can be read by ds-identify. |
| 1062 | + util.write_file( |
| 1063 | + self.paths.get_ipath_cur("manual_clean_marker"), |
| 1064 | + omode="w", content="") |
| 1065 | return _pkl_store(self.datasource, self.paths.get_ipath_cur("obj_pkl")) |
| 1066 | |
| 1067 | def _get_datasources(self): |
| 1068 | @@ -828,6 +835,10 @@ class Modules(object): |
| 1069 | return self._run_modules(mostly_mods) |
| 1070 | |
| 1071 | |
| 1072 | +def read_runtime_config(): |
| 1073 | + return util.read_conf(RUN_CLOUD_CONFIG) |
| 1074 | + |
| 1075 | + |
| 1076 | def fetch_base_config(): |
| 1077 | return util.mergemanydict( |
| 1078 | [ |
| 1079 | @@ -835,6 +846,8 @@ def fetch_base_config(): |
| 1080 | util.get_builtin_cfg(), |
| 1081 | # Anything in your conf.d or 'default' cloud.cfg location. |
| 1082 | util.read_conf_with_confd(CLOUD_CONFIG), |
| 1083 | + # runtime config |
| 1084 | + read_runtime_config(), |
| 1085 | # Kernel/cmdline parameters override system config |
| 1086 | util.read_conf_from_cmdline(), |
| 1087 | ], reverse=True) |
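
The `stages.py` change merges a new runtime config, `/run/cloud-init/cloud.cfg`, into the base configuration. A toy illustration of the resulting precedence (cloud-init's real merging goes through `util.mergemanydict` and is more nuanced): kernel command line config overrides the runtime config, which overrides the `/etc/cloud` config and the built-ins.

```python
builtin_cfg = {'datasource_list': ['Ec2'], 'x': 'builtin'}
etc_cfg = {'x': 'etc'}                           # /etc/cloud/cloud.cfg(.d)
runtime_cfg = {'x': 'runtime', 'y': 'runtime'}   # /run/cloud-init/cloud.cfg (new)
cmdline_cfg = {'y': 'cmdline'}                   # config from the kernel cmdline

merged = {}
# lowest to highest priority; a later source overrides an earlier one
for cfg in (builtin_cfg, etc_cfg, runtime_cfg, cmdline_cfg):
    merged.update(cfg)

print(merged)  # {'datasource_list': ['Ec2'], 'x': 'runtime', 'y': 'cmdline'}
```
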
| 1088 | diff --git a/cloudinit/util.py b/cloudinit/util.py |
| 1089 | index 5725129..7196a7c 100644 |
| 1090 | --- a/cloudinit/util.py |
| 1091 | +++ b/cloudinit/util.py |
| 1092 | @@ -1089,31 +1089,6 @@ def get_fqdn_from_hosts(hostname, filename="/etc/hosts"): |
| 1093 | return fqdn |
| 1094 | |
| 1095 | |
| 1096 | -def get_cmdline_url(names=('cloud-config-url', 'url'), |
| 1097 | - starts=b"#cloud-config", cmdline=None): |
| 1098 | - if cmdline is None: |
| 1099 | - cmdline = get_cmdline() |
| 1100 | - |
| 1101 | - data = keyval_str_to_dict(cmdline) |
| 1102 | - url = None |
| 1103 | - key = None |
| 1104 | - for key in names: |
| 1105 | - if key in data: |
| 1106 | - url = data[key] |
| 1107 | - break |
| 1108 | - |
| 1109 | - if not url: |
| 1110 | - return (None, None, None) |
| 1111 | - |
| 1112 | - resp = read_file_or_url(url) |
| 1113 | - # allow callers to pass starts as text when comparing to bytes contents |
| 1114 | - starts = encode_text(starts) |
| 1115 | - if resp.ok() and resp.contents.startswith(starts): |
| 1116 | - return (key, url, resp.contents) |
| 1117 | - |
| 1118 | - return (key, url, None) |
| 1119 | - |
| 1120 | - |
| 1121 | def is_resolvable(name): |
| 1122 | """determine if a url is resolvable, return a boolean |
| 1123 | This also attempts to be resilent against dns redirection. |
| 1124 | @@ -1475,25 +1450,6 @@ def ensure_dirs(dirlist, mode=0o755): |
| 1125 | ensure_dir(d, mode) |
| 1126 | |
| 1127 | |
| 1128 | -def read_write_cmdline_url(target_fn): |
| 1129 | - if not os.path.exists(target_fn): |
| 1130 | - try: |
| 1131 | - (key, url, content) = get_cmdline_url() |
| 1132 | - except Exception: |
| 1133 | - logexc(LOG, "Failed fetching command line url") |
| 1134 | - return |
| 1135 | - try: |
| 1136 | - if key and content: |
| 1137 | - write_file(target_fn, content, mode=0o600) |
| 1138 | - LOG.debug(("Wrote to %s with contents of command line" |
| 1139 | - " url %s (len=%s)"), target_fn, url, len(content)) |
| 1140 | - elif key and not content: |
| 1141 | - LOG.debug(("Command line key %s with url" |
| 1142 | - " %s had no contents"), key, url) |
| 1143 | - except Exception: |
| 1144 | - logexc(LOG, "Failed writing url content to %s", target_fn) |
| 1145 | - |
| 1146 | - |
| 1147 | def yaml_dumps(obj, explicit_start=True, explicit_end=True): |
| 1148 | return yaml.safe_dump(obj, |
| 1149 | line_break="\n", |
| 1150 | diff --git a/cloudinit/warnings.py b/cloudinit/warnings.py |
| 1151 | new file mode 100644 |
| 1152 | index 0000000..3206d4e |
| 1153 | --- /dev/null |
| 1154 | +++ b/cloudinit/warnings.py |
| 1155 | @@ -0,0 +1,139 @@ |
| 1156 | +# This file is part of cloud-init. See LICENSE file for license information. |
| 1157 | + |
| 1158 | +from cloudinit import helpers |
| 1159 | +from cloudinit import log as logging |
| 1160 | +from cloudinit import util |
| 1161 | + |
| 1162 | +import os |
| 1163 | +import time |
| 1164 | + |
| 1165 | +LOG = logging.getLogger() |
| 1166 | + |
| 1167 | +WARNINGS = { |
| 1168 | + 'non_ec2_md': """ |
| 1169 | +This system is using the EC2 Metadata Service, but does not appear to |
| 1170 | +be running on Amazon EC2 or one of cloud-init's known platforms that |
| 1171 | +provide a EC2 Metadata service. In the future, cloud-init may stop |
| 1172 | +reading metadata from the EC2 Metadata Service unless the platform can |
| 1173 | +be identified. |
| 1174 | + |
| 1175 | +If you are seeing this message, please file a bug against |
| 1176 | +cloud-init at |
| 1177 | + https://bugs.launchpad.net/cloud-init/+filebug?field.tags=dsid |
| 1178 | +Make sure to include the cloud provider your instance is |
| 1179 | +running on. |
| 1180 | + |
| 1181 | +For more information see |
| 1182 | + https://bugs.launchpad.net/bugs/1660385 |
| 1183 | + |
| 1184 | +After you have filed a bug, you can disable this warning by |
| 1185 | +launching your instance with the cloud-config below, or |
| 1186 | +putting that content into |
| 1187 | + /etc/cloud/cloud.cfg.d/99-ec2-datasource.cfg |
| 1188 | + |
| 1189 | +#cloud-config |
| 1190 | +datasource: |
| 1191 | + Ec2: |
| 1192 | + strict_id: false""", |
| 1193 | + 'dsid_missing_source': """ |
| 1194 | +A new feature in cloud-init identified possible datasources for |
| 1195 | +this system as: |
| 1196 | + {dslist} |
| 1197 | +However, the datasource used was: {source} |
| 1198 | + |
| 1199 | +In the future, cloud-init will only attempt to use datasources that |
| 1200 | +are identified or specifically configured. |
| 1201 | +For more information see |
| 1202 | + https://bugs.launchpad.net/bugs/1669675 |
| 1203 | + |
| 1204 | +If you are seeing this message, please file a bug against |
| 1205 | +cloud-init at |
| 1206 | + https://bugs.launchpad.net/cloud-init/+filebug?field.tags=dsid |
| 1207 | +Make sure to include the cloud provider your instance is |
| 1208 | +running on. |
| 1209 | + |
| 1210 | +After you have filed a bug, you can disable this warning by launching |
| 1211 | +your instance with the cloud-config below, or putting that content |
| 1212 | +into /etc/cloud/cloud.cfg.d/99-warnings.cfg |
| 1213 | + |
| 1214 | +#cloud-config |
| 1215 | +warnings: |
| 1216 | + dsid_missing_source: off""", |
| 1217 | +} |
| 1218 | + |
| 1219 | + |
| 1220 | +def _get_warn_dir(cfg): |
| 1221 | + paths = helpers.Paths( |
| 1222 | + path_cfgs=cfg.get('system_info', {}).get('paths', {})) |
| 1223 | + return paths.get_ipath_cur('warnings') |
| 1224 | + |
| 1225 | + |
| 1226 | +def _load_warn_cfg(cfg, name, mode=True, sleep=None): |
| 1227 | + # parse cfg['warnings']['name'] returning boolean, sleep |
| 1228 | + # expected value is form of: |
| 1229 | + # (on|off|true|false|sleep)[,sleeptime] |
| 1230 | + # boolean True == on, False == off |
| 1231 | + default = (mode, sleep) |
| 1232 | + if not cfg or not isinstance(cfg, dict): |
| 1233 | + return default |
| 1234 | + |
| 1235 | + ncfg = util.get_cfg_by_path(cfg, ('warnings', name)) |
| 1236 | + if ncfg is None: |
| 1237 | + return default |
| 1238 | + |
| 1239 | + if ncfg in ("on", "true", True): |
| 1240 | + return True, None |
| 1241 | + |
| 1242 | + if ncfg in ("off", "false", False): |
| 1243 | + return False, None |
| 1244 | + |
| 1245 | + mode, _, csleep = ncfg.partition(",") |
| 1246 | + if mode != "sleep": |
| 1247 | + return default |
| 1248 | + |
| 1249 | + if csleep: |
| 1250 | + try: |
| 1251 | + sleep = int(csleep) |
| 1252 | + except ValueError: |
| 1253 | + return default |
| 1254 | + |
| 1255 | + return True, sleep |
| 1256 | + |
| 1257 | + |
| 1258 | +def show_warning(name, cfg=None, sleep=None, mode=True, **kwargs): |
| 1259 | + # kwargs are used for .format of the message. |
| 1260 | + # sleep and mode are default values used if |
| 1261 | + # cfg['warnings']['name'] is not present. |
| 1262 | + if cfg is None: |
| 1263 | + cfg = {} |
| 1264 | + |
| 1265 | + mode, sleep = _load_warn_cfg(cfg, name, mode=mode, sleep=sleep) |
| 1266 | + if not mode: |
| 1267 | + return |
| 1268 | + |
| 1269 | + msg = WARNINGS[name].format(**kwargs) |
| 1270 | + msgwidth = 70 |
| 1271 | + linewidth = msgwidth + 4 |
| 1272 | + |
| 1273 | + fmt = "# %%-%ds #" % msgwidth |
| 1274 | + topline = "*" * linewidth + "\n" |
| 1275 | + fmtlines = [] |
| 1276 | + for line in msg.strip("\n").splitlines(): |
| 1277 | + fmtlines.append(fmt % line) |
| 1278 | + |
| 1279 | + closeline = topline |
| 1280 | + if sleep: |
| 1281 | + sleepmsg = " [sleeping for %d seconds] " % sleep |
| 1282 | + closeline = sleepmsg.center(linewidth, "*") + "\n" |
| 1283 | + |
| 1284 | + util.write_file( |
| 1285 | + os.path.join(_get_warn_dir(cfg), name), |
| 1286 | + topline + "\n".join(fmtlines) + "\n" + topline) |
| 1287 | + |
| 1288 | + LOG.warn(topline + "\n".join(fmtlines) + "\n" + closeline) |
| 1289 | + |
| 1290 | + if sleep: |
| 1291 | + LOG.debug("sleeping %d seconds for warning '%s'" % (sleep, name)) |
| 1292 | + time.sleep(sleep) |
| 1293 | + |
| 1294 | +# vi: ts=4 expandtab |
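
The new `cloudinit/warnings.py` frames each warning into a boxed, 70-column message before writing it to the instance `warnings` path and logging it. A standalone sketch of that formatting (the `format_warning` helper is hypothetical; the module does this inline in `show_warning`):

```python
def format_warning(msg, msgwidth=70, sleep=None):
    linewidth = msgwidth + 4
    line_fmt = "# %%-%ds #" % msgwidth          # -> "# %-70s #"
    topline = "*" * linewidth + "\n"
    body = "\n".join(line_fmt % line for line in msg.strip("\n").splitlines())
    closeline = topline
    if sleep:
        # the closing border advertises the pause the warning will cause
        closeline = (" [sleeping for %d seconds] " % sleep).center(
            linewidth, "*") + "\n"
    return topline + body + "\n" + closeline


print(format_warning(
    "This platform could not be identified.\nSee the bug report link above.",
    sleep=30))
```
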
| 1295 | diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt |
| 1296 | index c5f84b1..c03f102 100644 |
| 1297 | --- a/doc/examples/cloud-config.txt |
| 1298 | +++ b/doc/examples/cloud-config.txt |
| 1299 | @@ -200,7 +200,7 @@ ssh_import_id: [smoser] |
| 1300 | # |
| 1301 | # Default: none |
| 1302 | # |
| 1303 | -debconf_selections: | # Need to perserve newlines |
| 1304 | +debconf_selections: | # Need to preserve newlines |
| 1305 | # Force debconf priority to critical. |
| 1306 | debconf debconf/priority select critical |
| 1307 | |
| 1308 | diff --git a/doc/rtd/topics/datasources/altcloud.rst b/doc/rtd/topics/datasources/altcloud.rst |
| 1309 | index 8646e77..202b0a4 100644 |
| 1310 | --- a/doc/rtd/topics/datasources/altcloud.rst |
| 1311 | +++ b/doc/rtd/topics/datasources/altcloud.rst |
| 1312 | @@ -66,7 +66,7 @@ NOTE: The file name on the ISO must be: ``user-data.txt`` |
| 1313 | |
| 1314 | .. sourcecode:: sh |
| 1315 | |
| 1316 | - % cp simple_scirpt.bash my-iso/user-data.txt |
| 1317 | + % cp simple_script.bash my-iso/user-data.txt |
| 1318 | % genisoimage -o user-data.iso -r my-iso |
| 1319 | |
| 1320 | Verify the ISO |
| 1321 | @@ -75,7 +75,7 @@ Verify the ISO |
| 1322 | .. sourcecode:: sh |
| 1323 | |
| 1324 | % sudo mkdir /media/vsphere_iso |
| 1325 | - % sudo mount -o loop JoeV_CI_02.iso /media/vsphere_iso |
| 1326 | + % sudo mount -o loop user-data.iso /media/vsphere_iso |
| 1327 | % cat /media/vsphere_iso/user-data.txt |
| 1328 | % sudo umount /media/vsphere_iso |
| 1329 | |
| 1330 | diff --git a/doc/rtd/topics/datasources/openstack.rst b/doc/rtd/topics/datasources/openstack.rst |
| 1331 | index ea47ea8..164b0e0 100644 |
| 1332 | --- a/doc/rtd/topics/datasources/openstack.rst |
| 1333 | +++ b/doc/rtd/topics/datasources/openstack.rst |
| 1334 | @@ -1,7 +1,41 @@ |
| 1335 | OpenStack |
| 1336 | ========= |
| 1337 | |
| 1338 | -*TODO* |
| 1339 | +This datasource supports reading data from the |
| 1340 | +`OpenStack Metadata Service |
| 1341 | +<http://docs.openstack.org/admin-guide/compute-networking-nova.html#metadata-service>`_. |
| 1342 | + |
| 1343 | +Configuration |
| 1344 | +------------- |
| 1345 | +The following configuration can be set for the datasource in system |
| 1346 | +configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`). |
| 1347 | + |
| 1348 | +The settings that may be configured are: |
| 1349 | + |
| 1350 | + * **metadata_urls**: This list of urls will be searched for an OpenStack |
| 1351 | + metadata service. The first entry that successfully returns a 200 response |
| 1352 | + for <url>/openstack will be selected. (default: ['http://169.254.169.254']). |
| 1353 | + * **max_wait**: the maximum amount of clock time in seconds that should be |
| 1354 | + spent searching metadata_urls. A value less than zero will result in only |
| 1355 | + one request being made, to the first in the list. (default: -1) |
| 1356 | + * **timeout**: the timeout value provided to urlopen for each individual http |
| 1357 | + request. This is used both when selecting a metadata_url and when crawling |
| 1358 | + the metadata service. (default: 10) |
| 1359 | + * **retries**: The number of retries that should be done for an http request. |
| 1360 | + This value is used only after metadata_url is selected. (default: 5) |
| 1361 | + |
| 1362 | +An example configuration with the default values is provided below: |
| 1363 | + |
| 1364 | +.. sourcecode:: yaml |
| 1365 | + |
| 1366 | + #cloud-config |
| 1367 | + datasource: |
| 1368 | + OpenStack: |
| 1369 | + metadata_urls: ["http://169.254.169.254"] |
| 1370 | + max_wait: -1 |
| 1371 | + timeout: 10 |
| 1372 | + retries: 5 |
| 1373 | + |
| 1374 | |
| 1375 | Vendor Data |
| 1376 | ----------- |
| 1377 | diff --git a/doc/rtd/topics/format.rst b/doc/rtd/topics/format.rst |
| 1378 | index ed87d3e..436eb00 100644 |
| 1379 | --- a/doc/rtd/topics/format.rst |
| 1380 | +++ b/doc/rtd/topics/format.rst |
| 1381 | @@ -127,11 +127,11 @@ Begins with: ``#cloud-boothook`` or ``Content-Type: text/cloud-boothook`` when u |
| 1382 | Part Handler |
| 1383 | ============ |
| 1384 | |
| 1385 | -This is a ``part-handler``. It will be written to a file in ``/var/lib/cloud/data`` based on its filename (which is generated). |
| 1386 | -This must be python code that contains a ``list_types`` method and a ``handle_type`` method. |
| 1387 | -Once the section is read the ``list_types`` method will be called. It must return a list of mime-types that this part-handler handles. |
| 1388 | +This is a ``part-handler``: It contains custom code for either supporting new mime-types in multi-part user data, or overriding the existing handlers for supported mime-types. It will be written to a file in ``/var/lib/cloud/data`` based on its filename (which is generated). |
| 1389 | +This must be python code that contains a ``list_types`` function and a ``handle_part`` function. |
| 1390 | +Once the section is read, the ``list_types`` function will be called. It must return a list of mime-types that this part-handler handles. Because mime parts are processed in order, a ``part-handler`` part must precede any parts with mime-types it is expected to handle in the same user data. |
| 1391 | |
| 1392 | -The ``handle_type`` method must be like: |
| 1393 | +The ``handle_part`` function must be defined like: |
| 1394 | |
| 1395 | .. code-block:: python |
| 1396 | |
| 1397 | @@ -141,8 +141,9 @@ The ``handle_type`` method must be like: |
| 1398 | # filename = the filename of the part (or a generated filename if none is present in mime data) |
| 1399 | # payload = the parts' content |
| 1400 | |
| 1401 | -Cloud-init will then call the ``handle_type`` method once at begin, once per part received, and once at end. |
| 1402 | -The ``begin`` and ``end`` calls are to allow the part handler to do initialization or teardown. |
| 1403 | +Cloud-init will then call the ``handle_part`` function once before it handles any parts, once per part received, and once after all parts have been handled. |
| 1404 | +The ``'__begin__'`` and ``'__end__'`` sentinels allow the part handler to do initialization or teardown before or after |
| 1405 | +receiving any parts. |
| 1406 | |
| 1407 | Begins with: ``#part-handler`` or ``Content-Type: text/part-handler`` when using a MIME archive. |
| 1408 | |
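To make the interface documented in the format.rst hunk above concrete, a minimal part-handler could look like the sketch below; the mime-type ``text/x-example`` and the output path are illustrative only, not defined by cloud-init:

    #part-handler
    # Minimal example of the documented part-handler interface.
    # The mime-type "text/x-example" and the output path are illustrative only.

    def list_types():
        # Return the mime-types this handler wants to receive.
        return ["text/x-example"]


    def handle_part(data, ctype, filename, payload):
        if ctype == "__begin__":      # called once before any parts
            return
        if ctype == "__end__":        # called once after all parts
            return
        # Called once per part whose mime-type matched list_types().
        with open("/tmp/example-part-output", "a") as fp:
            fp.write("%s: %s\n" % (filename, payload))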
| 1409 | diff --git a/packages/debian/rules.in b/packages/debian/rules.in |
| 1410 | index 9b00435..053b764 100755 |
| 1411 | --- a/packages/debian/rules.in |
| 1412 | +++ b/packages/debian/rules.in |
| 1413 | @@ -11,6 +11,8 @@ override_dh_install: |
| 1414 | dh_install |
| 1415 | install -d debian/cloud-init/etc/rsyslog.d |
| 1416 | cp tools/21-cloudinit.conf debian/cloud-init/etc/rsyslog.d/21-cloudinit.conf |
| 1417 | + install -D ./tools/Z99-cloud-locale-test.sh debian/cloud-init/etc/profile.d/Z99-cloud-locale-test.sh |
| 1418 | + install -D ./tools/Z99-cloudinit-warnings.sh debian/cloud-init/etc/profile.d/Z99-cloudinit-warnings.sh |
| 1419 | |
| 1420 | override_dh_auto_test: |
| 1421 | ifeq (,$(findstring nocheck,$(DEB_BUILD_OPTIONS))) |
| 1422 | diff --git a/setup.py b/setup.py |
| 1423 | index 0403607..e6693c9 100755 |
| 1424 | --- a/setup.py |
| 1425 | +++ b/setup.py |
| 1426 | @@ -168,7 +168,8 @@ else: |
| 1427 | (ETC + '/cloud/templates', glob('templates/*')), |
| 1428 | (ETC + '/NetworkManager/dispatcher.d/', ['tools/hook-network-manager']), |
| 1429 | (ETC + '/dhcp/dhclient-exit-hooks.d/', ['tools/hook-dhclient']), |
| 1430 | - (USR_LIB_EXEC + '/cloud-init', ['tools/uncloud-init', |
| 1431 | + (USR_LIB_EXEC + '/cloud-init', ['tools/ds-identify', |
| 1432 | + 'tools/uncloud-init', |
| 1433 | 'tools/write-ssh-key-fingerprints']), |
| 1434 | (USR + '/share/doc/cloud-init', [f for f in glob('doc/*') if is_f(f)]), |
| 1435 | (USR + '/share/doc/cloud-init/examples', |
| 1436 | diff --git a/systemd/cloud-init-generator b/systemd/cloud-init-generator |
| 1437 | index fedb630..bd9f267 100755 |
| 1438 | --- a/systemd/cloud-init-generator |
| 1439 | +++ b/systemd/cloud-init-generator |
| 1440 | @@ -6,6 +6,8 @@ DEBUG_LEVEL=1 |
| 1441 | LOG_D="/run/cloud-init" |
| 1442 | ENABLE="enabled" |
| 1443 | DISABLE="disabled" |
| 1444 | +FOUND="found" |
| 1445 | +NOTFOUND="notfound" |
| 1446 | RUN_ENABLED_FILE="$LOG_D/$ENABLE" |
| 1447 | CLOUD_SYSTEM_TARGET="/lib/systemd/system/cloud-init.target" |
| 1448 | CLOUD_TARGET_NAME="cloud-init.target" |
| 1449 | @@ -74,10 +76,30 @@ default() { |
| 1450 | _RET="$ENABLE" |
| 1451 | } |
| 1452 | |
| 1453 | +check_for_datasource() { |
| 1454 | + local ds_rc="" dsidentify="/usr/lib/cloud-init/ds-identify" |
| 1455 | + if [ ! -x "$dsidentify" ]; then |
| 1456 | + debug 1 "no ds-identify in $dsidentify. _RET=$FOUND" |
| 1457 | + return 0 |
| 1458 | + fi |
| 1459 | + $dsidentify |
| 1460 | + ds_rc=$? |
| 1461 | + debug 1 "ds-identify rc=$ds_rc" |
| 1462 | + if [ "$ds_rc" = "0" ]; then |
| 1463 | + _RET="$FOUND" |
| 1464 | + debug 1 "ds-identify _RET=$_RET" |
| 1465 | + return 0 |
| 1466 | + fi |
| 1467 | + _RET="$NOTFOUND" |
| 1468 | + debug 1 "ds-identify _RET=$_RET" |
| 1469 | + return 1 |
| 1470 | +} |
| 1471 | + |
| 1472 | main() { |
| 1473 | local normal_d="$1" early_d="$2" late_d="$3" |
| 1474 | local target_name="multi-user.target" gen_d="$early_d" |
| 1475 | local link_path="$gen_d/${target_name}.wants/${CLOUD_TARGET_NAME}" |
| 1476 | + local ds="$NOTFOUND" |
| 1477 | |
| 1478 | debug 1 "$0 normal=$normal_d early=$early_d late=$late_d" |
| 1479 | debug 2 "$0 $*" |
| 1480 | @@ -93,7 +115,20 @@ main() { |
| 1481 | debug 0 "search $search returned $ret" |
| 1482 | fi |
| 1483 | done |
| 1484 | - |
| 1485 | + |
| 1486 | + # enable AND ds=found == enable |
| 1487 | + # enable AND ds=notfound == disable |
| 1488 | + # disable || <any> == disabled |
| 1489 | + if [ "$result" = "$ENABLE" ]; then |
| 1490 | + debug 1 "checking for datasource" |
| 1491 | + check_for_datasource |
| 1492 | + ds=$_RET |
| 1493 | + if [ "$ds" = "$NOTFOUND" ]; then |
| 1494 | + debug 1 "cloud-init is enabled but no datasource found, disabling" |
| 1495 | + result="$DISABLE" |
| 1496 | + fi |
| 1497 | + fi |
| 1498 | + |
| 1499 | if [ "$result" = "$ENABLE" ]; then |
| 1500 | if [ -e "$link_path" ]; then |
| 1501 | debug 1 "already enabled: no change needed" |
| 1502 | @@ -124,7 +159,7 @@ main() { |
| 1503 | rm -f "$RUN_ENABLED_FILE" |
| 1504 | fi |
| 1505 | else |
| 1506 | - debug 0 "unexpected result '$result'" |
| 1507 | + debug 0 "unexpected result '$result' 'ds=$ds'" |
| 1508 | ret=3 |
| 1509 | fi |
| 1510 | return $ret |
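The generator change above reduces to a small decision table: cloud-init stays enabled only when it was already enabled and ds-identify reports a datasource as found. The same logic, restated as an illustrative Python sketch rather than the shell implementation:

    # Illustration only: the enable/disable decision the generator makes
    # after consulting ds-identify.
    ENABLE, DISABLE = "enabled", "disabled"
    FOUND, NOTFOUND = "found", "notfound"

    def decide(result, ds):
        # enable  AND ds=found    -> enable
        # enable  AND ds=notfound -> disable
        # disable AND any ds      -> disable
        if result == ENABLE and ds == FOUND:
            return ENABLE
        return DISABLE

    assert decide(ENABLE, FOUND) == ENABLE
    assert decide(ENABLE, NOTFOUND) == DISABLE
    assert decide(DISABLE, FOUND) == DISABLE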
| 1511 | diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py |
| 1512 | index cf3b46d..90e2431 100644 |
| 1513 | --- a/tests/unittests/helpers.py |
| 1514 | +++ b/tests/unittests/helpers.py |
| 1515 | @@ -29,7 +29,6 @@ PY2 = False |
| 1516 | PY26 = False |
| 1517 | PY27 = False |
| 1518 | PY3 = False |
| 1519 | -FIX_HTTPRETTY = False |
| 1520 | |
| 1521 | _PY_VER = sys.version_info |
| 1522 | _PY_MAJOR, _PY_MINOR, _PY_MICRO = _PY_VER[0:3] |
| 1523 | @@ -44,8 +43,6 @@ else: |
| 1524 | PY2 = True |
| 1525 | if (_PY_MAJOR, _PY_MINOR) >= (3, 0): |
| 1526 | PY3 = True |
| 1527 | - if _PY_MINOR == 4 and _PY_MICRO < 3: |
| 1528 | - FIX_HTTPRETTY = True |
| 1529 | |
| 1530 | |
| 1531 | # Makes the old path start |
| 1532 | @@ -86,6 +83,28 @@ class TestCase(unittest2.TestCase): |
| 1533 | pass |
| 1534 | |
| 1535 | |
| 1536 | +class CiTestCase(TestCase): |
| 1537 | + """This is the preferred test case base class unless user |
| 1538 | + needs other test case classes below.""" |
| 1539 | + def tmp_dir(self, dir=None, cleanup=True): |
| 1540 | + # return a full path to a temporary directory that will be cleaned up. |
| 1541 | + if dir is None: |
| 1542 | + tmpd = tempfile.mkdtemp( |
| 1543 | + prefix="ci-%s." % self.__class__.__name__) |
| 1544 | + else: |
| 1545 | + tmpd = tempfile.mkdtemp(dir=dir) |
| 1546 | + self.addCleanup(functools.partial(shutil.rmtree, tmpd)) |
| 1547 | + return tmpd |
| 1548 | + |
| 1549 | + def tmp_path(self, path, dir=None): |
| 1550 | + # return an absolute path to 'path' under dir. |
| 1551 | + # if dir is None, one will be created with tmp_dir() |
| 1552 | + # the file is not created or modified. |
| 1553 | + if dir is None: |
| 1554 | + dir = self.tmp_dir() |
| 1555 | + return os.path.normpath(os.path.abspath(os.path.join(dir, path))) |
| 1556 | + |
| 1557 | + |
| 1558 | class ResourceUsingTestCase(TestCase): |
| 1559 | def setUp(self): |
| 1560 | super(ResourceUsingTestCase, self).setUp() |
| 1561 | @@ -216,37 +235,6 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): |
| 1562 | return root |
| 1563 | |
| 1564 | |
| 1565 | -def import_httpretty(): |
| 1566 | - """Import HTTPretty and monkey patch Python 3.4 issue. |
| 1567 | - See https://github.com/gabrielfalcao/HTTPretty/pull/193 and |
| 1568 | - as well as https://github.com/gabrielfalcao/HTTPretty/issues/221. |
| 1569 | - |
| 1570 | - Lifted from |
| 1571 | - https://github.com/inveniosoftware/datacite/blob/master/tests/helpers.py |
| 1572 | - """ |
| 1573 | - if not FIX_HTTPRETTY: |
| 1574 | - import httpretty |
| 1575 | - else: |
| 1576 | - import socket |
| 1577 | - old_SocketType = socket.SocketType |
| 1578 | - |
| 1579 | - import httpretty |
| 1580 | - from httpretty import core |
| 1581 | - |
| 1582 | - def sockettype_patch(f): |
| 1583 | - @functools.wraps(f) |
| 1584 | - def inner(*args, **kwargs): |
| 1585 | - f(*args, **kwargs) |
| 1586 | - socket.SocketType = old_SocketType |
| 1587 | - socket.__dict__['SocketType'] = old_SocketType |
| 1588 | - return inner |
| 1589 | - |
| 1590 | - core.httpretty.disable = sockettype_patch( |
| 1591 | - httpretty.httpretty.disable |
| 1592 | - ) |
| 1593 | - return httpretty |
| 1594 | - |
| 1595 | - |
| 1596 | class HttprettyTestCase(TestCase): |
| 1597 | # necessary as http_proxy gets in the way of httpretty |
| 1598 | # https://github.com/gabrielfalcao/HTTPretty/issues/122 |
| 1599 | @@ -262,23 +250,10 @@ class HttprettyTestCase(TestCase): |
| 1600 | super(HttprettyTestCase, self).tearDown() |
| 1601 | |
| 1602 | |
| 1603 | -class TempDirTestCase(TestCase): |
| 1604 | - # provide a tempdir per class, not per test. |
| 1605 | - def setUp(self): |
| 1606 | - super(TempDirTestCase, self).setUp() |
| 1607 | - self.tmp = tempfile.mkdtemp() |
| 1608 | - self.addCleanup(shutil.rmtree, self.tmp) |
| 1609 | - |
| 1610 | - def tmp_path(self, path): |
| 1611 | - if path.startswith(os.path.sep): |
| 1612 | - path = "." + path |
| 1613 | - |
| 1614 | - return os.path.normpath(os.path.join(self.tmp, path)) |
| 1615 | - |
| 1616 | - |
| 1617 | def populate_dir(path, files): |
| 1618 | if not os.path.exists(path): |
| 1619 | os.makedirs(path) |
| 1620 | + ret = [] |
| 1621 | for (name, content) in files.items(): |
| 1622 | p = os.path.join(path, name) |
| 1623 | util.ensure_dir(os.path.dirname(p)) |
| 1624 | @@ -288,6 +263,9 @@ def populate_dir(path, files): |
| 1625 | else: |
| 1626 | fp.write(content.encode('utf-8')) |
| 1627 | fp.close() |
| 1628 | + ret.append(p) |
| 1629 | + |
| 1630 | + return ret |
| 1631 | |
| 1632 | |
| 1633 | def dir2dict(startdir, prefix=None): |
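With the helpers.py changes above, tests get per-test temporary paths from `CiTestCase` instead of the per-class tempdir that `TempDirTestCase` provided. A hypothetical test using the new helpers (not part of this diff) might look like:

    # Hypothetical test showing how the CiTestCase helpers are intended
    # to be used; not part of the patch.
    from cloudinit import util

    from .helpers import CiTestCase


    class TestExample(CiTestCase):
        def test_write_and_read(self):
            path = self.tmp_path("myfile")     # path under a fresh tmp_dir()
            util.write_file(path, "hello\n")   # tmp dir removed via addCleanup
            self.assertEqual("hello\n", util.load_file(path))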
| 1634 | diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py |
| 1635 | index 7b6f8c4..781f6d5 100644 |
| 1636 | --- a/tests/unittests/test__init__.py |
| 1637 | +++ b/tests/unittests/test__init__.py |
| 1638 | @@ -1,16 +1,18 @@ |
| 1639 | # This file is part of cloud-init. See LICENSE file for license information. |
| 1640 | |
| 1641 | +import logging |
| 1642 | import os |
| 1643 | import shutil |
| 1644 | import tempfile |
| 1645 | |
| 1646 | +from cloudinit.cmd import main |
| 1647 | from cloudinit import handlers |
| 1648 | from cloudinit import helpers |
| 1649 | from cloudinit import settings |
| 1650 | from cloudinit import url_helper |
| 1651 | from cloudinit import util |
| 1652 | |
| 1653 | -from .helpers import TestCase, ExitStack, mock |
| 1654 | +from .helpers import TestCase, CiTestCase, ExitStack, mock |
| 1655 | |
| 1656 | |
| 1657 | class FakeModule(handlers.Handler): |
| 1658 | @@ -170,44 +172,68 @@ class TestHandlerHandlePart(TestCase): |
| 1659 | self.data, self.ctype, self.filename, self.payload) |
| 1660 | |
| 1661 | |
| 1662 | -class TestCmdlineUrl(TestCase): |
| 1663 | - def test_invalid_content(self): |
| 1664 | - url = "http://example.com/foo" |
| 1665 | - key = "mykey" |
| 1666 | - payload = b"0" |
| 1667 | - cmdline = "ro %s=%s bar=1" % (key, url) |
| 1668 | +class TestCmdlineUrl(CiTestCase): |
| 1669 | + def test_parse_cmdline_url_nokey_raises_keyerror(self): |
| 1670 | + self.assertRaises( |
| 1671 | + KeyError, main.parse_cmdline_url, 'root=foo bar single') |
| 1672 | |
| 1673 | - with mock.patch('cloudinit.url_helper.readurl', |
| 1674 | - return_value=url_helper.StringResponse(payload)): |
| 1675 | - self.assertEqual( |
| 1676 | - util.get_cmdline_url(names=[key], starts="xxxxxx", |
| 1677 | - cmdline=cmdline), |
| 1678 | - (key, url, None)) |
| 1679 | + def test_parse_cmdline_url_found(self): |
| 1680 | + cmdline = 'root=foo bar single url=http://example.com arg1 -v' |
| 1681 | + self.assertEqual( |
| 1682 | + ('url', 'http://example.com'), main.parse_cmdline_url(cmdline)) |
| 1683 | |
| 1684 | - def test_valid_content(self): |
| 1685 | - url = "http://example.com/foo" |
| 1686 | - key = "mykey" |
| 1687 | - payload = b"xcloud-config\nmydata: foo\nbar: wark\n" |
| 1688 | + @mock.patch('cloudinit.cmd.main.util.read_file_or_url') |
| 1689 | + def test_invalid_content(self, m_read): |
| 1690 | + key = "cloud-config-url" |
| 1691 | + url = 'http://example.com/foo' |
| 1692 | cmdline = "ro %s=%s bar=1" % (key, url) |
| 1693 | + m_read.return_value = url_helper.StringResponse(b"unexpected blob") |
| 1694 | |
| 1695 | - with mock.patch('cloudinit.url_helper.readurl', |
| 1696 | - return_value=url_helper.StringResponse(payload)): |
| 1697 | - self.assertEqual( |
| 1698 | - util.get_cmdline_url(names=[key], starts=b"xcloud-config", |
| 1699 | - cmdline=cmdline), |
| 1700 | - (key, url, payload)) |
| 1701 | + fpath = self.tmp_path("ccfile") |
| 1702 | + lvl, msg = main.attempt_cmdline_url( |
| 1703 | + fpath, network=True, cmdline=cmdline) |
| 1704 | + self.assertEqual(logging.WARN, lvl) |
| 1705 | + self.assertIn(url, msg) |
| 1706 | + self.assertFalse(os.path.exists(fpath)) |
| 1707 | |
| 1708 | - def test_no_key_found(self): |
| 1709 | + @mock.patch('cloudinit.cmd.main.util.read_file_or_url') |
| 1710 | + def test_valid_content(self, m_read): |
| 1711 | url = "http://example.com/foo" |
| 1712 | - key = "mykey" |
| 1713 | - cmdline = "ro %s=%s bar=1" % (key, url) |
| 1714 | - |
| 1715 | - with mock.patch('cloudinit.url_helper.readurl', |
| 1716 | - return_value=url_helper.StringResponse(b'')): |
| 1717 | - self.assertEqual( |
| 1718 | - util.get_cmdline_url(names=["does-not-appear"], |
| 1719 | - starts="#cloud-config", cmdline=cmdline), |
| 1720 | - (None, None, None)) |
| 1721 | + payload = b"#cloud-config\nmydata: foo\nbar: wark\n" |
| 1722 | + cmdline = "ro %s=%s bar=1" % ('cloud-config-url', url) |
| 1723 | + |
| 1724 | + m_read.return_value = url_helper.StringResponse(payload) |
| 1725 | + fpath = self.tmp_path("ccfile") |
| 1726 | + lvl, msg = main.attempt_cmdline_url( |
| 1727 | + fpath, network=True, cmdline=cmdline) |
| 1728 | + self.assertEqual(util.load_file(fpath, decode=False), payload) |
| 1729 | + self.assertEqual(logging.INFO, lvl) |
| 1730 | + self.assertIn(url, msg) |
| 1731 | + |
| 1732 | + @mock.patch('cloudinit.cmd.main.util.read_file_or_url') |
| 1733 | + def test_no_key_found(self, m_read): |
| 1734 | + cmdline = "ro mykey=http://example.com/foo root=foo" |
| 1735 | + fpath = self.tmp_path("ccpath") |
| 1736 | + lvl, msg = main.attempt_cmdline_url( |
| 1737 | + fpath, network=True, cmdline=cmdline) |
| 1738 | + |
| 1739 | + m_read.assert_not_called() |
| 1740 | + self.assertFalse(os.path.exists(fpath)) |
| 1741 | + self.assertEqual(logging.DEBUG, lvl) |
| 1742 | + |
| 1743 | + @mock.patch('cloudinit.cmd.main.util.read_file_or_url') |
| 1744 | + def test_exception_warns(self, m_read): |
| 1745 | + url = "http://example.com/foo" |
| 1746 | + cmdline = "ro cloud-config-url=%s root=LABEL=bar" % url |
| 1747 | + fpath = self.tmp_path("ccfile") |
| 1748 | + m_read.side_effect = url_helper.UrlError( |
| 1749 | + cause="Unexpected Error", url="http://example.com/foo") |
| 1750 | + |
| 1751 | + lvl, msg = main.attempt_cmdline_url( |
| 1752 | + fpath, network=True, cmdline=cmdline) |
| 1753 | + self.assertEqual(logging.WARN, lvl) |
| 1754 | + self.assertIn(url, msg) |
| 1755 | + self.assertFalse(os.path.exists(fpath)) |
| 1756 | |
| 1757 | |
| 1758 | # vi: ts=4 expandtab |
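The reworked tests above exercise the split between parsing the kernel command line for a `cloud-config-url=`/`url=` token and attempting to fetch it. A simplified, illustrative parser with the same observable behavior as the tests (not the patch's implementation) could be:

    # Illustrative only: pull the first cloud-config-url=/url= token from a
    # kernel command line string, raising KeyError when none is present,
    # as the tests above expect.
    def find_cmdline_url(cmdline, names=("cloud-config-url", "url")):
        for tok in cmdline.split():
            if "=" not in tok:
                continue
            key, _, val = tok.partition("=")
            if key in names:
                return key, val
        raise KeyError("none of %s found in cmdline" % (names,))

    assert find_cmdline_url("root=foo url=http://example.com -v") == \
        ("url", "http://example.com")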
| 1759 | diff --git a/tests/unittests/test_atomic_helper.py b/tests/unittests/test_atomic_helper.py |
| 1760 | index e170c7c..515919d 100644 |
| 1761 | --- a/tests/unittests/test_atomic_helper.py |
| 1762 | +++ b/tests/unittests/test_atomic_helper.py |
| 1763 | @@ -6,10 +6,10 @@ import stat |
| 1764 | |
| 1765 | from cloudinit import atomic_helper |
| 1766 | |
| 1767 | -from . import helpers |
| 1768 | +from .helpers import CiTestCase |
| 1769 | |
| 1770 | |
| 1771 | -class TestAtomicHelper(helpers.TempDirTestCase): |
| 1772 | +class TestAtomicHelper(CiTestCase): |
| 1773 | def test_basic_usage(self): |
| 1774 | """write_file takes bytes if no omode.""" |
| 1775 | path = self.tmp_path("test_basic_usage") |
| 1776 | diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py |
| 1777 | index 4092d9c..4ad86bb 100644 |
| 1778 | --- a/tests/unittests/test_data.py |
| 1779 | +++ b/tests/unittests/test_data.py |
| 1780 | @@ -564,12 +564,12 @@ class TestConvertString(helpers.TestCase): |
| 1781 | |
| 1782 | |
| 1783 | class TestFetchBaseConfig(helpers.TestCase): |
| 1784 | - |
| 1785 | - def test_only_builtin_gets_builtin2(self): |
| 1786 | + def test_only_builtin_gets_builtin(self): |
| 1787 | ret = helpers.wrap_and_call( |
| 1788 | - 'cloudinit.stages.util', |
| 1789 | - {'read_conf_with_confd': None, |
| 1790 | - 'read_conf_from_cmdline': None}, |
| 1791 | + 'cloudinit.stages', |
| 1792 | + {'util.read_conf_with_confd': None, |
| 1793 | + 'util.read_conf_from_cmdline': None, |
| 1794 | + 'read_runtime_config': {'return_value': {}}}, |
| 1795 | stages.fetch_base_config) |
| 1796 | self.assertEqual(util.get_builtin_cfg(), ret) |
| 1797 | |
| 1798 | @@ -578,9 +578,11 @@ class TestFetchBaseConfig(helpers.TestCase): |
| 1799 | test_key = sorted(builtin)[0] |
| 1800 | test_value = 'test' |
| 1801 | ret = helpers.wrap_and_call( |
| 1802 | - 'cloudinit.stages.util', |
| 1803 | - {'read_conf_with_confd': {'return_value': {test_key: test_value}}, |
| 1804 | - 'read_conf_from_cmdline': None}, |
| 1805 | + 'cloudinit.stages', |
| 1806 | + {'util.read_conf_with_confd': |
| 1807 | + {'return_value': {test_key: test_value}}, |
| 1808 | + 'util.read_conf_from_cmdline': None, |
| 1809 | + 'read_runtime_config': {'return_value': {}}}, |
| 1810 | stages.fetch_base_config) |
| 1811 | self.assertEqual(ret.get(test_key), test_value) |
| 1812 | builtin[test_key] = test_value |
| 1813 | @@ -592,25 +594,44 @@ class TestFetchBaseConfig(helpers.TestCase): |
| 1814 | test_value = 'test' |
| 1815 | cmdline = {test_key: test_value} |
| 1816 | ret = helpers.wrap_and_call( |
| 1817 | - 'cloudinit.stages.util', |
| 1818 | - {'read_conf_from_cmdline': {'return_value': cmdline}, |
| 1819 | - 'read_conf_with_confd': None}, |
| 1820 | + 'cloudinit.stages', |
| 1821 | + {'util.read_conf_from_cmdline': {'return_value': cmdline}, |
| 1822 | + 'util.read_conf_with_confd': None, |
| 1823 | + 'read_runtime_config': None}, |
| 1824 | stages.fetch_base_config) |
| 1825 | self.assertEqual(ret.get(test_key), test_value) |
| 1826 | builtin[test_key] = test_value |
| 1827 | self.assertEqual(ret, builtin) |
| 1828 | |
| 1829 | - def test_cmdline_overrides_conf_d_and_defaults(self): |
| 1830 | + def test_cmdline_overrides_confd_runtime_and_defaults(self): |
| 1831 | builtin = {'key1': 'value0', 'key3': 'other2'} |
| 1832 | conf_d = {'key1': 'value1', 'key2': 'other1'} |
| 1833 | cmdline = {'key3': 'other3', 'key2': 'other2'} |
| 1834 | + runtime = {'key3': 'runtime3'} |
| 1835 | ret = helpers.wrap_and_call( |
| 1836 | - 'cloudinit.stages.util', |
| 1837 | - {'read_conf_with_confd': {'return_value': conf_d}, |
| 1838 | - 'get_builtin_cfg': {'return_value': builtin}, |
| 1839 | - 'read_conf_from_cmdline': {'return_value': cmdline}}, |
| 1840 | + 'cloudinit.stages', |
| 1841 | + {'util.read_conf_with_confd': {'return_value': conf_d}, |
| 1842 | + 'util.get_builtin_cfg': {'return_value': builtin}, |
| 1843 | + 'read_runtime_config': {'return_value': runtime}, |
| 1844 | + 'util.read_conf_from_cmdline': {'return_value': cmdline}}, |
| 1845 | stages.fetch_base_config) |
| 1846 | self.assertEqual(ret, {'key1': 'value1', 'key2': 'other2', |
| 1847 | 'key3': 'other3'}) |
| 1848 | |
| 1849 | + def test_order_precedence_is_builtin_system_runtime_cmdline(self): |
| 1850 | + builtin = {'key1': 'builtin0', 'key3': 'builtin3'} |
| 1851 | + conf_d = {'key1': 'confd1', 'key2': 'confd2', 'keyconfd1': 'kconfd1'} |
| 1852 | + runtime = {'key1': 'runtime1', 'key2': 'runtime2'} |
| 1853 | + cmdline = {'key1': 'cmdline1'} |
| 1854 | + ret = helpers.wrap_and_call( |
| 1855 | + 'cloudinit.stages', |
| 1856 | + {'util.read_conf_with_confd': {'return_value': conf_d}, |
| 1857 | + 'util.get_builtin_cfg': {'return_value': builtin}, |
| 1858 | + 'util.read_conf_from_cmdline': {'return_value': cmdline}, |
| 1859 | + 'read_runtime_config': {'return_value': runtime}, |
| 1860 | + }, |
| 1861 | + stages.fetch_base_config) |
| 1862 | + self.assertEqual(ret, {'key1': 'cmdline1', 'key2': 'runtime2', |
| 1863 | + 'key3': 'builtin3', 'keyconfd1': 'kconfd1'}) |
| 1864 | + |
| 1865 | # vi: ts=4 expandtab |
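The new precedence tests above assert that kernel command line settings override runtime config, which overrides conf.d, which overrides the builtin defaults. A simplified sketch of that ordered merge, independent of the actual cloud-init merge helpers:

    # Simplified illustration of the precedence asserted above:
    # later sources override earlier ones.
    def merge_in_order(builtin, conf_d, runtime, cmdline):
        merged = {}
        for cfg in (builtin, conf_d, runtime, cmdline):
            merged.update(cfg or {})
        return merged

    merged = merge_in_order(
        {'key1': 'builtin0', 'key3': 'builtin3'},
        {'key1': 'confd1', 'key2': 'confd2'},
        {'key1': 'runtime1', 'key2': 'runtime2'},
        {'key1': 'cmdline1'})
    assert merged == {'key1': 'cmdline1', 'key2': 'runtime2', 'key3': 'builtin3'}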
| 1866 | diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py |
| 1867 | index 4f66767..4f83454 100644 |
| 1868 | --- a/tests/unittests/test_datasource/test_gce.py |
| 1869 | +++ b/tests/unittests/test_datasource/test_gce.py |
| 1870 | @@ -4,6 +4,7 @@ |
| 1871 | # |
| 1872 | # This file is part of cloud-init. See LICENSE file for license information. |
| 1873 | |
| 1874 | +import httpretty |
| 1875 | import re |
| 1876 | |
| 1877 | from base64 import b64encode, b64decode |
| 1878 | @@ -15,7 +16,6 @@ from cloudinit.sources import DataSourceGCE |
| 1879 | |
| 1880 | from .. import helpers as test_helpers |
| 1881 | |
| 1882 | -httpretty = test_helpers.import_httpretty() |
| 1883 | |
| 1884 | GCE_META = { |
| 1885 | 'instance/id': '123', |
| 1886 | @@ -59,6 +59,8 @@ def _set_mock_metadata(gce_meta=None): |
| 1887 | else: |
| 1888 | return (404, headers, '') |
| 1889 | |
| 1890 | + # reset is needed. https://github.com/gabrielfalcao/HTTPretty/issues/316 |
| 1891 | + httpretty.reset() |
| 1892 | httpretty.register_uri(httpretty.GET, MD_URL_RE, body=_request_callback) |
| 1893 | |
| 1894 | |
| 1895 | diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py |
| 1896 | index e5b6fcc..7bf5508 100644 |
| 1897 | --- a/tests/unittests/test_datasource/test_openstack.py |
| 1898 | +++ b/tests/unittests/test_datasource/test_openstack.py |
| 1899 | @@ -5,6 +5,7 @@ |
| 1900 | # This file is part of cloud-init. See LICENSE file for license information. |
| 1901 | |
| 1902 | import copy |
| 1903 | +import httpretty as hp |
| 1904 | import json |
| 1905 | import re |
| 1906 | |
| 1907 | @@ -20,8 +21,6 @@ from cloudinit.sources import DataSourceOpenStack as ds |
| 1908 | from cloudinit.sources.helpers import openstack |
| 1909 | from cloudinit import util |
| 1910 | |
| 1911 | -hp = test_helpers.import_httpretty() |
| 1912 | - |
| 1913 | BASE_URL = "http://169.254.169.254" |
| 1914 | PUBKEY = u'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n' |
| 1915 | EC2_META = { |
| 1916 | @@ -232,7 +231,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): |
| 1917 | None, |
| 1918 | helpers.Paths({})) |
| 1919 | self.assertIsNone(ds_os.version) |
| 1920 | - found = ds_os.get_data(timeout=0.1, retries=0) |
| 1921 | + found = ds_os.get_data() |
| 1922 | self.assertTrue(found) |
| 1923 | self.assertEqual(2, ds_os.version) |
| 1924 | md = dict(ds_os.metadata) |
| 1925 | @@ -256,7 +255,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): |
| 1926 | None, |
| 1927 | helpers.Paths({})) |
| 1928 | self.assertIsNone(ds_os.version) |
| 1929 | - found = ds_os.get_data(timeout=0.1, retries=0) |
| 1930 | + found = ds_os.get_data() |
| 1931 | self.assertFalse(found) |
| 1932 | self.assertIsNone(ds_os.version) |
| 1933 | |
| 1934 | @@ -275,7 +274,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): |
| 1935 | 'timeout': 0, |
| 1936 | } |
| 1937 | self.assertIsNone(ds_os.version) |
| 1938 | - found = ds_os.get_data(timeout=0.1, retries=0) |
| 1939 | + found = ds_os.get_data() |
| 1940 | self.assertFalse(found) |
| 1941 | self.assertIsNone(ds_os.version) |
| 1942 | |
| 1943 | @@ -298,7 +297,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): |
| 1944 | 'timeout': 0, |
| 1945 | } |
| 1946 | self.assertIsNone(ds_os.version) |
| 1947 | - found = ds_os.get_data(timeout=0.1, retries=0) |
| 1948 | + found = ds_os.get_data() |
| 1949 | self.assertFalse(found) |
| 1950 | self.assertIsNone(ds_os.version) |
| 1951 | |
| 1952 | diff --git a/tests/unittests/test_distros/test_resolv.py b/tests/unittests/test_distros/test_resolv.py |
| 1953 | index 6b535a9..c9d0347 100644 |
| 1954 | --- a/tests/unittests/test_distros/test_resolv.py |
| 1955 | +++ b/tests/unittests/test_distros/test_resolv.py |
| 1956 | @@ -46,7 +46,7 @@ class TestResolvHelper(TestCase): |
| 1957 | self.assertNotIn('10.3', rp.nameservers) |
| 1958 | self.assertEqual(len(rp.nameservers), 3) |
| 1959 | rp.add_nameserver('10.2') |
| 1960 | - self.assertRaises(ValueError, rp.add_nameserver, '10.3') |
| 1961 | + rp.add_nameserver('10.3') |
| 1962 | self.assertNotIn('10.3', rp.nameservers) |
| 1963 | |
| 1964 | def test_search_domains(self): |
| 1965 | diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py |
| 1966 | old mode 100755 |
| 1967 | new mode 100644 |
| 1968 | index 88746e0..88746e0 |
| 1969 | --- a/tests/unittests/test_distros/test_user_data_normalize.py |
| 1970 | +++ b/tests/unittests/test_distros/test_user_data_normalize.py |
| 1971 | diff --git a/tests/unittests/test_ec2_util.py b/tests/unittests/test_ec2_util.py |
| 1972 | index 4a33d74..65fdb51 100644 |
| 1973 | --- a/tests/unittests/test_ec2_util.py |
| 1974 | +++ b/tests/unittests/test_ec2_util.py |
| 1975 | @@ -1,12 +1,12 @@ |
| 1976 | # This file is part of cloud-init. See LICENSE file for license information. |
| 1977 | |
| 1978 | +import httpretty as hp |
| 1979 | + |
| 1980 | from . import helpers |
| 1981 | |
| 1982 | from cloudinit import ec2_utils as eu |
| 1983 | from cloudinit import url_helper as uh |
| 1984 | |
| 1985 | -hp = helpers.import_httpretty() |
| 1986 | - |
| 1987 | |
| 1988 | class TestEc2Util(helpers.HttprettyTestCase): |
| 1989 | VERSION = 'latest' |
| 1990 | @@ -140,4 +140,49 @@ class TestEc2Util(helpers.HttprettyTestCase): |
| 1991 | self.assertEqual(bdm['ami'], 'sdb') |
| 1992 | self.assertEqual(bdm['ephemeral0'], 'sdc') |
| 1993 | |
| 1994 | + @hp.activate |
| 1995 | + def test_metadata_no_security_credentials(self): |
| 1996 | + base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION) |
| 1997 | + hp.register_uri(hp.GET, base_url, status=200, |
| 1998 | + body="\n".join(['instance-id', |
| 1999 | + 'iam/'])) |
| 2000 | + hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'), |
| 2001 | + status=200, body='i-0123451689abcdef0') |
| 2002 | + hp.register_uri(hp.GET, |
| 2003 | + uh.combine_url(base_url, 'iam/'), |
| 2004 | + status=200, |
| 2005 | + body="\n".join(['info/', 'security-credentials/'])) |
| 2006 | + hp.register_uri(hp.GET, |
| 2007 | + uh.combine_url(base_url, 'iam/info/'), |
| 2008 | + status=200, |
| 2009 | + body='LastUpdated') |
| 2010 | + hp.register_uri(hp.GET, |
| 2011 | + uh.combine_url(base_url, 'iam/info/LastUpdated'), |
| 2012 | + status=200, body='2016-10-27T17:29:39Z') |
| 2013 | + hp.register_uri(hp.GET, |
| 2014 | + uh.combine_url(base_url, 'iam/security-credentials/'), |
| 2015 | + status=200, |
| 2016 | + body='ReadOnly/') |
| 2017 | + hp.register_uri(hp.GET, |
| 2018 | + uh.combine_url(base_url, |
| 2019 | + 'iam/security-credentials/ReadOnly/'), |
| 2020 | + status=200, |
| 2021 | + body="\n".join(['LastUpdated', 'Expiration'])) |
| 2022 | + hp.register_uri(hp.GET, |
| 2023 | + uh.combine_url( |
| 2024 | + base_url, |
| 2025 | + 'iam/security-credentials/ReadOnly/LastUpdated'), |
| 2026 | + status=200, body='2016-10-27T17:28:17Z') |
| 2027 | + hp.register_uri(hp.GET, |
| 2028 | + uh.combine_url( |
| 2029 | + base_url, |
| 2030 | + 'iam/security-credentials/ReadOnly/Expiration'), |
| 2031 | + status=200, body='2016-10-28T00:00:34Z') |
| 2032 | + md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1) |
| 2033 | + self.assertEqual(md['instance-id'], 'i-0123451689abcdef0') |
| 2034 | + iam = md['iam'] |
| 2035 | + self.assertEqual(1, len(iam)) |
| 2036 | + self.assertEqual(iam['info']['LastUpdated'], '2016-10-27T17:29:39Z') |
| 2037 | + self.assertNotIn('security-credentials', iam) |
| 2038 | + |
| 2039 | # vi: ts=4 expandtab |
| 2040 | diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py |
| 2041 | old mode 100755 |
| 2042 | new mode 100644 |
| 2043 | index 1090282..8d25310 |
| 2044 | --- a/tests/unittests/test_net.py |
| 2045 | +++ b/tests/unittests/test_net.py |
| 2046 | @@ -8,11 +8,10 @@ from cloudinit.net import sysconfig |
| 2047 | from cloudinit.sources.helpers import openstack |
| 2048 | from cloudinit import util |
| 2049 | |
| 2050 | +from .helpers import CiTestCase |
| 2051 | from .helpers import dir2dict |
| 2052 | from .helpers import mock |
| 2053 | from .helpers import populate_dir |
| 2054 | -from .helpers import TempDirTestCase |
| 2055 | -from .helpers import TestCase |
| 2056 | |
| 2057 | import base64 |
| 2058 | import copy |
| 2059 | @@ -20,8 +19,6 @@ import gzip |
| 2060 | import io |
| 2061 | import json |
| 2062 | import os |
| 2063 | -import shutil |
| 2064 | -import tempfile |
| 2065 | import textwrap |
| 2066 | import yaml |
| 2067 | |
| 2068 | @@ -166,6 +163,185 @@ nameserver 172.19.0.12 |
| 2069 | ('etc/udev/rules.d/70-persistent-net.rules', |
| 2070 | "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', |
| 2071 | 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))] |
| 2072 | + }, |
| 2073 | + { |
| 2074 | + 'in_data': { |
| 2075 | + "services": [{"type": "dns", "address": "172.19.0.12"}], |
| 2076 | + "networks": [{ |
| 2077 | + "network_id": "public-ipv4", |
| 2078 | + "type": "ipv4", "netmask": "255.255.252.0", |
| 2079 | + "link": "tap1a81968a-79", |
| 2080 | + "routes": [{ |
| 2081 | + "netmask": "0.0.0.0", |
| 2082 | + "network": "0.0.0.0", |
| 2083 | + "gateway": "172.19.3.254", |
| 2084 | + }], |
| 2085 | + "ip_address": "172.19.1.34", "id": "network0" |
| 2086 | + }, { |
| 2087 | + "network_id": "private-ipv4", |
| 2088 | + "type": "ipv4", "netmask": "255.255.255.0", |
| 2089 | + "link": "tap1a81968a-79", |
| 2090 | + "routes": [], |
| 2091 | + "ip_address": "10.0.0.10", "id": "network1" |
| 2092 | + }], |
| 2093 | + "links": [ |
| 2094 | + { |
| 2095 | + "ethernet_mac_address": "fa:16:3e:ed:9a:59", |
| 2096 | + "mtu": None, "type": "bridge", "id": |
| 2097 | + "tap1a81968a-79", |
| 2098 | + "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f" |
| 2099 | + }, |
| 2100 | + ], |
| 2101 | + }, |
| 2102 | + 'in_macs': { |
| 2103 | + 'fa:16:3e:ed:9a:59': 'eth0', |
| 2104 | + }, |
| 2105 | + 'out_sysconfig': [ |
| 2106 | + ('etc/sysconfig/network-scripts/ifcfg-eth0', |
| 2107 | + """ |
| 2108 | +# Created by cloud-init on instance boot automatically, do not edit. |
| 2109 | +# |
| 2110 | +BOOTPROTO=none |
| 2111 | +DEVICE=eth0 |
| 2112 | +HWADDR=fa:16:3e:ed:9a:59 |
| 2113 | +NM_CONTROLLED=no |
| 2114 | +ONBOOT=yes |
| 2115 | +TYPE=Ethernet |
| 2116 | +USERCTL=no |
| 2117 | +""".lstrip()), |
| 2118 | + ('etc/sysconfig/network-scripts/ifcfg-eth0:0', |
| 2119 | + """ |
| 2120 | +# Created by cloud-init on instance boot automatically, do not edit. |
| 2121 | +# |
| 2122 | +BOOTPROTO=static |
| 2123 | +DEFROUTE=yes |
| 2124 | +DEVICE=eth0:0 |
| 2125 | +GATEWAY=172.19.3.254 |
| 2126 | +HWADDR=fa:16:3e:ed:9a:59 |
| 2127 | +IPADDR=172.19.1.34 |
| 2128 | +NETMASK=255.255.252.0 |
| 2129 | +NM_CONTROLLED=no |
| 2130 | +ONBOOT=yes |
| 2131 | +TYPE=Ethernet |
| 2132 | +USERCTL=no |
| 2133 | +""".lstrip()), |
| 2134 | + ('etc/sysconfig/network-scripts/ifcfg-eth0:1', |
| 2135 | + """ |
| 2136 | +# Created by cloud-init on instance boot automatically, do not edit. |
| 2137 | +# |
| 2138 | +BOOTPROTO=static |
| 2139 | +DEVICE=eth0:1 |
| 2140 | +HWADDR=fa:16:3e:ed:9a:59 |
| 2141 | +IPADDR=10.0.0.10 |
| 2142 | +NETMASK=255.255.255.0 |
| 2143 | +NM_CONTROLLED=no |
| 2144 | +ONBOOT=yes |
| 2145 | +TYPE=Ethernet |
| 2146 | +USERCTL=no |
| 2147 | +""".lstrip()), |
| 2148 | + ('etc/resolv.conf', |
| 2149 | + """ |
| 2150 | +; Created by cloud-init on instance boot automatically, do not edit. |
| 2151 | +; |
| 2152 | +nameserver 172.19.0.12 |
| 2153 | +""".lstrip()), |
| 2154 | + ('etc/udev/rules.d/70-persistent-net.rules', |
| 2155 | + "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', |
| 2156 | + 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))] |
| 2157 | + }, |
| 2158 | + { |
| 2159 | + 'in_data': { |
| 2160 | + "services": [{"type": "dns", "address": "172.19.0.12"}], |
| 2161 | + "networks": [{ |
| 2162 | + "network_id": "public-ipv4", |
| 2163 | + "type": "ipv4", "netmask": "255.255.252.0", |
| 2164 | + "link": "tap1a81968a-79", |
| 2165 | + "routes": [{ |
| 2166 | + "netmask": "0.0.0.0", |
| 2167 | + "network": "0.0.0.0", |
| 2168 | + "gateway": "172.19.3.254", |
| 2169 | + }], |
| 2170 | + "ip_address": "172.19.1.34", "id": "network0" |
| 2171 | + }, { |
| 2172 | + "network_id": "public-ipv6", |
| 2173 | + "type": "ipv6", "netmask": "", |
| 2174 | + "link": "tap1a81968a-79", |
| 2175 | + "routes": [ |
| 2176 | + { |
| 2177 | + "gateway": "2001:DB8::1", |
| 2178 | + "netmask": "::", |
| 2179 | + "network": "::" |
| 2180 | + } |
| 2181 | + ], |
| 2182 | + "ip_address": "2001:DB8::10", "id": "network1" |
| 2183 | + }], |
| 2184 | + "links": [ |
| 2185 | + { |
| 2186 | + "ethernet_mac_address": "fa:16:3e:ed:9a:59", |
| 2187 | + "mtu": None, "type": "bridge", "id": |
| 2188 | + "tap1a81968a-79", |
| 2189 | + "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f" |
| 2190 | + }, |
| 2191 | + ], |
| 2192 | + }, |
| 2193 | + 'in_macs': { |
| 2194 | + 'fa:16:3e:ed:9a:59': 'eth0', |
| 2195 | + }, |
| 2196 | + 'out_sysconfig': [ |
| 2197 | + ('etc/sysconfig/network-scripts/ifcfg-eth0', |
| 2198 | + """ |
| 2199 | +# Created by cloud-init on instance boot automatically, do not edit. |
| 2200 | +# |
| 2201 | +BOOTPROTO=none |
| 2202 | +DEVICE=eth0 |
| 2203 | +HWADDR=fa:16:3e:ed:9a:59 |
| 2204 | +NM_CONTROLLED=no |
| 2205 | +ONBOOT=yes |
| 2206 | +TYPE=Ethernet |
| 2207 | +USERCTL=no |
| 2208 | +""".lstrip()), |
| 2209 | + ('etc/sysconfig/network-scripts/ifcfg-eth0:0', |
| 2210 | + """ |
| 2211 | +# Created by cloud-init on instance boot automatically, do not edit. |
| 2212 | +# |
| 2213 | +BOOTPROTO=static |
| 2214 | +DEFROUTE=yes |
| 2215 | +DEVICE=eth0:0 |
| 2216 | +GATEWAY=172.19.3.254 |
| 2217 | +HWADDR=fa:16:3e:ed:9a:59 |
| 2218 | +IPADDR=172.19.1.34 |
| 2219 | +NETMASK=255.255.252.0 |
| 2220 | +NM_CONTROLLED=no |
| 2221 | +ONBOOT=yes |
| 2222 | +TYPE=Ethernet |
| 2223 | +USERCTL=no |
| 2224 | +""".lstrip()), |
| 2225 | + ('etc/sysconfig/network-scripts/ifcfg-eth0:1', |
| 2226 | + """ |
| 2227 | +# Created by cloud-init on instance boot automatically, do not edit. |
| 2228 | +# |
| 2229 | +BOOTPROTO=static |
| 2230 | +DEFROUTE=yes |
| 2231 | +DEVICE=eth0:1 |
| 2232 | +HWADDR=fa:16:3e:ed:9a:59 |
| 2233 | +IPV6ADDR=2001:DB8::10 |
| 2234 | +IPV6INIT=yes |
| 2235 | +IPV6_DEFAULTGW=2001:DB8::1 |
| 2236 | +NETMASK= |
| 2237 | +NM_CONTROLLED=no |
| 2238 | +ONBOOT=yes |
| 2239 | +TYPE=Ethernet |
| 2240 | +USERCTL=no |
| 2241 | +""".lstrip()), |
| 2242 | + ('etc/resolv.conf', |
| 2243 | + """ |
| 2244 | +; Created by cloud-init on instance boot automatically, do not edit. |
| 2245 | +; |
| 2246 | +nameserver 172.19.0.12 |
| 2247 | +""".lstrip()), |
| 2248 | + ('etc/udev/rules.d/70-persistent-net.rules', |
| 2249 | + "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', |
| 2250 | + 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))] |
| 2251 | } |
| 2252 | ] |
| 2253 | |
| 2254 | @@ -222,11 +398,9 @@ NETWORK_CONFIGS = { |
| 2255 | |
| 2256 | auto eth99 |
| 2257 | iface eth99 inet dhcp |
| 2258 | - post-up ifup eth99:1 |
| 2259 | |
| 2260 | - |
| 2261 | - auto eth99:1 |
| 2262 | - iface eth99:1 inet static |
| 2263 | + # control-alias eth99 |
| 2264 | + iface eth99 inet static |
| 2265 | address 192.168.21.3/24 |
| 2266 | dns-nameservers 8.8.8.8 8.8.4.4 |
| 2267 | dns-search barley.maas sach.maas |
| 2268 | @@ -264,6 +438,27 @@ NETWORK_CONFIGS = { |
| 2269 | - wark.maas |
| 2270 | """), |
| 2271 | }, |
| 2272 | + 'v4_and_v6': { |
| 2273 | + 'expected_eni': textwrap.dedent("""\ |
| 2274 | + auto lo |
| 2275 | + iface lo inet loopback |
| 2276 | + |
| 2277 | + auto iface0 |
| 2278 | + iface iface0 inet dhcp |
| 2279 | + |
| 2280 | + # control-alias iface0 |
| 2281 | + iface iface0 inet6 dhcp |
| 2282 | + """).rstrip(' '), |
| 2283 | + 'yaml': textwrap.dedent("""\ |
| 2284 | + version: 1 |
| 2285 | + config: |
| 2286 | + - type: 'physical' |
| 2287 | + name: 'iface0' |
| 2288 | + subnets: |
| 2289 | + - {'type': 'dhcp4'} |
| 2290 | + - {'type': 'dhcp6'} |
| 2291 | + """).rstrip(' '), |
| 2292 | + }, |
| 2293 | 'all': { |
| 2294 | 'expected_eni': ("""\ |
| 2295 | auto lo |
| 2296 | @@ -301,11 +496,9 @@ iface br0 inet static |
| 2297 | address 192.168.14.2/24 |
| 2298 | bridge_ports eth3 eth4 |
| 2299 | bridge_stp off |
| 2300 | - post-up ifup br0:1 |
| 2301 | - |
| 2302 | |
| 2303 | -auto br0:1 |
| 2304 | -iface br0:1 inet6 static |
| 2305 | +# control-alias br0 |
| 2306 | +iface br0 inet6 static |
| 2307 | address 2001:1::1/64 |
| 2308 | |
| 2309 | auto bond0.200 |
| 2310 | @@ -322,11 +515,9 @@ iface eth0.101 inet static |
| 2311 | mtu 1500 |
| 2312 | vlan-raw-device eth0 |
| 2313 | vlan_id 101 |
| 2314 | - post-up ifup eth0.101:1 |
| 2315 | |
| 2316 | - |
| 2317 | -auto eth0.101:1 |
| 2318 | -iface eth0.101:1 inet static |
| 2319 | +# control-alias eth0.101 |
| 2320 | +iface eth0.101 inet static |
| 2321 | address 192.168.2.10/24 |
| 2322 | |
| 2323 | post-up route add -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true |
| 2324 | @@ -478,7 +669,7 @@ def _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net, |
| 2325 | mock_sys_dev_path.side_effect = sys_dev_path |
| 2326 | |
| 2327 | |
| 2328 | -class TestSysConfigRendering(TestCase): |
| 2329 | +class TestSysConfigRendering(CiTestCase): |
| 2330 | |
| 2331 | @mock.patch("cloudinit.net.sys_dev_path") |
| 2332 | @mock.patch("cloudinit.net.read_sys_net") |
| 2333 | @@ -486,8 +677,7 @@ class TestSysConfigRendering(TestCase): |
| 2334 | def test_default_generation(self, mock_get_devicelist, |
| 2335 | mock_read_sys_net, |
| 2336 | mock_sys_dev_path): |
| 2337 | - tmp_dir = tempfile.mkdtemp() |
| 2338 | - self.addCleanup(shutil.rmtree, tmp_dir) |
| 2339 | + tmp_dir = self.tmp_dir() |
| 2340 | _setup_test(tmp_dir, mock_get_devicelist, |
| 2341 | mock_read_sys_net, mock_sys_dev_path) |
| 2342 | |
| 2343 | @@ -518,10 +708,8 @@ USERCTL=no |
| 2344 | self.assertEqual(expected_content, content) |
| 2345 | |
| 2346 | def test_openstack_rendering_samples(self): |
| 2347 | - tmp_dir = tempfile.mkdtemp() |
| 2348 | - self.addCleanup(shutil.rmtree, tmp_dir) |
| 2349 | - render_dir = os.path.join(tmp_dir, "render") |
| 2350 | for os_sample in OS_SAMPLES: |
| 2351 | + render_dir = self.tmp_dir() |
| 2352 | ex_input = os_sample['in_data'] |
| 2353 | ex_mac_addrs = os_sample['in_macs'] |
| 2354 | network_cfg = openstack.convert_net_json( |
| 2355 | @@ -535,7 +723,7 @@ USERCTL=no |
| 2356 | self.assertEqual(expected_content, fh.read()) |
| 2357 | |
| 2358 | |
| 2359 | -class TestEniNetRendering(TestCase): |
| 2360 | +class TestEniNetRendering(CiTestCase): |
| 2361 | |
| 2362 | @mock.patch("cloudinit.net.sys_dev_path") |
| 2363 | @mock.patch("cloudinit.net.read_sys_net") |
| 2364 | @@ -543,8 +731,7 @@ class TestEniNetRendering(TestCase): |
| 2365 | def test_default_generation(self, mock_get_devicelist, |
| 2366 | mock_read_sys_net, |
| 2367 | mock_sys_dev_path): |
| 2368 | - tmp_dir = tempfile.mkdtemp() |
| 2369 | - self.addCleanup(shutil.rmtree, tmp_dir) |
| 2370 | + tmp_dir = self.tmp_dir() |
| 2371 | _setup_test(tmp_dir, mock_get_devicelist, |
| 2372 | mock_read_sys_net, mock_sys_dev_path) |
| 2373 | |
| 2374 | @@ -576,7 +763,7 @@ iface eth1000 inet dhcp |
| 2375 | self.assertEqual(expected.lstrip(), contents.lstrip()) |
| 2376 | |
| 2377 | |
| 2378 | -class TestEniNetworkStateToEni(TestCase): |
| 2379 | +class TestEniNetworkStateToEni(CiTestCase): |
| 2380 | mycfg = { |
| 2381 | 'config': [{"type": "physical", "name": "eth0", |
| 2382 | "mac_address": "c0:d6:9f:2c:e8:80", |
| 2383 | @@ -607,7 +794,7 @@ class TestEniNetworkStateToEni(TestCase): |
| 2384 | self.assertNotIn("hwaddress", rendered) |
| 2385 | |
| 2386 | |
| 2387 | -class TestCmdlineConfigParsing(TestCase): |
| 2388 | +class TestCmdlineConfigParsing(CiTestCase): |
| 2389 | simple_cfg = { |
| 2390 | 'config': [{"type": "physical", "name": "eth0", |
| 2391 | "mac_address": "c0:d6:9f:2c:e8:80", |
| 2392 | @@ -665,7 +852,7 @@ class TestCmdlineConfigParsing(TestCase): |
| 2393 | self.assertEqual(found, self.simple_cfg) |
| 2394 | |
| 2395 | |
| 2396 | -class TestCmdlineReadKernelConfig(TempDirTestCase): |
| 2397 | +class TestCmdlineReadKernelConfig(CiTestCase): |
| 2398 | macs = { |
| 2399 | 'eth0': '14:02:ec:42:48:00', |
| 2400 | 'eno1': '14:02:ec:42:48:01', |
| 2401 | @@ -673,8 +860,7 @@ class TestCmdlineReadKernelConfig(TempDirTestCase): |
| 2402 | |
| 2403 | def test_ip_cmdline_read_kernel_cmdline_ip(self): |
| 2404 | content = {'net-eth0.conf': DHCP_CONTENT_1} |
| 2405 | - populate_dir(self.tmp, content) |
| 2406 | - files = [os.path.join(self.tmp, k) for k in content.keys()] |
| 2407 | + files = sorted(populate_dir(self.tmp_dir(), content)) |
| 2408 | found = cmdline.read_kernel_cmdline_config( |
| 2409 | files=files, cmdline='foo ip=dhcp', mac_addrs=self.macs) |
| 2410 | exp1 = copy.deepcopy(DHCP_EXPECTED_1) |
| 2411 | @@ -684,8 +870,7 @@ class TestCmdlineReadKernelConfig(TempDirTestCase): |
| 2412 | |
| 2413 | def test_ip_cmdline_read_kernel_cmdline_ip6(self): |
| 2414 | content = {'net6-eno1.conf': DHCP6_CONTENT_1} |
| 2415 | - populate_dir(self.tmp, content) |
| 2416 | - files = [os.path.join(self.tmp, k) for k in content.keys()] |
| 2417 | + files = sorted(populate_dir(self.tmp_dir(), content)) |
| 2418 | found = cmdline.read_kernel_cmdline_config( |
| 2419 | files=files, cmdline='foo ip6=dhcp root=/dev/sda', |
| 2420 | mac_addrs=self.macs) |
| 2421 | @@ -701,8 +886,7 @@ class TestCmdlineReadKernelConfig(TempDirTestCase): |
| 2422 | def test_ip_cmdline_read_kernel_cmdline_none(self): |
| 2423 | # if there is no ip= or ip6= on cmdline, return value should be None |
| 2424 | content = {'net6-eno1.conf': DHCP6_CONTENT_1} |
| 2425 | - populate_dir(self.tmp, content) |
| 2426 | - files = [os.path.join(self.tmp, k) for k in content.keys()] |
| 2427 | + files = sorted(populate_dir(self.tmp_dir(), content)) |
| 2428 | found = cmdline.read_kernel_cmdline_config( |
| 2429 | files=files, cmdline='foo root=/dev/sda', mac_addrs=self.macs) |
| 2430 | self.assertEqual(found, None) |
| 2431 | @@ -710,8 +894,7 @@ class TestCmdlineReadKernelConfig(TempDirTestCase): |
| 2432 | def test_ip_cmdline_both_ip_ip6(self): |
| 2433 | content = {'net-eth0.conf': DHCP_CONTENT_1, |
| 2434 | 'net6-eth0.conf': DHCP6_CONTENT_1.replace('eno1', 'eth0')} |
| 2435 | - populate_dir(self.tmp, content) |
| 2436 | - files = [os.path.join(self.tmp, k) for k in sorted(content.keys())] |
| 2437 | + files = sorted(populate_dir(self.tmp_dir(), content)) |
| 2438 | found = cmdline.read_kernel_cmdline_config( |
| 2439 | files=files, cmdline='foo ip=dhcp ip6=dhcp', mac_addrs=self.macs) |
| 2440 | |
| 2441 | @@ -725,14 +908,12 @@ class TestCmdlineReadKernelConfig(TempDirTestCase): |
| 2442 | self.assertEqual(found['config'], expected) |
| 2443 | |
| 2444 | |
| 2445 | -class TestEniRoundTrip(TestCase): |
| 2446 | - def setUp(self): |
| 2447 | - super(TestCase, self).setUp() |
| 2448 | - self.tmp_dir = tempfile.mkdtemp() |
| 2449 | - self.addCleanup(shutil.rmtree, self.tmp_dir) |
| 2450 | - |
| 2451 | +class TestEniRoundTrip(CiTestCase): |
| 2452 | def _render_and_read(self, network_config=None, state=None, eni_path=None, |
| 2453 | - links_prefix=None, netrules_path=None): |
| 2454 | + links_prefix=None, netrules_path=None, dir=None): |
| 2455 | + if dir is None: |
| 2456 | + dir = self.tmp_dir() |
| 2457 | + |
| 2458 | if network_config: |
| 2459 | ns = network_state.parse_net_config_data(network_config) |
| 2460 | elif state: |
| 2461 | @@ -747,8 +928,8 @@ class TestEniRoundTrip(TestCase): |
| 2462 | config={'eni_path': eni_path, 'links_path_prefix': links_prefix, |
| 2463 | 'netrules_path': netrules_path}) |
| 2464 | |
| 2465 | - renderer.render_network_state(self.tmp_dir, ns) |
| 2466 | - return dir2dict(self.tmp_dir) |
| 2467 | + renderer.render_network_state(dir, ns) |
| 2468 | + return dir2dict(dir) |
| 2469 | |
| 2470 | def testsimple_convert_and_render(self): |
| 2471 | network_config = eni.convert_eni_data(EXAMPLE_ENI) |
| 2472 | @@ -771,6 +952,13 @@ class TestEniRoundTrip(TestCase): |
| 2473 | entry['expected_eni'].splitlines(), |
| 2474 | files['/etc/network/interfaces'].splitlines()) |
| 2475 | |
| 2476 | + def testsimple_render_v4_and_v6(self): |
| 2477 | + entry = NETWORK_CONFIGS['v4_and_v6'] |
| 2478 | + files = self._render_and_read(network_config=yaml.load(entry['yaml'])) |
| 2479 | + self.assertEqual( |
| 2480 | + entry['expected_eni'].splitlines(), |
| 2481 | + files['/etc/network/interfaces'].splitlines()) |
| 2482 | + |
| 2483 | def test_routes_rendered(self): |
| 2484 | # as reported in bug 1649652 |
| 2485 | conf = [ |
| 2486 | diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py |
| 2487 | index 55971b5..991f45a 100644 |
| 2488 | --- a/tests/unittests/test_sshutil.py |
| 2489 | +++ b/tests/unittests/test_sshutil.py |
| 2490 | @@ -32,6 +32,22 @@ VALID_CONTENT = { |
| 2491 | "YWpMfYdPUnE7u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07" |
| 2492 | "/+i1D+ey3ONkZLN+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw==" |
| 2493 | ), |
| 2494 | + 'ecdsa-sha2-nistp256': ( |
| 2495 | + "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBMy/WuXq5MF" |
| 2496 | + "r5hVQ9EEKKUTF7vUaOkgxUh6bNsCs9SFMVslIm1zM/WJYwUv52LdEePjtDYiV4A" |
| 2497 | + "l2XthJ9/bs7Pc=" |
| 2498 | + ), |
| 2499 | + 'ecdsa-sha2-nistp521': ( |
| 2500 | + "AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBABOdNTkh9F" |
| 2501 | + "McK4hZRLs5LTXBEXwNr0+Yg9uvJYRFcz2ZlnjYX9tM4Z3QQFjqogU4pU+zpKLqZ" |
| 2502 | + "5VE4Jcnb1T608UywBIdXkSFZT8trGJqBv9nFWGgmTX3KP8kiBbihpuv1cGwglPl" |
| 2503 | + "Hxs50A42iP0JiT7auGtEAGsu/uMql323GTGb4171Q==" |
| 2504 | + ), |
| 2505 | + 'ecdsa-sha2-nistp384': ( |
| 2506 | + "AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBAnoqFU9Gnl" |
| 2507 | + "LcsEuCJnobs/c6whzvjCgouaOO61kgXNtIxyF4Wkutg6xaGYgBBt/phb7a2TurI" |
| 2508 | + "bcIBuzJ/mP22UyUAbNnBfStAEBmYbrTf1EfiMCYUAr1XnL0UdYmZ8HFg==" |
| 2509 | + ), |
| 2510 | } |
| 2511 | |
| 2512 | TEST_OPTIONS = ( |
| 2513 | @@ -44,7 +60,13 @@ class TestAuthKeyLineParser(test_helpers.TestCase): |
| 2514 | def test_simple_parse(self): |
| 2515 | # test key line with common 3 fields (keytype, base64, comment) |
| 2516 | parser = ssh_util.AuthKeyLineParser() |
| 2517 | - for ktype in ['rsa', 'ecdsa', 'dsa']: |
| 2518 | + ecdsa_types = [ |
| 2519 | + 'ecdsa-sha2-nistp256', |
| 2520 | + 'ecdsa-sha2-nistp384', |
| 2521 | + 'ecdsa-sha2-nistp521', |
| 2522 | + ] |
| 2523 | + |
| 2524 | + for ktype in ['rsa', 'ecdsa', 'dsa'] + ecdsa_types: |
| 2525 | content = VALID_CONTENT[ktype] |
| 2526 | comment = 'user-%s@host' % ktype |
| 2527 | line = ' '.join((ktype, content, comment,)) |
| 2528 | diff --git a/tools/Z99-cloud-locale-test.sh b/tools/Z99-cloud-locale-test.sh |
| 2529 | old mode 100755 |
| 2530 | new mode 100644 |
| 2531 | index 5912bae..4978d87 |
| 2532 | --- a/tools/Z99-cloud-locale-test.sh |
| 2533 | +++ b/tools/Z99-cloud-locale-test.sh |
| 2534 | @@ -11,90 +11,90 @@ |
| 2535 | # of how to fix them. |
| 2536 | |
| 2537 | locale_warn() { |
| 2538 | - local bad_names="" bad_lcs="" key="" val="" var="" vars="" bad_kv="" |
| 2539 | - local w1 w2 w3 w4 remain |
| 2540 | + local bad_names="" bad_lcs="" key="" val="" var="" vars="" bad_kv="" |
| 2541 | + local w1 w2 w3 w4 remain |
| 2542 | |
| 2543 | - # if shell is zsh, act like sh only for this function (-L). |
| 2544 | - # The behavior change will not permenently affect user's shell. |
| 2545 | - [ "${ZSH_NAME+zsh}" = "zsh" ] && emulate -L sh |
| 2546 | + # if shell is zsh, act like sh only for this function (-L). |
| 2547 | + # The behavior change will not permenently affect user's shell. |
| 2548 | + [ "${ZSH_NAME+zsh}" = "zsh" ] && emulate -L sh |
| 2549 | |
| 2550 | - # locale is expected to output either: |
| 2551 | - # VARIABLE= |
| 2552 | - # VARIABLE="value" |
| 2553 | - # locale: Cannot set LC_SOMETHING to default locale |
| 2554 | - while read -r w1 w2 w3 w4 remain; do |
| 2555 | - case "$w1" in |
| 2556 | - locale:) bad_names="${bad_names} ${w4}";; |
| 2557 | - *) |
| 2558 | - key=${w1%%=*} |
| 2559 | - val=${w1#*=} |
| 2560 | - val=${val#\"} |
| 2561 | - val=${val%\"} |
| 2562 | - vars="${vars} $key=$val";; |
| 2563 | - esac |
| 2564 | - done |
| 2565 | - for bad in $bad_names; do |
| 2566 | - for var in ${vars}; do |
| 2567 | - [ "${bad}" = "${var%=*}" ] || continue |
| 2568 | - val=${var#*=} |
| 2569 | - [ "${bad_lcs#* ${val}}" = "${bad_lcs}" ] && |
| 2570 | - bad_lcs="${bad_lcs} ${val}" |
| 2571 | - bad_kv="${bad_kv} $bad=$val" |
| 2572 | - break |
| 2573 | - done |
| 2574 | - done |
| 2575 | - bad_lcs=${bad_lcs# } |
| 2576 | - bad_kv=${bad_kv# } |
| 2577 | - [ -n "$bad_lcs" ] || return 0 |
| 2578 | + # locale is expected to output either: |
| 2579 | + # VARIABLE= |
| 2580 | + # VARIABLE="value" |
| 2581 | + # locale: Cannot set LC_SOMETHING to default locale |
| 2582 | + while read -r w1 w2 w3 w4 remain; do |
| 2583 | + case "$w1" in |
| 2584 | + locale:) bad_names="${bad_names} ${w4}";; |
| 2585 | + *) |
| 2586 | + key=${w1%%=*} |
| 2587 | + val=${w1#*=} |
| 2588 | + val=${val#\"} |
| 2589 | + val=${val%\"} |
| 2590 | + vars="${vars} $key=$val";; |
| 2591 | + esac |
| 2592 | + done |
| 2593 | + for bad in $bad_names; do |
| 2594 | + for var in ${vars}; do |
| 2595 | + [ "${bad}" = "${var%=*}" ] || continue |
| 2596 | + val=${var#*=} |
| 2597 | + [ "${bad_lcs#* ${val}}" = "${bad_lcs}" ] && |
| 2598 | + bad_lcs="${bad_lcs} ${val}" |
| 2599 | + bad_kv="${bad_kv} $bad=$val" |
| 2600 | + break |
| 2601 | + done |
| 2602 | + done |
| 2603 | + bad_lcs=${bad_lcs# } |
| 2604 | + bad_kv=${bad_kv# } |
| 2605 | + [ -n "$bad_lcs" ] || return 0 |
| 2606 | |
| 2607 | - printf "_____________________________________________________________________\n" |
| 2608 | - printf "WARNING! Your environment specifies an invalid locale.\n" |
| 2609 | - printf " The unknown environment variables are:\n %s\n" "$bad_kv" |
| 2610 | - printf " This can affect your user experience significantly, including the\n" |
| 2611 | - printf " ability to manage packages. You may install the locales by running:\n\n" |
| 2612 | + printf "_____________________________________________________________________\n" |
| 2613 | + printf "WARNING! Your environment specifies an invalid locale.\n" |
| 2614 | + printf " The unknown environment variables are:\n %s\n" "$bad_kv" |
| 2615 | + printf " This can affect your user experience significantly, including the\n" |
| 2616 | + printf " ability to manage packages. You may install the locales by running:\n\n" |
| 2617 | |
| 2618 | - local bad invalid="" to_gen="" sfile="/usr/share/i18n/SUPPORTED" |
| 2619 | - local pkgs="" |
| 2620 | - if [ -e "$sfile" ]; then |
| 2621 | - for bad in ${bad_lcs}; do |
| 2622 | - grep -q -i "${bad}" "$sfile" && |
| 2623 | - to_gen="${to_gen} ${bad}" || |
| 2624 | - invalid="${invalid} ${bad}" |
| 2625 | - done |
| 2626 | - else |
| 2627 | - printf " sudo apt-get install locales\n" |
| 2628 | - to_gen=$bad_lcs |
| 2629 | - fi |
| 2630 | - to_gen=${to_gen# } |
| 2631 | + local bad invalid="" to_gen="" sfile="/usr/share/i18n/SUPPORTED" |
| 2632 | + local pkgs="" |
| 2633 | + if [ -e "$sfile" ]; then |
| 2634 | + for bad in ${bad_lcs}; do |
| 2635 | + grep -q -i "${bad}" "$sfile" && |
| 2636 | + to_gen="${to_gen} ${bad}" || |
| 2637 | + invalid="${invalid} ${bad}" |
| 2638 | + done |
| 2639 | + else |
| 2640 | + printf " sudo apt-get install locales\n" |
| 2641 | + to_gen=$bad_lcs |
| 2642 | + fi |
| 2643 | + to_gen=${to_gen# } |
| 2644 | |
| 2645 | - local pkgs="" |
| 2646 | - for bad in ${to_gen}; do |
| 2647 | - pkgs="${pkgs} language-pack-${bad%%_*}" |
| 2648 | - done |
| 2649 | - pkgs=${pkgs# } |
| 2650 | + local pkgs="" |
| 2651 | + for bad in ${to_gen}; do |
| 2652 | + pkgs="${pkgs} language-pack-${bad%%_*}" |
| 2653 | + done |
| 2654 | + pkgs=${pkgs# } |
| 2655 | |
| 2656 | - if [ -n "${pkgs}" ]; then |
| 2657 | - printf " sudo apt-get install ${pkgs# }\n" |
| 2658 | - printf " or\n" |
| 2659 | - printf " sudo locale-gen ${to_gen# }\n" |
| 2660 | - printf "\n" |
| 2661 | - fi |
| 2662 | - for bad in ${invalid}; do |
| 2663 | - printf "WARNING: '${bad}' is an invalid locale\n" |
| 2664 | - done |
| 2665 | + if [ -n "${pkgs}" ]; then |
| 2666 | + printf " sudo apt-get install ${pkgs# }\n" |
| 2667 | + printf " or\n" |
| 2668 | + printf " sudo locale-gen ${to_gen# }\n" |
| 2669 | + printf "\n" |
| 2670 | + fi |
| 2671 | + for bad in ${invalid}; do |
| 2672 | + printf "WARNING: '${bad}' is an invalid locale\n" |
| 2673 | + done |
| 2674 | |
| 2675 | - printf "To see all available language packs, run:\n" |
| 2676 | - printf " apt-cache search \"^language-pack-[a-z][a-z]$\"\n" |
| 2677 | - printf "To disable this message for all users, run:\n" |
| 2678 | - printf " sudo touch /var/lib/cloud/instance/locale-check.skip\n" |
| 2679 | - printf "_____________________________________________________________________\n\n" |
| 2680 | + printf "To see all available language packs, run:\n" |
| 2681 | + printf " apt-cache search \"^language-pack-[a-z][a-z]$\"\n" |
| 2682 | + printf "To disable this message for all users, run:\n" |
| 2683 | + printf " sudo touch /var/lib/cloud/instance/locale-check.skip\n" |
| 2684 | + printf "_____________________________________________________________________\n\n" |
| 2685 | |
| 2686 | - # only show the message once |
| 2687 | - : > ~/.cloud-locale-test.skip 2>/dev/null || : |
| 2688 | + # only show the message once |
| 2689 | + : > ~/.cloud-locale-test.skip 2>/dev/null || : |
| 2690 | } |
| 2691 | |
| 2692 | [ -f ~/.cloud-locale-test.skip -o -f /var/lib/cloud/instance/locale-check.skip ] || |
| 2693 | - locale 2>&1 | locale_warn |
| 2694 | + locale 2>&1 | locale_warn |
| 2695 | |
| 2696 | unset locale_warn |
| 2697 | -# vi: ts=4 noexpandtab |
| 2698 | +# vi: ts=4 expandtab |
| 2699 | diff --git a/tools/Z99-cloudinit-warnings.sh b/tools/Z99-cloudinit-warnings.sh |
| 2700 | new file mode 100644 |
| 2701 | index 0000000..b237786 |
| 2702 | --- /dev/null |
| 2703 | +++ b/tools/Z99-cloudinit-warnings.sh |
| 2704 | @@ -0,0 +1,30 @@ |
| 2705 | +#!/bin/sh |
| 2706 | +# This file is part of cloud-init. See LICENSE file for license information. |
| 2707 | + |
| 2708 | +# Purpose: show user warnings on login. |
| 2709 | + |
| 2710 | +cloud_init_warnings() { |
| 2711 | + local skipf="" warning="" idir="/var/lib/cloud/instance" n=0 |
| 2712 | + local warndir="$idir/warnings" |
| 2713 | + local ufile="$HOME/.cloud-warnings.skip" sfile="$warndir/.skip" |
| 2714 | + [ -d "$warndir" ] || return 0 |
| 2715 | + [ ! -f "$ufile" ] || return 0 |
| 2716 | +    [ ! -f "$sfile" ] || return 0
| 2717 | + |
| 2718 | + for warning in "$warndir"/*; do |
| 2719 | + [ -f "$warning" ] || continue |
| 2720 | + cat "$warning" |
| 2721 | + n=$((n+1)) |
| 2722 | + done |
| 2723 | + [ $n -eq 0 ] && return 0 |
| 2724 | + echo "" |
| 2725 | + echo "Disable the warnings above by:" |
| 2726 | + echo " touch $ufile" |
| 2727 | + echo "or" |
| 2728 | + echo " touch $sfile" |
| 2729 | +} |
| 2730 | + |
| 2731 | +cloud_init_warnings 1>&2 |
| 2732 | +unset cloud_init_warnings |
| 2733 | + |
| 2734 | +# vi: syntax=sh ts=4 expandtab |
| 2735 | diff --git a/tools/ds-identify b/tools/ds-identify |
| 2736 | new file mode 100755 |
| 2737 | index 0000000..e138d78 |
| 2738 | --- /dev/null |
| 2739 | +++ b/tools/ds-identify |
| 2740 | @@ -0,0 +1,1252 @@ |
| 2741 | +#!/bin/sh |
| 2742 | +# |
| 2743 | +# ds-identify is configured via /etc/cloud/ds-identify.cfg |
| 2744 | +# or on the kernel command line. It takes primarily 2 inputs: |
| 2745 | +# datasource: can specify the datasource that should be used. |
| 2746 | +# kernel command line option: ci.datasource=<dsname> |
| 2747 | +# |
| 2748 | +# policy: a string that indicates how ds-identify should operate. |
| 2749 | +# kernel command line option: ci.di.policy=<policy> |
| 2750 | +# The format is: |
| 2751 | +# <mode>,found=value,maybe=value,notfound=value |
| 2752 | +# default setting is: |
| 2753 | +#       search,found=all,maybe=all,notfound=disabled
| 2754 | +# |
| 2755 | +# Mode: |
| 2756 | +# disabled: disable cloud-init |
| 2757 | +# enabled: enable cloud-init. |
| 2758 | +# ds-identify writes no config and just exits success. |
| 2759 | +# the caller (cloud-init-generator) then enables cloud-init to |
| 2760 | +# run just without any aid from ds-identify. |
| 2761 | +# search: determine which source or sources should be used |
| 2762 | +# and write the result (datasource_list) to |
| 2763 | +# /run/cloud-init/cloud.cfg |
| 2764 | +# report: basically 'dry run' for search. results are still written |
| 2765 | +# to the file, but are namespaced under the top level key |
| 2766 | +# 'di_report' Thus cloud-init is not affected, but can still |
| 2767 | +#            'di_report'. Thus cloud-init is not affected, but can still
| 2768 | +# |
| 2769 | +# found,maybe,notfound: |
| 2770 | +# found: (default=all) |
| 2771 | +#       first: use the first found; do no further checking
| 2772 | +# all: enable all DS_FOUND |
| 2773 | +# |
| 2774 | +# maybe: (default=all) |
| 2775 | +# if nothing returned 'found', then how to handle maybe. |
| 2776 | +# no network sources are allowed to return 'maybe'. |
| 2777 | +# all: enable all DS_MAYBE |
| 2778 | +# none: ignore any DS_MAYBE |
| 2779 | +# |
| 2780 | +# notfound: (default=disabled) |
| 2781 | +# disabled: disable cloud-init |
| 2782 | +# enabled: enable cloud-init |
| 2783 | +# |
| 2784 | +# ci.datasource.ec2.strict_id: (true|false|warn[,0-9]) |
| 2785 | +# if ec2 datasource does not strictly match, |
| 2786 | +# return not_found if true |
| 2787 | +# return maybe if false or warn*. |
| 2788 | +# |
| 2789 | + |
| 2790 | +set -u |
| 2791 | +set -f |
| 2792 | +UNAVAILABLE="unavailable" |
| 2793 | +CR=" |
| 2794 | +" |
| 2795 | +ERROR="error" |
| 2796 | +DI_ENABLED="enabled" |
| 2797 | +DI_DISABLED="disabled" |
| 2798 | + |
| 2799 | +DI_DEBUG_LEVEL="${DEBUG_LEVEL:-1}" |
| 2800 | + |
| 2801 | +PATH_ROOT=${PATH_ROOT:-""} |
| 2802 | +PATH_RUN=${PATH_RUN:-"${PATH_ROOT}/run"} |
| 2803 | +PATH_SYS_CLASS_DMI_ID=${PATH_SYS_CLASS_DMI_ID:-${PATH_ROOT}/sys/class/dmi/id} |
| 2804 | +PATH_SYS_HYPERVISOR=${PATH_SYS_HYPERVISOR:-${PATH_ROOT}/sys/hypervisor} |
| 2805 | +PATH_SYS_CLASS_BLOCK=${PATH_SYS_CLASS_BLOCK:-${PATH_ROOT}/sys/class/block} |
| 2806 | +PATH_DEV_DISK="${PATH_DEV_DISK:-${PATH_ROOT}/dev/disk}" |
| 2807 | +PATH_VAR_LIB_CLOUD="${PATH_VAR_LIB_CLOUD:-${PATH_ROOT}/var/lib/cloud}" |
| 2808 | +PATH_DI_CONFIG="${PATH_DI_CONFIG:-${PATH_ROOT}/etc/cloud/ds-identify.cfg}" |
| 2809 | +PATH_PROC_CMDLINE="${PATH_PROC_CMDLINE:-${PATH_ROOT}/proc/cmdline}" |
| 2810 | +PATH_PROC_1_CMDLINE="${PATH_PROC_1_CMDLINE:-${PATH_ROOT}/proc/1/cmdline}" |
| 2811 | +PATH_PROC_1_ENVIRON="${PATH_PROC_1_ENVIRON:-${PATH_ROOT}/proc/1/environ}" |
| 2812 | +PATH_PROC_UPTIME=${PATH_PROC_UPTIME:-${PATH_ROOT}/proc/uptime} |
| 2813 | +PATH_CLOUD_CONFD="${PATH_CLOUD_CONFD:-${PATH_ROOT}/etc/cloud}" |
| 2814 | +PATH_RUN_CI="${PATH_RUN_CI:-${PATH_RUN}/cloud-init}" |
| 2815 | +PATH_RUN_CI_CFG=${PATH_RUN_CI_CFG:-${PATH_RUN_CI}/cloud.cfg} |
| 2816 | +PATH_RUN_DI_RESULT=${PATH_RUN_DI_RESULT:-${PATH_RUN_CI}/.ds-identify.result} |
| 2817 | + |
| 2818 | +DI_LOG="${DI_LOG:-${PATH_RUN_CI}/ds-identify.log}" |
| 2819 | +_DI_LOGGED="" |
| 2820 | + |
| 2821 | +# set DI_MAIN='noop' in environment to source this file with no main called. |
| 2822 | +DI_MAIN=${DI_MAIN:-main} |
| 2823 | + |
| 2824 | +DI_DEFAULT_POLICY="search,found=all,maybe=all,notfound=${DI_DISABLED}" |
| 2825 | +DI_DEFAULT_POLICY_NO_DMI="search,found=all,maybe=all,notfound=${DI_ENABLED}" |
| 2826 | +DI_DMI_PRODUCT_NAME="" |
| 2827 | +DI_DMI_SYS_VENDOR="" |
| 2828 | +DI_DMI_PRODUCT_SERIAL="" |
| 2829 | +DI_DMI_PRODUCT_UUID="" |
| 2830 | +DI_FS_LABELS="" |
| 2831 | +DI_KERNEL_CMDLINE="" |
| 2832 | +DI_VIRT="" |
| 2833 | +DI_PID_1_PLATFORM="" |
| 2834 | + |
| 2835 | +DI_UNAME_KERNEL_NAME="" |
| 2836 | +DI_UNAME_KERNEL_RELEASE="" |
| 2837 | +DI_UNAME_KERNEL_VERSION="" |
| 2838 | +DI_UNAME_MACHINE="" |
| 2839 | +DI_UNAME_NODENAME="" |
| 2840 | +DI_UNAME_OPERATING_SYSTEM="" |
| 2841 | +DI_UNAME_CMD_OUT="" |
| 2842 | + |
| 2843 | +DS_FOUND=0 |
| 2844 | +DS_NOT_FOUND=1 |
| 2845 | +DS_MAYBE=2 |
| 2846 | + |
| 2847 | +DI_DSNAME="" |
| 2848 | +# this has to match the builtin list in cloud-init; it is what will
| 2849 | +# be searched if there is no setting found in config.
| 2850 | +DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ |
| 2851 | +CloudSigma CloudStack DigitalOcean Ec2 OpenNebula OpenStack OVF SmartOS" |
| 2852 | +DI_DSLIST="" |
| 2853 | +DI_MODE="" |
| 2854 | +DI_ON_FOUND="" |
| 2855 | +DI_ON_MAYBE="" |
| 2856 | +DI_ON_NOTFOUND="" |
| 2857 | + |
| 2858 | +DI_EC2_STRICT_ID_DEFAULT="true" |
| 2859 | + |
| 2860 | +error() { |
| 2861 | + set -- "ERROR:" "$@"; |
| 2862 | + debug 0 "$@" |
| 2863 | + stderr "$@" |
| 2864 | +} |
| 2865 | +warn() { |
| 2866 | + set -- "WARN:" "$@" |
| 2867 | + debug 0 "$@" |
| 2868 | + stderr "$@" |
| 2869 | +} |
| 2870 | + |
| 2871 | +stderr() { echo "$@" 1>&2; } |
| 2872 | + |
| 2873 | +debug() { |
| 2874 | + local lvl="$1" |
| 2875 | + shift |
| 2876 | + [ "$lvl" -gt "${DI_DEBUG_LEVEL}" ] && return |
| 2877 | + |
| 2878 | + if [ "$_DI_LOGGED" != "$DI_LOG" ]; then |
| 2879 | + # first time here, open file descriptor for append |
| 2880 | + case "$DI_LOG" in |
| 2881 | + stderr) :;; |
| 2882 | + ?*/*) |
| 2883 | + if [ ! -d "${DI_LOG%/*}" ]; then |
| 2884 | + mkdir -p "${DI_LOG%/*}" || { |
| 2885 | + stderr "ERROR:" "cannot write to $DI_LOG" |
| 2886 | + DI_LOG="stderr" |
| 2887 | + } |
| 2888 | + fi |
| 2889 | + esac |
| 2890 | + if [ "$DI_LOG" = "stderr" ]; then |
| 2891 | + exec 3>&2 |
| 2892 | + else |
| 2893 | + ( exec 3>>"$DI_LOG" ) && exec 3>>"$DI_LOG" || { |
| 2894 | + stderr "ERROR: failed writing to $DI_LOG. logging to stderr."; |
| 2895 | + exec 3>&2 |
| 2896 | + DI_LOG="stderr" |
| 2897 | + } |
| 2898 | + fi |
| 2899 | + _DI_LOGGED="$DI_LOG" |
| 2900 | + fi |
| 2901 | + echo "$@" 1>&3 |
| 2902 | +} |
| 2903 | + |
| 2904 | +get_dmi_field() { |
| 2905 | + local path="${PATH_SYS_CLASS_DMI_ID}/$1" |
| 2906 | + if [ ! -f "$path" ] || [ ! -r "$path" ]; then |
| 2907 | + _RET="$UNAVAILABLE" |
| 2908 | + return |
| 2909 | + fi |
| 2910 | + read _RET < "${path}" || _RET="$ERROR" |
| 2911 | +} |
| 2912 | + |
| 2913 | +block_dev_with_label() { |
| 2914 | + local p="${PATH_DEV_DISK}/by-label/$1" |
| 2915 | + [ -b "$p" ] || return 1 |
| 2916 | + _RET=$p |
| 2917 | + return 0 |
| 2918 | +} |
| 2919 | + |
| 2920 | +read_fs_labels() { |
| 2921 | + cached "${DI_FS_LABELS}" && return 0 |
| 2922 | + # do not rely on links in /dev/disk which might not be present yet. |
| 2923 | + # note that older blkid versions do not report DEVNAME in 'export' output. |
| 2924 | + local out="" ret=0 oifs="$IFS" line="" delim="," |
| 2925 | + local labels="" |
| 2926 | + if is_container; then |
| 2927 | +        # blkid, in a container (or at least currently in lxd), does
| 2928 | +        # not provide useful information.
| 2929 | + DI_FS_LABELS="$UNAVAILABLE:container" |
| 2930 | + else |
| 2931 | + out=$(blkid -c /dev/null -o export) || { |
| 2932 | + ret=$? |
| 2933 | + error "failed running [$ret]: blkid -c /dev/null -o export" |
| 2934 | + return $ret |
| 2935 | + } |
| 2936 | + IFS="$CR" |
| 2937 | + set -- $out |
| 2938 | + IFS="$oifs" |
| 2939 | + for line in "$@"; do |
| 2940 | + case "${line}" in |
| 2941 | + LABEL=*) labels="${labels}${line#LABEL=}${delim}";; |
| 2942 | + esac |
| 2943 | + done |
| 2944 | + DI_FS_LABELS="${labels%${delim}}" |
| 2945 | + fi |
| 2946 | +} |
| 2947 | + |
| 2948 | +cached() { |
| 2949 | + [ -n "$1" ] && _RET="$1" && return || return 1 |
| 2950 | +} |
| 2951 | + |
| 2952 | + |
| 2953 | +has_cdrom() { |
| 2954 | + [ -e "${PATH_ROOT}/dev/cdrom" ] |
| 2955 | +} |
| 2956 | + |
| 2957 | +read_virt() { |
| 2958 | + cached "$DI_VIRT" && return 0 |
| 2959 | + local out="" r="" virt="${UNAVAILABLE}" |
| 2960 | + if [ -d /run/systemd ]; then |
| 2961 | + out=$(systemd-detect-virt 2>&1) |
| 2962 | + r=$? |
| 2963 | + if [ $r -eq 0 ] || { [ $r -ne 0 ] && [ "$out" = "none" ]; }; then |
| 2964 | + virt="$out" |
| 2965 | + fi |
| 2966 | + fi |
| 2967 | + DI_VIRT=$virt |
| 2968 | +} |
| 2969 | + |
| 2970 | +is_container() { |
| 2971 | + case "${DI_VIRT}" in |
| 2972 | + lxc|lxc-libvirt|systemd-nspawn|docker|rkt) return 0;; |
| 2973 | + *) return 1;; |
| 2974 | + esac |
| 2975 | +} |
| 2976 | + |
| 2977 | +read_kernel_cmdline() { |
| 2978 | + cached "${DI_KERNEL_CMDLINE}" && return |
| 2979 | + local cmdline="" fpath="${PATH_PROC_CMDLINE}" |
| 2980 | + if is_container; then |
| 2981 | + local p1path="${PATH_PROC_1_CMDLINE}" x="" |
| 2982 | + cmdline="${UNAVAILABLE}:container" |
| 2983 | + if [ -f "$p1path" ] && x=$(tr '\0' ' ' < "$p1path"); then |
| 2984 | + cmdline=$x |
| 2985 | + fi |
| 2986 | + elif [ -f "$fpath" ]; then |
| 2987 | + read cmdline <"$fpath" |
| 2988 | + else |
| 2989 | + cmdline="${UNAVAILABLE}:no-cmdline" |
| 2990 | + fi |
| 2991 | + DI_KERNEL_CMDLINE="$cmdline" |
| 2992 | +} |
| 2993 | + |
| 2994 | +read_dmi_sys_vendor() { |
| 2995 | + cached "${DI_DMI_SYS_VENDOR}" && return |
| 2996 | + get_dmi_field sys_vendor |
| 2997 | + DI_DMI_SYS_VENDOR="$_RET" |
| 2998 | +} |
| 2999 | + |
| 3000 | +read_dmi_product_name() { |
| 3001 | + cached "${DI_DMI_PRODUCT_NAME}" && return |
| 3002 | + get_dmi_field product_name |
| 3003 | + DI_DMI_PRODUCT_NAME="$_RET" |
| 3004 | +} |
| 3005 | + |
| 3006 | +read_dmi_product_uuid() { |
| 3007 | + cached "${DI_DMI_PRODUCT_UUID}" && return |
| 3008 | + get_dmi_field product_uuid |
| 3009 | + DI_DMI_PRODUCT_UUID="$_RET" |
| 3010 | +} |
| 3011 | + |
| 3012 | +read_dmi_product_serial() { |
| 3013 | + cached "${DI_DMI_PRODUCT_SERIAL}" && return |
| 3014 | + get_dmi_field product_serial |
| 3015 | + DI_DMI_PRODUCT_SERIAL="$_RET" |
| 3016 | +} |
| 3017 | + |
| 3018 | +read_uname_info() { |
| 3019 | + # run uname, and parse output. |
| 3020 | +    # uname is tricky to parse as it always outputs in a given order
| 3021 | + # independent of option order. kernel-version is known to have spaces. |
| 3022 | + # 1 -s kernel-name |
| 3023 | + # 2 -n nodename |
| 3024 | + # 3 -r kernel-release |
| 3025 | + # 4.. -v kernel-version(whitespace) |
| 3026 | + # N-2 -m machine |
| 3027 | + # N-1 -o operating-system |
| 3028 | + cached "${DI_UNAME_CMD_OUT}" && return |
| 3029 | + local out="${1:-}" ret=0 buf="" |
| 3030 | + if [ -z "$out" ]; then |
| 3031 | + out=$(uname -snrvmo) || { |
| 3032 | + ret=$? |
| 3033 | + error "failed reading uname with 'uname -snrvmo'" |
| 3034 | + return $ret |
| 3035 | + } |
| 3036 | + fi |
| 3037 | + set -- $out |
| 3038 | + DI_UNAME_KERNEL_NAME="$1" |
| 3039 | + DI_UNAME_NODENAME="$2" |
| 3040 | + DI_UNAME_KERNEL_RELEASE="$3" |
| 3041 | + shift 3 |
| 3042 | + while [ $# -gt 2 ]; do |
| 3043 | + buf="$buf $1" |
| 3044 | + shift |
| 3045 | + done |
| 3046 | + DI_UNAME_KERNEL_VERSION="${buf# }" |
| 3047 | + DI_UNAME_MACHINE="$1" |
| 3048 | + DI_UNAME_OPERATING_SYSTEM="$2" |
| 3049 | + DI_UNAME_CMD_OUT="$out" |
| 3050 | + return 0 |
| 3051 | +} |
| 3052 | + |
| 3053 | +parse_yaml_array() { |
| 3054 | + # parse a yaml single line array value ([1,2,3], not key: [1,2,3]). |
| 3055 | + # supported with or without leading and closing brackets |
| 3056 | + # ['1'] or [1] |
| 3057 | + # '1', '2' |
| 3058 | + local val="$1" oifs="$IFS" ret="" tok="" |
| 3059 | + val=${val#[} |
| 3060 | + val=${val%]} |
| 3061 | + IFS=","; set -- $val; IFS="$oifs" |
| 3062 | + for tok in "$@"; do |
| 3063 | + trim "$tok" |
| 3064 | + unquote "$_RET" |
| 3065 | + ret="${ret} $_RET" |
| 3066 | + done |
| 3067 | + _RET="${ret# }" |
| 3068 | +} |
| 3069 | + |
| 3070 | +read_datasource_list() { |
| 3071 | + cached "$DI_DSLIST" && return |
| 3072 | + local dslist="" |
| 3073 | + # if DI_DSNAME is set directly, then avoid parsing config. |
| 3074 | + if [ -n "${DI_DSNAME}" ]; then |
| 3075 | + dslist="${DI_DSNAME}" |
| 3076 | + fi |
| 3077 | + |
| 3078 | + # LP: #1582323. cc:{'datasource_list': ['name']} |
| 3079 | + # more generically cc:<yaml>[end_cc] |
| 3080 | + local cb="]" ob="[" |
| 3081 | + case "$DI_KERNEL_CMDLINE" in |
| 3082 | + *cc:*datasource_list*) |
| 3083 | + t=${DI_KERNEL_CMDLINE##*datasource_list} |
| 3084 | + t=${t%%$cb*} |
| 3085 | + t=${t##*$ob} |
| 3086 | + parse_yaml_array "$t" |
| 3087 | + dslist=${_RET} |
| 3088 | + ;; |
| 3089 | + esac |
| 3090 | + if [ -z "$dslist" ] && check_config datasource_list; then |
| 3091 | + debug 1 "$_RET_fname set datasource_list: $_RET" |
| 3092 | + parse_yaml_array "$_RET" |
| 3093 | + dslist=${_RET} |
| 3094 | + fi |
| 3095 | + if [ -z "$dslist" ]; then |
| 3096 | + dslist=${DI_DSLIST_DEFAULT} |
| 3097 | + debug 1 "no datasource_list found, using default:" $dslist |
| 3098 | + fi |
| 3099 | + DI_DSLIST=$dslist |
| 3100 | + return 0 |
| 3101 | +} |
| 3102 | + |
| 3103 | +read_pid1_platform() { |
| 3104 | + local oifs="$IFS" out="" tok="" key="" val="" platform="${UNAVAILABLE}" |
| 3105 | + cached "${DI_PID_1_PLATFORM}" && return |
| 3106 | + [ -r "${PATH_PROC_1_ENVIRON}" ] || return |
| 3107 | + out=$(tr '\0' '\n' <"${PATH_PROC_1_ENVIRON}") |
| 3108 | + IFS="$CR"; set -- $out; IFS="$oifs" |
| 3109 | + for tok in "$@"; do |
| 3110 | + key=${tok%%=*} |
| 3111 | + [ "$key" != "$tok" ] || continue |
| 3112 | + val=${tok#*=} |
| 3113 | + [ "$key" = "platform" ] && platform="$val" && break |
| 3114 | + done |
| 3115 | + DI_PID_1_PLATFORM="$platform" |
| 3116 | +} |
| 3117 | + |
| 3118 | +dmi_product_name_matches() { |
| 3119 | + is_container && return 1 |
| 3120 | + case "${DI_DMI_PRODUCT_NAME}" in |
| 3121 | + $1) return 0;; |
| 3122 | + esac |
| 3123 | + return 1 |
| 3124 | +} |
| 3125 | + |
| 3126 | +dmi_product_name_is() { |
| 3127 | + is_container && return 1 |
| 3128 | + [ "${DI_DMI_PRODUCT_NAME}" = "$1" ] |
| 3129 | +} |
| 3130 | + |
| 3131 | +dmi_sys_vendor_is() { |
| 3132 | + is_container && return 1 |
| 3133 | + [ "${DI_DMI_SYS_VENDOR}" = "$1" ] |
| 3134 | +} |
| 3135 | + |
| 3136 | +has_fs_with_label() { |
| 3137 | + local label="$1" |
| 3138 | + case ",${DI_FS_LABELS}," in |
| 3139 | + *,$label,*) return 0;; |
| 3140 | + esac |
| 3141 | + return 1 |
| 3142 | +} |
| 3143 | + |
| 3144 | +nocase_equal() { |
| 3145 | + # nocase_equal(a, b) |
| 3146 | +    # return 0 if case-insensitive comparison a.lower() == b.lower()
| 3147 | + # different lengths |
| 3148 | + [ "${#1}" = "${#2}" ] || return 1 |
| 3149 | + # case sensitive equal |
| 3150 | + [ "$1" = "$2" ] && return 0 |
| 3151 | + |
| 3152 | + local delim="-delim-" |
| 3153 | + out=$(echo "$1${delim}$2" | tr A-Z a-z) |
| 3154 | + [ "${out#*${delim}}" = "${out%${delim}*}" ] |
| 3155 | +} |
| 3156 | + |
| 3157 | +check_seed_dir() { |
| 3158 | + # check_seed_dir(name, [required]) |
| 3159 | + # check the seed dir /var/lib/cloud/seed/<name> for 'required' |
| 3160 | + # required defaults to 'meta-data' |
| 3161 | + local name="$1" |
| 3162 | + local dir="${PATH_VAR_LIB_CLOUD}/seed/$name" |
| 3163 | + [ -d "$dir" ] || return 1 |
| 3164 | + shift |
| 3165 | + if [ $# -eq 0 ]; then |
| 3166 | + set -- meta-data |
| 3167 | + fi |
| 3168 | + local f="" |
| 3169 | + for f in "$@"; do |
| 3170 | + [ -f "$dir/$f" ] || return 1 |
| 3171 | + done |
| 3172 | + return 0 |
| 3173 | +} |
| 3174 | + |
| 3175 | +probe_floppy() { |
| 3176 | + cached "${STATE_FLOPPY_PROBED}" && return "${STATE_FLOPPY_PROBED}" |
| 3177 | + local fpath=/dev/floppy |
| 3178 | + |
| 3179 | + [ -b "$fpath" ] || |
| 3180 | + { STATE_FLOPPY_PROBED=1; return 1; } |
| 3181 | + |
| 3182 | + modprobe --use-blacklist floppy >/dev/null 2>&1 || |
| 3183 | + { STATE_FLOPPY_PROBED=1; return 1; } |
| 3184 | + |
| 3185 | + udevadm settle "--exit-if-exists=$fpath" || |
| 3186 | + { STATE_FLOPPY_PROBED=1; return 1; } |
| 3187 | + |
| 3188 | + [ -b "$fpath" ] |
| 3189 | + STATE_FLOPPY_PROBED=$? |
| 3190 | + return "${STATE_FLOPPY_PROBED}" |
| 3191 | +} |
| 3192 | + |
| 3193 | + |
| 3194 | +dscheck_CloudStack() { |
| 3195 | + is_container && return ${DS_NOT_FOUND} |
| 3196 | + dmi_product_name_matches "CloudStack*" && return $DS_FOUND |
| 3197 | + return $DS_NOT_FOUND |
| 3198 | +} |
| 3199 | + |
| 3200 | +dscheck_CloudSigma() { |
| 3201 | + # http://paste.ubuntu.com/23624795/ |
| 3202 | + dmi_product_name_is "CloudSigma" && return $DS_FOUND |
| 3203 | + return $DS_NOT_FOUND |
| 3204 | +} |
| 3205 | + |
| 3206 | +check_config() { |
| 3207 | + # somewhat hackily read config for 'key' in files matching 'files' |
| 3208 | + # currently does not respect any hierarchy. |
| 3209 | + local key="$1" files="" bp="${PATH_CLOUD_CONFD}/cloud.cfg" |
| 3210 | + if [ $# -eq 1 ]; then |
| 3211 | + files="$bp ${bp}.d/*.cfg" |
| 3212 | + else |
| 3213 | + files="$*" |
| 3214 | + fi |
| 3215 | + shift |
| 3216 | +    set +f; set -- $files; set -f;
| 3217 | + if [ "$1" = "$files" -a ! -f "$1" ]; then |
| 3218 | + return 1 |
| 3219 | + fi |
| 3220 | + local fname="" line="" ret="" found=0 found_fn="" |
| 3221 | + for fname in "$@"; do |
| 3222 | + [ -f "$fname" ] || continue |
| 3223 | + while read line; do |
| 3224 | + line=${line%%#*} |
| 3225 | + case "$line" in |
| 3226 | + $key:\ *|$key:) |
| 3227 | + ret=${line#*:}; |
| 3228 | + ret=${ret# }; |
| 3229 | + found=$((found+1)) |
| 3230 | + found_fn="$fname";; |
| 3231 | + esac |
| 3232 | + done <"$fname" |
| 3233 | + done |
| 3234 | + if [ $found -ne 0 ]; then |
| 3235 | + _RET="$ret" |
| 3236 | + _RET_fname="$found_fn" |
| 3237 | + return 0 |
| 3238 | + fi |
| 3239 | + return 1 |
| 3240 | +} |
| 3241 | + |
| 3242 | +dscheck_MAAS() { |
| 3243 | + is_container && return "${DS_NOT_FOUND}" |
| 3244 | + # heuristic check for ephemeral boot environment |
| 3245 | + # for maas that do not set 'ci.dsname=' in the ephemeral environment |
| 3246 | + # these have iscsi root and cloud-config-url on the cmdline. |
| 3247 | + local maasiqn="iqn.2004-05.com.ubuntu:maas" |
| 3248 | + case "${DI_KERNEL_CMDLINE}" in |
| 3249 | + *cloud-config-url=*${maasiqn}*|*${maasiqn}*cloud-config-url=*) |
| 3250 | + return ${DS_FOUND} |
| 3251 | + ;; |
| 3252 | + esac |
| 3253 | + |
| 3254 | + # check config files written by maas for installed system. |
| 3255 | + local confd="${PATH_CLOUD_CONFD}" |
| 3256 | + local fnmatch="$confd/*maas*.cfg $confd/*kernel_cmdline*.cfg" |
| 3257 | + if check_config "MAAS" "$fnmatch"; then |
| 3258 | + return "${DS_FOUND}" |
| 3259 | + fi |
| 3260 | + return ${DS_NOT_FOUND} |
| 3261 | +} |
| 3262 | + |
| 3263 | +dscheck_NoCloud() { |
| 3264 | + local fslabel="cidata" d="" |
| 3265 | + case " ${DI_KERNEL_CMDLINE} " in |
| 3266 | + *\ ds=nocloud*) return ${DS_FOUND};; |
| 3267 | + esac |
| 3268 | + for d in nocloud nocloud-net; do |
| 3269 | + check_seed_dir "$d" meta-data user-data && return ${DS_FOUND} |
| 3270 | + done |
| 3271 | + if has_fs_with_label "${fslabel}"; then |
| 3272 | + return ${DS_FOUND} |
| 3273 | + fi |
| 3274 | + return ${DS_NOT_FOUND} |
| 3275 | +} |
| 3276 | + |
| 3277 | +check_configdrive_v2() { |
| 3278 | + if has_fs_with_label "config-2"; then |
| 3279 | + return ${DS_FOUND} |
| 3280 | + fi |
| 3281 | + return ${DS_NOT_FOUND} |
| 3282 | +} |
| 3283 | + |
| 3284 | +check_configdrive_v1() { |
| 3285 | + # FIXME: this has to check any file system that is vfat... |
| 3286 | + # for now, just return not found. |
| 3287 | + return ${DS_NOT_FOUND} |
| 3288 | +} |
| 3289 | + |
| 3290 | +dscheck_ConfigDrive() { |
| 3291 | + local ret="" |
| 3292 | + check_configdrive_v2 |
| 3293 | + ret=$? |
| 3294 | + [ $DS_FOUND -eq $ret ] && return $ret |
| 3295 | + |
| 3296 | + check_configdrive_v1 |
| 3297 | +} |
| 3298 | + |
| 3299 | +dscheck_DigitalOcean() { |
| 3300 | + dmi_sys_vendor_is DigitalOcean && return ${DS_FOUND} |
| 3301 | + return ${DS_NOT_FOUND} |
| 3302 | +} |
| 3303 | + |
| 3304 | +dscheck_OpenNebula() { |
| 3305 | + check_seed_dir opennebula && return ${DS_FOUND} |
| 3306 | + has_fs_with_label "CONTEXT" && return ${DS_FOUND} |
| 3307 | + return ${DS_NOT_FOUND} |
| 3308 | +} |
| 3309 | + |
| 3310 | +ovf_vmware_guest_customization() { |
| 3311 | + # vmware guest customization |
| 3312 | + |
| 3313 | + # virt provider must be vmware |
| 3314 | + [ "${DI_VIRT}" = "vmware" ] || return 1 |
| 3315 | + |
| 3316 | + # we have to have the plugin to do vmware customization |
| 3317 | + local found="" pkg="" pre="/usr/lib" |
| 3318 | + for pkg in vmware-tools open-vm-tools; do |
| 3319 | + if [ -f "$pre/$pkg/plugins/vmsvc/libdeployPkgPlugin.so" ]; then |
| 3320 | + found="$pkg"; break; |
| 3321 | + fi |
| 3322 | + done |
| 3323 | + [ -n "$found" ] || return 1 |
| 3324 | + |
| 3325 | + # vmware customization is disabled by default |
| 3326 | +    # (disable_vmware_customization=true). If it is set to false, then
| 3327 | +    # the user has requested customization.
| 3328 | + local key="disable_vmware_customization" |
| 3329 | + local match="" bp="${PATH_CLOUD_CONFD}/cloud.cfg" |
| 3330 | + match="$bp $bp.d/*[Oo][Vv][Ff]*.cfg" |
| 3331 | + if check_config "$key" "$match"; then |
| 3332 | + debug 2 "${_RET_fname} set $key to $_RET" |
| 3333 | + case "$_RET" in |
| 3334 | + 0|false|False) return 0;; |
| 3335 | + *) return 1;; |
| 3336 | + esac |
| 3337 | + fi |
| 3338 | + |
| 3339 | + return 1 |
| 3340 | +} |
| 3341 | + |
| 3342 | +dscheck_OVF() { |
| 3343 | + local p="" |
| 3344 | + check_seed_dir ovf ovf-env.xml && return "${DS_FOUND}" |
| 3345 | + |
| 3346 | + if ovf_vmware_guest_customization; then |
| 3347 | + return ${DS_FOUND} |
| 3348 | + fi |
| 3349 | + |
| 3350 | + has_cdrom || return ${DS_NOT_FOUND} |
| 3351 | + |
| 3352 | + # FIXME: currently just return maybe if there is a cdrom |
| 3353 | + # ovf iso9660 transport does not specify an fs label. |
| 3354 | + # better would be to check if |
| 3355 | + return ${DS_MAYBE} |
| 3356 | +} |
| 3357 | + |
| 3358 | +dscheck_Azure() { |
| 3359 | + # http://paste.ubuntu.com/23630873/ |
| 3360 | + # $ grep /sr0 /run/blkid/blkid.tab |
| 3361 | + # <device DEVNO="0x0b00" TIME="1481737655.543841" |
| 3362 | + # UUID="112D211272645f72" LABEL="rd_rdfe_stable.161212-1209" |
| 3363 | + # TYPE="udf">/dev/sr0</device> |
| 3364 | + # |
| 3365 | + check_seed_dir azure ovf-env.xml && return ${DS_FOUND} |
| 3366 | + |
| 3367 | + [ "${DI_VIRT}" = "microsoft" ] || return ${DS_NOT_FOUND} |
| 3368 | + |
| 3369 | + has_fs_with_label "rd_rdfe_*" && return ${DS_FOUND} |
| 3370 | + |
| 3371 | + return ${DS_NOT_FOUND} |
| 3372 | +} |
| 3373 | + |
| 3374 | +dscheck_Bigstep() { |
| 3375 | +    # bigstep is activated by presence of the seed file 'url'
| 3376 | + check_seed_dir "bigstep" url && return ${DS_FOUND} |
| 3377 | + return ${DS_NOT_FOUND} |
| 3378 | +} |
| 3379 | + |
| 3380 | +ec2_read_strict_setting() { |
| 3381 | + # the 'strict_id' setting for Ec2 controls behavior when |
| 3382 | + # the platform does not identify itself directly as Ec2. |
| 3383 | + # order of precedence is: |
| 3384 | + # 1. builtin setting here cloud-init/ds-identify builtin |
| 3385 | + # 2. ds-identify config |
| 3386 | + # 3. system config (/etc/cloud/cloud.cfg.d/*Ec2*.cfg) |
| 3387 | + # 4. kernel command line (undocumented) |
| 3388 | + # 5. user-data or vendor-data (not available here) |
| 3389 | + local default="$1" key="ci.datasource.ec2.strict_id" val="" |
| 3390 | + |
| 3391 | + # 4. kernel command line |
| 3392 | + case " ${DI_KERNEL_CMDLINE} " in |
| 3393 | + *\ $key=*\ ) |
| 3394 | + val=${DI_KERNEL_CMDLINE##*$key=} |
| 3395 | + val=${val%% *}; |
| 3396 | + _RET=${val:-$default} |
| 3397 | + return 0 |
| 3398 | + esac |
| 3399 | + |
| 3400 | + # 3. look for the key 'strict_id' (datasource/Ec2/strict_id) |
| 3401 | + local match="" bp="${PATH_CLOUD_CONFD}/cloud.cfg" |
| 3402 | + match="$bp $bp.d/*[Ee][Cc]2*.cfg" |
| 3403 | + if check_config strict_id "$match"; then |
| 3404 | + debug 2 "${_RET_fname} set strict_id to $_RET" |
| 3405 | + return 0 |
| 3406 | + fi |
| 3407 | + |
| 3408 | + # 2. ds-identify config (datasource.ec2.strict) |
| 3409 | + local config="${PATH_DI_CONFIG}" |
| 3410 | + if [ -f "$config" ]; then |
| 3411 | + if _read_config "$key" < "$config"; then |
| 3412 | + _RET=${_RET:-$default} |
| 3413 | + return 0 |
| 3414 | + fi |
| 3415 | + fi |
| 3416 | + |
| 3417 | + # 1. Default |
| 3418 | + _RET=$default |
| 3419 | + return 0 |
| 3420 | +} |
| 3421 | + |
| 3422 | +ec2_identify_platform() { |
| 3423 | + local default="$1" |
| 3424 | + local serial="${DI_DMI_PRODUCT_SERIAL}" |
| 3425 | + |
| 3426 | + # brightbox https://bugs.launchpad.net/cloud-init/+bug/1661693 |
| 3427 | + case "$serial" in |
| 3428 | + *brightbox.com) _RET="Brightbox"; return 0;; |
| 3429 | + esac |
| 3430 | + |
| 3431 | + # AWS http://docs.aws.amazon.com/AWSEC2/ |
| 3432 | + # latest/UserGuide/identify_ec2_instances.html |
| 3433 | + local uuid="" hvuuid="$PATH_ROOT/sys/hypervisor/uuid" |
| 3434 | + # if the (basically) xen specific /sys/hypervisor/uuid starts with 'ec2' |
| 3435 | + if [ -r "$hvuuid" ] && read uuid < "$hvuuid" && |
| 3436 | + [ "${uuid#ec2}" != "$uuid" ]; then |
| 3437 | + _RET="AWS" |
| 3438 | + return 0 |
| 3439 | + fi |
| 3440 | + |
| 3441 | +    # product uuid and product serial start with 'ec2' (case insensitive)
| 3442 | + local uuid="${DI_DMI_PRODUCT_UUID}" |
| 3443 | + case "$uuid:$serial" in |
| 3444 | + [Ee][Cc]2*:[Ee][Cc]2) |
| 3445 | +            # both start with ec2, now check for case-insensitive equality
| 3446 | + nocase_equal "$uuid" "$serial" && |
| 3447 | + { _RET="AWS"; return 0; };; |
| 3448 | + esac |
| 3449 | + |
| 3450 | + _RET="$default" |
| 3451 | + return 0; |
| 3452 | +} |
| 3453 | + |
| 3454 | +dscheck_Ec2() { |
| 3455 | + check_seed_dir "ec2" meta-data user-data && return ${DS_FOUND} |
| 3456 | + is_container && return ${DS_NOT_FOUND} |
| 3457 | + |
| 3458 | + local unknown="Unknown" platform="" |
| 3459 | + if ec2_identify_platform "$unknown"; then |
| 3460 | + platform="$_RET" |
| 3461 | + else |
| 3462 | + warn "Failed to identify ec2 platform. Using '$unknown'." |
| 3463 | + platform=$unknown |
| 3464 | + fi |
| 3465 | + |
| 3466 | + debug 1 "ec2 platform is '$platform'." |
| 3467 | + if [ "$platform" != "$unknown" ]; then |
| 3468 | + return $DS_FOUND |
| 3469 | + fi |
| 3470 | + |
| 3471 | + local default="${DI_EC2_STRICT_ID_DEFAULT}" |
| 3472 | + if ec2_read_strict_setting "$default"; then |
| 3473 | + strict="$_RET" |
| 3474 | + else |
| 3475 | + debug 1 "ec2_read_strict returned non-zero: $?. using '$default'." |
| 3476 | + strict="$default" |
| 3477 | + fi |
| 3478 | + |
| 3479 | + local key="datasource/Ec2/strict_id" |
| 3480 | + case "$strict" in |
| 3481 | + true|false|warn|warn,[0-9]*) :;; |
| 3482 | + *) |
| 3483 | + warn "$key was set to invalid '$strict'. using '$default'" |
| 3484 | + strict="$default";; |
| 3485 | + esac |
| 3486 | + |
| 3487 | + _RET_excfg="datasource: {Ec2: {strict_id: \"$strict\"}}" |
| 3488 | + if [ "$strict" = "true" ]; then |
| 3489 | + return $DS_NOT_FOUND |
| 3490 | + else |
| 3491 | + return $DS_MAYBE |
| 3492 | + fi |
| 3493 | +} |
| 3494 | + |
| 3495 | +dscheck_GCE() { |
| 3496 | + if dmi_product_name_is "Google Compute Engine"; then |
| 3497 | + return ${DS_FOUND} |
| 3498 | + fi |
| 3499 | + return ${DS_NOT_FOUND} |
| 3500 | +} |
| 3501 | + |
| 3502 | +dscheck_OpenStack() { |
| 3503 | + # the openstack metadata http service |
| 3504 | + |
| 3505 | + # if there is a config drive, then do not check metadata |
| 3506 | + # FIXME: if config drive not in the search list, then we should not |
| 3507 | + # do this check. |
| 3508 | + check_configdrive_v2 |
| 3509 | + if [ $? -eq ${DS_FOUND} ]; then |
| 3510 | + return ${DS_NOT_FOUND} |
| 3511 | + fi |
| 3512 | + if dmi_product_name_is "OpenStack Nova"; then |
| 3513 | + return ${DS_FOUND} |
| 3514 | + fi |
| 3515 | + if [ "${DI_PID_1_PLATFORM}" = "OpenStack Nova" ]; then |
| 3516 | + return ${DS_FOUND} |
| 3517 | + fi |
| 3518 | + |
| 3519 | + return ${DS_NOT_FOUND} |
| 3520 | +} |
| 3521 | + |
| 3522 | +dscheck_AliYun() { |
| 3523 | + # aliyun is not enabled by default (LP: #1638931) |
| 3524 | + # so if we are here, it is because the datasource_list was |
| 3525 | + # set to include it. Thus, 'maybe'. |
| 3526 | + return $DS_MAYBE |
| 3527 | +} |
| 3528 | + |
| 3529 | +dscheck_AltCloud() { |
| 3530 | + # ctype: either the dmi product name, or contents of |
| 3531 | + # /etc/sysconfig/cloud-info |
| 3532 | + # if ctype == "vsphere" |
| 3533 | + # device = device with label 'CDROM' |
| 3534 | + # elif ctype == "rhev" |
| 3535 | + # device = /dev/floppy |
| 3536 | + # then, filesystem on that device must have |
| 3537 | + # user-data.txt or deltacloud-user-data.txt |
| 3538 | + local ctype="" dev="" |
| 3539 | + local match_rhev="[Rr][Hh][Ee][Vv]" |
| 3540 | + local match_vsphere="[Vv][Ss][Pp][Hh][Ee][Rr][Ee]" |
| 3541 | + local cinfo="${PATH_ROOT}/etc/sysconfig/cloud-info" |
| 3542 | + if [ -f "$cinfo" ]; then |
| 3543 | + read ctype < "$cinfo" |
| 3544 | + else |
| 3545 | + ctype="${DI_DMI_PRODUCT_NAME}" |
| 3546 | + fi |
| 3547 | + case "$ctype" in |
| 3548 | + ${match_rhev}) |
| 3549 | + probe_floppy || return ${DS_NOT_FOUND} |
| 3550 | + dev="/dev/floppy" |
| 3551 | + ;; |
| 3552 | + ${match_vsphere}) |
| 3553 | + block_dev_with_label CDROM || return ${DS_NOT_FOUND} |
| 3554 | + dev="$_RET" |
| 3555 | + ;; |
| 3556 | + *) return ${DS_NOT_FOUND};; |
| 3557 | + esac |
| 3558 | + |
| 3559 | + # FIXME: need to check $dev for user-data.txt or deltacloud-user-data.txt |
| 3560 | + : "$dev" |
| 3561 | + return $DS_MAYBE |
| 3562 | +} |
| 3563 | + |
| 3564 | +dscheck_SmartOS() { |
| 3565 | + # joyent cloud has two virt types: kvm and container |
| 3566 | + # on kvm, product name on joyent public cloud shows 'SmartDC HVM' |
| 3567 | + # on the container platform, uname's version has: BrandZ virtual linux |
| 3568 | + local smartdc_kver="BrandZ virtual linux" |
| 3569 | + dmi_product_name_matches "SmartDC*" && return $DS_FOUND |
| 3570 | + if [ "${DI_UNAME_KERNEL_VERSION}" = "${smartdc_kver}" ] && |
| 3571 | + [ "${DI_VIRT}" = "container-other" ]; then |
| 3572 | + return ${DS_FOUND} |
| 3573 | + fi |
| 3574 | + return ${DS_NOT_FOUND} |
| 3575 | +} |
| 3576 | + |
| 3577 | +dscheck_None() { |
| 3578 | + return ${DS_NOT_FOUND} |
| 3579 | +} |
| 3580 | + |
| 3581 | +collect_info() { |
| 3582 | + read_virt |
| 3583 | + read_pid1_platform |
| 3584 | + read_kernel_cmdline |
| 3585 | + read_uname_info |
| 3586 | + read_config |
| 3587 | + read_datasource_list |
| 3588 | + read_dmi_sys_vendor |
| 3589 | + read_dmi_product_name |
| 3590 | + read_dmi_product_serial |
| 3591 | + read_dmi_product_uuid |
| 3592 | + read_fs_labels |
| 3593 | +} |
| 3594 | + |
| 3595 | +print_info() { |
| 3596 | + collect_info |
| 3597 | + _print_info |
| 3598 | +} |
| 3599 | + |
| 3600 | +_print_info() { |
| 3601 | + local n="" v="" vars="" |
| 3602 | + vars="DMI_PRODUCT_NAME DMI_SYS_VENDOR DMI_PRODUCT_SERIAL" |
| 3603 | + vars="$vars DMI_PRODUCT_UUID PID_1_PLATFORM" |
| 3604 | + vars="$vars FS_LABELS KERNEL_CMDLINE VIRT" |
| 3605 | + vars="$vars UNAME_KERNEL_NAME UNAME_KERNEL_RELEASE UNAME_KERNEL_VERSION" |
| 3606 | + vars="$vars UNAME_MACHINE UNAME_NODENAME UNAME_OPERATING_SYSTEM" |
| 3607 | + vars="$vars DSNAME DSLIST" |
| 3608 | + vars="$vars MODE ON_FOUND ON_MAYBE ON_NOTFOUND" |
| 3609 | + for v in ${vars}; do |
| 3610 | + eval n='${DI_'"$v"'}' |
| 3611 | + echo "$v=$n" |
| 3612 | + done |
| 3613 | + echo "pid=$$ ppid=$PPID" |
| 3614 | + is_container && echo "is_container=true" || echo "is_container=false" |
| 3615 | +} |
| 3616 | + |
| 3617 | +write_result() { |
| 3618 | + local runcfg="${PATH_RUN_CI_CFG}" ret="" line="" pre="" |
| 3619 | + { |
| 3620 | + if [ "$DI_MODE" = "report" ]; then |
| 3621 | + echo "di_report:" |
| 3622 | + pre=" " |
| 3623 | + fi |
| 3624 | + for line in "$@"; do |
| 3625 | + echo "${pre}$line"; |
| 3626 | + done |
| 3627 | + } > "$runcfg" |
| 3628 | + ret=$? |
| 3629 | + [ $ret -eq 0 ] || { |
| 3630 | + error "failed to write to ${runcfg}" |
| 3631 | + return $ret |
| 3632 | + } |
| 3633 | + return 0 |
| 3634 | +} |
| 3635 | + |
| 3636 | +record_notfound() { |
| 3637 | + # in report mode, report nothing was found. |
| 3638 | + # if not report mode: only report the negative result. |
| 3639 | + # reporting an empty list would mean cloud-init would not search |
| 3640 | + # any datasources. |
| 3641 | + if [ "$DI_MODE" = "report" ]; then |
| 3642 | + found -- |
| 3643 | + elif [ "$DI_MODE" = "search" ]; then |
| 3644 | + local msg="# reporting not found result. notfound=${DI_ON_NOTFOUND}." |
| 3645 | + local DI_MODE="report" |
| 3646 | + found -- "$msg" |
| 3647 | + fi |
| 3648 | +} |
| 3649 | + |
| 3650 | +found() { |
| 3651 | + # found(ds1, [ds2 ...], [-- [extra lines]]) |
| 3652 | + local list="" ds="" |
| 3653 | + while [ $# -ne 0 ]; do |
| 3654 | + if [ "$1" = "--" ]; then |
| 3655 | + shift |
| 3656 | + break |
| 3657 | + fi |
| 3658 | + list="${list:+${list}, }$1" |
| 3659 | + shift |
| 3660 | + done |
| 3661 | + if [ $# -eq 1 ] && [ -z "$1" ]; then |
| 3662 | + # do not pass an empty line through. |
| 3663 | + shift |
| 3664 | + fi |
| 3665 | + # always write the None datasource last. |
| 3666 | + list="${list:+${list}, }None" |
| 3667 | + write_result "datasource_list: [ $list ]" "$@" |
| 3668 | + return |
| 3669 | +} |
| 3670 | + |
| 3671 | +trim() { |
| 3672 | + set -- $* |
| 3673 | + _RET="$*" |
| 3674 | +} |
| 3675 | + |
| 3676 | +unquote() { |
| 3677 | + # remove quotes from quoted value |
| 3678 | + local quote='"' tick="'" |
| 3679 | + local val="$1" |
| 3680 | + case "$val" in |
| 3681 | + ${quote}*${quote}|${tick}*${tick}) |
| 3682 | + val=${val#?}; val=${val%?};; |
| 3683 | + esac |
| 3684 | + _RET="$val" |
| 3685 | +} |
| 3686 | + |
| 3687 | +_read_config() { |
| 3688 | + # reads config from stdin, |
| 3689 | + # if no parameters are set, modifies _rc scoped environment vars. |
| 3690 | + # if keyname is provided, then returns found value of that key. |
| 3691 | + local keyname="${1:-_unset}" |
| 3692 | + local line="" hash="#" ckey="" key="" val="" |
| 3693 | + while read line; do |
| 3694 | + line=${line%%${hash}*} |
| 3695 | + key="${line%%:*}" |
| 3696 | + |
| 3697 | + # no : in the line. |
| 3698 | + [ "$key" = "$line" ] && continue |
| 3699 | + trim "$key" |
| 3700 | + key=${_RET} |
| 3701 | + |
| 3702 | + [ "$keyname" != "_unset" ] && [ "$keyname" != "$key" ] && |
| 3703 | + continue |
| 3704 | + |
| 3705 | + val="${line#*:}" |
| 3706 | + trim "$val" |
| 3707 | + unquote "${_RET}" |
| 3708 | + val=${_RET} |
| 3709 | + |
| 3710 | + if [ "$keyname" = "$key" ]; then |
| 3711 | + _RET="$val" |
| 3712 | + return 0 |
| 3713 | + fi |
| 3714 | + |
| 3715 | + case "$key" in |
| 3716 | + datasource) _rc_dsname="$val";; |
| 3717 | + policy) _rc_policy="$val";; |
| 3718 | + esac |
| 3719 | + done |
| 3720 | + if [ "$keyname" = "_unset" ]; then |
| 3721 | + return 1 |
| 3722 | + fi |
| 3723 | + _RET="" |
| 3724 | + return 0 |
| 3725 | +} |
| 3726 | + |
| 3727 | +parse_warn() { |
| 3728 | + echo "WARN: invalid value '$2' for key '$1'. Using $1=$3." 1>&2 |
| 3729 | +} |
| 3730 | + |
| 3731 | +parse_def_policy() { |
| 3732 | + local _rc_mode="" _rc_report="" _rc_found="" _rc_maybe="" _rc_notfound="" |
| 3733 | + local ret="" |
| 3734 | + parse_policy "$@" |
| 3735 | + ret=$? |
| 3736 | + _def_mode=$_rc_mode |
| 3737 | + _def_report=$_rc_report |
| 3738 | + _def_found=$_rc_found |
| 3739 | + _def_maybe=$_rc_maybe |
| 3740 | + _def_notfound=$_rc_notfound |
| 3741 | + return $ret |
| 3742 | +} |
| 3743 | + |
| 3744 | +parse_policy() { |
| 3745 | + # parse_policy(policy, default) |
| 3746 | + # parse a policy string. sets |
| 3747 | + # _rc_mode (enabled|disabled|search|report) |
| 3748 | + # _rc_report true|false |
| 3749 | + # _rc_found first|all |
| 3750 | + # _rc_maybe all|none |
| 3751 | + # _rc_notfound enabled|disabled |
| 3752 | + local def="" |
| 3753 | + case "$DI_UNAME_MACHINE" in |
| 3754 | + # these have dmi data |
| 3755 | + i?86|x86_64) def=${DI_DEFAULT_POLICY};; |
| 3756 | + # aarch64 has dmi, but not currently used (LP: #1663304) |
| 3757 | + aarch64) def=${DI_DEFAULT_POLICY_NO_DMI};; |
| 3758 | + *) def=${DI_DEFAULT_POLICY_NO_DMI};; |
| 3759 | + esac |
| 3760 | + local policy="$1" |
| 3761 | + local _def_mode="" _def_report="" _def_found="" _def_maybe="" |
| 3762 | + local _def_notfound="" |
| 3763 | + if [ $# -eq 1 ] || [ "$2" != "-" ]; then |
| 3764 | + def=${2:-${def}} |
| 3765 | + parse_def_policy "$def" - |
| 3766 | + fi |
| 3767 | + |
| 3768 | + local mode="" report="" found="" maybe="" notfound="" |
| 3769 | + local oifs="$IFS" tok="" val="" |
| 3770 | + IFS=","; set -- $policy; IFS="$oifs" |
| 3771 | + for tok in "$@"; do |
| 3772 | + val=${tok#*=} |
| 3773 | + case "$tok" in |
| 3774 | + $DI_ENABLED|$DI_DISABLED|search|report) mode=$tok;; |
| 3775 | + found=all|found=first) found=$val;; |
| 3776 | + maybe=all|maybe=none) maybe=$val;; |
| 3777 | + notfound=$DI_ENABLED|notfound=$DI_DISABLED) notfound=$val;; |
| 3778 | + found=*) |
| 3779 | + parse_warn found "$val" "${_def_found}" |
| 3780 | + found=${_def_found};; |
| 3781 | + maybe=*) |
| 3782 | + parse_warn maybe "$val" "${_def_maybe}" |
| 3783 | + maybe=${_def_maybe};; |
| 3784 | + notfound=*) |
| 3785 | + parse_warn notfound "$val" "${_def_notfound}" |
| 3786 | + notfound=${_def_notfound};; |
| 3787 | + esac |
| 3788 | + done |
| 3789 | + report=${report:-${_def_report:-false}} |
| 3790 | + _rc_report=${report} |
| 3791 | + _rc_mode=${mode:-${_def_mode}} |
| 3792 | + _rc_found=${found:-${_def_found}} |
| 3793 | + _rc_maybe=${maybe:-${_def_maybe}} |
| 3794 | + _rc_notfound=${notfound:-${_def_notfound}} |
| 3795 | +} |
| 3796 | + |
| 3797 | +read_config() { |
| 3798 | + local config="${PATH_DI_CONFIG}" |
| 3799 | + local _rc_dsname="" _rc_policy="" ret="" |
| 3800 | + if [ -f "$config" ]; then |
| 3801 | + _read_config < "$config" |
| 3802 | + ret=$? |
| 3803 | + elif [ -e "$config" ]; then |
| 3804 | + error "$config exists but is not a file!" |
| 3805 | + ret=1 |
| 3806 | + fi |
| 3807 | + local tok="" key="" val="" |
| 3808 | + for tok in ${DI_KERNEL_CMDLINE}; do |
| 3809 | + key=${tok%%=*} |
| 3810 | + val=${tok#*=} |
| 3811 | + case "$key" in |
| 3812 | + ci.ds) _rc_dsname="$val";; |
| 3813 | + ci.datasource) _rc_dsname="$val";; |
| 3814 | + ci.di.policy) _rc_policy="$val";; |
| 3815 | + esac |
| 3816 | + done |
| 3817 | + |
| 3818 | + local _rc_mode _rc_report _rc_found _rc_maybe _rc_notfound |
| 3819 | + parse_policy "${_rc_policy}" |
| 3820 | + debug 1 "policy loaded: mode=${_rc_mode} report=${_rc_report}" \ |
| 3821 | + "found=${_rc_found} maybe=${_rc_maybe} notfound=${_rc_notfound}" |
| 3822 | + DI_MODE=${_rc_mode} |
| 3823 | + DI_ON_FOUND=${_rc_found} |
| 3824 | + DI_ON_MAYBE=${_rc_maybe} |
| 3825 | + DI_ON_NOTFOUND=${_rc_notfound} |
| 3826 | + |
| 3827 | + DI_DSNAME="${_rc_dsname}" |
| 3828 | + return $ret |
| 3829 | +} |
| 3830 | + |
| 3831 | + |
| 3832 | +manual_clean_and_existing() { |
| 3833 | + [ -f "${PATH_VAR_LIB_CLOUD}/instance/manual-clean" ] |
| 3834 | +} |
| 3835 | + |
| 3836 | +read_uptime() { |
| 3837 | + local up idle |
| 3838 | + _RET="${UNAVAILABLE}" |
| 3839 | + [ -f "$PATH_PROC_UPTIME" ] && |
| 3840 | + read up idle < "$PATH_PROC_UPTIME" && _RET="$up" |
| 3841 | + return |
| 3842 | +} |
| 3843 | + |
| 3844 | +_main() { |
| 3845 | + local dscheck="" ret_dis=1 ret_en=0 |
| 3846 | + |
| 3847 | + read_uptime |
| 3848 | + debug 1 "[up ${_RET}s]" "ds-identify $*" |
| 3849 | + collect_info |
| 3850 | + |
| 3851 | + if [ "$DI_LOG" = "stderr" ]; then |
| 3852 | + _print_info 1>&2 |
| 3853 | + else |
| 3854 | + _print_info >> "$DI_LOG" |
| 3855 | + fi |
| 3856 | + |
| 3857 | + case "$DI_MODE" in |
| 3858 | + $DI_DISABLED) |
| 3859 | + debug 1 "mode=$DI_DISABLED. returning $ret_dis" |
| 3860 | + return $ret_dis |
| 3861 | + ;; |
| 3862 | + $DI_ENABLED) |
| 3863 | + debug 1 "mode=$DI_ENABLED. returning $ret_en" |
| 3864 | + return $ret_en;; |
| 3865 | + search|report) :;; |
| 3866 | + esac |
| 3867 | + |
| 3868 | + if [ -n "${DI_DSNAME}" ]; then |
| 3869 | + debug 1 "datasource '$DI_DSNAME' specified." |
| 3870 | + found "$DI_DSNAME" |
| 3871 | + return |
| 3872 | + fi |
| 3873 | + |
| 3874 | + if manual_clean_and_existing; then |
| 3875 | + debug 1 "manual_cache_clean enabled. Not writing datasource_list." |
| 3876 | + write_result "# manual_cache_clean." |
| 3877 | + return |
| 3878 | + fi |
| 3879 | + |
| 3880 | + # if there is only a single entry in $DI_DSLIST |
| 3881 | + set -- $DI_DSLIST |
| 3882 | + if [ $# -eq 1 ] || [ $# -eq 2 -a "$2" = "None" ] ; then |
| 3883 | + debug 1 "single entry in datasource_list ($DI_DSLIST) use that." |
| 3884 | + found "$@" |
| 3885 | + return |
| 3886 | + fi |
| 3887 | + |
| 3888 | + local found="" ret="" ds="" maybe="" _RET_excfg="" |
| 3889 | + local exfound_cfg="" exmaybe_cfg="" |
| 3890 | + for ds in ${DI_DSLIST}; do |
| 3891 | + dscheck_fn="dscheck_${ds}" |
| 3892 | + debug 2 "Checking for datasource '$ds' via '$dscheck_fn'" |
| 3893 | + if ! type "$dscheck_fn" >/dev/null 2>&1; then |
| 3894 | + warn "No check method '$dscheck_fn' for datasource '$ds'" |
| 3895 | + continue |
| 3896 | + fi |
| 3897 | + _RET_excfg="" |
| 3898 | + $dscheck_fn |
| 3899 | + ret="$?" |
| 3900 | + case "$ret" in |
| 3901 | + $DS_FOUND) |
| 3902 | + debug 1 "check for '$ds' returned found"; |
| 3903 | + exfound_cfg="${exfound_cfg:+${exfound_cfg}${CR}}${_RET_excfg}" |
| 3904 | + found="${found} $ds";; |
| 3905 | + $DS_MAYBE) |
| 3906 | + debug 1 "check for '$ds' returned maybe"; |
| 3907 | + exmaybe_cfg="${exmaybe_cfg:+${exmaybe_cfg}${CR}}${_RET_excfg}" |
| 3908 | + maybe="${maybe} $ds";; |
| 3909 | + *) debug 2 "check for '$ds' returned not-found[$ret]";; |
| 3910 | + esac |
| 3911 | + done |
| 3912 | + |
| 3913 | + debug 2 "found=${found# } maybe=${maybe# }" |
| 3914 | + set -- $found |
| 3915 | + if [ $# -ne 0 ]; then |
| 3916 | + if [ $# -eq 1 ]; then |
| 3917 | + debug 1 "Found single datasource: $1" |
| 3918 | + else |
| 3919 | + # found=all |
| 3920 | + debug 1 "Found $# datasources found=${DI_ON_FOUND}: $*" |
| 3921 | + if [ "${DI_ON_FOUND}" = "first" ]; then |
| 3922 | + set -- "$1" |
| 3923 | + fi |
| 3924 | + fi |
| 3925 | + found "$@" -- "${exfound_cfg}" |
| 3926 | + return |
| 3927 | + fi |
| 3928 | + |
| 3929 | + set -- $maybe |
| 3930 | + if [ $# -ne 0 -a "${DI_ON_MAYBE}" != "none" ]; then |
| 3931 | + debug 1 "$# datasources returned maybe: $*" |
| 3932 | + found "$@" -- "${exmaybe_cfg}" |
| 3933 | + return |
| 3934 | + fi |
| 3935 | + |
| 3936 | + # record the empty result. |
| 3937 | + record_notfound |
| 3938 | + |
| 3939 | + local basemsg="No ds found [mode=$DI_MODE, notfound=$DI_ON_NOTFOUND]." |
| 3940 | + local msg="" ret=3 |
| 3941 | + case "$DI_MODE:$DI_ON_NOTFOUND" in |
| 3942 | + report:$DI_DISABLED) |
| 3943 | + msg="$basemsg Would disable cloud-init [$ret_dis]" |
| 3944 | + ret=$ret_en;; |
| 3945 | + report:$DI_ENABLED) |
| 3946 | + msg="$basemsg Would enable cloud-init [$ret_en]" |
| 3947 | + ret=$ret_en;; |
| 3948 | + search:$DI_DISABLED) |
| 3949 | + msg="$basemsg Disabled cloud-init [$ret_dis]" |
| 3950 | + ret=$ret_dis;; |
| 3951 | + search:$DI_ENABLED) |
| 3952 | + msg="$basemsg Enabled cloud-init [$ret_en]" |
| 3953 | + ret=$ret_en;; |
| 3954 | + *) error "Unexpected result";; |
| 3955 | + esac |
| 3956 | + debug 1 "$msg" |
| 3957 | + return $ret |
| 3958 | +} |
| 3959 | + |
| 3960 | +main() { |
| 3961 | + local ret="" |
| 3962 | + [ -d "$PATH_RUN_CI" ] || mkdir -p "$PATH_RUN_CI" |
| 3963 | + if [ "${1:+$1}" != "--force" ] && [ -f "$PATH_RUN_CI_CFG" ] && |
| 3964 | + [ -f "$PATH_RUN_DI_RESULT" ]; then |
| 3965 | + if read ret < "$PATH_RUN_DI_RESULT"; then |
| 3966 | + if [ "$ret" = "0" ] || [ "$ret" = "1" ]; then |
| 3967 | + debug 2 "used cached result $ret. pass --force to re-run." |
| 3968 | + return $ret; |
| 3969 | + fi |
| 3970 | + debug 1 "previous run returned unexpected '$ret'. Re-running." |
| 3971 | + else |
| 3972 | + error "failed to read result from $PATH_RUN_DI_RESULT!" |
| 3973 | + fi |
| 3974 | + fi |
| 3975 | + _main "$@" |
| 3976 | + ret=$? |
| 3977 | + echo "$ret" > "$PATH_RUN_DI_RESULT" |
| 3978 | + read_uptime |
| 3979 | + debug 1 "[up ${_RET}s]" "returning $ret" |
| 3980 | + return $ret |
| 3981 | +} |
| 3982 | + |
| 3983 | +noop() { |
| 3984 | + : |
| 3985 | +} |
| 3986 | + |
| 3987 | +case "${DI_MAIN}" in |
| 3988 | + main|print_info|noop) "${DI_MAIN}" "$@";; |
| 3989 | + *) error "unexpected value for DI_MAIN"; exit 1;; |
| 3990 | +esac |
| 3991 | + |
| 3992 | +# vi: syntax=sh ts=4 expandtab |
| 3993 | diff --git a/tools/make-mime.py b/tools/make-mime.py |
| 3994 | index 1272712..f6a7204 100755 |
| 3995 | --- a/tools/make-mime.py |
| 3996 | +++ b/tools/make-mime.py |
| 3997 | @@ -22,7 +22,7 @@ def file_content_type(text): |
| 3998 | try: |
| 3999 | filename, content_type = text.split(":", 1) |
| 4000 | return (open(filename, 'r'), filename, content_type.strip()) |
| 4001 | - except: |
| 4002 | + except ValueError: |
| 4003 | raise argparse.ArgumentError("Invalid value for %r" % (text)) |
| 4004 | |
| 4005 | |
| 4006 | diff --git a/tools/make-tarball b/tools/make-tarball |
| 4007 | index c150dd2..91c4562 100755 |
| 4008 | --- a/tools/make-tarball |
| 4009 | +++ b/tools/make-tarball |
| 4010 | @@ -35,7 +35,7 @@ while [ $# -ne 0 ]; do |
| 4011 | done |
| 4012 | |
| 4013 | rev=${1:-HEAD} |
| 4014 | -version=$(git describe ${long_opt} $rev) |
| 4015 | +version=$(git describe "--match=[0-9]*" ${long_opt} $rev) |
| 4016 | |
| 4017 | archive_base="cloud-init-$version" |
| 4018 | if [ -z "$output" ]; then |
| 4019 | diff --git a/tools/mock-meta.py b/tools/mock-meta.py |
| 4020 | index d74f9e3..95fc465 100755 |
| 4021 | --- a/tools/mock-meta.py |
| 4022 | +++ b/tools/mock-meta.py |
| 4023 | @@ -18,10 +18,10 @@ Then: |
| 4024 | """ |
| 4025 | |
| 4026 | import functools |
| 4027 | -import httplib |
| 4028 | import json |
| 4029 | import logging |
| 4030 | import os |
| 4031 | +import socket |
| 4032 | import random |
| 4033 | import string |
| 4034 | import sys |
| 4035 | @@ -29,7 +29,13 @@ import yaml |
| 4036 | |
| 4037 | from optparse import OptionParser |
| 4038 | |
| 4039 | -from BaseHTTPServer import (HTTPServer, BaseHTTPRequestHandler) |
| 4040 | +try: |
| 4041 | + from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler |
| 4042 | + import httplib as hclient |
| 4043 | +except ImportError: |
| 4044 | + from http.server import HTTPServer, BaseHTTPRequestHandler |
| 4045 | + from http import client as hclient |
| 4046 | + |
| 4047 | |
| 4048 | log = logging.getLogger('meta-server') |
| 4049 | |
| 4050 | @@ -183,6 +189,10 @@ def get_ssh_keys(): |
| 4051 | return keys |
| 4052 | |
| 4053 | |
| 4054 | +class HTTPServerV6(HTTPServer): |
| 4055 | + address_family = socket.AF_INET6 |
| 4056 | + |
| 4057 | + |
| 4058 | class MetaDataHandler(object): |
| 4059 | |
| 4060 | def __init__(self, opts): |
| 4061 | @@ -249,8 +259,11 @@ class MetaDataHandler(object): |
| 4062 | try: |
| 4063 | key_id = int(mybe_key) |
| 4064 | key_name = key_ids[key_id] |
| 4065 | - except: |
| 4066 | - raise WebException(httplib.BAD_REQUEST, |
| 4067 | + except ValueError: |
| 4068 | + raise WebException(hclient.BAD_REQUEST, |
| 4069 | + "%s: not an integer" % mybe_key) |
| 4070 | + except KeyError: |
| 4071 | + raise WebException(hclient.BAD_REQUEST, |
| 4072 | "Unknown key id %r" % mybe_key) |
| 4073 | # Extract the possible sub-params |
| 4074 | result = traverse(nparams[1:], { |
| 4075 | @@ -342,13 +355,13 @@ class Ec2Handler(BaseHTTPRequestHandler): |
| 4076 | return self._get_versions |
| 4077 | date = segments[0].strip().lower() |
| 4078 | if date not in self._get_versions(): |
| 4079 | - raise WebException(httplib.BAD_REQUEST, |
| 4080 | + raise WebException(hclient.BAD_REQUEST, |
| 4081 | "Unknown version format %r" % date) |
| 4082 | if len(segments) < 2: |
| 4083 | - raise WebException(httplib.BAD_REQUEST, "No action provided") |
| 4084 | + raise WebException(hclient.BAD_REQUEST, "No action provided") |
| 4085 | look_name = segments[1].lower() |
| 4086 | if look_name not in func_mapping: |
| 4087 | - raise WebException(httplib.BAD_REQUEST, |
| 4088 | + raise WebException(hclient.BAD_REQUEST, |
| 4089 | "Unknown requested data %r" % look_name) |
| 4090 | base_func = func_mapping[look_name] |
| 4091 | who = self.address_string() |
| 4092 | @@ -371,16 +384,16 @@ class Ec2Handler(BaseHTTPRequestHandler): |
| 4093 | data = func() |
| 4094 | if not data: |
| 4095 | data = '' |
| 4096 | - self.send_response(httplib.OK) |
| 4097 | + self.send_response(hclient.OK) |
| 4098 | self.send_header("Content-Type", "binary/octet-stream") |
| 4099 | self.send_header("Content-Length", len(data)) |
| 4100 | log.info("Sending data (len=%s):\n%s", len(data), |
| 4101 | format_text(data)) |
| 4102 | self.end_headers() |
| 4103 | - self.wfile.write(data) |
| 4104 | + self.wfile.write(data.encode()) |
| 4105 | except RuntimeError as e: |
| 4106 | log.exception("Error somewhere in the server.") |
| 4107 | - self.send_error(httplib.INTERNAL_SERVER_ERROR, message=str(e)) |
| 4108 | + self.send_error(hclient.INTERNAL_SERVER_ERROR, message=str(e)) |
| 4109 | except WebException as e: |
| 4110 | code = e.code |
| 4111 | log.exception(str(e)) |
| 4112 | @@ -408,7 +421,7 @@ def extract_opts(): |
| 4113 | help=("port from which to serve traffic" |
| 4114 | " (default: %default)")) |
| 4115 | parser.add_option("-a", "--addr", dest="address", action="store", type=str, |
| 4116 | - default='0.0.0.0', metavar="ADDRESS", |
| 4117 | + default='::', metavar="ADDRESS", |
| 4118 | help=("address from which to serve traffic" |
| 4119 | " (default: %default)")) |
| 4120 | parser.add_option("-f", '--user-data-file', dest='user_data_file', |
| 4121 | @@ -444,7 +457,7 @@ def run_server(): |
| 4122 | setup_fetchers(opts) |
| 4123 | log.info("CLI opts: %s", opts) |
| 4124 | server_address = (opts['address'], opts['port']) |
| 4125 | - server = HTTPServer(server_address, Ec2Handler) |
| 4126 | + server = HTTPServerV6(server_address, Ec2Handler) |
| 4127 | sa = server.socket.getsockname() |
| 4128 | log.info("Serving ec2 metadata on %s using port %s ...", sa[0], sa[1]) |
| 4129 | server.serve_forever() |
| 4130 | diff --git a/tools/read-version b/tools/read-version |
| 4131 | index 3b30b49..ddb2838 100755 |
| 4132 | --- a/tools/read-version |
| 4133 | +++ b/tools/read-version |
| 4134 | @@ -56,7 +56,7 @@ if os.path.isdir(os.path.join(_tdir, ".git")) and which("git"): |
| 4135 | flags = [] |
| 4136 | if use_tags: |
| 4137 | flags = ['--tags'] |
| 4138 | - cmd = ['git', 'describe'] + flags |
| 4139 | + cmd = ['git', 'describe', '--match=[0-9]*'] + flags |
| 4140 | |
| 4141 | version = tiny_p(cmd).strip() |
| 4142 | |
| 4143 | diff --git a/tools/validate-yaml.py b/tools/validate-yaml.py |
| 4144 | index d8bbcfc..a57ea84 100755 |
| 4145 | --- a/tools/validate-yaml.py |
| 4146 | +++ b/tools/validate-yaml.py |
| 4147 | @@ -1,4 +1,4 @@ |
| 4148 | -#!/usr/bin/env python3 |
| 4149 | +#!/usr/bin/env python |
| 4150 | |
| 4151 | """Try to read a YAML file and report any errors. |
| 4152 | """ |
| 4153 | diff --git a/tox.ini b/tox.ini |
| 4154 | index db63275..983d595 100644 |
| 4155 | --- a/tox.ini |
| 4156 | +++ b/tox.ini |
| 4157 | @@ -112,3 +112,11 @@ deps = |
| 4158 | jsonpatch==1.2 |
| 4159 | six==1.9.0 |
| 4160 | -r{toxinidir}/test-requirements.txt |
| 4161 | + |
| 4162 | +[testenv:tip-pycodestyle] |
| 4163 | +commands = {envpython} -m pycodestyle {posargs:cloudinit/ tests/ tools/} |
| 4164 | +deps = pycodestyle |
| 4165 | + |
| 4166 | +[testenv:tip-pyflakes] |
| 4167 | +commands = {envpython} -m pyflakes {posargs:cloudinit/ tests/ tools/} |
| 4168 | +deps = pyflakes |


I need to rebase this on master; I can't pick out the actual changes in this diff.
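
A minimal sketch of one way to do that rebase and then view only the branch's own changes; the remote and branch names below are placeholders, not taken from this proposal:

    # fetch the current master and rebase the feature branch onto it
    git fetch upstream master
    git rebase upstream/master my-feature-branch
    # after resolving any conflicts, show only what the branch adds on top of master
    git log --oneline upstream/master..my-feature-branch
    git diff upstream/master...my-feature-branch

The three-dot form of git diff compares against the merge base, so the output contains just the branch's own changes rather than everything master has gained since the branch was created.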