Merge ~chad.smith/cloud-init:ubuntu/xenial into cloud-init:ubuntu/xenial

Proposed by Chad Smith
Status: Merged
Approved by: Scott Moser
Approved revision: 04b240a3e24e9813314a2159d0c4999a876f0d18
Merged at revision: a48cab85b23b542f4bfe9072282b573aa59987ab
Proposed branch: ~chad.smith/cloud-init:ubuntu/xenial
Merge into: cloud-init:ubuntu/xenial
Diff against target: 10355 lines (+5108/-1157)
144 files modified
.gitignore (+1/-0)
.pylintrc (+2/-2)
ChangeLog (+85/-0)
HACKING.rst (+8/-0)
cloudinit/analyze/__main__.py (+3/-1)
cloudinit/analyze/dump.py (+1/-7)
cloudinit/cmd/clean.py (+103/-0)
cloudinit/cmd/main.py (+37/-7)
cloudinit/cmd/status.py (+160/-0)
cloudinit/cmd/tests/__init__.py (+0/-0)
cloudinit/cmd/tests/test_clean.py (+176/-0)
cloudinit/cmd/tests/test_status.py (+368/-0)
cloudinit/config/cc_apt_configure.py (+3/-2)
cloudinit/config/cc_disk_setup.py (+5/-3)
cloudinit/config/cc_landscape.py (+4/-4)
cloudinit/config/cc_ntp.py (+5/-5)
cloudinit/config/cc_power_state_change.py (+1/-0)
cloudinit/config/cc_resizefs.py (+11/-1)
cloudinit/config/cc_rh_subscription.py (+2/-3)
cloudinit/config/cc_rsyslog.py (+5/-5)
cloudinit/config/cc_seed_random.py (+2/-1)
cloudinit/config/cc_snap_config.py (+5/-2)
cloudinit/distros/__init__.py (+18/-13)
cloudinit/distros/freebsd.py (+3/-8)
cloudinit/ec2_utils.py (+30/-9)
cloudinit/net/__init__.py (+2/-2)
cloudinit/net/cmdline.py (+5/-4)
cloudinit/net/dhcp.py (+42/-1)
cloudinit/net/network_state.py (+17/-3)
cloudinit/sources/DataSourceAliYun.py (+1/-0)
cloudinit/sources/DataSourceAltCloud.py (+5/-2)
cloudinit/sources/DataSourceAzure.py (+150/-21)
cloudinit/sources/DataSourceBigstep.py (+4/-1)
cloudinit/sources/DataSourceCloudSigma.py (+4/-1)
cloudinit/sources/DataSourceCloudStack.py (+4/-1)
cloudinit/sources/DataSourceConfigDrive.py (+6/-3)
cloudinit/sources/DataSourceDigitalOcean.py (+4/-1)
cloudinit/sources/DataSourceEc2.py (+41/-24)
cloudinit/sources/DataSourceGCE.py (+99/-40)
cloudinit/sources/DataSourceMAAS.py (+44/-15)
cloudinit/sources/DataSourceNoCloud.py (+4/-1)
cloudinit/sources/DataSourceNone.py (+4/-1)
cloudinit/sources/DataSourceOVF.py (+92/-38)
cloudinit/sources/DataSourceOpenNebula.py (+66/-56)
cloudinit/sources/DataSourceOpenStack.py (+4/-1)
cloudinit/sources/DataSourceScaleway.py (+3/-1)
cloudinit/sources/DataSourceSmartOS.py (+4/-1)
cloudinit/sources/__init__.py (+117/-14)
cloudinit/sources/helpers/azure.py (+16/-9)
cloudinit/sources/helpers/vmware/imc/config.py (+4/-0)
cloudinit/sources/helpers/vmware/imc/config_custom_script.py (+153/-0)
cloudinit/sources/helpers/vmware/imc/config_nic.py (+1/-1)
cloudinit/sources/tests/__init__.py (+0/-0)
cloudinit/sources/tests/test_init.py (+202/-0)
cloudinit/temp_utils.py (+8/-3)
cloudinit/tests/helpers.py (+35/-7)
cloudinit/tests/test_util.py (+46/-0)
cloudinit/url_helper.py (+20/-9)
cloudinit/util.py (+129/-64)
cloudinit/version.py (+1/-1)
debian/changelog (+55/-3)
dev/null (+0/-172)
doc/rtd/topics/boot.rst (+10/-3)
doc/rtd/topics/capabilities.rst (+153/-7)
doc/rtd/topics/debugging.rst (+1/-0)
doc/rtd/topics/modules.rst (+2/-0)
doc/rtd/topics/network-config-format-v1.rst (+1/-1)
doc/rtd/topics/tests.rst (+32/-6)
integration-requirements.txt (+20/-0)
setup.py (+24/-1)
systemd/cloud-init-local.service.tmpl (+0/-6)
tests/cloud_tests/__init__.py (+6/-0)
tests/cloud_tests/bddeb.py (+4/-4)
tests/cloud_tests/collect.py (+28/-16)
tests/cloud_tests/config.py (+3/-1)
tests/cloud_tests/platforms.yaml (+6/-5)
tests/cloud_tests/platforms/__init__.py (+20/-2)
tests/cloud_tests/platforms/ec2/image.py (+99/-0)
tests/cloud_tests/platforms/ec2/instance.py (+132/-0)
tests/cloud_tests/platforms/ec2/platform.py (+258/-0)
tests/cloud_tests/platforms/ec2/snapshot.py (+66/-0)
tests/cloud_tests/platforms/images.py (+2/-1)
tests/cloud_tests/platforms/instances.py (+69/-1)
tests/cloud_tests/platforms/lxd/image.py (+5/-6)
tests/cloud_tests/platforms/lxd/instance.py (+22/-27)
tests/cloud_tests/platforms/lxd/platform.py (+7/-7)
tests/cloud_tests/platforms/lxd/snapshot.py (+2/-2)
tests/cloud_tests/platforms/nocloudkvm/image.py (+5/-16)
tests/cloud_tests/platforms/nocloudkvm/instance.py (+72/-59)
tests/cloud_tests/platforms/nocloudkvm/platform.py (+11/-9)
tests/cloud_tests/platforms/nocloudkvm/snapshot.py (+2/-22)
tests/cloud_tests/platforms/platforms.py (+96/-0)
tests/cloud_tests/platforms/snapshots.py (+0/-0)
tests/cloud_tests/releases.yaml (+10/-22)
tests/cloud_tests/setup_image.py (+0/-18)
tests/cloud_tests/testcases.yaml (+21/-6)
tests/cloud_tests/testcases/base.py (+6/-3)
tests/cloud_tests/testcases/modules/apt_configure_sources_list.py (+5/-0)
tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml (+6/-0)
tests/cloud_tests/testcases/modules/ntp_pools.yaml (+1/-1)
tests/cloud_tests/testcases/modules/ntp_servers.yaml (+1/-1)
tests/cloud_tests/testcases/modules/set_hostname_fqdn.py (+1/-1)
tests/cloud_tests/util.py (+16/-3)
tests/cloud_tests/verify.py (+1/-1)
tests/unittests/test_cli.py (+99/-6)
tests/unittests/test_cs_util.py (+1/-0)
tests/unittests/test_datasource/test_aliyun.py (+17/-1)
tests/unittests/test_datasource/test_altcloud.py (+13/-9)
tests/unittests/test_datasource/test_azure.py (+204/-40)
tests/unittests/test_datasource/test_cloudsigma.py (+9/-4)
tests/unittests/test_datasource/test_cloudstack.py (+13/-6)
tests/unittests/test_datasource/test_configdrive.py (+25/-37)
tests/unittests/test_datasource/test_digitalocean.py (+13/-7)
tests/unittests/test_datasource/test_ec2.py (+5/-3)
tests/unittests/test_datasource/test_gce.py (+174/-22)
tests/unittests/test_datasource/test_maas.py (+46/-7)
tests/unittests/test_datasource/test_nocloud.py (+6/-8)
tests/unittests/test_datasource/test_opennebula.py (+182/-53)
tests/unittests/test_datasource/test_openstack.py (+8/-4)
tests/unittests/test_datasource/test_ovf.py (+107/-4)
tests/unittests/test_datasource/test_scaleway.py (+9/-4)
tests/unittests/test_datasource/test_smartos.py (+2/-1)
tests/unittests/test_distros/test_create_users.py (+5/-2)
tests/unittests/test_distros/test_netconfig.py (+46/-6)
tests/unittests/test_ds_identify.py (+130/-3)
tests/unittests/test_handler/test_handler_lxd.py (+0/-3)
tests/unittests/test_handler/test_handler_power_state.py (+0/-3)
tests/unittests/test_handler/test_handler_resizefs.py (+21/-1)
tests/unittests/test_handler/test_handler_yum_add_repo.py (+2/-8)
tests/unittests/test_handler/test_handler_zypper_add_repo.py (+1/-6)
tests/unittests/test_net.py (+15/-3)
tests/unittests/test_reporting.py (+1/-1)
tests/unittests/test_runs/test_merge_run.py (+1/-0)
tests/unittests/test_runs/test_simple_run.py (+2/-1)
tests/unittests/test_templating.py (+1/-1)
tests/unittests/test_util.py (+64/-3)
tests/unittests/test_vmware/__init__.py (+0/-0)
tests/unittests/test_vmware/test_custom_script.py (+99/-0)
tests/unittests/test_vmware_config_file.py (+9/-1)
tools/ds-identify (+81/-35)
tools/make-mime.py (+1/-1)
tools/mock-meta.py (+21/-24)
tools/read-version (+14/-1)
tox.ini (+5/-6)
Reviewer Review Type Date Requested Status
Server Team CI bot continuous-integration Needs Fixing
Scott Moser Pending
Review via email: mp+337098@code.launchpad.net

Description of the change

Sync a snapshot of master into xenial as part of the SRU (Stable Release Update) process.

LP: #1747059

To post a comment you must log in.
Revision history for this message
Server Team CI bot (server-team-bot) wrote :

FAILED: Continuous integration, rev:04b240a3e24e9813314a2159d0c4999a876f0d18
https://jenkins.ubuntu.com/server/job/cloud-init-ci/757/
Executed test runs:
    SUCCESS: Checkout
    SUCCESS: Unit & Style Tests
    FAILED: Ubuntu LTS: Build

Click here to trigger a rebuild:
https://jenkins.ubuntu.com/server/job/cloud-init-ci/757/rebuild

review: Needs Fixing (continuous-integration)

There was an error fetching revisions from git servers. Please try again in a few minutes. If the problem persists, contact Launchpad support.

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
diff --git a/.gitignore b/.gitignore
index b0500a6..75565ed 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,3 +10,4 @@ parts
10prime10prime
11stage11stage
12*.snap12*.snap
13*.cover
diff --git a/.pylintrc b/.pylintrc
index b160ce7..05a086d 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -46,7 +46,7 @@ reports=no
46# (useful for modules/projects where namespaces are manipulated during runtime46# (useful for modules/projects where namespaces are manipulated during runtime
47# and thus existing member attributes cannot be deduced by static analysis. It47# and thus existing member attributes cannot be deduced by static analysis. It
48# supports qualified module names, as well as Unix pattern matching.48# supports qualified module names, as well as Unix pattern matching.
49ignored-modules=six.moves,pkg_resources,httplib,http.client49ignored-modules=six.moves,pkg_resources,httplib,http.client,paramiko,simplestreams
5050
51# List of class names for which member attributes should not be checked (useful51# List of class names for which member attributes should not be checked (useful
52# for classes with dynamically set attributes). This supports the use of52# for classes with dynamically set attributes). This supports the use of
@@ -56,5 +56,5 @@ ignored-classes=optparse.Values,thread._local
56# List of members which are set dynamically and missed by pylint inference56# List of members which are set dynamically and missed by pylint inference
57# system, and so shouldn't trigger E1101 when accessed. Python regular57# system, and so shouldn't trigger E1101 when accessed. Python regular
58# expressions are accepted.58# expressions are accepted.
59generated-members=types,http.client,command_handlers59generated-members=types,http.client,command_handlers,m_.*
6060
diff --git a/ChangeLog b/ChangeLog
index 0260c57..31c2dcb 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,88 @@
117.2:
2 - ds-identify: failure in NoCloud due to unset variable usage.
3 (LP: #1737704)
4 - tests: fix collect_console when not implemented [Joshua Powers]
5 - ec2: Use instance-identity doc for region and instance-id
6 [Andrew Jorgensen]
7 - tests: remove leaked tmp files in config drive tests.
8 - setup.py: Do not include rendered files in SOURCES.txt
9 - SUSE: remove delta in systemd local template for SUSE [Robert Schweikert]
10 - tests: move to using tox 1.7.5
11 - OVF: improve ds-identify to support finding OVF iso transport.
12 (LP: #1731868)
13 - VMware: Support for user provided pre and post-customization scripts
14 [Maitreyee Saikia]
15 - citest: In NoCloudKVM provide keys via metadata not userdata.
16 - pylint: Update pylint to 1.7.1, run on tests/ and tools and fix
17 complaints.
18 - Datasources: Formalize DataSource get_data and related properties.
19 - cli: Add clean and status subcommands
20 - tests: consolidate platforms into specific dirs
21 - ec2: Fix sandboxed dhclient background process cleanup. (LP: #1735331)
22 - tests: NoCloudKVMImage do not modify the original local cache image.
23 - tests: Enable bionic in integration tests. [Joshua Powers]
24 - tests: Use apt-get to install a deb so that depends get resolved.
25 - sysconfig: Correctly render dns and dns search info.
26 [Ryan McCabe] (LP: #1705804)
27 - integration test: replace curtin test ppa with cloud-init test ppa.
28 - EC2: Fix bug using fallback_nic and metadata when restoring from cache.
29 (LP: #1732917)
30 - EC2: Kill dhclient process used in sandbox dhclient. (LP: #1732964)
31 - ntp: fix configuration template rendering for openSUSE and SLES
32 (LP: #1726572)
33 - centos: Provide the failed #include url in error messages
34 - Catch UrlError when #include'ing URLs [Andrew Jorgensen]
35 - hosts: Fix openSUSE and SLES setup for /etc/hosts and clarify docs.
36 [Robert Schweikert] (LP: #1731022)
37 - rh_subscription: Perform null checks for enabled and disabled repos.
38 [Dave Mulford]
39 - Improve warning message when a template is not found.
40 [Robert Schweikert] (LP: #1731035)
41 - Replace the temporary i9n.brickies.net with i9n.cloud-init.io.
42 - Azure: don't generate network configuration for SRIOV devices
43 (LP: #1721579)
44 - tests: address some minor feedback missed in last merge.
45 - tests: integration test cleanup and full pass of nocloud-kvm.
46 - Gentoo: chmod +x on all files in sysvinit/gentoo/
47 [ckonstanski] (LP: #1727126)
48 - EC2: Limit network config to fallback nic, fix local-ipv4 only
49 instances. (LP: #1728152)
50 - Gentoo: Use "rc-service" rather than "service".
51 [Carlos Konstanski] (LP: #1727121)
52 - resizefs: Fix regression when system booted with root=PARTUUID=
53 (LP: #1725067)
54 - tools: make yum package installation more reliable
55 - citest: fix remaining warnings raised by integration tests.
56 - citest: show the class actual class name in results.
57 - ntp: fix config module schema to allow empty ntp config (LP: #1724951)
58 - tools: disable fastestmirror if using proxy [Joshua Powers]
59 - schema: Log debug instead of warning when jsonschema is not available.
60 (LP: #1724354)
61 - simpletable: Fix get_string method to return table-formatted string
62 (LP: #1722566)
63 - net: Handle bridge stp values of 0 and convert to boolean type
64 - tools: Give specific --abbrev=8 to "git describe"
65 - network: bridge_stp value not always correct (LP: #1721157)
66 - tests: re-enable tox with nocloud-kvm support [Joshua Powers]
67 - systemd: remove limit on tasks created by cloud-init-final.service.
68 [Robert Schweikert] (LP: #1717969)
69 - suse: Support addition of zypper repos via cloud-config.
70 [Robert Schweikert] (LP: #1718675)
71 - tests: Combine integration configs and testcases [Joshua Powers]
72 - Azure, CloudStack: Support reading dhcp options from systemd-networkd.
73 [Dimitri John Ledkov] (LP: #1718029)
74 - packages/debian/copyright: remove mention of boto and MIT license
75 - systemd: only mention Before=apt-daily.service on debian based distros.
76 [Robert Schweikert]
77 - Add missing simpletable and simpletable tests for failed merge
78 - Remove prettytable dependency, introduce simpletable [Andrew Jorgensen]
79 - debian/copyright: dep5 updates, reorganize, add Apache 2.0 license.
80 [Joshua Powers] (LP: #1718681)
81 - tests: remove dependency on shlex [Joshua Powers]
82 - AltCloud: Trust PATH for udevadm and modprobe.
83 - DataSourceOVF: use util.find_devs_with(TYPE=iso9660) (LP: #1718287)
84 - tests: remove a temp file used in bootcmd tests.
85
117.1:8617.1:
2 - doc: document GCE datasource. [Arnd Hannemann]87 - doc: document GCE datasource. [Arnd Hannemann]
3 - suse: updates to templates to support openSUSE and SLES.88 - suse: updates to templates to support openSUSE and SLES.
diff --git a/HACKING.rst b/HACKING.rst
index 93e3f42..3bb555c 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -16,6 +16,14 @@ Do these things once
16 When prompted for 'Project contact' or 'Canonical Project Manager' enter16 When prompted for 'Project contact' or 'Canonical Project Manager' enter
17 'Scott Moser'.17 'Scott Moser'.
1818
19* Configure git with your email and name for commit messages.
20
21 Your name will appear in commit messages and will also be used in
22 changelogs or release notes. Give yourself credit!::
23
24 git config user.name "Your Name"
25 git config user.email "Your Email"
26
19* Clone the upstream `repository`_ on Launchpad::27* Clone the upstream `repository`_ on Launchpad::
2028
21 git clone https://git.launchpad.net/cloud-init29 git clone https://git.launchpad.net/cloud-init
diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py
index 69b9e43..3ba5903 100644
--- a/cloudinit/analyze/__main__.py
+++ b/cloudinit/analyze/__main__.py
@@ -6,6 +6,8 @@ import argparse
6import re6import re
7import sys7import sys
88
9from cloudinit.util import json_dumps
10
9from . import dump11from . import dump
10from . import show12from . import show
1113
@@ -112,7 +114,7 @@ def analyze_show(name, args):
112def analyze_dump(name, args):114def analyze_dump(name, args):
113 """Dump cloud-init events in json format"""115 """Dump cloud-init events in json format"""
114 (infh, outfh) = configure_io(args)116 (infh, outfh) = configure_io(args)
115 outfh.write(dump.json_dumps(_get_events(infh)) + '\n')117 outfh.write(json_dumps(_get_events(infh)) + '\n')
116118
117119
118def _get_events(infile):120def _get_events(infile):
diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py
index ca4da49..b071aa1 100644
--- a/cloudinit/analyze/dump.py
+++ b/cloudinit/analyze/dump.py
@@ -2,7 +2,6 @@
22
3import calendar3import calendar
4from datetime import datetime4from datetime import datetime
5import json
6import sys5import sys
76
8from cloudinit import util7from cloudinit import util
@@ -132,11 +131,6 @@ def parse_ci_logline(line):
132 return event131 return event
133132
134133
135def json_dumps(data):
136 return json.dumps(data, indent=1, sort_keys=True,
137 separators=(',', ': '))
138
139
140def dump_events(cisource=None, rawdata=None):134def dump_events(cisource=None, rawdata=None):
141 events = []135 events = []
142 event = None136 event = None
@@ -169,7 +163,7 @@ def main():
169 else:163 else:
170 cisource = sys.stdin164 cisource = sys.stdin
171165
172 return json_dumps(dump_events(cisource))166 return util.json_dumps(dump_events(cisource))
173167
174168
175if __name__ == "__main__":169if __name__ == "__main__":
diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py
176new file mode 100644170new file mode 100644
index 0000000..de22f7f
--- /dev/null
+++ b/cloudinit/cmd/clean.py
@@ -0,0 +1,103 @@
1# Copyright (C) 2017 Canonical Ltd.
2#
3# This file is part of cloud-init. See LICENSE file for license information.
4
5"""Define 'clean' utility and handler as part of cloud-init commandline."""
6
7import argparse
8import os
9import sys
10
11from cloudinit.stages import Init
12from cloudinit.util import (
13 ProcessExecutionError, chdir, del_dir, del_file, get_config_logfiles,
14 is_link, subp)
15
16
17def error(msg):
18 sys.stderr.write("ERROR: " + msg + "\n")
19
20
21def get_parser(parser=None):
22 """Build or extend an arg parser for clean utility.
23
24 @param parser: Optional existing ArgumentParser instance representing the
25 clean subcommand which will be extended to support the args of
26 this utility.
27
28 @returns: ArgumentParser with proper argument configuration.
29 """
30 if not parser:
31 parser = argparse.ArgumentParser(
32 prog='clean',
33 description=('Remove logs and artifacts so cloud-init re-runs on '
34 'a clean system'))
35 parser.add_argument(
36 '-l', '--logs', action='store_true', default=False, dest='remove_logs',
37 help='Remove cloud-init logs.')
38 parser.add_argument(
39 '-r', '--reboot', action='store_true', default=False,
40 help='Reboot system after logs are cleaned so cloud-init re-runs.')
41 parser.add_argument(
42 '-s', '--seed', action='store_true', default=False, dest='remove_seed',
43 help='Remove cloud-init seed directory /var/lib/cloud/seed.')
44 return parser
45
46
47def remove_artifacts(remove_logs, remove_seed=False):
48 """Helper which removes artifacts dir and optionally log files.
49
50 @param: remove_logs: Boolean. Set True to delete the cloud_dir path. False
51 preserves them.
52 @param: remove_seed: Boolean. Set True to also delete seed subdir in
53 paths.cloud_dir.
54 @returns: 0 on success, 1 otherwise.
55 """
56 init = Init(ds_deps=[])
57 init.read_cfg()
58 if remove_logs:
59 for log_file in get_config_logfiles(init.cfg):
60 del_file(log_file)
61
62 if not os.path.isdir(init.paths.cloud_dir):
63 return 0 # Artifacts dir already cleaned
64 with chdir(init.paths.cloud_dir):
65 for path in os.listdir('.'):
66 if path == 'seed' and not remove_seed:
67 continue
68 try:
69 if os.path.isdir(path) and not is_link(path):
70 del_dir(path)
71 else:
72 del_file(path)
73 except OSError as e:
74 error('Could not remove {0}: {1}'.format(path, str(e)))
75 return 1
76 return 0
77
78
79def handle_clean_args(name, args):
80 """Handle calls to 'cloud-init clean' as a subcommand."""
81 exit_code = remove_artifacts(args.remove_logs, args.remove_seed)
82 if exit_code == 0 and args.reboot:
83 cmd = ['shutdown', '-r', 'now']
84 try:
85 subp(cmd, capture=False)
86 except ProcessExecutionError as e:
87 error(
88 'Could not reboot this system using "{0}": {1}'.format(
89 cmd, str(e)))
90 exit_code = 1
91 return exit_code
92
93
94def main():
95 """Tool to collect and tar all cloud-init related logs."""
96 parser = get_parser()
97 sys.exit(handle_clean_args('clean', parser.parse_args()))
98
99
100if __name__ == '__main__':
101 main()
102
103# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index 6fb9d9e..d2f1b77 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -421,7 +421,13 @@ def di_report_warn(datasource, cfg):
421 LOG.debug("no di_report found in config.")421 LOG.debug("no di_report found in config.")
422 return422 return
423423
424 dicfg = cfg.get('di_report', {})424 dicfg = cfg['di_report']
425 if dicfg is None:
426 # ds-identify may write 'di_report:\n #comment\n'
427 # which reads as {'di_report': None}
428 LOG.debug("di_report was None.")
429 return
430
425 if not isinstance(dicfg, dict):431 if not isinstance(dicfg, dict):
426 LOG.warning("di_report config not a dictionary: %s", dicfg)432 LOG.warning("di_report config not a dictionary: %s", dicfg)
427 return433 return
@@ -603,7 +609,11 @@ def status_wrapper(name, args, data_d=None, link_d=None):
603 else:609 else:
604 raise ValueError("unknown name: %s" % name)610 raise ValueError("unknown name: %s" % name)
605611
606 modes = ('init', 'init-local', 'modules-config', 'modules-final')612 modes = ('init', 'init-local', 'modules-init', 'modules-config',
613 'modules-final')
614 if mode not in modes:
615 raise ValueError(
616 "Invalid cloud init mode specified '{0}'".format(mode))
607617
608 status = None618 status = None
609 if mode == 'init-local':619 if mode == 'init-local':
@@ -615,16 +625,18 @@ def status_wrapper(name, args, data_d=None, link_d=None):
615 except Exception:625 except Exception:
616 pass626 pass
617627
628 nullstatus = {
629 'errors': [],
630 'start': None,
631 'finished': None,
632 }
618 if status is None:633 if status is None:
619 nullstatus = {
620 'errors': [],
621 'start': None,
622 'finished': None,
623 }
624 status = {'v1': {}}634 status = {'v1': {}}
625 for m in modes:635 for m in modes:
626 status['v1'][m] = nullstatus.copy()636 status['v1'][m] = nullstatus.copy()
627 status['v1']['datasource'] = None637 status['v1']['datasource'] = None
638 elif mode not in status['v1']:
639 status['v1'][mode] = nullstatus.copy()
628640
629 v1 = status['v1']641 v1 = status['v1']
630 v1['stage'] = mode642 v1['stage'] = mode
@@ -767,6 +779,12 @@ def main(sysv_args=None):
767 parser_collect_logs = subparsers.add_parser(779 parser_collect_logs = subparsers.add_parser(
768 'collect-logs', help='Collect and tar all cloud-init debug info')780 'collect-logs', help='Collect and tar all cloud-init debug info')
769781
782 parser_clean = subparsers.add_parser(
783 'clean', help='Remove logs and artifacts so cloud-init can re-run.')
784
785 parser_status = subparsers.add_parser(
786 'status', help='Report cloud-init status or wait on completion.')
787
770 if sysv_args:788 if sysv_args:
771 # Only load subparsers if subcommand is specified to avoid load cost789 # Only load subparsers if subcommand is specified to avoid load cost
772 if sysv_args[0] == 'analyze':790 if sysv_args[0] == 'analyze':
@@ -783,6 +801,18 @@ def main(sysv_args=None):
783 logs_parser(parser_collect_logs)801 logs_parser(parser_collect_logs)
784 parser_collect_logs.set_defaults(802 parser_collect_logs.set_defaults(
785 action=('collect-logs', handle_collect_logs_args))803 action=('collect-logs', handle_collect_logs_args))
804 elif sysv_args[0] == 'clean':
805 from cloudinit.cmd.clean import (
806 get_parser as clean_parser, handle_clean_args)
807 clean_parser(parser_clean)
808 parser_clean.set_defaults(
809 action=('clean', handle_clean_args))
810 elif sysv_args[0] == 'status':
811 from cloudinit.cmd.status import (
812 get_parser as status_parser, handle_status_args)
813 status_parser(parser_status)
814 parser_status.set_defaults(
815 action=('status', handle_status_args))
786816
787 args = parser.parse_args(args=sysv_args)817 args = parser.parse_args(args=sysv_args)
788818
diff --git a/cloudinit/cmd/status.py b/cloudinit/cmd/status.py
789new file mode 100644819new file mode 100644
index 0000000..d7aaee9
--- /dev/null
+++ b/cloudinit/cmd/status.py
@@ -0,0 +1,160 @@
1# Copyright (C) 2017 Canonical Ltd.
2#
3# This file is part of cloud-init. See LICENSE file for license information.
4
5"""Define 'status' utility and handler as part of cloud-init commandline."""
6
7import argparse
8import os
9import sys
10from time import gmtime, strftime, sleep
11
12from cloudinit.distros import uses_systemd
13from cloudinit.stages import Init
14from cloudinit.util import get_cmdline, load_file, load_json
15
16CLOUDINIT_DISABLED_FILE = '/etc/cloud/cloud-init.disabled'
17
18# customer visible status messages
19STATUS_ENABLED_NOT_RUN = 'not run'
20STATUS_RUNNING = 'running'
21STATUS_DONE = 'done'
22STATUS_ERROR = 'error'
23STATUS_DISABLED = 'disabled'
24
25
26def get_parser(parser=None):
27 """Build or extend an arg parser for status utility.
28
29 @param parser: Optional existing ArgumentParser instance representing the
30 status subcommand which will be extended to support the args of
31 this utility.
32
33 @returns: ArgumentParser with proper argument configuration.
34 """
35 if not parser:
36 parser = argparse.ArgumentParser(
37 prog='status',
38 description='Report run status of cloud init')
39 parser.add_argument(
40 '-l', '--long', action='store_true', default=False,
41 help=('Report long format of statuses including run stage name and'
42 ' error messages'))
43 parser.add_argument(
44 '-w', '--wait', action='store_true', default=False,
45 help='Block waiting on cloud-init to complete')
46 return parser
47
48
49def handle_status_args(name, args):
50 """Handle calls to 'cloud-init status' as a subcommand."""
51 # Read configured paths
52 init = Init(ds_deps=[])
53 init.read_cfg()
54
55 status, status_detail, time = _get_status_details(init.paths)
56 if args.wait:
57 while status in (STATUS_ENABLED_NOT_RUN, STATUS_RUNNING):
58 sys.stdout.write('.')
59 sys.stdout.flush()
60 status, status_detail, time = _get_status_details(init.paths)
61 sleep(0.25)
62 sys.stdout.write('\n')
63 if args.long:
64 print('status: {0}'.format(status))
65 if time:
66 print('time: {0}'.format(time))
67 print('detail:\n{0}'.format(status_detail))
68 else:
69 print('status: {0}'.format(status))
70 return 1 if status == STATUS_ERROR else 0
71
72
73def _is_cloudinit_disabled(disable_file, paths):
74 """Report whether cloud-init is disabled.
75
76 @param disable_file: The path to the cloud-init disable file.
77 @param paths: An initialized cloudinit.helpers.Paths object.
78 @returns: A tuple containing (bool, reason) about cloud-init's status and
79 why.
80 """
81 is_disabled = False
82 cmdline_parts = get_cmdline().split()
83 if not uses_systemd():
84 reason = 'Cloud-init enabled on sysvinit'
85 elif 'cloud-init=enabled' in cmdline_parts:
86 reason = 'Cloud-init enabled by kernel command line cloud-init=enabled'
87 elif os.path.exists(disable_file):
88 is_disabled = True
89 reason = 'Cloud-init disabled by {0}'.format(disable_file)
90 elif 'cloud-init=disabled' in cmdline_parts:
91 is_disabled = True
92 reason = 'Cloud-init disabled by kernel parameter cloud-init=disabled'
93 elif not os.path.exists(os.path.join(paths.run_dir, 'enabled')):
94 is_disabled = True
95 reason = 'Cloud-init disabled by cloud-init-generator'
96 else:
97 reason = 'Cloud-init enabled by systemd cloud-init-generator'
98 return (is_disabled, reason)
99
100
101def _get_status_details(paths):
102 """Return a 3-tuple of status, status_details and time of last event.
103
104 @param paths: An initialized cloudinit.helpers.paths object.
105
106 Values are obtained from parsing paths.run_dir/status.json.
107 """
108
109 status = STATUS_ENABLED_NOT_RUN
110 status_detail = ''
111 status_v1 = {}
112
113 status_file = os.path.join(paths.run_dir, 'status.json')
114
115 (is_disabled, reason) = _is_cloudinit_disabled(
116 CLOUDINIT_DISABLED_FILE, paths)
117 if is_disabled:
118 status = STATUS_DISABLED
119 status_detail = reason
120 if os.path.exists(status_file):
121 status_v1 = load_json(load_file(status_file)).get('v1', {})
122 errors = []
123 latest_event = 0
124 for key, value in sorted(status_v1.items()):
125 if key == 'stage':
126 if value:
127 status_detail = 'Running in stage: {0}'.format(value)
128 elif key == 'datasource':
129 status_detail = value
130 elif isinstance(value, dict):
131 errors.extend(value.get('errors', []))
132 start = value.get('start') or 0
133 finished = value.get('finished') or 0
134 if finished == 0 and start != 0:
135 status = STATUS_RUNNING
136 event_time = max(start, finished)
137 if event_time > latest_event:
138 latest_event = event_time
139 if errors:
140 status = STATUS_ERROR
141 status_detail = '\n'.join(errors)
142 elif status == STATUS_ENABLED_NOT_RUN and latest_event > 0:
143 status = STATUS_DONE
144 if latest_event:
145 time = strftime('%a, %d %b %Y %H:%M:%S %z', gmtime(latest_event))
146 else:
147 time = ''
148 return status, status_detail, time
149
150
151def main():
152 """Tool to report status of cloud-init."""
153 parser = get_parser()
154 sys.exit(handle_status_args('status', parser.parse_args()))
155
156
157if __name__ == '__main__':
158 main()
159
160# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/tests/__init__.py b/cloudinit/cmd/tests/__init__.py
0new file mode 100644161new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/cloudinit/cmd/tests/__init__.py
diff --git a/cloudinit/cmd/tests/test_clean.py b/cloudinit/cmd/tests/test_clean.py
1new file mode 100644162new file mode 100644
index 0000000..6713af4
--- /dev/null
+++ b/cloudinit/cmd/tests/test_clean.py
@@ -0,0 +1,176 @@
1# This file is part of cloud-init. See LICENSE file for license information.
2
3from cloudinit.cmd import clean
4from cloudinit.util import ensure_dir, sym_link, write_file
5from cloudinit.tests.helpers import CiTestCase, wrap_and_call, mock
6from collections import namedtuple
7import os
8from six import StringIO
9
# Lightweight stand-in for cloudinit.helpers.Paths; only the cloud_dir
# attribute is read by the clean module under test.
mypaths = namedtuple('MyPaths', 'cloud_dir')
11
12
class TestClean(CiTestCase):
    """Tests for cloudinit.cmd.clean: artifact, seed and log removal."""

    def setUp(self):
        super(TestClean, self).setUp()
        self.new_root = self.tmp_dir()
        self.artifact_dir = self.tmp_path('artifacts', self.new_root)
        self.log1 = self.tmp_path('cloud-init.log', self.new_root)
        self.log2 = self.tmp_path('cloud-init-output.log', self.new_root)

        class FakeInit(object):
            # Minimal stand-in for the project's Init class; exposes only
            # the cfg and paths attributes the clean module reads.
            cfg = {'def_log_file': self.log1,
                   'output': {'all': '|tee -a {0}'.format(self.log2)}}
            paths = mypaths(cloud_dir=self.artifact_dir)

            def __init__(self, ds_deps):
                pass

            def read_cfg(self):
                pass

        self.init_class = FakeInit

    def test_remove_artifacts_removes_logs(self):
        """remove_artifacts removes logs when remove_logs is True."""
        write_file(self.log1, 'cloud-init-log')
        write_file(self.log2, 'cloud-init-output-log')

        self.assertFalse(
            os.path.exists(self.artifact_dir), 'Unexpected artifacts dir')
        retcode = wrap_and_call(
            'cloudinit.cmd.clean',
            {'Init': {'side_effect': self.init_class}},
            clean.remove_artifacts, remove_logs=True)
        self.assertFalse(os.path.exists(self.log1), 'Unexpected file')
        self.assertFalse(os.path.exists(self.log2), 'Unexpected file')
        self.assertEqual(0, retcode)

    def test_remove_artifacts_preserves_logs(self):
        """remove_artifacts leaves logs when remove_logs is False."""
        write_file(self.log1, 'cloud-init-log')
        write_file(self.log2, 'cloud-init-output-log')

        retcode = wrap_and_call(
            'cloudinit.cmd.clean',
            {'Init': {'side_effect': self.init_class}},
            clean.remove_artifacts, remove_logs=False)
        self.assertTrue(os.path.exists(self.log1), 'Missing expected file')
        self.assertTrue(os.path.exists(self.log2), 'Missing expected file')
        self.assertEqual(0, retcode)

    def test_remove_artifacts_removes_unlinks_symlinks(self):
        """remove_artifacts cleans artifacts dir unlinking any symlinks."""
        dir1 = os.path.join(self.artifact_dir, 'dir1')
        ensure_dir(dir1)
        symlink = os.path.join(self.artifact_dir, 'mylink')
        sym_link(dir1, symlink)

        retcode = wrap_and_call(
            'cloudinit.cmd.clean',
            {'Init': {'side_effect': self.init_class}},
            clean.remove_artifacts, remove_logs=False)
        self.assertEqual(0, retcode)
        # Both the symlink and its target directory must be gone.
        for path in (dir1, symlink):
            self.assertFalse(
                os.path.exists(path),
                'Unexpected {0} dir'.format(path))

    def test_remove_artifacts_removes_artifacts_skipping_seed(self):
        """remove_artifacts cleans artifacts dir with exception of seed dir."""
        dirs = [
            self.artifact_dir,
            os.path.join(self.artifact_dir, 'seed'),
            os.path.join(self.artifact_dir, 'dir1'),
            os.path.join(self.artifact_dir, 'dir2')]
        for _dir in dirs:
            ensure_dir(_dir)

        retcode = wrap_and_call(
            'cloudinit.cmd.clean',
            {'Init': {'side_effect': self.init_class}},
            clean.remove_artifacts, remove_logs=False)
        self.assertEqual(0, retcode)
        # artifact_dir itself and the seed dir are preserved ...
        for expected_dir in dirs[:2]:
            self.assertTrue(
                os.path.exists(expected_dir),
                'Missing {0} dir'.format(expected_dir))
        # ... while all other subdirectories are removed.
        for deleted_dir in dirs[2:]:
            self.assertFalse(
                os.path.exists(deleted_dir),
                'Unexpected {0} dir'.format(deleted_dir))

    def test_remove_artifacts_removes_artifacts_removes_seed(self):
        """remove_artifacts removes seed dir when remove_seed is True."""
        dirs = [
            self.artifact_dir,
            os.path.join(self.artifact_dir, 'seed'),
            os.path.join(self.artifact_dir, 'dir1'),
            os.path.join(self.artifact_dir, 'dir2')]
        for _dir in dirs:
            ensure_dir(_dir)

        retcode = wrap_and_call(
            'cloudinit.cmd.clean',
            {'Init': {'side_effect': self.init_class}},
            clean.remove_artifacts, remove_logs=False, remove_seed=True)
        self.assertEqual(0, retcode)
        self.assertTrue(
            os.path.exists(self.artifact_dir), 'Missing artifact dir')
        for deleted_dir in dirs[1:]:
            self.assertFalse(
                os.path.exists(deleted_dir),
                'Unexpected {0} dir'.format(deleted_dir))

    def test_remove_artifacts_returns_one_on_errors(self):
        """remove_artifacts returns non-zero on failure and prints an error."""
        ensure_dir(self.artifact_dir)
        ensure_dir(os.path.join(self.artifact_dir, 'dir1'))

        with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
            retcode = wrap_and_call(
                'cloudinit.cmd.clean',
                {'del_dir': {'side_effect': OSError('oops')},
                 'Init': {'side_effect': self.init_class}},
                clean.remove_artifacts, remove_logs=False)
        self.assertEqual(1, retcode)
        self.assertEqual(
            'ERROR: Could not remove dir1: oops\n', m_stderr.getvalue())

    def test_handle_clean_args_reboots(self):
        """handle_clean_args reboots when the reboot arg is provided."""

        called_cmds = []

        def fake_subp(cmd, capture):
            # Record the command instead of executing it.
            called_cmds.append((cmd, capture))
            return '', ''

        myargs = namedtuple('MyArgs', 'remove_logs remove_seed reboot')
        cmdargs = myargs(remove_logs=False, remove_seed=False, reboot=True)
        retcode = wrap_and_call(
            'cloudinit.cmd.clean',
            {'subp': {'side_effect': fake_subp},
             'Init': {'side_effect': self.init_class}},
            clean.handle_clean_args, name='does not matter', args=cmdargs)
        self.assertEqual(0, retcode)
        self.assertEqual(
            [(['shutdown', '-r', 'now'], False)], called_cmds)

    def test_status_main(self):
        '''clean.main can be run as a standalone script.'''
        # NOTE(review): name says 'status' but this exercises clean.main;
        # looks like a copy-paste from test_status.py — consider renaming
        # to test_clean_main.
        write_file(self.log1, 'cloud-init-log')
        with self.assertRaises(SystemExit) as context_manager:
            wrap_and_call(
                'cloudinit.cmd.clean',
                {'Init': {'side_effect': self.init_class},
                 'sys.argv': {'new': ['clean', '--logs']}},
                clean.main)

        self.assertRaisesCodeEqual(0, context_manager.exception.code)
        self.assertFalse(
            os.path.exists(self.log1), 'Unexpected log {0}'.format(self.log1))


# vi: ts=4 expandtab syntax=python
diff --git a/cloudinit/cmd/tests/test_status.py b/cloudinit/cmd/tests/test_status.py
0new file mode 100644177new file mode 100644
index 0000000..a7c0a91
--- /dev/null
+++ b/cloudinit/cmd/tests/test_status.py
@@ -0,0 +1,368 @@
1# This file is part of cloud-init. See LICENSE file for license information.
2
3from collections import namedtuple
4import os
5from six import StringIO
6from textwrap import dedent
7
8from cloudinit.atomic_helper import write_json
9from cloudinit.cmd import status
10from cloudinit.util import write_file
11from cloudinit.tests.helpers import CiTestCase, wrap_and_call, mock
12
# Stand-ins for cloudinit.helpers.Paths and parsed CLI arguments: the status
# module only reads run_dir from paths and long/wait from the args.
mypaths = namedtuple('MyPaths', 'run_dir')
myargs = namedtuple('MyArgs', 'long wait')
15
16
class TestStatus(CiTestCase):
    """Tests for cloudinit.cmd.status: disabled detection and reporting."""

    def setUp(self):
        super(TestStatus, self).setUp()
        self.new_root = self.tmp_dir()
        self.status_file = self.tmp_path('status.json', self.new_root)
        self.disable_file = self.tmp_path('cloudinit-disable', self.new_root)
        self.paths = mypaths(run_dir=self.new_root)

        class FakeInit(object):
            # Minimal stand-in for the project's Init class; exposes only
            # the paths attribute the status module reads.
            paths = self.paths

            def __init__(self, ds_deps):
                pass

            def read_cfg(self):
                pass

        self.init_class = FakeInit

    def test__is_cloudinit_disabled_false_on_sysvinit(self):
        '''When not in an environment using systemd, return False.'''
        write_file(self.disable_file, '')  # Create the ignored disable file
        (is_disabled, reason) = wrap_and_call(
            'cloudinit.cmd.status',
            {'uses_systemd': False},
            status._is_cloudinit_disabled, self.disable_file, self.paths)
        self.assertFalse(
            is_disabled, 'expected enabled cloud-init on sysvinit')
        self.assertEqual('Cloud-init enabled on sysvinit', reason)

    def test__is_cloudinit_disabled_true_on_disable_file(self):
        '''When using systemd and disable_file is present return disabled.'''
        write_file(self.disable_file, '')  # Create observed disable file
        (is_disabled, reason) = wrap_and_call(
            'cloudinit.cmd.status',
            {'uses_systemd': True},
            status._is_cloudinit_disabled, self.disable_file, self.paths)
        self.assertTrue(is_disabled, 'expected disabled cloud-init')
        self.assertEqual(
            'Cloud-init disabled by {0}'.format(self.disable_file), reason)

    def test__is_cloudinit_disabled_false_on_kernel_cmdline_enable(self):
        '''Not disabled when using systemd and enabled via commandline.'''
        write_file(self.disable_file, '')  # Create ignored disable file
        (is_disabled, reason) = wrap_and_call(
            'cloudinit.cmd.status',
            {'uses_systemd': True,
             'get_cmdline': 'something cloud-init=enabled else'},
            status._is_cloudinit_disabled, self.disable_file, self.paths)
        self.assertFalse(is_disabled, 'expected enabled cloud-init')
        self.assertEqual(
            'Cloud-init enabled by kernel command line cloud-init=enabled',
            reason)

    def test__is_cloudinit_disabled_true_on_kernel_cmdline(self):
        '''Disabled when cloud-init=disabled is on the kernel commandline.'''
        (is_disabled, reason) = wrap_and_call(
            'cloudinit.cmd.status',
            {'uses_systemd': True,
             'get_cmdline': 'something cloud-init=disabled else'},
            status._is_cloudinit_disabled, self.disable_file, self.paths)
        self.assertTrue(is_disabled, 'expected disabled cloud-init')
        self.assertEqual(
            'Cloud-init disabled by kernel parameter cloud-init=disabled',
            reason)

    def test__is_cloudinit_disabled_true_when_generator_disables(self):
        '''When cloud-init-generator doesn't write enabled file return True.'''
        enabled_file = os.path.join(self.paths.run_dir, 'enabled')
        self.assertFalse(os.path.exists(enabled_file))
        (is_disabled, reason) = wrap_and_call(
            'cloudinit.cmd.status',
            {'uses_systemd': True,
             'get_cmdline': 'something'},
            status._is_cloudinit_disabled, self.disable_file, self.paths)
        self.assertTrue(is_disabled, 'expected disabled cloud-init')
        self.assertEqual('Cloud-init disabled by cloud-init-generator', reason)

    def test__is_cloudinit_disabled_false_when_enabled_in_systemd(self):
        '''Report enabled when systemd generator creates the enabled file.'''
        enabled_file = os.path.join(self.paths.run_dir, 'enabled')
        write_file(enabled_file, '')
        (is_disabled, reason) = wrap_and_call(
            'cloudinit.cmd.status',
            {'uses_systemd': True,
             'get_cmdline': 'something ignored'},
            status._is_cloudinit_disabled, self.disable_file, self.paths)
        self.assertFalse(is_disabled, 'expected enabled cloud-init')
        self.assertEqual(
            'Cloud-init enabled by systemd cloud-init-generator', reason)

    def test_status_returns_not_run(self):
        '''When status.json does not exist yet, return 'not run'.'''
        self.assertFalse(
            os.path.exists(self.status_file), 'Unexpected status.json found')
        cmdargs = myargs(long=False, wait=False)
        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
            retcode = wrap_and_call(
                'cloudinit.cmd.status',
                {'_is_cloudinit_disabled': (False, ''),
                 'Init': {'side_effect': self.init_class}},
                status.handle_status_args, 'ignored', cmdargs)
        self.assertEqual(0, retcode)
        self.assertEqual('status: not run\n', m_stdout.getvalue())

    def test_status_returns_disabled_long_on_presence_of_disable_file(self):
        '''When cloudinit is disabled, return disabled reason.'''

        checked_files = []

        def fakeexists(filepath):
            # Track every existence check; report all files except
            # status.json as present.
            checked_files.append(filepath)
            status_file = os.path.join(self.paths.run_dir, 'status.json')
            return bool(not filepath == status_file)

        cmdargs = myargs(long=True, wait=False)
        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
            retcode = wrap_and_call(
                'cloudinit.cmd.status',
                {'os.path.exists': {'side_effect': fakeexists},
                 '_is_cloudinit_disabled': (True, 'disabled for some reason'),
                 'Init': {'side_effect': self.init_class}},
                status.handle_status_args, 'ignored', cmdargs)
        self.assertEqual(0, retcode)
        self.assertEqual(
            [os.path.join(self.paths.run_dir, 'status.json')],
            checked_files)
        expected = dedent('''\
            status: disabled
            detail:
            disabled for some reason
        ''')
        self.assertEqual(expected, m_stdout.getvalue())

    def test_status_returns_running(self):
        '''Report running when status exists with an unfinished stage.'''
        write_json(self.status_file,
                   {'v1': {'init': {'start': 1, 'finished': None}}})
        cmdargs = myargs(long=False, wait=False)
        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
            retcode = wrap_and_call(
                'cloudinit.cmd.status',
                {'_is_cloudinit_disabled': (False, ''),
                 'Init': {'side_effect': self.init_class}},
                status.handle_status_args, 'ignored', cmdargs)
        self.assertEqual(0, retcode)
        self.assertEqual('status: running\n', m_stdout.getvalue())

    def test_status_returns_done(self):
        '''Reports done when stage is None and all stages are finished.'''
        write_json(
            self.status_file,
            {'v1': {'stage': None,
                    'datasource': (
                        'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]'
                        '[dsmode=net]'),
                    'blah': {'finished': 123.456},
                    'init': {'errors': [], 'start': 124.567,
                             'finished': 125.678},
                    'init-local': {'start': 123.45, 'finished': 123.46}}})
        cmdargs = myargs(long=False, wait=False)
        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
            retcode = wrap_and_call(
                'cloudinit.cmd.status',
                {'_is_cloudinit_disabled': (False, ''),
                 'Init': {'side_effect': self.init_class}},
                status.handle_status_args, 'ignored', cmdargs)
        self.assertEqual(0, retcode)
        self.assertEqual('status: done\n', m_stdout.getvalue())

    def test_status_returns_done_long(self):
        '''Long format of done status includes datasource info.'''
        write_json(
            self.status_file,
            {'v1': {'stage': None,
                    'datasource': (
                        'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]'
                        '[dsmode=net]'),
                    'init': {'start': 124.567, 'finished': 125.678},
                    'init-local': {'start': 123.45, 'finished': 123.46}}})
        cmdargs = myargs(long=True, wait=False)
        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
            retcode = wrap_and_call(
                'cloudinit.cmd.status',
                {'_is_cloudinit_disabled': (False, ''),
                 'Init': {'side_effect': self.init_class}},
                status.handle_status_args, 'ignored', cmdargs)
        self.assertEqual(0, retcode)
        expected = dedent('''\
            status: done
            time: Thu, 01 Jan 1970 00:02:05 +0000
            detail:
            DataSourceNoCloud [seed=/var/.../seed/nocloud-net][dsmode=net]
        ''')
        self.assertEqual(expected, m_stdout.getvalue())

    def test_status_on_errors(self):
        '''Reports error when any stage has errors.'''
        write_json(
            self.status_file,
            {'v1': {'stage': None,
                    'blah': {'errors': [], 'finished': 123.456},
                    'init': {'errors': ['error1'], 'start': 124.567,
                             'finished': 125.678},
                    'init-local': {'start': 123.45, 'finished': 123.46}}})
        cmdargs = myargs(long=False, wait=False)
        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
            retcode = wrap_and_call(
                'cloudinit.cmd.status',
                {'_is_cloudinit_disabled': (False, ''),
                 'Init': {'side_effect': self.init_class}},
                status.handle_status_args, 'ignored', cmdargs)
        self.assertEqual(1, retcode)
        self.assertEqual('status: error\n', m_stdout.getvalue())

    def test_status_on_errors_long(self):
        '''Long format of error status includes all error messages.'''
        write_json(
            self.status_file,
            {'v1': {'stage': None,
                    'datasource': (
                        'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]'
                        '[dsmode=net]'),
                    'init': {'errors': ['error1'], 'start': 124.567,
                             'finished': 125.678},
                    'init-local': {'errors': ['error2', 'error3'],
                                   'start': 123.45, 'finished': 123.46}}})
        cmdargs = myargs(long=True, wait=False)
        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
            retcode = wrap_and_call(
                'cloudinit.cmd.status',
                {'_is_cloudinit_disabled': (False, ''),
                 'Init': {'side_effect': self.init_class}},
                status.handle_status_args, 'ignored', cmdargs)
        self.assertEqual(1, retcode)
        expected = dedent('''\
            status: error
            time: Thu, 01 Jan 1970 00:02:05 +0000
            detail:
            error1
            error2
            error3
        ''')
        self.assertEqual(expected, m_stdout.getvalue())

    def test_status_returns_running_long_format(self):
        '''Long format reports the stage in which we are running.'''
        write_json(
            self.status_file,
            {'v1': {'stage': 'init',
                    'init': {'start': 124.456, 'finished': None},
                    'init-local': {'start': 123.45, 'finished': 123.46}}})
        cmdargs = myargs(long=True, wait=False)
        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
            retcode = wrap_and_call(
                'cloudinit.cmd.status',
                {'_is_cloudinit_disabled': (False, ''),
                 'Init': {'side_effect': self.init_class}},
                status.handle_status_args, 'ignored', cmdargs)
        self.assertEqual(0, retcode)
        expected = dedent('''\
            status: running
            time: Thu, 01 Jan 1970 00:02:04 +0000
            detail:
            Running in stage: init
        ''')
        self.assertEqual(expected, m_stdout.getvalue())

    def test_status_wait_blocks_until_done(self):
        '''Specifying wait will poll every 1/4 second until done state.'''
        running_json = {
            'v1': {'stage': 'init',
                   'init': {'start': 124.456, 'finished': None},
                   'init-local': {'start': 123.45, 'finished': 123.46}}}
        done_json = {
            'v1': {'stage': None,
                   'init': {'start': 124.456, 'finished': 125.678},
                   'init-local': {'start': 123.45, 'finished': 123.46}}}

        self.sleep_calls = 0

        def fake_sleep(interval):
            # Simulate cloud-init progressing between polling intervals:
            # running on the 2nd poll, done on the 3rd.
            self.assertEqual(0.25, interval)
            self.sleep_calls += 1
            if self.sleep_calls == 2:
                write_json(self.status_file, running_json)
            elif self.sleep_calls == 3:
                write_json(self.status_file, done_json)

        cmdargs = myargs(long=False, wait=True)
        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
            retcode = wrap_and_call(
                'cloudinit.cmd.status',
                {'sleep': {'side_effect': fake_sleep},
                 '_is_cloudinit_disabled': (False, ''),
                 'Init': {'side_effect': self.init_class}},
                status.handle_status_args, 'ignored', cmdargs)
        self.assertEqual(0, retcode)
        self.assertEqual(4, self.sleep_calls)
        self.assertEqual('....\nstatus: done\n', m_stdout.getvalue())

    def test_status_wait_blocks_until_error(self):
        '''Specifying wait will poll every 1/4 second until error state.'''
        running_json = {
            'v1': {'stage': 'init',
                   'init': {'start': 124.456, 'finished': None},
                   'init-local': {'start': 123.45, 'finished': 123.46}}}
        error_json = {
            'v1': {'stage': None,
                   'init': {'errors': ['error1'], 'start': 124.456,
                            'finished': 125.678},
                   'init-local': {'start': 123.45, 'finished': 123.46}}}

        self.sleep_calls = 0

        def fake_sleep(interval):
            # Simulate cloud-init progressing between polling intervals:
            # running on the 2nd poll, errored on the 3rd.
            self.assertEqual(0.25, interval)
            self.sleep_calls += 1
            if self.sleep_calls == 2:
                write_json(self.status_file, running_json)
            elif self.sleep_calls == 3:
                write_json(self.status_file, error_json)

        cmdargs = myargs(long=False, wait=True)
        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
            retcode = wrap_and_call(
                'cloudinit.cmd.status',
                {'sleep': {'side_effect': fake_sleep},
                 '_is_cloudinit_disabled': (False, ''),
                 'Init': {'side_effect': self.init_class}},
                status.handle_status_args, 'ignored', cmdargs)
        self.assertEqual(1, retcode)
        self.assertEqual(4, self.sleep_calls)
        self.assertEqual('....\nstatus: error\n', m_stdout.getvalue())

    def test_status_main(self):
        '''status.main can be run as a standalone script.'''
        write_json(self.status_file,
                   {'v1': {'init': {'start': 1, 'finished': None}}})
        with self.assertRaises(SystemExit) as context_manager:
            with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
                wrap_and_call(
                    'cloudinit.cmd.status',
                    {'sys.argv': {'new': ['status']},
                     '_is_cloudinit_disabled': (False, ''),
                     'Init': {'side_effect': self.init_class}},
                    status.main)
        self.assertRaisesCodeEqual(0, context_manager.exception.code)
        self.assertEqual('status: running\n', m_stdout.getvalue())

# vi: ts=4 expandtab syntax=python
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index 177cbcf..5b9cbca 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -275,8 +275,9 @@ def handle(name, ocfg, cloud, log, _):
275 cfg = ocfg.get('apt', {})275 cfg = ocfg.get('apt', {})
276276
277 if not isinstance(cfg, dict):277 if not isinstance(cfg, dict):
278 raise ValueError("Expected dictionary for 'apt' config, found %s",278 raise ValueError(
279 type(cfg))279 "Expected dictionary for 'apt' config, found {config_type}".format(
280 config_type=type(cfg)))
280281
281 apply_debconf_selections(cfg, target)282 apply_debconf_selections(cfg, target)
282 apply_apt(cfg, cloud, target)283 apply_apt(cfg, cloud, target)
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index c2b83ae..c3e8c48 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -788,7 +788,8 @@ def mkpart(device, definition):
788 # This prevents you from overwriting the device788 # This prevents you from overwriting the device
789 LOG.debug("Checking if device %s is a valid device", device)789 LOG.debug("Checking if device %s is a valid device", device)
790 if not is_device_valid(device):790 if not is_device_valid(device):
791 raise Exception("Device %s is not a disk device!", device)791 raise Exception(
792 'Device {device} is not a disk device!'.format(device=device))
792793
793 # Remove the partition table entries794 # Remove the partition table entries
794 if isinstance(layout, str) and layout.lower() == "remove":795 if isinstance(layout, str) and layout.lower() == "remove":
@@ -945,8 +946,9 @@ def mkfs(fs_cfg):
945946
946 # Check that we can create the FS947 # Check that we can create the FS
947 if not (fs_type or fs_cmd):948 if not (fs_type or fs_cmd):
948 raise Exception("No way to create filesystem '%s'. fs_type or fs_cmd "949 raise Exception(
949 "must be set.", label)950 "No way to create filesystem '{label}'. fs_type or fs_cmd "
951 "must be set.".format(label=label))
950952
951 # Create the commands953 # Create the commands
952 shell = False954 shell = False
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
index 8f9f1ab..eaf1e94 100644
--- a/cloudinit/config/cc_landscape.py
+++ b/cloudinit/config/cc_landscape.py
@@ -94,10 +94,10 @@ def handle(_name, cfg, cloud, log, _args):
94 ls_cloudcfg = cfg.get("landscape", {})94 ls_cloudcfg = cfg.get("landscape", {})
9595
96 if not isinstance(ls_cloudcfg, (dict)):96 if not isinstance(ls_cloudcfg, (dict)):
97 raise RuntimeError(("'landscape' key existed in config,"97 raise RuntimeError(
98 " but not a dictionary type,"98 "'landscape' key existed in config, but not a dictionary type,"
99 " is a %s instead"),99 " is a {_type} instead".format(
100 type_utils.obj_name(ls_cloudcfg))100 _type=type_utils.obj_name(ls_cloudcfg)))
101 if not ls_cloudcfg:101 if not ls_cloudcfg:
102 return102 return
103103
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index f50bcb3..cbd0237 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -106,9 +106,9 @@ def handle(name, cfg, cloud, log, _args):
106106
107 # TODO drop this when validate_cloudconfig_schema is strict=True107 # TODO drop this when validate_cloudconfig_schema is strict=True
108 if not isinstance(ntp_cfg, (dict)):108 if not isinstance(ntp_cfg, (dict)):
109 raise RuntimeError(("'ntp' key existed in config,"109 raise RuntimeError(
110 " but not a dictionary type,"110 "'ntp' key existed in config, but not a dictionary type,"
111 " is a %s %instead"), type_utils.obj_name(ntp_cfg))111 " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg)))
112112
113 validate_cloudconfig_schema(cfg, schema)113 validate_cloudconfig_schema(cfg, schema)
114 if ntp_installable():114 if ntp_installable():
@@ -206,8 +206,8 @@ def write_ntp_config_template(cfg, cloud, path, template=None):
206 if not template_fn:206 if not template_fn:
207 template_fn = cloud.get_template_filename('ntp.conf')207 template_fn = cloud.get_template_filename('ntp.conf')
208 if not template_fn:208 if not template_fn:
209 raise RuntimeError(("No template found, "209 raise RuntimeError(
210 "not rendering %s"), path)210 'No template found, not rendering {path}'.format(path=path))
211211
212 templater.render_to_file(template_fn, path, params)212 templater.render_to_file(template_fn, path, params)
213213
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index eba58b0..4da3a58 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -194,6 +194,7 @@ def doexit(sysexit):
194194
195195
196def execmd(exe_args, output=None, data_in=None):196def execmd(exe_args, output=None, data_in=None):
197 ret = 1
197 try:198 try:
198 proc = subprocess.Popen(exe_args, stdin=subprocess.PIPE,199 proc = subprocess.Popen(exe_args, stdin=subprocess.PIPE,
199 stdout=output, stderr=subprocess.STDOUT)200 stdout=output, stderr=subprocess.STDOUT)
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 0d282e6..cec22bb 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -59,7 +59,17 @@ __doc__ = get_schema_doc(schema) # Supplement python help()
5959
6060
61def _resize_btrfs(mount_point, devpth):61def _resize_btrfs(mount_point, devpth):
62 return ('btrfs', 'filesystem', 'resize', 'max', mount_point)62 # If "/" is ro resize will fail. However it should be allowed since resize
63 # makes everything bigger and subvolumes that are not ro will benefit.
64 # Use a subvolume that is not ro to trick the resize operation to do the
65 # "right" thing. The use of ".snapshot" is specific to "snapper" a generic
66 # solution would be walk the subvolumes and find a rw mounted subvolume.
67 if (not util.mount_is_read_write(mount_point) and
68 os.path.isdir("%s/.snapshots" % mount_point)):
69 return ('btrfs', 'filesystem', 'resize', 'max',
70 '%s/.snapshots' % mount_point)
71 else:
72 return ('btrfs', 'filesystem', 'resize', 'max', mount_point)
6373
6474
65def _resize_ext(mount_point, devpth):75def _resize_ext(mount_point, devpth):
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index a9d21e7..530808c 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -276,9 +276,8 @@ class SubscriptionManager(object):
276 cmd = ['attach', '--auto']276 cmd = ['attach', '--auto']
277 try:277 try:
278 return_out, return_err = self._sub_man_cli(cmd)278 return_out, return_err = self._sub_man_cli(cmd)
279 except util.ProcessExecutionError:279 except util.ProcessExecutionError as e:
280 self.log_warn("Auto-attach failed with: "280 self.log_warn("Auto-attach failed with: {0}".format(e))
281 "{0}]".format(return_err.strip()))
282 return False281 return False
283 for line in return_out.split("\n"):282 for line in return_out.split("\n"):
284 if line is not "":283 if line is not "":
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index 50ff9e3..af08788 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -20,15 +20,15 @@ which defaults to ``20-cloud-config.conf``. The rsyslog config directory to
20write config files to may be specified in ``config_dir``, which defaults to20write config files to may be specified in ``config_dir``, which defaults to
21``/etc/rsyslog.d``.21``/etc/rsyslog.d``.
2222
23A list of configurations for for rsyslog can be specified under the ``configs``23A list of configurations for rsyslog can be specified under the ``configs`` key
24key in the ``rsyslog`` config. Each entry in ``configs`` is either a string or24in the ``rsyslog`` config. Each entry in ``configs`` is either a string or a
25a dictionary. Each config entry contains a configuration string and a file to25dictionary. Each config entry contains a configuration string and a file to
26write it to. For config entries that are a dictionary, ``filename`` sets the26write it to. For config entries that are a dictionary, ``filename`` sets the
27target filename and ``content`` specifies the config string to write. For27target filename and ``content`` specifies the config string to write. For
28config entries that are only a string, the string is used as the config string28config entries that are only a string, the string is used as the config string
29to write. If the filename to write the config to is not specified, the value of29to write. If the filename to write the config to is not specified, the value of
30the ``config_filename`` key is used. A file with the selected filename will30the ``config_filename`` key is used. A file with the selected filename will be
31be written inside the directory specified by ``config_dir``.31written inside the directory specified by ``config_dir``.
3232
33The command to use to reload the rsyslog service after the config has been33The command to use to reload the rsyslog service after the config has been
34updated can be specified in ``service_reload_command``. If this is set to34updated can be specified in ``service_reload_command``. If this is set to
diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py
index e76b9c0..65f6e77 100644
--- a/cloudinit/config/cc_seed_random.py
+++ b/cloudinit/config/cc_seed_random.py
@@ -95,7 +95,8 @@ def handle_random_seed_command(command, required, env=None):
95 cmd = command[0]95 cmd = command[0]
96 if not util.which(cmd):96 if not util.which(cmd):
97 if required:97 if required:
98 raise ValueError("command '%s' not found but required=true", cmd)98 raise ValueError(
99 "command '{cmd}' not found but required=true".format(cmd=cmd))
99 else:100 else:
100 LOG.debug("command '%s' not found for seed_command", cmd)101 LOG.debug("command '%s' not found for seed_command", cmd)
101 return102 return
diff --git a/cloudinit/config/cc_snap_config.py b/cloudinit/config/cc_snap_config.py
index fe0cc73..e82c081 100644
--- a/cloudinit/config/cc_snap_config.py
+++ b/cloudinit/config/cc_snap_config.py
@@ -87,7 +87,9 @@ def add_assertions(assertions=None):
87 assertions = []87 assertions = []
8888
89 if not isinstance(assertions, list):89 if not isinstance(assertions, list):
90 raise ValueError('assertion parameter was not a list: %s', assertions)90 raise ValueError(
91 'assertion parameter was not a list: {assertions}'.format(
92 assertions=assertions))
9193
92 snap_cmd = [SNAPPY_CMD, 'ack']94 snap_cmd = [SNAPPY_CMD, 'ack']
93 combined = "\n".join(assertions)95 combined = "\n".join(assertions)
@@ -115,7 +117,8 @@ def add_snap_user(cfg=None):
115 cfg = {}117 cfg = {}
116118
117 if not isinstance(cfg, dict):119 if not isinstance(cfg, dict):
118 raise ValueError('configuration parameter was not a dict: %s', cfg)120 raise ValueError(
121 'configuration parameter was not a dict: {cfg}'.format(cfg=cfg))
119122
120 snapuser = cfg.get('email', None)123 snapuser = cfg.get('email', None)
121 if not snapuser:124 if not snapuser:
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index d5becd1..55260ea 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -45,6 +45,10 @@ OSFAMILIES = {
4545
46LOG = logging.getLogger(__name__)46LOG = logging.getLogger(__name__)
4747
48# This is a best guess regex, based on current EC2 AZs on 2017-12-11.
49# It could break when Amazon adds new regions and new AZs.
50_EC2_AZ_RE = re.compile('^[a-z][a-z]-(?:[a-z]+-)+[0-9][a-z]$')
51
4852
49@six.add_metaclass(abc.ABCMeta)53@six.add_metaclass(abc.ABCMeta)
50class Distro(object):54class Distro(object):
@@ -102,11 +106,8 @@ class Distro(object):
102 self._apply_hostname(writeable_hostname)106 self._apply_hostname(writeable_hostname)
103107
104 def uses_systemd(self):108 def uses_systemd(self):
105 try:109 """Wrapper to report whether this distro uses systemd or sysvinit."""
106 res = os.lstat('/run/systemd/system')110 return uses_systemd()
107 return stat.S_ISDIR(res.st_mode)
108 except Exception:
109 return False
110111
111 @abc.abstractmethod112 @abc.abstractmethod
112 def package_command(self, cmd, args=None, pkgs=None):113 def package_command(self, cmd, args=None, pkgs=None):
@@ -686,18 +687,13 @@ def _get_package_mirror_info(mirror_info, data_source=None,
686 if not mirror_info:687 if not mirror_info:
687 mirror_info = {}688 mirror_info = {}
688689
689 # ec2 availability zones are named cc-direction-[0-9][a-d] (us-east-1b)
690 # the region is us-east-1. so region = az[0:-1]
691 directions_re = '|'.join([
692 'central', 'east', 'north', 'northeast', 'northwest',
693 'south', 'southeast', 'southwest', 'west'])
694 ec2_az_re = ("^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" % directions_re)
695
696 subst = {}690 subst = {}
697 if data_source and data_source.availability_zone:691 if data_source and data_source.availability_zone:
698 subst['availability_zone'] = data_source.availability_zone692 subst['availability_zone'] = data_source.availability_zone
699693
700 if re.match(ec2_az_re, data_source.availability_zone):694 # ec2 availability zones are named cc-direction-[0-9][a-d] (us-east-1b)
695 # the region is us-east-1. so region = az[0:-1]
696 if _EC2_AZ_RE.match(data_source.availability_zone):
701 subst['ec2_region'] = "%s" % data_source.availability_zone[0:-1]697 subst['ec2_region'] = "%s" % data_source.availability_zone[0:-1]
702698
703 if data_source and data_source.region:699 if data_source and data_source.region:
@@ -761,4 +757,13 @@ def set_etc_timezone(tz, tz_file=None, tz_conf="/etc/timezone",
761 util.copy(tz_file, tz_local)757 util.copy(tz_file, tz_local)
762 return758 return
763759
760
761def uses_systemd():
762 try:
763 res = os.lstat('/run/systemd/system')
764 return stat.S_ISDIR(res.st_mode)
765 except Exception:
766 return False
767
768
764# vi: ts=4 expandtab769# vi: ts=4 expandtab
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index bad112f..aa468bc 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -116,6 +116,7 @@ class Distro(distros.Distro):
116 (out, err) = util.subp(['ifconfig', '-a'])116 (out, err) = util.subp(['ifconfig', '-a'])
117 ifconfigoutput = [x for x in (out.strip()).splitlines()117 ifconfigoutput = [x for x in (out.strip()).splitlines()
118 if len(x.split()) > 0]118 if len(x.split()) > 0]
119 bsddev = 'NOT_FOUND'
119 for line in ifconfigoutput:120 for line in ifconfigoutput:
120 m = re.match('^\w+', line)121 m = re.match('^\w+', line)
121 if m:122 if m:
@@ -347,15 +348,9 @@ class Distro(distros.Distro):
347 bymac[Distro.get_interface_mac(n)] = {348 bymac[Distro.get_interface_mac(n)] = {
348 'name': n, 'up': self.is_up(n), 'downable': None}349 'name': n, 'up': self.is_up(n), 'downable': None}
349350
351 nics_with_addresses = set()
350 if check_downable:352 if check_downable:
351 nics_with_addresses = set()353 nics_with_addresses = set(self.get_ipv4() + self.get_ipv6())
352 ipv6 = self.get_ipv6()
353 ipv4 = self.get_ipv4()
354 for bytes_out in (ipv6, ipv4):
355 for i in ipv6:
356 nics_with_addresses.update(i)
357 for i in ipv4:
358 nics_with_addresses.update(i)
359354
360 for d in bymac.values():355 for d in bymac.values():
361 d['downable'] = (d['up'] is False or356 d['downable'] = (d['up'] is False or
diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
index 723d6bd..d6c61e4 100644
--- a/cloudinit/ec2_utils.py
+++ b/cloudinit/ec2_utils.py
@@ -1,6 +1,8 @@
1# Copyright (C) 2012 Yahoo! Inc.1# Copyright (C) 2012 Yahoo! Inc.
2# Copyright (C) 2014 Amazon.com, Inc. or its affiliates.
2#3#
3# Author: Joshua Harlow <harlowja@yahoo-inc.com>4# Author: Joshua Harlow <harlowja@yahoo-inc.com>
5# Author: Andrew Jorgensen <ajorgens@amazon.com>
4#6#
5# This file is part of cloud-init. See LICENSE file for license information.7# This file is part of cloud-init. See LICENSE file for license information.
68
@@ -164,14 +166,11 @@ def get_instance_userdata(api_version='latest',
164 return user_data166 return user_data
165167
166168
167def get_instance_metadata(api_version='latest',169def _get_instance_metadata(tree, api_version='latest',
168 metadata_address='http://169.254.169.254',170 metadata_address='http://169.254.169.254',
169 ssl_details=None, timeout=5, retries=5,171 ssl_details=None, timeout=5, retries=5,
170 leaf_decoder=None):172 leaf_decoder=None):
171 md_url = url_helper.combine_url(metadata_address, api_version)173 md_url = url_helper.combine_url(metadata_address, api_version, tree)
172 # Note, 'meta-data' explicitly has trailing /.
173 # this is required for CloudStack (LP: #1356855)
174 md_url = url_helper.combine_url(md_url, 'meta-data/')
175 caller = functools.partial(util.read_file_or_url,174 caller = functools.partial(util.read_file_or_url,
176 ssl_details=ssl_details, timeout=timeout,175 ssl_details=ssl_details, timeout=timeout,
177 retries=retries)176 retries=retries)
@@ -189,7 +188,29 @@ def get_instance_metadata(api_version='latest',
189 md = {}188 md = {}
190 return md189 return md
191 except Exception:190 except Exception:
192 util.logexc(LOG, "Failed fetching metadata from url %s", md_url)191 util.logexc(LOG, "Failed fetching %s from url %s", tree, md_url)
193 return {}192 return {}
194193
194
195def get_instance_metadata(api_version='latest',
196 metadata_address='http://169.254.169.254',
197 ssl_details=None, timeout=5, retries=5,
198 leaf_decoder=None):
199 # Note, 'meta-data' explicitly has trailing /.
200 # this is required for CloudStack (LP: #1356855)
201 return _get_instance_metadata(tree='meta-data/', api_version=api_version,
202 metadata_address=metadata_address,
203 ssl_details=ssl_details, timeout=timeout,
204 retries=retries, leaf_decoder=leaf_decoder)
205
206
207def get_instance_identity(api_version='latest',
208 metadata_address='http://169.254.169.254',
209 ssl_details=None, timeout=5, retries=5,
210 leaf_decoder=None):
211 return _get_instance_metadata(tree='dynamic/instance-identity',
212 api_version=api_version,
213 metadata_address=metadata_address,
214 ssl_details=ssl_details, timeout=timeout,
215 retries=retries, leaf_decoder=leaf_decoder)
195# vi: ts=4 expandtab216# vi: ts=4 expandtab
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index a1b0db1..c015e79 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -18,7 +18,7 @@ SYS_CLASS_NET = "/sys/class/net/"
18DEFAULT_PRIMARY_INTERFACE = 'eth0'18DEFAULT_PRIMARY_INTERFACE = 'eth0'
1919
2020
21def _natural_sort_key(s, _nsre=re.compile('([0-9]+)')):21def natural_sort_key(s, _nsre=re.compile('([0-9]+)')):
22 """Sorting for Humans: natural sort order. Can be use as the key to sort22 """Sorting for Humans: natural sort order. Can be use as the key to sort
23 functions.23 functions.
24 This will sort ['eth0', 'ens3', 'ens10', 'ens12', 'ens8', 'ens0'] as24 This will sort ['eth0', 'ens3', 'ens10', 'ens12', 'ens8', 'ens0'] as
@@ -224,7 +224,7 @@ def find_fallback_nic(blacklist_drivers=None):
224224
225 # if eth0 exists use it above anything else, otherwise get the interface225 # if eth0 exists use it above anything else, otherwise get the interface
226 # that we can read 'first' (using the sorted defintion of first).226 # that we can read 'first' (using the sorted defintion of first).
227 names = list(sorted(potential_interfaces, key=_natural_sort_key))227 names = list(sorted(potential_interfaces, key=natural_sort_key))
228 if DEFAULT_PRIMARY_INTERFACE in names:228 if DEFAULT_PRIMARY_INTERFACE in names:
229 names.remove(DEFAULT_PRIMARY_INTERFACE)229 names.remove(DEFAULT_PRIMARY_INTERFACE)
230 names.insert(0, DEFAULT_PRIMARY_INTERFACE)230 names.insert(0, DEFAULT_PRIMARY_INTERFACE)
diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py
index 38b27a5..7b2cc9d 100755
--- a/cloudinit/net/cmdline.py
+++ b/cloudinit/net/cmdline.py
@@ -116,10 +116,11 @@ def config_from_klibc_net_cfg(files=None, mac_addrs=None):
116 prev = names[name]['entry']116 prev = names[name]['entry']
117 if prev.get('mac_address') != entry.get('mac_address'):117 if prev.get('mac_address') != entry.get('mac_address'):
118 raise ValueError(118 raise ValueError(
119 "device '%s' was defined multiple times (%s)"119 "device '{name}' was defined multiple times ({files})"
120 " but had differing mac addresses: %s -> %s.",120 " but had differing mac addresses: {old} -> {new}.".format(
121 (name, ' '.join(names[name]['files']),121 name=name, files=' '.join(names[name]['files']),
122 prev.get('mac_address'), entry.get('mac_address')))122 old=prev.get('mac_address'),
123 new=entry.get('mac_address')))
123 prev['subnets'].extend(entry['subnets'])124 prev['subnets'].extend(entry['subnets'])
124 names[name]['files'].append(cfg_file)125 names[name]['files'].append(cfg_file)
125 else:126 else:
diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py
index 875a460..087c0c0 100644
--- a/cloudinit/net/dhcp.py
+++ b/cloudinit/net/dhcp.py
@@ -10,7 +10,9 @@ import os
10import re10import re
11import signal11import signal
1212
13from cloudinit.net import find_fallback_nic, get_devicelist13from cloudinit.net import (
14 EphemeralIPv4Network, find_fallback_nic, get_devicelist)
15from cloudinit.net.network_state import mask_and_ipv4_to_bcast_addr as bcip
14from cloudinit import temp_utils16from cloudinit import temp_utils
15from cloudinit import util17from cloudinit import util
16from six import StringIO18from six import StringIO
@@ -29,6 +31,45 @@ class InvalidDHCPLeaseFileError(Exception):
29 pass31 pass
3032
3133
34class NoDHCPLeaseError(Exception):
35 """Raised when unable to get a DHCP lease."""
36 pass
37
38
39class EphemeralDHCPv4(object):
40 def __init__(self, iface=None):
41 self.iface = iface
42 self._ephipv4 = None
43
44 def __enter__(self):
45 try:
46 leases = maybe_perform_dhcp_discovery(self.iface)
47 except InvalidDHCPLeaseFileError:
48 raise NoDHCPLeaseError()
49 if not leases:
50 raise NoDHCPLeaseError()
51 lease = leases[-1]
52 LOG.debug("Received dhcp lease on %s for %s/%s",
53 lease['interface'], lease['fixed-address'],
54 lease['subnet-mask'])
55 nmap = {'interface': 'interface', 'ip': 'fixed-address',
56 'prefix_or_mask': 'subnet-mask',
57 'broadcast': 'broadcast-address',
58 'router': 'routers'}
59 kwargs = dict([(k, lease.get(v)) for k, v in nmap.items()])
60 if not kwargs['broadcast']:
61 kwargs['broadcast'] = bcip(kwargs['prefix_or_mask'], kwargs['ip'])
62 ephipv4 = EphemeralIPv4Network(**kwargs)
63 ephipv4.__enter__()
64 self._ephipv4 = ephipv4
65 return lease
66
67 def __exit__(self, excp_type, excp_value, excp_traceback):
68 if not self._ephipv4:
69 return
70 self._ephipv4.__exit__(excp_type, excp_value, excp_traceback)
71
72
32def maybe_perform_dhcp_discovery(nic=None):73def maybe_perform_dhcp_discovery(nic=None):
33 """Perform dhcp discovery if nic valid and dhclient command exists.74 """Perform dhcp discovery if nic valid and dhclient command exists.
3475
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index e9e2cf4..fe667d8 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -474,8 +474,9 @@ class NetworkStateInterpreter(object):
474 elif bridge_stp in ['off', '0', 0]:474 elif bridge_stp in ['off', '0', 0]:
475 bridge_stp = False475 bridge_stp = False
476 else:476 else:
477 raise ValueError("Cannot convert bridge_stp value"477 raise ValueError(
478 "(%s) to boolean", bridge_stp)478 'Cannot convert bridge_stp value ({stp}) to'
479 ' boolean'.format(stp=bridge_stp))
479 iface.update({'bridge_stp': bridge_stp})480 iface.update({'bridge_stp': bridge_stp})
480481
481 interfaces.update({iface['name']: iface})482 interfaces.update({iface['name']: iface})
@@ -692,7 +693,8 @@ class NetworkStateInterpreter(object):
692 elif cmd_type == "bond":693 elif cmd_type == "bond":
693 self.handle_bond(v1_cmd)694 self.handle_bond(v1_cmd)
694 else:695 else:
695 raise ValueError('Unknown command type: %s', cmd_type)696 raise ValueError('Unknown command type: {cmd_type}'.format(
697 cmd_type=cmd_type))
696698
697 def _v2_to_v1_ipcfg(self, cfg):699 def _v2_to_v1_ipcfg(self, cfg):
698 """Common ipconfig extraction from v2 to v1 subnets array."""700 """Common ipconfig extraction from v2 to v1 subnets array."""
@@ -959,4 +961,16 @@ def mask_to_net_prefix(mask):
959 return ipv4_mask_to_net_prefix(mask)961 return ipv4_mask_to_net_prefix(mask)
960962
961963
964def mask_and_ipv4_to_bcast_addr(mask, ip):
965 """Calculate the broadcast address from the subnet mask and ip addr.
966
967 Supports ipv4 only."""
968 ip_bin = int(''.join([bin(int(x) + 256)[3:] for x in ip.split('.')]), 2)
969 mask_dec = ipv4_mask_to_net_prefix(mask)
970 bcast_bin = ip_bin | (2**(32 - mask_dec) - 1)
971 bcast_str = '.'.join([str(bcast_bin >> (i << 3) & 0xFF)
972 for i in range(4)[::-1]])
973 return bcast_str
974
975
962# vi: ts=4 expandtab976# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py
index 43a7e42..7ac8288 100644
--- a/cloudinit/sources/DataSourceAliYun.py
+++ b/cloudinit/sources/DataSourceAliYun.py
@@ -11,6 +11,7 @@ ALIYUN_PRODUCT = "Alibaba Cloud ECS"
1111
12class DataSourceAliYun(EC2.DataSourceEc2):12class DataSourceAliYun(EC2.DataSourceEc2):
1313
14 dsname = 'AliYun'
14 metadata_urls = ['http://100.100.100.200']15 metadata_urls = ['http://100.100.100.200']
1516
16 # The minimum supported metadata_version from the ec2 metadata apis17 # The minimum supported metadata_version from the ec2 metadata apis
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index c78ad9e..e1d0055 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -74,6 +74,9 @@ def read_user_data_callback(mount_dir):
7474
7575
76class DataSourceAltCloud(sources.DataSource):76class DataSourceAltCloud(sources.DataSource):
77
78 dsname = 'AltCloud'
79
77 def __init__(self, sys_cfg, distro, paths):80 def __init__(self, sys_cfg, distro, paths):
78 sources.DataSource.__init__(self, sys_cfg, distro, paths)81 sources.DataSource.__init__(self, sys_cfg, distro, paths)
79 self.seed = None82 self.seed = None
@@ -112,7 +115,7 @@ class DataSourceAltCloud(sources.DataSource):
112115
113 return 'UNKNOWN'116 return 'UNKNOWN'
114117
115 def get_data(self):118 def _get_data(self):
116 '''119 '''
117 Description:120 Description:
118 User Data is passed to the launching instance which121 User Data is passed to the launching instance which
@@ -142,7 +145,7 @@ class DataSourceAltCloud(sources.DataSource):
142 else:145 else:
143 cloud_type = self.get_cloud_type()146 cloud_type = self.get_cloud_type()
144147
145 LOG.debug('cloud_type: ' + str(cloud_type))148 LOG.debug('cloud_type: %s', str(cloud_type))
146149
147 if 'RHEV' in cloud_type:150 if 'RHEV' in cloud_type:
148 if self.user_data_rhevm():151 if self.user_data_rhevm():
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 14367e9..4bcbf3a 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -11,13 +11,16 @@ from functools import partial
11import os11import os
12import os.path12import os.path
13import re13import re
14from time import time
14from xml.dom import minidom15from xml.dom import minidom
15import xml.etree.ElementTree as ET16import xml.etree.ElementTree as ET
1617
17from cloudinit import log as logging18from cloudinit import log as logging
18from cloudinit import net19from cloudinit import net
20from cloudinit.net.dhcp import EphemeralDHCPv4
19from cloudinit import sources21from cloudinit import sources
20from cloudinit.sources.helpers.azure import get_metadata_from_fabric22from cloudinit.sources.helpers.azure import get_metadata_from_fabric
23from cloudinit.url_helper import readurl, wait_for_url, UrlError
21from cloudinit import util24from cloudinit import util
2225
23LOG = logging.getLogger(__name__)26LOG = logging.getLogger(__name__)
@@ -26,10 +29,16 @@ DS_NAME = 'Azure'
26DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"}29DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"}
27AGENT_START = ['service', 'walinuxagent', 'start']30AGENT_START = ['service', 'walinuxagent', 'start']
28AGENT_START_BUILTIN = "__builtin__"31AGENT_START_BUILTIN = "__builtin__"
29BOUNCE_COMMAND = [32BOUNCE_COMMAND_IFUP = [
30 'sh', '-xc',33 'sh', '-xc',
31 "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x"34 "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x"
32]35]
36BOUNCE_COMMAND_FREEBSD = [
37 'sh', '-xc',
38 ("i=$interface; x=0; ifconfig down $i || x=$?; "
39 "ifconfig up $i || x=$?; exit $x")
40]
41
33# azure systems will always have a resource disk, and 66-azure-ephemeral.rules42# azure systems will always have a resource disk, and 66-azure-ephemeral.rules
34# ensures that it gets linked to this path.43# ensures that it gets linked to this path.
35RESOURCE_DISK_PATH = '/dev/disk/cloud/azure_resource'44RESOURCE_DISK_PATH = '/dev/disk/cloud/azure_resource'
@@ -38,6 +47,9 @@ LEASE_FILE = '/var/lib/dhcp/dhclient.eth0.leases'
38DEFAULT_FS = 'ext4'47DEFAULT_FS = 'ext4'
39# DMI chassis-asset-tag is set static for all azure instances48# DMI chassis-asset-tag is set static for all azure instances
40AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77'49AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77'
50REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds"
51IMDS_URL = "http://169.254.169.254/metadata/reprovisiondata"
52IMDS_RETRIES = 5
4153
4254
43def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid):55def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid):
@@ -177,11 +189,6 @@ if util.is_FreeBSD():
177 RESOURCE_DISK_PATH = "/dev/" + res_disk189 RESOURCE_DISK_PATH = "/dev/" + res_disk
178 else:190 else:
179 LOG.debug("resource disk is None")191 LOG.debug("resource disk is None")
180 BOUNCE_COMMAND = [
181 'sh', '-xc',
182 ("i=$interface; x=0; ifconfig down $i || x=$?; "
183 "ifconfig up $i || x=$?; exit $x")
184 ]
185192
186BUILTIN_DS_CONFIG = {193BUILTIN_DS_CONFIG = {
187 'agent_command': AGENT_START_BUILTIN,194 'agent_command': AGENT_START_BUILTIN,
@@ -190,7 +197,7 @@ BUILTIN_DS_CONFIG = {
190 'hostname_bounce': {197 'hostname_bounce': {
191 'interface': DEFAULT_PRIMARY_NIC,198 'interface': DEFAULT_PRIMARY_NIC,
192 'policy': True,199 'policy': True,
193 'command': BOUNCE_COMMAND,200 'command': 'builtin',
194 'hostname_command': 'hostname',201 'hostname_command': 'hostname',
195 },202 },
196 'disk_aliases': {'ephemeral0': RESOURCE_DISK_PATH},203 'disk_aliases': {'ephemeral0': RESOURCE_DISK_PATH},
@@ -246,6 +253,8 @@ def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):
246253
247254
248class DataSourceAzure(sources.DataSource):255class DataSourceAzure(sources.DataSource):
256
257 dsname = 'Azure'
249 _negotiated = False258 _negotiated = False
250259
251 def __init__(self, sys_cfg, distro, paths):260 def __init__(self, sys_cfg, distro, paths):
@@ -273,19 +282,20 @@ class DataSourceAzure(sources.DataSource):
273282
274 with temporary_hostname(azure_hostname, self.ds_cfg,283 with temporary_hostname(azure_hostname, self.ds_cfg,
275 hostname_command=hostname_command) \284 hostname_command=hostname_command) \
276 as previous_hostname:285 as previous_hn:
277 if (previous_hostname is not None and286 if (previous_hn is not None and
278 util.is_true(self.ds_cfg.get('set_hostname'))):287 util.is_true(self.ds_cfg.get('set_hostname'))):
279 cfg = self.ds_cfg['hostname_bounce']288 cfg = self.ds_cfg['hostname_bounce']
280289
281 # "Bouncing" the network290 # "Bouncing" the network
282 try:291 try:
283 perform_hostname_bounce(hostname=azure_hostname,292 return perform_hostname_bounce(hostname=azure_hostname,
284 cfg=cfg,293 cfg=cfg,
285 prev_hostname=previous_hostname)294 prev_hostname=previous_hn)
286 except Exception as e:295 except Exception as e:
287 LOG.warning("Failed publishing hostname: %s", e)296 LOG.warning("Failed publishing hostname: %s", e)
288 util.logexc(LOG, "handling set_hostname failed")297 util.logexc(LOG, "handling set_hostname failed")
298 return False
289299
290 def get_metadata_from_agent(self):300 def get_metadata_from_agent(self):
291 temp_hostname = self.metadata.get('local-hostname')301 temp_hostname = self.metadata.get('local-hostname')
@@ -330,7 +340,7 @@ class DataSourceAzure(sources.DataSource):
330 metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)340 metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)
331 return metadata341 return metadata
332342
333 def get_data(self):343 def _get_data(self):
334 # azure removes/ejects the cdrom containing the ovf-env.xml344 # azure removes/ejects the cdrom containing the ovf-env.xml
335 # file on reboot. So, in order to successfully reboot we345 # file on reboot. So, in order to successfully reboot we
336 # need to look in the datadir and consider that valid346 # need to look in the datadir and consider that valid
@@ -342,15 +352,20 @@ class DataSourceAzure(sources.DataSource):
342 ddir = self.ds_cfg['data_dir']352 ddir = self.ds_cfg['data_dir']
343353
344 candidates = [self.seed_dir]354 candidates = [self.seed_dir]
355 if os.path.isfile(REPROVISION_MARKER_FILE):
356 candidates.insert(0, "IMDS")
345 candidates.extend(list_possible_azure_ds_devs())357 candidates.extend(list_possible_azure_ds_devs())
346 if ddir:358 if ddir:
347 candidates.append(ddir)359 candidates.append(ddir)
348360
349 found = None361 found = None
350362 reprovision = False
351 for cdev in candidates:363 for cdev in candidates:
352 try:364 try:
353 if cdev.startswith("/dev/"):365 if cdev == "IMDS":
366 ret = None
367 reprovision = True
368 elif cdev.startswith("/dev/"):
354 if util.is_FreeBSD():369 if util.is_FreeBSD():
355 ret = util.mount_cb(cdev, load_azure_ds_dir,370 ret = util.mount_cb(cdev, load_azure_ds_dir,
356 mtype="udf", sync=False)371 mtype="udf", sync=False)
@@ -367,6 +382,8 @@ class DataSourceAzure(sources.DataSource):
367 LOG.warning("%s was not mountable", cdev)382 LOG.warning("%s was not mountable", cdev)
368 continue383 continue
369384
385 if reprovision or self._should_reprovision(ret):
386 ret = self._reprovision()
370 (md, self.userdata_raw, cfg, files) = ret387 (md, self.userdata_raw, cfg, files) = ret
371 self.seed = cdev388 self.seed = cdev
372 self.metadata = util.mergemanydict([md, DEFAULT_METADATA])389 self.metadata = util.mergemanydict([md, DEFAULT_METADATA])
@@ -425,6 +442,83 @@ class DataSourceAzure(sources.DataSource):
425 LOG.debug("negotiating already done for %s",442 LOG.debug("negotiating already done for %s",
426 self.get_instance_id())443 self.get_instance_id())
427444
445 def _poll_imds(self, report_ready=True):
446 """Poll IMDS for the new provisioning data until we get a valid
447 response. Then return the returned JSON object."""
448 url = IMDS_URL + "?api-version=2017-04-02"
449 headers = {"Metadata": "true"}
450 LOG.debug("Start polling IMDS")
451
452 def sleep_cb(response, loop_n):
453 return 1
454
455 def exception_cb(msg, exception):
456 if isinstance(exception, UrlError) and exception.code == 404:
457 return
458 LOG.warning("Exception during polling. Will try DHCP.",
459 exc_info=True)
460
461 # If we get an exception while trying to call IMDS, we
462 # call DHCP and setup the ephemeral network to acquire the new IP.
463 raise exception
464
465 need_report = report_ready
466 for i in range(IMDS_RETRIES):
467 try:
468 with EphemeralDHCPv4() as lease:
469 if need_report:
470 self._report_ready(lease=lease)
471 need_report = False
472 wait_for_url([url], max_wait=None, timeout=60,
473 status_cb=LOG.info,
474 headers_cb=lambda url: headers, sleep_time=1,
475 exception_cb=exception_cb,
476 sleep_time_cb=sleep_cb)
477 return str(readurl(url, headers=headers))
478 except Exception:
479 LOG.debug("Exception during polling-retrying dhcp" +
480 " %d more time(s).", (IMDS_RETRIES - i),
481 exc_info=True)
482
483 def _report_ready(self, lease):
484 """Tells the fabric provisioning has completed
485 before we go into our polling loop."""
486 try:
487 get_metadata_from_fabric(None, lease['unknown-245'])
488 except Exception as exc:
489 LOG.warning(
490 "Error communicating with Azure fabric; You may experience."
491 "connectivity issues.", exc_info=True)
492
493 def _should_reprovision(self, ret):
494 """Whether or not we should poll IMDS for reprovisioning data.
495 Also sets a marker file to poll IMDS.
496
497 The marker file is used for the following scenario: the VM boots into
498 this polling loop, which we expect to be proceeding infinitely until
499 the VM is picked. If for whatever reason the platform moves us to a
500 new host (for instance a hardware issue), we need to keep polling.
501 However, since the VM reports ready to the Fabric, we will not attach
502 the ISO, thus cloud-init needs to have a way of knowing that it should
503 jump back into the polling loop in order to retrieve the ovf_env."""
504 if not ret:
505 return False
506 (md, self.userdata_raw, cfg, files) = ret
507 path = REPROVISION_MARKER_FILE
508 if (cfg.get('PreprovisionedVm') is True or
509 os.path.isfile(path)):
510 if not os.path.isfile(path):
511 LOG.info("Creating a marker file to poll imds")
512 util.write_file(path, "%s: %s\n" % (os.getpid(), time()))
513 return True
514 return False
515
516 def _reprovision(self):
517 """Initiate the reprovisioning workflow."""
518 contents = self._poll_imds()
519 md, ud, cfg = read_azure_ovf(contents)
520 return (md, ud, cfg, {'ovf-env.xml': contents})
521
428 def _negotiate(self):522 def _negotiate(self):
429 """Negotiate with fabric and return data from it.523 """Negotiate with fabric and return data from it.
430524
@@ -450,7 +544,7 @@ class DataSourceAzure(sources.DataSource):
450 "Error communicating with Azure fabric; You may experience."544 "Error communicating with Azure fabric; You may experience."
451 "connectivity issues.", exc_info=True)545 "connectivity issues.", exc_info=True)
452 return False546 return False
453547 util.del_file(REPROVISION_MARKER_FILE)
454 return fabric_data548 return fabric_data
455549
456 def activate(self, cfg, is_new_instance):550 def activate(self, cfg, is_new_instance):
@@ -580,18 +674,19 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
580 if os.path.exists(sempath):674 if os.path.exists(sempath):
581 try:675 try:
582 os.unlink(sempath)676 os.unlink(sempath)
583 LOG.debug(bmsg + " removed.")677 LOG.debug('%s removed.', bmsg)
584 except Exception as e:678 except Exception as e:
585 # python3 throws FileNotFoundError, python2 throws OSError679 # python3 throws FileNotFoundError, python2 throws OSError
586 LOG.warning(bmsg + ": remove failed! (%s)", e)680 LOG.warning('%s: remove failed! (%s)', bmsg, e)
587 else:681 else:
588 LOG.debug(bmsg + " did not exist.")682 LOG.debug('%s did not exist.', bmsg)
589 return683 return
590684
591685
592def perform_hostname_bounce(hostname, cfg, prev_hostname):686def perform_hostname_bounce(hostname, cfg, prev_hostname):
593 # set the hostname to 'hostname' if it is not already set to that.687 # set the hostname to 'hostname' if it is not already set to that.
594 # then, if policy is not off, bounce the interface using command688 # then, if policy is not off, bounce the interface using command
689 # Returns True if the network was bounced, False otherwise.
595 command = cfg['command']690 command = cfg['command']
596 interface = cfg['interface']691 interface = cfg['interface']
597 policy = cfg['policy']692 policy = cfg['policy']
@@ -604,8 +699,15 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname):
604 env['old_hostname'] = prev_hostname699 env['old_hostname'] = prev_hostname
605700
606 if command == "builtin":701 if command == "builtin":
607 command = BOUNCE_COMMAND702 if util.is_FreeBSD():
608703 command = BOUNCE_COMMAND_FREEBSD
704 elif util.which('ifup'):
705 command = BOUNCE_COMMAND_IFUP
706 else:
707 LOG.debug(
708 "Skipping network bounce: ifupdown utils aren't present.")
709 # Don't bounce as networkd handles hostname DDNS updates
710 return False
609 LOG.debug("pubhname: publishing hostname [%s]", msg)711 LOG.debug("pubhname: publishing hostname [%s]", msg)
610 shell = not isinstance(command, (list, tuple))712 shell = not isinstance(command, (list, tuple))
611 # capture=False, see comments in bug 1202758 and bug 1206164.713 # capture=False, see comments in bug 1202758 and bug 1206164.
@@ -613,6 +715,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname):
613 get_uptime=True, func=util.subp,715 get_uptime=True, func=util.subp,
614 kwargs={'args': command, 'shell': shell, 'capture': False,716 kwargs={'args': command, 'shell': shell, 'capture': False,
615 'env': env})717 'env': env})
718 return True
616719
617720
618def crtfile_to_pubkey(fname, data=None):721def crtfile_to_pubkey(fname, data=None):
@@ -829,9 +932,35 @@ def read_azure_ovf(contents):
829 if 'ssh_pwauth' not in cfg and password:932 if 'ssh_pwauth' not in cfg and password:
830 cfg['ssh_pwauth'] = True933 cfg['ssh_pwauth'] = True
831934
935 cfg['PreprovisionedVm'] = _extract_preprovisioned_vm_setting(dom)
936
832 return (md, ud, cfg)937 return (md, ud, cfg)
833938
834939
940def _extract_preprovisioned_vm_setting(dom):
941 """Read the preprovision flag from the ovf. It should not
942 exist unless true."""
943 platform_settings_section = find_child(
944 dom.documentElement,
945 lambda n: n.localName == "PlatformSettingsSection")
946 if not platform_settings_section or len(platform_settings_section) == 0:
947 LOG.debug("PlatformSettingsSection not found")
948 return False
949 platform_settings = find_child(
950 platform_settings_section[0],
951 lambda n: n.localName == "PlatformSettings")
952 if not platform_settings or len(platform_settings) == 0:
953 LOG.debug("PlatformSettings not found")
954 return False
955 preprovisionedVm = find_child(
956 platform_settings[0],
957 lambda n: n.localName == "PreprovisionedVm")
958 if not preprovisionedVm or len(preprovisionedVm) == 0:
959 LOG.debug("PreprovisionedVm not found")
960 return False
961 return util.translate_bool(preprovisionedVm[0].firstChild.nodeValue)
962
963
835def encrypt_pass(password, salt_id="$6$"):964def encrypt_pass(password, salt_id="$6$"):
836 return crypt.crypt(password, salt_id + util.rand_str(strlen=16))965 return crypt.crypt(password, salt_id + util.rand_str(strlen=16))
837966
diff --git a/cloudinit/sources/DataSourceBigstep.py b/cloudinit/sources/DataSourceBigstep.py
index d7fcd45..699a85b 100644
--- a/cloudinit/sources/DataSourceBigstep.py
+++ b/cloudinit/sources/DataSourceBigstep.py
@@ -16,13 +16,16 @@ LOG = logging.getLogger(__name__)
1616
1717
18class DataSourceBigstep(sources.DataSource):18class DataSourceBigstep(sources.DataSource):
19
20 dsname = 'Bigstep'
21
19 def __init__(self, sys_cfg, distro, paths):22 def __init__(self, sys_cfg, distro, paths):
20 sources.DataSource.__init__(self, sys_cfg, distro, paths)23 sources.DataSource.__init__(self, sys_cfg, distro, paths)
21 self.metadata = {}24 self.metadata = {}
22 self.vendordata_raw = ""25 self.vendordata_raw = ""
23 self.userdata_raw = ""26 self.userdata_raw = ""
2427
25 def get_data(self, apply_filter=False):28 def _get_data(self, apply_filter=False):
26 url = get_url_from_file()29 url = get_url_from_file()
27 if url is None:30 if url is None:
28 return False31 return False
diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
index 19df16b..4eaad47 100644
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ b/cloudinit/sources/DataSourceCloudSigma.py
@@ -23,6 +23,9 @@ class DataSourceCloudSigma(sources.DataSource):
23 For more information about CloudSigma's Server Context:23 For more information about CloudSigma's Server Context:
24 http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html24 http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
25 """25 """
26
27 dsname = 'CloudSigma'
28
26 def __init__(self, sys_cfg, distro, paths):29 def __init__(self, sys_cfg, distro, paths):
27 self.cepko = Cepko()30 self.cepko = Cepko()
28 self.ssh_public_key = ''31 self.ssh_public_key = ''
@@ -46,7 +49,7 @@ class DataSourceCloudSigma(sources.DataSource):
46 LOG.warning("failed to query dmi data for system product name")49 LOG.warning("failed to query dmi data for system product name")
47 return False50 return False
4851
49 def get_data(self):52 def _get_data(self):
50 """53 """
51 Metadata is the whole server context and /meta/cloud-config is used54 Metadata is the whole server context and /meta/cloud-config is used
52 as userdata.55 as userdata.
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 9dc473f..0df545f 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -65,6 +65,9 @@ class CloudStackPasswordServerClient(object):
6565
6666
67class DataSourceCloudStack(sources.DataSource):67class DataSourceCloudStack(sources.DataSource):
68
69 dsname = 'CloudStack'
70
68 def __init__(self, sys_cfg, distro, paths):71 def __init__(self, sys_cfg, distro, paths):
69 sources.DataSource.__init__(self, sys_cfg, distro, paths)72 sources.DataSource.__init__(self, sys_cfg, distro, paths)
70 self.seed_dir = os.path.join(paths.seed_dir, 'cs')73 self.seed_dir = os.path.join(paths.seed_dir, 'cs')
@@ -117,7 +120,7 @@ class DataSourceCloudStack(sources.DataSource):
117 def get_config_obj(self):120 def get_config_obj(self):
118 return self.cfg121 return self.cfg
119122
120 def get_data(self):123 def _get_data(self):
121 seed_ret = {}124 seed_ret = {}
122 if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")):125 if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")):
123 self.userdata_raw = seed_ret['user-data']126 self.userdata_raw = seed_ret['user-data']
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index ef374f3..b8db626 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -25,13 +25,16 @@ DEFAULT_METADATA = {
25 "instance-id": DEFAULT_IID,25 "instance-id": DEFAULT_IID,
26}26}
27FS_TYPES = ('vfat', 'iso9660')27FS_TYPES = ('vfat', 'iso9660')
28LABEL_TYPES = ('config-2',)28LABEL_TYPES = ('config-2', 'CONFIG-2')
29POSSIBLE_MOUNTS = ('sr', 'cd')29POSSIBLE_MOUNTS = ('sr', 'cd')
30OPTICAL_DEVICES = tuple(('/dev/%s%s' % (z, i) for z in POSSIBLE_MOUNTS30OPTICAL_DEVICES = tuple(('/dev/%s%s' % (z, i) for z in POSSIBLE_MOUNTS
31 for i in range(0, 2)))31 for i in range(0, 2)))
3232
3333
34class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):34class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
35
36 dsname = 'ConfigDrive'
37
35 def __init__(self, sys_cfg, distro, paths):38 def __init__(self, sys_cfg, distro, paths):
36 super(DataSourceConfigDrive, self).__init__(sys_cfg, distro, paths)39 super(DataSourceConfigDrive, self).__init__(sys_cfg, distro, paths)
37 self.source = None40 self.source = None
@@ -50,7 +53,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
50 mstr += "[source=%s]" % (self.source)53 mstr += "[source=%s]" % (self.source)
51 return mstr54 return mstr
5255
53 def get_data(self):56 def _get_data(self):
54 found = None57 found = None
55 md = {}58 md = {}
56 results = {}59 results = {}
@@ -221,7 +224,7 @@ def find_candidate_devs(probe_optical=True):
221 config drive v2:224 config drive v2:
222 Disk should be:225 Disk should be:
223 * either vfat or iso9660 formated226 * either vfat or iso9660 formated
224 * labeled with 'config-2'227 * labeled with 'config-2' or 'CONFIG-2'
225 """228 """
226 # query optical drive to get it in blkid cache for 2.6 kernels229 # query optical drive to get it in blkid cache for 2.6 kernels
227 if probe_optical:230 if probe_optical:
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
index 5e7e66b..e0ef665 100644
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ b/cloudinit/sources/DataSourceDigitalOcean.py
@@ -27,6 +27,9 @@ MD_USE_IPV4LL = True
2727
2828
29class DataSourceDigitalOcean(sources.DataSource):29class DataSourceDigitalOcean(sources.DataSource):
30
31 dsname = 'DigitalOcean'
32
30 def __init__(self, sys_cfg, distro, paths):33 def __init__(self, sys_cfg, distro, paths):
31 sources.DataSource.__init__(self, sys_cfg, distro, paths)34 sources.DataSource.__init__(self, sys_cfg, distro, paths)
32 self.distro = distro35 self.distro = distro
@@ -44,7 +47,7 @@ class DataSourceDigitalOcean(sources.DataSource):
44 def _get_sysinfo(self):47 def _get_sysinfo(self):
45 return do_helper.read_sysinfo()48 return do_helper.read_sysinfo()
4649
47 def get_data(self):50 def _get_data(self):
48 (is_do, droplet_id) = self._get_sysinfo()51 (is_do, droplet_id) = self._get_sysinfo()
4952
50 # only proceed if we know we are on DigitalOcean53 # only proceed if we know we are on DigitalOcean
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 7bbbfb6..e14553b 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -14,7 +14,7 @@ import time
14from cloudinit import ec2_utils as ec214from cloudinit import ec2_utils as ec2
15from cloudinit import log as logging15from cloudinit import log as logging
16from cloudinit import net16from cloudinit import net
17from cloudinit.net import dhcp17from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
18from cloudinit import sources18from cloudinit import sources
19from cloudinit import url_helper as uhelp19from cloudinit import url_helper as uhelp
20from cloudinit import util20from cloudinit import util
@@ -31,6 +31,7 @@ _unset = "_unset"
3131
3232
33class Platforms(object):33class Platforms(object):
34 # TODO Rename and move to cloudinit.cloud.CloudNames
34 ALIYUN = "AliYun"35 ALIYUN = "AliYun"
35 AWS = "AWS"36 AWS = "AWS"
36 BRIGHTBOX = "Brightbox"37 BRIGHTBOX = "Brightbox"
@@ -45,6 +46,7 @@ class Platforms(object):
4546
46class DataSourceEc2(sources.DataSource):47class DataSourceEc2(sources.DataSource):
4748
49 dsname = 'Ec2'
48 # Default metadata urls that will be used if none are provided50 # Default metadata urls that will be used if none are provided
49 # They will be checked for 'resolveability' and some of the51 # They will be checked for 'resolveability' and some of the
50 # following may be discarded if they do not resolve52 # following may be discarded if they do not resolve
@@ -68,11 +70,15 @@ class DataSourceEc2(sources.DataSource):
68 _fallback_interface = None70 _fallback_interface = None
6971
70 def __init__(self, sys_cfg, distro, paths):72 def __init__(self, sys_cfg, distro, paths):
71 sources.DataSource.__init__(self, sys_cfg, distro, paths)73 super(DataSourceEc2, self).__init__(sys_cfg, distro, paths)
72 self.metadata_address = None74 self.metadata_address = None
73 self.seed_dir = os.path.join(paths.seed_dir, "ec2")75 self.seed_dir = os.path.join(paths.seed_dir, "ec2")
7476
75 def get_data(self):77 def _get_cloud_name(self):
78 """Return the cloud name as identified during _get_data."""
79 return self.cloud_platform
80
81 def _get_data(self):
76 seed_ret = {}82 seed_ret = {}
77 if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")):83 if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")):
78 self.userdata_raw = seed_ret['user-data']84 self.userdata_raw = seed_ret['user-data']
@@ -96,22 +102,13 @@ class DataSourceEc2(sources.DataSource):
96 if util.is_FreeBSD():102 if util.is_FreeBSD():
97 LOG.debug("FreeBSD doesn't support running dhclient with -sf")103 LOG.debug("FreeBSD doesn't support running dhclient with -sf")
98 return False104 return False
99 dhcp_leases = dhcp.maybe_perform_dhcp_discovery(105 try:
100 self.fallback_interface)106 with EphemeralDHCPv4(self.fallback_interface):
101 if not dhcp_leases:107 return util.log_time(
102 # DataSourceEc2Local failed in init-local stage. DataSourceEc2108 logfunc=LOG.debug, msg='Crawl of metadata service',
103 # will still run in init-network stage.109 func=self._crawl_metadata)
110 except NoDHCPLeaseError:
104 return False111 return False
105 dhcp_opts = dhcp_leases[-1]
106 net_params = {'interface': dhcp_opts.get('interface'),
107 'ip': dhcp_opts.get('fixed-address'),
108 'prefix_or_mask': dhcp_opts.get('subnet-mask'),
109 'broadcast': dhcp_opts.get('broadcast-address'),
110 'router': dhcp_opts.get('routers')}
111 with net.EphemeralIPv4Network(**net_params):
112 return util.log_time(
113 logfunc=LOG.debug, msg='Crawl of metadata service',
114 func=self._crawl_metadata)
115 else:112 else:
116 return self._crawl_metadata()113 return self._crawl_metadata()
117114
@@ -148,7 +145,12 @@ class DataSourceEc2(sources.DataSource):
148 return self.min_metadata_version145 return self.min_metadata_version
149146
150 def get_instance_id(self):147 def get_instance_id(self):
151 return self.metadata['instance-id']148 if self.cloud_platform == Platforms.AWS:
149 # Prefer the ID from the instance identity document, but fall back
150 return self.identity.get(
151 'instanceId', self.metadata['instance-id'])
152 else:
153 return self.metadata['instance-id']
152154
153 def _get_url_settings(self):155 def _get_url_settings(self):
154 mcfg = self.ds_cfg156 mcfg = self.ds_cfg
@@ -262,19 +264,31 @@ class DataSourceEc2(sources.DataSource):
262 @property264 @property
263 def availability_zone(self):265 def availability_zone(self):
264 try:266 try:
265 return self.metadata['placement']['availability-zone']267 if self.cloud_platform == Platforms.AWS:
268 return self.identity.get(
269 'availabilityZone',
270 self.metadata['placement']['availability-zone'])
271 else:
272 return self.metadata['placement']['availability-zone']
266 except KeyError:273 except KeyError:
267 return None274 return None
268275
269 @property276 @property
270 def region(self):277 def region(self):
271 az = self.availability_zone278 if self.cloud_platform == Platforms.AWS:
272 if az is not None:279 region = self.identity.get('region')
273 return az[:-1]280 # Fallback to trimming the availability zone if region is missing
281 if self.availability_zone and not region:
282 region = self.availability_zone[:-1]
283 return region
284 else:
285 az = self.availability_zone
286 if az is not None:
287 return az[:-1]
274 return None288 return None
275289
276 @property290 @property
277 def cloud_platform(self):291 def cloud_platform(self): # TODO rename cloud_name
278 if self._cloud_platform is None:292 if self._cloud_platform is None:
279 self._cloud_platform = identify_platform()293 self._cloud_platform = identify_platform()
280 return self._cloud_platform294 return self._cloud_platform
@@ -351,6 +365,9 @@ class DataSourceEc2(sources.DataSource):
351 api_version, self.metadata_address)365 api_version, self.metadata_address)
352 self.metadata = ec2.get_instance_metadata(366 self.metadata = ec2.get_instance_metadata(
353 api_version, self.metadata_address)367 api_version, self.metadata_address)
368 if self.cloud_platform == Platforms.AWS:
369 self.identity = ec2.get_instance_identity(
370 api_version, self.metadata_address).get('document', {})
354 except Exception:371 except Exception:
355 util.logexc(372 util.logexc(
356 LOG, "Failed reading from metadata address %s",373 LOG, "Failed reading from metadata address %s",
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index ccae420..2da34a9 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -2,8 +2,12 @@
2#2#
3# This file is part of cloud-init. See LICENSE file for license information.3# This file is part of cloud-init. See LICENSE file for license information.
44
5import datetime
6import json
7
5from base64 import b64decode8from base64 import b64decode
69
10from cloudinit.distros import ug_util
7from cloudinit import log as logging11from cloudinit import log as logging
8from cloudinit import sources12from cloudinit import sources
9from cloudinit import url_helper13from cloudinit import url_helper
@@ -17,16 +21,18 @@ REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname')
1721
1822
19class GoogleMetadataFetcher(object):23class GoogleMetadataFetcher(object):
20 headers = {'X-Google-Metadata-Request': 'True'}24 headers = {'Metadata-Flavor': 'Google'}
2125
22 def __init__(self, metadata_address):26 def __init__(self, metadata_address):
23 self.metadata_address = metadata_address27 self.metadata_address = metadata_address
2428
25 def get_value(self, path, is_text):29 def get_value(self, path, is_text, is_recursive=False):
26 value = None30 value = None
27 try:31 try:
28 resp = url_helper.readurl(url=self.metadata_address + path,32 url = self.metadata_address + path
29 headers=self.headers)33 if is_recursive:
34 url += '/?recursive=True'
35 resp = url_helper.readurl(url=url, headers=self.headers)
30 except url_helper.UrlError as exc:36 except url_helper.UrlError as exc:
31 msg = "url %s raised exception %s"37 msg = "url %s raised exception %s"
32 LOG.debug(msg, path, exc)38 LOG.debug(msg, path, exc)
@@ -35,22 +41,29 @@ class GoogleMetadataFetcher(object):
35 if is_text:41 if is_text:
36 value = util.decode_binary(resp.contents)42 value = util.decode_binary(resp.contents)
37 else:43 else:
38 value = resp.contents44 value = resp.contents.decode('utf-8')
39 else:45 else:
40 LOG.debug("url %s returned code %s", path, resp.code)46 LOG.debug("url %s returned code %s", path, resp.code)
41 return value47 return value
4248
4349
44class DataSourceGCE(sources.DataSource):50class DataSourceGCE(sources.DataSource):
51
52 dsname = 'GCE'
53
45 def __init__(self, sys_cfg, distro, paths):54 def __init__(self, sys_cfg, distro, paths):
46 sources.DataSource.__init__(self, sys_cfg, distro, paths)55 sources.DataSource.__init__(self, sys_cfg, distro, paths)
56 self.default_user = None
57 if distro:
58 (users, _groups) = ug_util.normalize_users_groups(sys_cfg, distro)
59 (self.default_user, _user_config) = ug_util.extract_default(users)
47 self.metadata = dict()60 self.metadata = dict()
48 self.ds_cfg = util.mergemanydict([61 self.ds_cfg = util.mergemanydict([
49 util.get_cfg_by_path(sys_cfg, ["datasource", "GCE"], {}),62 util.get_cfg_by_path(sys_cfg, ["datasource", "GCE"], {}),
50 BUILTIN_DS_CONFIG])63 BUILTIN_DS_CONFIG])
51 self.metadata_address = self.ds_cfg['metadata_url']64 self.metadata_address = self.ds_cfg['metadata_url']
5265
53 def get_data(self):66 def _get_data(self):
54 ret = util.log_time(67 ret = util.log_time(
55 LOG.debug, 'Crawl of GCE metadata service',68 LOG.debug, 'Crawl of GCE metadata service',
56 read_md, kwargs={'address': self.metadata_address})69 read_md, kwargs={'address': self.metadata_address})
@@ -67,17 +80,18 @@ class DataSourceGCE(sources.DataSource):
6780
68 @property81 @property
69 def launch_index(self):82 def launch_index(self):
70 # GCE does not provide lauch_index property83 # GCE does not provide lauch_index property.
71 return None84 return None
7285
73 def get_instance_id(self):86 def get_instance_id(self):
74 return self.metadata['instance-id']87 return self.metadata['instance-id']
7588
76 def get_public_ssh_keys(self):89 def get_public_ssh_keys(self):
77 return self.metadata['public-keys']90 public_keys_data = self.metadata['public-keys-data']
91 return _parse_public_keys(public_keys_data, self.default_user)
7892
79 def get_hostname(self, fqdn=False, resolve_ip=False):93 def get_hostname(self, fqdn=False, resolve_ip=False):
80 # GCE has long FDQN's and has asked for short hostnames94 # GCE has long FDQN's and has asked for short hostnames.
81 return self.metadata['local-hostname'].split('.')[0]95 return self.metadata['local-hostname'].split('.')[0]
8296
83 @property97 @property
@@ -89,15 +103,58 @@ class DataSourceGCE(sources.DataSource):
89 return self.availability_zone.rsplit('-', 1)[0]103 return self.availability_zone.rsplit('-', 1)[0]
90104
91105
92def _trim_key(public_key):106def _has_expired(public_key):
93 # GCE takes sshKeys attribute in the format of '<user>:<public_key>'107 # Check whether an SSH key is expired. Public key input is a single SSH
94 # so we have to trim each key to remove the username part108 # public key in the GCE specific key format documented here:
109 # https://cloud.google.com/compute/docs/instances/adding-removing-ssh-keys#sshkeyformat
110 try:
111 # Check for the Google-specific schema identifier.
112 schema, json_str = public_key.split(None, 3)[2:]
113 except (ValueError, AttributeError):
114 return False
115
116 # Do not expire keys if they do not have the expected schema identifier.
117 if schema != 'google-ssh':
118 return False
119
120 try:
121 json_obj = json.loads(json_str)
122 except ValueError:
123 return False
124
125 # Do not expire keys if there is no expriation timestamp.
126 if 'expireOn' not in json_obj:
127 return False
128
129 expire_str = json_obj['expireOn']
130 format_str = '%Y-%m-%dT%H:%M:%S+0000'
95 try:131 try:
96 index = public_key.index(':')132 expire_time = datetime.datetime.strptime(expire_str, format_str)
97 if index > 0:133 except ValueError:
98 return public_key[(index + 1):]134 return False
99 except Exception:135
100 return public_key136 # Expire the key if and only if we have exceeded the expiration timestamp.
137 return datetime.datetime.utcnow() > expire_time
138
139
140def _parse_public_keys(public_keys_data, default_user=None):
141 # Parse the SSH key data for the default user account. Public keys input is
142 # a list containing SSH public keys in the GCE specific key format
143 # documented here:
144 # https://cloud.google.com/compute/docs/instances/adding-removing-ssh-keys#sshkeyformat
145 public_keys = []
146 if not public_keys_data:
147 return public_keys
148 for public_key in public_keys_data:
149 if not public_key or not all(ord(c) < 128 for c in public_key):
150 continue
151 split_public_key = public_key.split(':', 1)
152 if len(split_public_key) != 2:
153 continue
154 user, key = split_public_key
155 if user in ('cloudinit', default_user) and not _has_expired(key):
156 public_keys.append(key)
157 return public_keys
101158
102159
103def read_md(address=None, platform_check=True):160def read_md(address=None, platform_check=True):
@@ -113,31 +170,28 @@ def read_md(address=None, platform_check=True):
113 ret['reason'] = "Not running on GCE."170 ret['reason'] = "Not running on GCE."
114 return ret171 return ret
115172
116 # if we cannot resolve the metadata server, then no point in trying173 # If we cannot resolve the metadata server, then no point in trying.
117 if not util.is_resolvable_url(address):174 if not util.is_resolvable_url(address):
118 LOG.debug("%s is not resolvable", address)175 LOG.debug("%s is not resolvable", address)
119 ret['reason'] = 'address "%s" is not resolvable' % address176 ret['reason'] = 'address "%s" is not resolvable' % address
120 return ret177 return ret
121178
122 # url_map: (our-key, path, required, is_text)179 # url_map: (our-key, path, required, is_text, is_recursive)
123 url_map = [180 url_map = [
124 ('instance-id', ('instance/id',), True, True),181 ('instance-id', ('instance/id',), True, True, False),
125 ('availability-zone', ('instance/zone',), True, True),182 ('availability-zone', ('instance/zone',), True, True, False),
126 ('local-hostname', ('instance/hostname',), True, True),183 ('local-hostname', ('instance/hostname',), True, True, False),
127 ('public-keys', ('project/attributes/sshKeys',184 ('instance-data', ('instance/attributes',), False, False, True),
128 'instance/attributes/ssh-keys'), False, True),185 ('project-data', ('project/attributes',), False, False, True),
129 ('user-data', ('instance/attributes/user-data',), False, False),
130 ('user-data-encoding', ('instance/attributes/user-data-encoding',),
131 False, True),
132 ]186 ]
133187
134 metadata_fetcher = GoogleMetadataFetcher(address)188 metadata_fetcher = GoogleMetadataFetcher(address)
135 md = {}189 md = {}
136 # iterate over url_map keys to get metadata items190 # Iterate over url_map keys to get metadata items.
137 for (mkey, paths, required, is_text) in url_map:191 for (mkey, paths, required, is_text, is_recursive) in url_map:
138 value = None192 value = None
139 for path in paths:193 for path in paths:
140 new_value = metadata_fetcher.get_value(path, is_text)194 new_value = metadata_fetcher.get_value(path, is_text, is_recursive)
141 if new_value is not None:195 if new_value is not None:
142 value = new_value196 value = new_value
143 if required and value is None:197 if required and value is None:
@@ -146,17 +200,23 @@ def read_md(address=None, platform_check=True):
146 return ret200 return ret
147 md[mkey] = value201 md[mkey] = value
148202
149 if md['public-keys']:203 instance_data = json.loads(md['instance-data'] or '{}')
150 lines = md['public-keys'].splitlines()204 project_data = json.loads(md['project-data'] or '{}')
151 md['public-keys'] = [_trim_key(k) for k in lines]205 valid_keys = [instance_data.get('sshKeys'), instance_data.get('ssh-keys')]
206 block_project = instance_data.get('block-project-ssh-keys', '').lower()
207 if block_project != 'true' and not instance_data.get('sshKeys'):
208 valid_keys.append(project_data.get('ssh-keys'))
209 valid_keys.append(project_data.get('sshKeys'))
210 public_keys_data = '\n'.join([key for key in valid_keys if key])
211 md['public-keys-data'] = public_keys_data.splitlines()
152212
153 if md['availability-zone']:213 if md['availability-zone']:
154 md['availability-zone'] = md['availability-zone'].split('/')[-1]214 md['availability-zone'] = md['availability-zone'].split('/')[-1]
155215
156 encoding = md.get('user-data-encoding')216 encoding = instance_data.get('user-data-encoding')
157 if encoding:217 if encoding:
158 if encoding == 'base64':218 if encoding == 'base64':
159 md['user-data'] = b64decode(md['user-data'])219 md['user-data'] = b64decode(instance_data.get('user-data'))
160 else:220 else:
161 LOG.warning('unknown user-data-encoding: %s, ignoring', encoding)221 LOG.warning('unknown user-data-encoding: %s, ignoring', encoding)
162222
@@ -185,20 +245,19 @@ def platform_reports_gce():
185 return False245 return False
186246
187247
188# Used to match classes to dependencies248# Used to match classes to dependencies.
189datasources = [249datasources = [
190 (DataSourceGCE, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),250 (DataSourceGCE, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
191]251]
192252
193253
194# Return a list of data sources that match this set of dependencies254# Return a list of data sources that match this set of dependencies.
195def get_datasource_list(depends):255def get_datasource_list(depends):
196 return sources.list_from_depends(depends, datasources)256 return sources.list_from_depends(depends, datasources)
197257
198258
199if __name__ == "__main__":259if __name__ == "__main__":
200 import argparse260 import argparse
201 import json
202 import sys261 import sys
203262
204 from base64 import b64encode263 from base64 import b64encode
@@ -214,7 +273,7 @@ if __name__ == "__main__":
214 data = read_md(address=args.endpoint, platform_check=args.platform_check)273 data = read_md(address=args.endpoint, platform_check=args.platform_check)
215 if 'user-data' in data:274 if 'user-data' in data:
216 # user-data is bytes not string like other things. Handle it specially.275 # user-data is bytes not string like other things. Handle it specially.
217 # if it can be represented as utf-8 then do so. Otherwise print base64276 # If it can be represented as utf-8 then do so. Otherwise print base64
218 # encoded value in the key user-data-b64.277 # encoded value in the key user-data-b64.
219 try:278 try:
220 data['user-data'] = data['user-data'].decode()279 data['user-data'] = data['user-data'].decode()
@@ -222,7 +281,7 @@ if __name__ == "__main__":
222 sys.stderr.write("User-data cannot be decoded. "281 sys.stderr.write("User-data cannot be decoded. "
223 "Writing as base64\n")282 "Writing as base64\n")
224 del data['user-data']283 del data['user-data']
225 # b64encode returns a bytes value. decode to get the string.284 # b64encode returns a bytes value. Decode to get the string.
226 data['user-data-b64'] = b64encode(data['user-data']).decode()285 data['user-data-b64'] = b64encode(data['user-data']).decode()
227286
228 print(json.dumps(data, indent=1, sort_keys=True, separators=(',', ': ')))287 print(json.dumps(data, indent=1, sort_keys=True, separators=(',', ': ')))
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 77df5a5..6ac8863 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -8,6 +8,7 @@
88
9from __future__ import print_function9from __future__ import print_function
1010
11import hashlib
11import os12import os
12import time13import time
1314
@@ -39,30 +40,28 @@ class DataSourceMAAS(sources.DataSource):
39 hostname40 hostname
40 vendor-data41 vendor-data
41 """42 """
43
44 dsname = "MAAS"
45 id_hash = None
46 _oauth_helper = None
47
42 def __init__(self, sys_cfg, distro, paths):48 def __init__(self, sys_cfg, distro, paths):
43 sources.DataSource.__init__(self, sys_cfg, distro, paths)49 sources.DataSource.__init__(self, sys_cfg, distro, paths)
44 self.base_url = None50 self.base_url = None
45 self.seed_dir = os.path.join(paths.seed_dir, 'maas')51 self.seed_dir = os.path.join(paths.seed_dir, 'maas')
46 self.oauth_helper = self._get_helper()52 self.id_hash = get_id_from_ds_cfg(self.ds_cfg)
47
48 def _get_helper(self):
49 mcfg = self.ds_cfg
50 # If we are missing token_key, token_secret or consumer_key
51 # then just do non-authed requests
52 for required in ('token_key', 'token_secret', 'consumer_key'):
53 if required not in mcfg:
54 return url_helper.OauthUrlHelper()
5553
56 return url_helper.OauthUrlHelper(54 @property
57 consumer_key=mcfg['consumer_key'], token_key=mcfg['token_key'],55 def oauth_helper(self):
58 token_secret=mcfg['token_secret'],56 if not self._oauth_helper:
59 consumer_secret=mcfg.get('consumer_secret'))57 self._oauth_helper = get_oauth_helper(self.ds_cfg)
58 return self._oauth_helper
6059
61 def __str__(self):60 def __str__(self):
62 root = sources.DataSource.__str__(self)61 root = sources.DataSource.__str__(self)
63 return "%s [%s]" % (root, self.base_url)62 return "%s [%s]" % (root, self.base_url)
6463
65 def get_data(self):64 def _get_data(self):
66 mcfg = self.ds_cfg65 mcfg = self.ds_cfg
6766
68 try:67 try:
@@ -144,6 +143,36 @@ class DataSourceMAAS(sources.DataSource):
144143
145 return bool(url)144 return bool(url)
146145
146 def check_instance_id(self, sys_cfg):
147 """locally check if the current system is the same instance.
148
149 MAAS doesn't provide a real instance-id, and if it did, it is
150 still only available over the network. We need to check based
151 only on local resources. So compute a hash based on Oauth tokens."""
152 if self.id_hash is None:
153 return False
154 ncfg = util.get_cfg_by_path(sys_cfg, ("datasource", self.dsname), {})
155 return (self.id_hash == get_id_from_ds_cfg(ncfg))
156
157
158def get_oauth_helper(cfg):
159 """Return an oauth helper instance for values in cfg.
160
161 @raises ValueError from OauthUrlHelper if some required fields have
162 true-ish values but others do not."""
163 keys = ('consumer_key', 'consumer_secret', 'token_key', 'token_secret')
164 kwargs = dict([(r, cfg.get(r)) for r in keys])
165 return url_helper.OauthUrlHelper(**kwargs)
166
167
168def get_id_from_ds_cfg(ds_cfg):
169 """Given a config, generate a unique identifier for this node."""
170 fields = ('consumer_key', 'token_key', 'token_secret')
171 idstr = '\0'.join([ds_cfg.get(f, "") for f in fields])
172 # store the encoding version as part of the hash in the event
173 # that it ever changed we can compute older versions.
174 return 'v1:' + hashlib.sha256(idstr.encode('utf-8')).hexdigest()
175
147176
148def read_maas_seed_dir(seed_d):177def read_maas_seed_dir(seed_d):
149 if seed_d.startswith("file://"):178 if seed_d.startswith("file://"):
@@ -319,7 +348,7 @@ if __name__ == "__main__":
319 sys.stderr.write("Must provide a url or a config with url.\n")348 sys.stderr.write("Must provide a url or a config with url.\n")
320 sys.exit(1)349 sys.exit(1)
321350
322 oauth_helper = url_helper.OauthUrlHelper(**creds)351 oauth_helper = get_oauth_helper(creds)
323352
324 def geturl(url):353 def geturl(url):
325 # the retry is to ensure that oauth timestamp gets fixed354 # the retry is to ensure that oauth timestamp gets fixed
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index e641244..5d3a8dd 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -20,6 +20,9 @@ LOG = logging.getLogger(__name__)
2020
2121
22class DataSourceNoCloud(sources.DataSource):22class DataSourceNoCloud(sources.DataSource):
23
24 dsname = "NoCloud"
25
23 def __init__(self, sys_cfg, distro, paths):26 def __init__(self, sys_cfg, distro, paths):
24 sources.DataSource.__init__(self, sys_cfg, distro, paths)27 sources.DataSource.__init__(self, sys_cfg, distro, paths)
25 self.seed = None28 self.seed = None
@@ -32,7 +35,7 @@ class DataSourceNoCloud(sources.DataSource):
32 root = sources.DataSource.__str__(self)35 root = sources.DataSource.__str__(self)
33 return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode)36 return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode)
3437
35 def get_data(self):38 def _get_data(self):
36 defaults = {39 defaults = {
37 "instance-id": "nocloud",40 "instance-id": "nocloud",
38 "dsmode": self.dsmode,41 "dsmode": self.dsmode,
diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py
index 906bb27..e63a7e3 100644
--- a/cloudinit/sources/DataSourceNone.py
+++ b/cloudinit/sources/DataSourceNone.py
@@ -11,12 +11,15 @@ LOG = logging.getLogger(__name__)
1111
1212
13class DataSourceNone(sources.DataSource):13class DataSourceNone(sources.DataSource):
14
15 dsname = "None"
16
14 def __init__(self, sys_cfg, distro, paths, ud_proc=None):17 def __init__(self, sys_cfg, distro, paths, ud_proc=None):
15 sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc)18 sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc)
16 self.metadata = {}19 self.metadata = {}
17 self.userdata_raw = ''20 self.userdata_raw = ''
1821
19 def get_data(self):22 def _get_data(self):
20 # If the datasource config has any provided 'fallback'23 # If the datasource config has any provided 'fallback'
21 # userdata or metadata, use it...24 # userdata or metadata, use it...
22 if 'userdata_raw' in self.ds_cfg:25 if 'userdata_raw' in self.ds_cfg:
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index ccebf11..6e62f98 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -21,6 +21,8 @@ from cloudinit import util
2121
22from cloudinit.sources.helpers.vmware.imc.config \22from cloudinit.sources.helpers.vmware.imc.config \
23 import Config23 import Config
24from cloudinit.sources.helpers.vmware.imc.config_custom_script \
25 import PreCustomScript, PostCustomScript
24from cloudinit.sources.helpers.vmware.imc.config_file \26from cloudinit.sources.helpers.vmware.imc.config_file \
25 import ConfigFile27 import ConfigFile
26from cloudinit.sources.helpers.vmware.imc.config_nic \28from cloudinit.sources.helpers.vmware.imc.config_nic \
@@ -30,7 +32,7 @@ from cloudinit.sources.helpers.vmware.imc.config_passwd \
30from cloudinit.sources.helpers.vmware.imc.guestcust_error \32from cloudinit.sources.helpers.vmware.imc.guestcust_error \
31 import GuestCustErrorEnum33 import GuestCustErrorEnum
32from cloudinit.sources.helpers.vmware.imc.guestcust_event \34from cloudinit.sources.helpers.vmware.imc.guestcust_event \
33 import GuestCustEventEnum35 import GuestCustEventEnum as GuestCustEvent
34from cloudinit.sources.helpers.vmware.imc.guestcust_state \36from cloudinit.sources.helpers.vmware.imc.guestcust_state \
35 import GuestCustStateEnum37 import GuestCustStateEnum
36from cloudinit.sources.helpers.vmware.imc.guestcust_util import (38from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
@@ -43,6 +45,9 @@ LOG = logging.getLogger(__name__)
4345
4446
45class DataSourceOVF(sources.DataSource):47class DataSourceOVF(sources.DataSource):
48
49 dsname = "OVF"
50
46 def __init__(self, sys_cfg, distro, paths):51 def __init__(self, sys_cfg, distro, paths):
47 sources.DataSource.__init__(self, sys_cfg, distro, paths)52 sources.DataSource.__init__(self, sys_cfg, distro, paths)
48 self.seed = None53 self.seed = None
@@ -60,7 +65,7 @@ class DataSourceOVF(sources.DataSource):
60 root = sources.DataSource.__str__(self)65 root = sources.DataSource.__str__(self)
61 return "%s [seed=%s]" % (root, self.seed)66 return "%s [seed=%s]" % (root, self.seed)
6267
63 def get_data(self):68 def _get_data(self):
64 found = []69 found = []
65 md = {}70 md = {}
66 ud = ""71 ud = ""
@@ -124,17 +129,31 @@ class DataSourceOVF(sources.DataSource):
124 self._vmware_cust_conf = Config(cf)129 self._vmware_cust_conf = Config(cf)
125 (md, ud, cfg) = read_vmware_imc(self._vmware_cust_conf)130 (md, ud, cfg) = read_vmware_imc(self._vmware_cust_conf)
126 self._vmware_nics_to_enable = get_nics_to_enable(nicspath)131 self._vmware_nics_to_enable = get_nics_to_enable(nicspath)
127 markerid = self._vmware_cust_conf.marker_id132 imcdirpath = os.path.dirname(vmwareImcConfigFilePath)
128 markerexists = check_marker_exists(markerid)133 product_marker = self._vmware_cust_conf.marker_id
134 hasmarkerfile = check_marker_exists(
135 product_marker, os.path.join(self.paths.cloud_dir, 'data'))
136 special_customization = product_marker and not hasmarkerfile
137 customscript = self._vmware_cust_conf.custom_script_name
129 except Exception as e:138 except Exception as e:
130 LOG.debug("Error parsing the customization Config File")139 _raise_error_status(
131 LOG.exception(e)140 "Error parsing the customization Config File",
132 set_customization_status(141 e,
133 GuestCustStateEnum.GUESTCUST_STATE_RUNNING,142 GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
134 GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED)143 vmwareImcConfigFilePath)
135 raise e144
136 finally:145 if special_customization:
137 util.del_dir(os.path.dirname(vmwareImcConfigFilePath))146 if customscript:
147 try:
148 precust = PreCustomScript(customscript, imcdirpath)
149 precust.execute()
150 except Exception as e:
151 _raise_error_status(
152 "Error executing pre-customization script",
153 e,
154 GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
155 vmwareImcConfigFilePath)
156
138 try:157 try:
139 LOG.debug("Preparing the Network configuration")158 LOG.debug("Preparing the Network configuration")
140 self._network_config = get_network_config_from_conf(159 self._network_config = get_network_config_from_conf(
@@ -143,13 +162,13 @@ class DataSourceOVF(sources.DataSource):
143 True,162 True,
144 self.distro.osfamily)163 self.distro.osfamily)
145 except Exception as e:164 except Exception as e:
146 LOG.exception(e)165 _raise_error_status(
147 set_customization_status(166 "Error preparing Network Configuration",
148 GuestCustStateEnum.GUESTCUST_STATE_RUNNING,167 e,
149 GuestCustEventEnum.GUESTCUST_EVENT_NETWORK_SETUP_FAILED)168 GuestCustEvent.GUESTCUST_EVENT_NETWORK_SETUP_FAILED,
150 raise e169 vmwareImcConfigFilePath)
151170
152 if markerid and not markerexists:171 if special_customization:
153 LOG.debug("Applying password customization")172 LOG.debug("Applying password customization")
154 pwdConfigurator = PasswordConfigurator()173 pwdConfigurator = PasswordConfigurator()
155 adminpwd = self._vmware_cust_conf.admin_password174 adminpwd = self._vmware_cust_conf.admin_password
@@ -161,27 +180,41 @@ class DataSourceOVF(sources.DataSource):
161 else:180 else:
162 LOG.debug("Changing password is not needed")181 LOG.debug("Changing password is not needed")
163 except Exception as e:182 except Exception as e:
164 LOG.debug("Error applying Password Configuration: %s", e)183 _raise_error_status(
165 set_customization_status(184 "Error applying Password Configuration",
166 GuestCustStateEnum.GUESTCUST_STATE_RUNNING,185 e,
167 GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED)186 GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
168 return False187 vmwareImcConfigFilePath)
169 if markerid:188
170 LOG.debug("Handle marker creation")189 if customscript:
190 try:
191 postcust = PostCustomScript(customscript, imcdirpath)
192 postcust.execute()
193 except Exception as e:
194 _raise_error_status(
195 "Error executing post-customization script",
196 e,
197 GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
198 vmwareImcConfigFilePath)
199
200 if product_marker:
171 try:201 try:
172 setup_marker_files(markerid)202 setup_marker_files(
203 product_marker,
204 os.path.join(self.paths.cloud_dir, 'data'))
173 except Exception as e:205 except Exception as e:
174 LOG.debug("Error creating marker files: %s", e)206 _raise_error_status(
175 set_customization_status(207 "Error creating marker files",
176 GuestCustStateEnum.GUESTCUST_STATE_RUNNING,208 e,
177 GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED)209 GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
178 return False210 vmwareImcConfigFilePath)
179211
180 self._vmware_cust_found = True212 self._vmware_cust_found = True
181 found.append('vmware-tools')213 found.append('vmware-tools')
182214
183 # TODO: Need to set the status to DONE only when the215 # TODO: Need to set the status to DONE only when the
184 # customization is done successfully.216 # customization is done successfully.
217 util.del_dir(os.path.dirname(vmwareImcConfigFilePath))
185 enable_nics(self._vmware_nics_to_enable)218 enable_nics(self._vmware_nics_to_enable)
186 set_customization_status(219 set_customization_status(
187 GuestCustStateEnum.GUESTCUST_STATE_DONE,220 GuestCustStateEnum.GUESTCUST_STATE_DONE,
@@ -536,31 +569,52 @@ def get_datasource_list(depends):
536569
537570
538# To check if marker file exists571# To check if marker file exists
539def check_marker_exists(markerid):572def check_marker_exists(markerid, marker_dir):
540 """573 """
541 Check the existence of a marker file.574 Check the existence of a marker file.
542 Presence of marker file determines whether a certain code path is to be575 Presence of marker file determines whether a certain code path is to be
543 executed. It is needed for partial guest customization in VMware.576 executed. It is needed for partial guest customization in VMware.
577 @param markerid: is an unique string representing a particular product
578 marker.
579 @param: marker_dir: The directory in which markers exist.
544 """580 """
545 if not markerid:581 if not markerid:
546 return False582 return False
547 markerfile = "/.markerfile-" + markerid583 markerfile = os.path.join(marker_dir, ".markerfile-" + markerid + ".txt")
548 if os.path.exists(markerfile):584 if os.path.exists(markerfile):
549 return True585 return True
550 return False586 return False
551587
552588
553# Create a marker file589# Create a marker file
554def setup_marker_files(markerid):590def setup_marker_files(markerid, marker_dir):
555 """591 """
556 Create a new marker file.592 Create a new marker file.
557 Marker files are unique to a full customization workflow in VMware593 Marker files are unique to a full customization workflow in VMware
558 environment.594 environment.
595 @param markerid: is an unique string representing a particular product
596 marker.
597 @param: marker_dir: The directory in which markers exist.
598
559 """599 """
560 if not markerid:600 LOG.debug("Handle marker creation")
561 return601 markerfile = os.path.join(marker_dir, ".markerfile-" + markerid + ".txt")
562 markerfile = "/.markerfile-" + markerid602 for fname in os.listdir(marker_dir):
563 util.del_file("/.markerfile-*.txt")603 if fname.startswith(".markerfile"):
604 util.del_file(os.path.join(marker_dir, fname))
564 open(markerfile, 'w').close()605 open(markerfile, 'w').close()
565606
607
608def _raise_error_status(prefix, error, event, config_file):
609 """
610 Raise error and send customization status to the underlying VMware
611 Virtualization Platform. Also, cleanup the imc directory.
612 """
613 LOG.debug('%s: %s', prefix, error)
614 set_customization_status(
615 GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
616 event)
617 util.del_dir(os.path.dirname(config_file))
618 raise error
619
566# vi: ts=4 expandtab620# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index 5fdac19..ce47b6b 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -12,6 +12,7 @@
12#12#
13# This file is part of cloud-init. See LICENSE file for license information.13# This file is part of cloud-init. See LICENSE file for license information.
1414
15import collections
15import os16import os
16import pwd17import pwd
17import re18import re
@@ -19,6 +20,7 @@ import string
1920
20from cloudinit import log as logging21from cloudinit import log as logging
21from cloudinit import net22from cloudinit import net
23from cloudinit.net import eni
22from cloudinit import sources24from cloudinit import sources
23from cloudinit import util25from cloudinit import util
2426
@@ -31,6 +33,9 @@ CONTEXT_DISK_FILES = ["context.sh"]
3133
3234
33class DataSourceOpenNebula(sources.DataSource):35class DataSourceOpenNebula(sources.DataSource):
36
37 dsname = "OpenNebula"
38
34 def __init__(self, sys_cfg, distro, paths):39 def __init__(self, sys_cfg, distro, paths):
35 sources.DataSource.__init__(self, sys_cfg, distro, paths)40 sources.DataSource.__init__(self, sys_cfg, distro, paths)
36 self.seed = None41 self.seed = None
@@ -40,7 +45,7 @@ class DataSourceOpenNebula(sources.DataSource):
40 root = sources.DataSource.__str__(self)45 root = sources.DataSource.__str__(self)
41 return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode)46 return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode)
4247
43 def get_data(self):48 def _get_data(self):
44 defaults = {"instance-id": DEFAULT_IID}49 defaults = {"instance-id": DEFAULT_IID}
45 results = None50 results = None
46 seed = None51 seed = None
@@ -86,11 +91,18 @@ class DataSourceOpenNebula(sources.DataSource):
86 return False91 return False
8792
88 self.seed = seed93 self.seed = seed
89 self.network_eni = results.get("network_config")94 self.network_eni = results.get('network-interfaces')
90 self.metadata = md95 self.metadata = md
91 self.userdata_raw = results.get('userdata')96 self.userdata_raw = results.get('userdata')
92 return True97 return True
9398
99 @property
100 def network_config(self):
101 if self.network_eni is not None:
102 return eni.convert_eni_data(self.network_eni)
103 else:
104 return None
105
94 def get_hostname(self, fqdn=False, resolve_ip=None):106 def get_hostname(self, fqdn=False, resolve_ip=None):
95 if resolve_ip is None:107 if resolve_ip is None:
96 if self.dsmode == sources.DSMODE_NETWORK:108 if self.dsmode == sources.DSMODE_NETWORK:
@@ -113,58 +125,53 @@ class OpenNebulaNetwork(object):
113 self.context = context125 self.context = context
114 if system_nics_by_mac is None:126 if system_nics_by_mac is None:
115 system_nics_by_mac = get_physical_nics_by_mac()127 system_nics_by_mac = get_physical_nics_by_mac()
116 self.ifaces = system_nics_by_mac128 self.ifaces = collections.OrderedDict(
129 [k for k in sorted(system_nics_by_mac.items(),
130 key=lambda k: net.natural_sort_key(k[1]))])
131
132 # OpenNebula 4.14+ provide macaddr for ETHX in variable ETH_MAC.
133 # context_devname provides {mac.lower():ETHX, mac2.lower():ETHX}
134 self.context_devname = {}
135 for k, v in context.items():
136 m = re.match(r'^(.+)_MAC$', k)
137 if m:
138 self.context_devname[v.lower()] = m.group(1)
117139
118 def mac2ip(self, mac):140 def mac2ip(self, mac):
119 components = mac.split(':')[2:]141 return '.'.join([str(int(c, 16)) for c in mac.split(':')[2:]])
120 return [str(int(c, 16)) for c in components]
121142
122 def get_ip(self, dev, components):143 def mac2network(self, mac):
123 var_name = dev.upper() + '_IP'144 return self.mac2ip(mac).rpartition(".")[0] + ".0"
124 if var_name in self.context:
125 return self.context[var_name]
126 else:
127 return '.'.join(components)
128145
129 def get_mask(self, dev):146 def get_dns(self, dev):
130 var_name = dev.upper() + '_MASK'147 return self.get_field(dev, "dns", "").split()
131 if var_name in self.context:
132 return self.context[var_name]
133 else:
134 return '255.255.255.0'
135148
136 def get_network(self, dev, components):149 def get_domain(self, dev):
137 var_name = dev.upper() + '_NETWORK'150 return self.get_field(dev, "domain")
138 if var_name in self.context:151
139 return self.context[var_name]152 def get_ip(self, dev, mac):
140 else:153 return self.get_field(dev, "ip", self.mac2ip(mac))
141 return '.'.join(components[:-1]) + '.0'
142154
143 def get_gateway(self, dev):155 def get_gateway(self, dev):
144 var_name = dev.upper() + '_GATEWAY'156 return self.get_field(dev, "gateway")
145 if var_name in self.context:
146 return self.context[var_name]
147 else:
148 return None
149157
150 def get_dns(self, dev):158 def get_mask(self, dev):
151 var_name = dev.upper() + '_DNS'159 return self.get_field(dev, "mask", "255.255.255.0")
152 if var_name in self.context:
153 return self.context[var_name]
154 else:
155 return None
156160
157 def get_domain(self, dev):161 def get_network(self, dev, mac):
158 var_name = dev.upper() + '_DOMAIN'162 return self.get_field(dev, "network", self.mac2network(mac))
159 if var_name in self.context:163
160 return self.context[var_name]164 def get_field(self, dev, name, default=None):
161 else:165 """return the field name in context for device dev.
162 return None166
167 context stores <dev>_<NAME> (example: eth0_DOMAIN).
168 an empty string for value will return default."""
169 val = self.context.get('_'.join((dev, name,)).upper())
170 # allow empty string to return the default.
171 return default if val in (None, "") else val
163172
164 def gen_conf(self):173 def gen_conf(self):
165 global_dns = []174 global_dns = self.context.get('DNS', "").split()
166 if 'DNS' in self.context:
167 global_dns.append(self.context['DNS'])
168175
169 conf = []176 conf = []
170 conf.append('auto lo')177 conf.append('auto lo')
@@ -172,29 +179,31 @@ class OpenNebulaNetwork(object):
172 conf.append('')179 conf.append('')
173180
174 for mac, dev in self.ifaces.items():181 for mac, dev in self.ifaces.items():
175 ip_components = self.mac2ip(mac)182 mac = mac.lower()
183
184 # c_dev stores name in context 'ETHX' for this device.
185 # dev stores the current system name.
186 c_dev = self.context_devname.get(mac, dev)
176187
177 conf.append('auto ' + dev)188 conf.append('auto ' + dev)
178 conf.append('iface ' + dev + ' inet static')189 conf.append('iface ' + dev + ' inet static')
179 conf.append(' address ' + self.get_ip(dev, ip_components))190 conf.append(' #hwaddress %s' % mac)
180 conf.append(' network ' + self.get_network(dev, ip_components))191 conf.append(' address ' + self.get_ip(c_dev, mac))
181 conf.append(' netmask ' + self.get_mask(dev))192 conf.append(' network ' + self.get_network(c_dev, mac))
193 conf.append(' netmask ' + self.get_mask(c_dev))
182194
183 gateway = self.get_gateway(dev)195 gateway = self.get_gateway(c_dev)
184 if gateway:196 if gateway:
185 conf.append(' gateway ' + gateway)197 conf.append(' gateway ' + gateway)
186198
187 domain = self.get_domain(dev)199 domain = self.get_domain(c_dev)
188 if domain:200 if domain:
189 conf.append(' dns-search ' + domain)201 conf.append(' dns-search ' + domain)
190202
191 # add global DNS servers to all interfaces203 # add global DNS servers to all interfaces
192 dns = self.get_dns(dev)204 dns = self.get_dns(c_dev)
193 if global_dns or dns:205 if global_dns or dns:
194 all_dns = global_dns206 conf.append(' dns-nameservers ' + ' '.join(global_dns + dns))
195 if dns:
196 all_dns.append(dns)
197 conf.append(' dns-nameservers ' + ' '.join(all_dns))
198207
199 conf.append('')208 conf.append('')
200209
@@ -329,8 +338,9 @@ def read_context_disk_dir(source_dir, asuser=None):
329 try:338 try:
330 pwd.getpwnam(asuser)339 pwd.getpwnam(asuser)
331 except KeyError as e:340 except KeyError as e:
332 raise BrokenContextDiskDir("configured user '%s' "341 raise BrokenContextDiskDir(
333 "does not exist", asuser)342 "configured user '{user}' does not exist".format(
343 user=asuser))
334 try:344 try:
335 path = os.path.join(source_dir, 'context.sh')345 path = os.path.join(source_dir, 'context.sh')
336 content = util.load_file(path)346 content = util.load_file(path)
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index b64a7f2..e55a763 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -24,6 +24,9 @@ DEFAULT_METADATA = {
2424
2525
26class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):26class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
27
28 dsname = "OpenStack"
29
27 def __init__(self, sys_cfg, distro, paths):30 def __init__(self, sys_cfg, distro, paths):
28 super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths)31 super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths)
29 self.metadata_address = None32 self.metadata_address = None
@@ -96,7 +99,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
96 self.metadata_address = url2base.get(avail_url)99 self.metadata_address = url2base.get(avail_url)
97 return bool(avail_url)100 return bool(avail_url)
98101
99 def get_data(self):102 def _get_data(self):
100 try:103 try:
101 if not self.wait_for_metadata_service():104 if not self.wait_for_metadata_service():
102 return False105 return False
diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py
index 3a8a8e8..b0b19c9 100644
--- a/cloudinit/sources/DataSourceScaleway.py
+++ b/cloudinit/sources/DataSourceScaleway.py
@@ -169,6 +169,8 @@ def query_data_api(api_type, api_address, retries, timeout):
169169
170class DataSourceScaleway(sources.DataSource):170class DataSourceScaleway(sources.DataSource):
171171
172 dsname = "Scaleway"
173
172 def __init__(self, sys_cfg, distro, paths):174 def __init__(self, sys_cfg, distro, paths):
173 super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths)175 super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths)
174176
@@ -184,7 +186,7 @@ class DataSourceScaleway(sources.DataSource):
184 self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES))186 self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES))
185 self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT))187 self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT))
186188
187 def get_data(self):189 def _get_data(self):
188 if not on_scaleway():190 if not on_scaleway():
189 return False191 return False
190192
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 6c6902f..86bfa5d 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -159,6 +159,9 @@ LEGACY_USER_D = "/var/db"
159159
160160
161class DataSourceSmartOS(sources.DataSource):161class DataSourceSmartOS(sources.DataSource):
162
163 dsname = "Joyent"
164
162 _unset = "_unset"165 _unset = "_unset"
163 smartos_type = _unset166 smartos_type = _unset
164 md_client = _unset167 md_client = _unset
@@ -211,7 +214,7 @@ class DataSourceSmartOS(sources.DataSource):
211 os.rename('/'.join([svc_path, 'provisioning']),214 os.rename('/'.join([svc_path, 'provisioning']),
212 '/'.join([svc_path, 'provision_success']))215 '/'.join([svc_path, 'provision_success']))
213216
214 def get_data(self):217 def _get_data(self):
215 self._init()218 self._init()
216219
217 md = {}220 md = {}
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 9a43fbe..a05ca2f 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -10,9 +10,11 @@
1010
11import abc11import abc
12import copy12import copy
13import json
13import os14import os
14import six15import six
1516
17from cloudinit.atomic_helper import write_json
16from cloudinit import importer18from cloudinit import importer
17from cloudinit import log as logging19from cloudinit import log as logging
18from cloudinit import type_utils20from cloudinit import type_utils
@@ -33,6 +35,12 @@ DEP_FILESYSTEM = "FILESYSTEM"
33DEP_NETWORK = "NETWORK"35DEP_NETWORK = "NETWORK"
34DS_PREFIX = 'DataSource'36DS_PREFIX = 'DataSource'
3537
38# File in which instance meta-data, user-data and vendor-data is written
39INSTANCE_JSON_FILE = 'instance-data.json'
40
41# Key which can be provide a cloud's official product name to cloud-init
42METADATA_CLOUD_NAME_KEY = 'cloud-name'
43
36LOG = logging.getLogger(__name__)44LOG = logging.getLogger(__name__)
3745
3846
@@ -40,12 +48,39 @@ class DataSourceNotFoundException(Exception):
40 pass48 pass
4149
4250
51def process_base64_metadata(metadata, key_path=''):
52 """Strip ci-b64 prefix and return metadata with base64-encoded-keys set."""
53 md_copy = copy.deepcopy(metadata)
54 md_copy['base64-encoded-keys'] = []
55 for key, val in metadata.items():
56 if key_path:
57 sub_key_path = key_path + '/' + key
58 else:
59 sub_key_path = key
60 if isinstance(val, str) and val.startswith('ci-b64:'):
61 md_copy['base64-encoded-keys'].append(sub_key_path)
62 md_copy[key] = val.replace('ci-b64:', '')
63 if isinstance(val, dict):
64 return_val = process_base64_metadata(val, sub_key_path)
65 md_copy['base64-encoded-keys'].extend(
66 return_val.pop('base64-encoded-keys'))
67 md_copy[key] = return_val
68 return md_copy
69
70
43@six.add_metaclass(abc.ABCMeta)71@six.add_metaclass(abc.ABCMeta)
44class DataSource(object):72class DataSource(object):
4573
46 dsmode = DSMODE_NETWORK74 dsmode = DSMODE_NETWORK
47 default_locale = 'en_US.UTF-8'75 default_locale = 'en_US.UTF-8'
4876
77 # Datasource name needs to be set by subclasses to determine which
78 # cloud-config datasource key is loaded
79 dsname = '_undef'
80
81 # Cached cloud_name as determined by _get_cloud_name
82 _cloud_name = None
83
49 def __init__(self, sys_cfg, distro, paths, ud_proc=None):84 def __init__(self, sys_cfg, distro, paths, ud_proc=None):
50 self.sys_cfg = sys_cfg85 self.sys_cfg = sys_cfg
51 self.distro = distro86 self.distro = distro
@@ -56,17 +91,8 @@ class DataSource(object):
56 self.vendordata = None91 self.vendordata = None
57 self.vendordata_raw = None92 self.vendordata_raw = None
5893
59 # find the datasource config name.94 self.ds_cfg = util.get_cfg_by_path(
60 # remove 'DataSource' from classname on front, and remove 'Net' on end.95 self.sys_cfg, ("datasource", self.dsname), {})
61 # Both Foo and FooNet sources expect config in cfg['sources']['Foo']
62 name = type_utils.obj_name(self)
63 if name.startswith(DS_PREFIX):
64 name = name[len(DS_PREFIX):]
65 if name.endswith('Net'):
66 name = name[0:-3]
67
68 self.ds_cfg = util.get_cfg_by_path(self.sys_cfg,
69 ("datasource", name), {})
70 if not self.ds_cfg:96 if not self.ds_cfg:
71 self.ds_cfg = {}97 self.ds_cfg = {}
7298
@@ -78,6 +104,51 @@ class DataSource(object):
78 def __str__(self):104 def __str__(self):
79 return type_utils.obj_name(self)105 return type_utils.obj_name(self)
80106
107 def _get_standardized_metadata(self):
108 """Return a dictionary of standardized metadata keys."""
109 return {'v1': {
110 'local-hostname': self.get_hostname(),
111 'instance-id': self.get_instance_id(),
112 'cloud-name': self.cloud_name,
113 'region': self.region,
114 'availability-zone': self.availability_zone}}
115
116 def get_data(self):
117 """Datasources implement _get_data to setup metadata and userdata_raw.
118
119 Minimally, the datasource should return a boolean True on success.
120 """
121 return_value = self._get_data()
122 json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE)
123 if not return_value:
124 return return_value
125
126 instance_data = {
127 'ds': {
128 'meta-data': self.metadata,
129 'user-data': self.get_userdata_raw(),
130 'vendor-data': self.get_vendordata_raw()}}
131 instance_data.update(
132 self._get_standardized_metadata())
133 try:
134 # Process content base64encoding unserializable values
135 content = util.json_dumps(instance_data)
136 # Strip base64: prefix and return base64-encoded-keys
137 processed_data = process_base64_metadata(json.loads(content))
138 except TypeError as e:
139 LOG.warning('Error persisting instance-data.json: %s', str(e))
140 return return_value
141 except UnicodeDecodeError as e:
142 LOG.warning('Error persisting instance-data.json: %s', str(e))
143 return return_value
144 write_json(json_file, processed_data, mode=0o600)
145 return return_value
146
147 def _get_data(self):
148 raise NotImplementedError(
149 'Subclasses of DataSource must implement _get_data which'
150 ' sets self.metadata, vendordata_raw and userdata_raw.')
151
81 def get_userdata(self, apply_filter=False):152 def get_userdata(self, apply_filter=False):
82 if self.userdata is None:153 if self.userdata is None:
83 self.userdata = self.ud_proc.process(self.get_userdata_raw())154 self.userdata = self.ud_proc.process(self.get_userdata_raw())
@@ -91,6 +162,34 @@ class DataSource(object):
91 return self.vendordata162 return self.vendordata
92163
93 @property164 @property
165 def cloud_name(self):
166 """Return lowercase cloud name as determined by the datasource.
167
168 Datasource can determine or define its own cloud product name in
169 metadata.
170 """
171 if self._cloud_name:
172 return self._cloud_name
173 if self.metadata and self.metadata.get(METADATA_CLOUD_NAME_KEY):
174 cloud_name = self.metadata.get(METADATA_CLOUD_NAME_KEY)
175 if isinstance(cloud_name, six.string_types):
176 self._cloud_name = cloud_name.lower()
177 LOG.debug(
178 'Ignoring metadata provided key %s: non-string type %s',
179 METADATA_CLOUD_NAME_KEY, type(cloud_name))
180 else:
181 self._cloud_name = self._get_cloud_name().lower()
182 return self._cloud_name
183
184 def _get_cloud_name(self):
185 """Return the datasource name as it frequently matches cloud name.
186
187 Should be overridden in subclasses which can run on multiple
188 cloud names, such as DatasourceEc2.
189 """
190 return self.dsname
191
192 @property
94 def launch_index(self):193 def launch_index(self):
95 if not self.metadata:194 if not self.metadata:
96 return None195 return None
@@ -161,8 +260,11 @@ class DataSource(object):
161260
162 @property261 @property
163 def availability_zone(self):262 def availability_zone(self):
164 return self.metadata.get('availability-zone',263 top_level_az = self.metadata.get(
165 self.metadata.get('availability_zone'))264 'availability-zone', self.metadata.get('availability_zone'))
265 if top_level_az:
266 return top_level_az
267 return self.metadata.get('placement', {}).get('availability-zone')
166268
167 @property269 @property
168 def region(self):270 def region(self):
@@ -346,7 +448,7 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
346# Return an ordered list of classes that match (if any)448# Return an ordered list of classes that match (if any)
347def list_sources(cfg_list, depends, pkg_list):449def list_sources(cfg_list, depends, pkg_list):
348 src_list = []450 src_list = []
349 LOG.debug(("Looking for for data source in: %s,"451 LOG.debug(("Looking for data source in: %s,"
350 " via packages %s that matches dependencies %s"),452 " via packages %s that matches dependencies %s"),
351 cfg_list, pkg_list, depends)453 cfg_list, pkg_list, depends)
352 for ds_name in cfg_list:454 for ds_name in cfg_list:
@@ -417,4 +519,5 @@ def list_from_depends(depends, ds_list):
417 ret_list.append(cls)519 ret_list.append(cls)
418 return ret_list520 return ret_list
419521
522
420# vi: ts=4 expandtab523# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index 959b1bd..90c12df 100644
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -199,10 +199,10 @@ class WALinuxAgentShim(object):
199 ' </Container>',199 ' </Container>',
200 '</Health>'])200 '</Health>'])
201201
202 def __init__(self, fallback_lease_file=None):202 def __init__(self, fallback_lease_file=None, dhcp_options=None):
203 LOG.debug('WALinuxAgentShim instantiated, fallback_lease_file=%s',203 LOG.debug('WALinuxAgentShim instantiated, fallback_lease_file=%s',
204 fallback_lease_file)204 fallback_lease_file)
205 self.dhcpoptions = None205 self.dhcpoptions = dhcp_options
206 self._endpoint = None206 self._endpoint = None
207 self.openssl_manager = None207 self.openssl_manager = None
208 self.values = {}208 self.values = {}
@@ -220,7 +220,8 @@ class WALinuxAgentShim(object):
220 @property220 @property
221 def endpoint(self):221 def endpoint(self):
222 if self._endpoint is None:222 if self._endpoint is None:
223 self._endpoint = self.find_endpoint(self.lease_file)223 self._endpoint = self.find_endpoint(self.lease_file,
224 self.dhcpoptions)
224 return self._endpoint225 return self._endpoint
225226
226 @staticmethod227 @staticmethod
@@ -274,7 +275,8 @@ class WALinuxAgentShim(object):
274 name = os.path.basename(hook_file).replace('.json', '')275 name = os.path.basename(hook_file).replace('.json', '')
275 dhcp_options[name] = json.loads(util.load_file((hook_file)))276 dhcp_options[name] = json.loads(util.load_file((hook_file)))
276 except ValueError:277 except ValueError:
277 raise ValueError("%s is not valid JSON data", hook_file)278 raise ValueError(
279 '{_file} is not valid JSON data'.format(_file=hook_file))
278 return dhcp_options280 return dhcp_options
279281
280 @staticmethod282 @staticmethod
@@ -291,10 +293,14 @@ class WALinuxAgentShim(object):
291 return _value293 return _value
292294
293 @staticmethod295 @staticmethod
294 def find_endpoint(fallback_lease_file=None):296 def find_endpoint(fallback_lease_file=None, dhcp245=None):
295 value = None297 value = None
296 LOG.debug('Finding Azure endpoint from networkd...')298 if dhcp245 is not None:
297 value = WALinuxAgentShim._networkd_get_value_from_leases()299 value = dhcp245
300 LOG.debug("Using Azure Endpoint from dhcp options")
301 if value is None:
302 LOG.debug('Finding Azure endpoint from networkd...')
303 value = WALinuxAgentShim._networkd_get_value_from_leases()
298 if value is None:304 if value is None:
299 # Option-245 stored in /run/cloud-init/dhclient.hooks/<ifc>.json305 # Option-245 stored in /run/cloud-init/dhclient.hooks/<ifc>.json
300 # a dhclient exit hook that calls cloud-init-dhclient-hook306 # a dhclient exit hook that calls cloud-init-dhclient-hook
@@ -366,8 +372,9 @@ class WALinuxAgentShim(object):
366 LOG.info('Reported ready to Azure fabric.')372 LOG.info('Reported ready to Azure fabric.')
367373
368374
369def get_metadata_from_fabric(fallback_lease_file=None):375def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None):
370 shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file)376 shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file,
377 dhcp_options=dhcp_opts)
371 try:378 try:
372 return shim.register_with_azure_and_fetch_data()379 return shim.register_with_azure_and_fetch_data()
373 finally:380 finally:
diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py
index 49d441d..2eaeff3 100644
--- a/cloudinit/sources/helpers/vmware/imc/config.py
+++ b/cloudinit/sources/helpers/vmware/imc/config.py
@@ -100,4 +100,8 @@ class Config(object):
100 """Returns marker id."""100 """Returns marker id."""
101 return self._configFile.get(Config.MARKERID, None)101 return self._configFile.get(Config.MARKERID, None)
102102
103 @property
104 def custom_script_name(self):
105 """Return the name of custom (pre/post) script."""
106 return self._configFile.get(Config.CUSTOM_SCRIPT, None)
103# vi: ts=4 expandtab107# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
104new file mode 100644108new file mode 100644
index 0000000..a7d4ad9
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
@@ -0,0 +1,153 @@
1# Copyright (C) 2017 Canonical Ltd.
2# Copyright (C) 2017 VMware Inc.
3#
4# Author: Maitreyee Saikia <msaikia@vmware.com>
5#
6# This file is part of cloud-init. See LICENSE file for license information.
7
8import logging
9import os
10import stat
11from textwrap import dedent
12
13from cloudinit import util
14
15LOG = logging.getLogger(__name__)
16
17
18class CustomScriptNotFound(Exception):
19 pass
20
21
22class CustomScriptConstant(object):
23 RC_LOCAL = "/etc/rc.local"
24 POST_CUST_TMP_DIR = "/root/.customization"
25 POST_CUST_RUN_SCRIPT_NAME = "post-customize-guest.sh"
26 POST_CUST_RUN_SCRIPT = os.path.join(POST_CUST_TMP_DIR,
27 POST_CUST_RUN_SCRIPT_NAME)
28 POST_REBOOT_PENDING_MARKER = "/.guest-customization-post-reboot-pending"
29
30
31class RunCustomScript(object):
32 def __init__(self, scriptname, directory):
33 self.scriptname = scriptname
34 self.directory = directory
35 self.scriptpath = os.path.join(directory, scriptname)
36
37 def prepare_script(self):
38 if not os.path.exists(self.scriptpath):
39 raise CustomScriptNotFound("Script %s not found!! "
40 "Cannot execute custom script!"
41 % self.scriptpath)
42 # Strip any CR characters from the decoded script
43 util.load_file(self.scriptpath).replace("\r", "")
44 st = os.stat(self.scriptpath)
45 os.chmod(self.scriptpath, st.st_mode | stat.S_IEXEC)
46
47
48class PreCustomScript(RunCustomScript):
49 def execute(self):
50 """Executing custom script with precustomization argument."""
51 LOG.debug("Executing pre-customization script")
52 self.prepare_script()
53 util.subp(["/bin/sh", self.scriptpath, "precustomization"])
54
55
56class PostCustomScript(RunCustomScript):
57 def __init__(self, scriptname, directory):
58 super(PostCustomScript, self).__init__(scriptname, directory)
59 # Determine when to run custom script. When postreboot is True,
60 # the user uploaded script will run as part of rc.local after
61 # the machine reboots. This is determined by presence of rclocal.
62 # When postreboot is False, script will run as part of cloud-init.
63 self.postreboot = False
64
65 def _install_post_reboot_agent(self, rclocal):
66 """
67 Install post-reboot agent for running custom script after reboot.
68 As part of this process, we are editing the rclocal file to run a
 69 VMware script, which in turn is responsible for handling the user
70 script.
 71 @param rclocal: path to rc local.
72 """
73 LOG.debug("Installing post-reboot customization from %s to %s",
74 self.directory, rclocal)
75 if not self.has_previous_agent(rclocal):
76 LOG.info("Adding post-reboot customization agent to rc.local")
77 new_content = dedent("""
78 # Run post-reboot guest customization
79 /bin/sh %s
80 exit 0
81 """) % CustomScriptConstant.POST_CUST_RUN_SCRIPT
82 existing_rclocal = util.load_file(rclocal).replace('exit 0\n', '')
83 st = os.stat(rclocal)
84 # "x" flag should be set
85 mode = st.st_mode | stat.S_IEXEC
86 util.write_file(rclocal, existing_rclocal + new_content, mode)
87
88 else:
 89 # We don't need to update rclocal file every time a customization
90 # is requested. It just needs to be done for the first time.
91 LOG.info("Post-reboot guest customization agent is already "
92 "registered in rc.local")
93 LOG.debug("Installing post-reboot customization agent finished: %s",
94 self.postreboot)
95
96 def has_previous_agent(self, rclocal):
97 searchstring = "# Run post-reboot guest customization"
98 if searchstring in open(rclocal).read():
99 return True
100 return False
101
102 def find_rc_local(self):
103 """
104 Determine if rc local is present.
105 """
106 rclocal = ""
107 if os.path.exists(CustomScriptConstant.RC_LOCAL):
108 LOG.debug("rc.local detected.")
109 # resolving in case of symlink
110 rclocal = os.path.realpath(CustomScriptConstant.RC_LOCAL)
111 LOG.debug("rc.local resolved to %s", rclocal)
112 else:
113 LOG.warning("Can't find rc.local, post-customization "
114 "will be run before reboot")
115 return rclocal
116
117 def install_agent(self):
118 rclocal = self.find_rc_local()
119 if rclocal:
120 self._install_post_reboot_agent(rclocal)
121 self.postreboot = True
122
123 def execute(self):
124 """
125 This method executes post-customization script before or after reboot
126 based on the presence of rc local.
127 """
128 self.prepare_script()
129 self.install_agent()
130 if not self.postreboot:
131 LOG.warning("Executing post-customization script inline")
132 util.subp(["/bin/sh", self.scriptpath, "postcustomization"])
133 else:
134 LOG.debug("Scheduling custom script to run post reboot")
135 if not os.path.isdir(CustomScriptConstant.POST_CUST_TMP_DIR):
136 os.mkdir(CustomScriptConstant.POST_CUST_TMP_DIR)
 137 # Script "post-customize-guest.sh" and the user uploaded script
 138 # are present in the same directory and need to be copied to a temp
139 # directory to be executed post reboot. User uploaded script is
140 # saved as customize.sh in the temp directory.
 141 # post-customize-guest.sh executes customize.sh after reboot.
142 LOG.debug("Copying post-customization script")
143 util.copy(self.scriptpath,
144 CustomScriptConstant.POST_CUST_TMP_DIR + "/customize.sh")
145 LOG.debug("Copying script to run post-customization script")
146 util.copy(
147 os.path.join(self.directory,
148 CustomScriptConstant.POST_CUST_RUN_SCRIPT_NAME),
149 CustomScriptConstant.POST_CUST_RUN_SCRIPT)
150 LOG.info("Creating post-reboot pending marker")
151 util.ensure_file(CustomScriptConstant.POST_REBOOT_PENDING_MARKER)
152
153# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
index 2fb07c5..2d8900e 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -161,7 +161,7 @@ class NicConfigurator(object):
161 if nic.primary and v4.gateways:161 if nic.primary and v4.gateways:
162 self.ipv4PrimaryGateway = v4.gateways[0]162 self.ipv4PrimaryGateway = v4.gateways[0]
163 subnet.update({'gateway': self.ipv4PrimaryGateway})163 subnet.update({'gateway': self.ipv4PrimaryGateway})
164 return [subnet]164 return ([subnet], route_list)
165165
166 # Add routes if there is no primary nic166 # Add routes if there is no primary nic
167 if not self._primaryNic:167 if not self._primaryNic:
diff --git a/cloudinit/sources/tests/__init__.py b/cloudinit/sources/tests/__init__.py
168new file mode 100644168new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/cloudinit/sources/tests/__init__.py
diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py
169new file mode 100644169new file mode 100644
index 0000000..af15115
--- /dev/null
+++ b/cloudinit/sources/tests/test_init.py
@@ -0,0 +1,202 @@
1# This file is part of cloud-init. See LICENSE file for license information.
2
3import os
4import six
5import stat
6
7from cloudinit.helpers import Paths
8from cloudinit.sources import (
9 INSTANCE_JSON_FILE, DataSource)
10from cloudinit.tests.helpers import CiTestCase, skipIf
11from cloudinit.user_data import UserDataProcessor
12from cloudinit import util
13
14
15class DataSourceTestSubclassNet(DataSource):
16
17 dsname = 'MyTestSubclass'
18
19 def __init__(self, sys_cfg, distro, paths, custom_userdata=None):
20 super(DataSourceTestSubclassNet, self).__init__(
21 sys_cfg, distro, paths)
22 self._custom_userdata = custom_userdata
23
24 def _get_cloud_name(self):
25 return 'SubclassCloudName'
26
27 def _get_data(self):
28 self.metadata = {'availability_zone': 'myaz',
29 'local-hostname': 'test-subclass-hostname',
30 'region': 'myregion'}
31 if self._custom_userdata:
32 self.userdata_raw = self._custom_userdata
33 else:
34 self.userdata_raw = 'userdata_raw'
35 self.vendordata_raw = 'vendordata_raw'
36 return True
37
38
39class InvalidDataSourceTestSubclassNet(DataSource):
40 pass
41
42
43class TestDataSource(CiTestCase):
44
45 with_logs = True
46
47 def setUp(self):
48 super(TestDataSource, self).setUp()
49 self.sys_cfg = {'datasource': {'_undef': {'key1': False}}}
50 self.distro = 'distrotest' # generally should be a Distro object
51 self.paths = Paths({})
52 self.datasource = DataSource(self.sys_cfg, self.distro, self.paths)
53
54 def test_datasource_init(self):
55 """DataSource initializes metadata attributes, ds_cfg and ud_proc."""
56 self.assertEqual(self.paths, self.datasource.paths)
57 self.assertEqual(self.sys_cfg, self.datasource.sys_cfg)
58 self.assertEqual(self.distro, self.datasource.distro)
59 self.assertIsNone(self.datasource.userdata)
60 self.assertEqual({}, self.datasource.metadata)
61 self.assertIsNone(self.datasource.userdata_raw)
62 self.assertIsNone(self.datasource.vendordata)
63 self.assertIsNone(self.datasource.vendordata_raw)
64 self.assertEqual({'key1': False}, self.datasource.ds_cfg)
65 self.assertIsInstance(self.datasource.ud_proc, UserDataProcessor)
66
67 def test_datasource_init_gets_ds_cfg_using_dsname(self):
68 """Init uses DataSource.dsname for sourcing ds_cfg."""
69 sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}}
70 distro = 'distrotest' # generally should be a Distro object
71 paths = Paths({})
72 datasource = DataSourceTestSubclassNet(sys_cfg, distro, paths)
73 self.assertEqual({'key2': False}, datasource.ds_cfg)
74
75 def test_str_is_classname(self):
76 """The string representation of the datasource is the classname."""
77 self.assertEqual('DataSource', str(self.datasource))
78 self.assertEqual(
79 'DataSourceTestSubclassNet',
80 str(DataSourceTestSubclassNet('', '', self.paths)))
81
82 def test__get_data_unimplemented(self):
83 """Raise an error when _get_data is not implemented."""
84 with self.assertRaises(NotImplementedError) as context_manager:
85 self.datasource.get_data()
86 self.assertIn(
87 'Subclasses of DataSource must implement _get_data',
88 str(context_manager.exception))
89 datasource2 = InvalidDataSourceTestSubclassNet(
90 self.sys_cfg, self.distro, self.paths)
91 with self.assertRaises(NotImplementedError) as context_manager:
92 datasource2.get_data()
93 self.assertIn(
94 'Subclasses of DataSource must implement _get_data',
95 str(context_manager.exception))
96
97 def test_get_data_calls_subclass__get_data(self):
98 """Datasource.get_data uses the subclass' version of _get_data."""
99 tmp = self.tmp_dir()
100 datasource = DataSourceTestSubclassNet(
101 self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
102 self.assertTrue(datasource.get_data())
103 self.assertEqual(
104 {'availability_zone': 'myaz',
105 'local-hostname': 'test-subclass-hostname',
106 'region': 'myregion'},
107 datasource.metadata)
108 self.assertEqual('userdata_raw', datasource.userdata_raw)
109 self.assertEqual('vendordata_raw', datasource.vendordata_raw)
110
111 def test_get_data_write_json_instance_data(self):
112 """get_data writes INSTANCE_JSON_FILE to run_dir as readonly root."""
113 tmp = self.tmp_dir()
114 datasource = DataSourceTestSubclassNet(
115 self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
116 datasource.get_data()
117 json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
118 content = util.load_file(json_file)
119 expected = {
120 'base64-encoded-keys': [],
121 'v1': {
122 'availability-zone': 'myaz',
123 'cloud-name': 'subclasscloudname',
124 'instance-id': 'iid-datasource',
125 'local-hostname': 'test-subclass-hostname',
126 'region': 'myregion'},
127 'ds': {
128 'meta-data': {'availability_zone': 'myaz',
129 'local-hostname': 'test-subclass-hostname',
130 'region': 'myregion'},
131 'user-data': 'userdata_raw',
132 'vendor-data': 'vendordata_raw'}}
133 self.assertEqual(expected, util.load_json(content))
134 file_stat = os.stat(json_file)
135 self.assertEqual(0o600, stat.S_IMODE(file_stat.st_mode))
136
137 def test_get_data_handles_redacted_unserializable_content(self):
138 """get_data warns unserializable content in INSTANCE_JSON_FILE."""
139 tmp = self.tmp_dir()
140 datasource = DataSourceTestSubclassNet(
141 self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
142 custom_userdata={'key1': 'val1', 'key2': {'key2.1': self.paths}})
143 self.assertTrue(datasource.get_data())
144 json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
145 content = util.load_file(json_file)
146 expected_userdata = {
147 'key1': 'val1',
148 'key2': {
149 'key2.1': "Warning: redacted unserializable type <class"
150 " 'cloudinit.helpers.Paths'>"}}
151 instance_json = util.load_json(content)
152 self.assertEqual(
153 expected_userdata, instance_json['ds']['user-data'])
154
155 @skipIf(not six.PY3, "json serialization on <= py2.7 handles bytes")
156 def test_get_data_base64encodes_unserializable_bytes(self):
157 """On py3, get_data base64encodes any unserializable content."""
158 tmp = self.tmp_dir()
159 datasource = DataSourceTestSubclassNet(
160 self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
161 custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}})
162 self.assertTrue(datasource.get_data())
163 json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
164 content = util.load_file(json_file)
165 instance_json = util.load_json(content)
166 self.assertEqual(
167 ['ds/user-data/key2/key2.1'],
168 instance_json['base64-encoded-keys'])
169 self.assertEqual(
170 {'key1': 'val1', 'key2': {'key2.1': 'EjM='}},
171 instance_json['ds']['user-data'])
172
173 @skipIf(not six.PY2, "json serialization on <= py2.7 handles bytes")
174 def test_get_data_handles_bytes_values(self):
175 """On py2 get_data handles bytes values without having to b64encode."""
176 tmp = self.tmp_dir()
177 datasource = DataSourceTestSubclassNet(
178 self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
179 custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}})
180 self.assertTrue(datasource.get_data())
181 json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
182 content = util.load_file(json_file)
183 instance_json = util.load_json(content)
184 self.assertEqual([], instance_json['base64-encoded-keys'])
185 self.assertEqual(
186 {'key1': 'val1', 'key2': {'key2.1': '\x123'}},
187 instance_json['ds']['user-data'])
188
189 @skipIf(not six.PY2, "Only python2 hits UnicodeDecodeErrors on non-utf8")
190 def test_non_utf8_encoding_logs_warning(self):
191 """When non-utf-8 values exist in py2 instance-data is not written."""
192 tmp = self.tmp_dir()
193 datasource = DataSourceTestSubclassNet(
194 self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
195 custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'ab\xaadef'}})
196 self.assertTrue(datasource.get_data())
197 json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
198 self.assertFalse(os.path.exists(json_file))
199 self.assertIn(
200 "WARNING: Error persisting instance-data.json: 'utf8' codec can't"
201 " decode byte 0xaa in position 2: invalid start byte",
202 self.logs.getvalue())
diff --git a/cloudinit/temp_utils.py b/cloudinit/temp_utils.py
index 5d7adf7..c98a1b5 100644
--- a/cloudinit/temp_utils.py
+++ b/cloudinit/temp_utils.py
@@ -28,13 +28,18 @@ def _tempfile_dir_arg(odir=None, needs_exe=False):
28 if odir is not None:28 if odir is not None:
29 return odir29 return odir
3030
31 if needs_exe:
32 tdir = _EXE_ROOT_TMPDIR
33 if not os.path.isdir(tdir):
34 os.makedirs(tdir)
35 os.chmod(tdir, 0o1777)
36 return tdir
37
31 global _TMPDIR38 global _TMPDIR
32 if _TMPDIR:39 if _TMPDIR:
33 return _TMPDIR40 return _TMPDIR
3441
35 if needs_exe:42 if os.getuid() == 0:
36 tdir = _EXE_ROOT_TMPDIR
37 elif os.getuid() == 0:
38 tdir = _ROOT_TMPDIR43 tdir = _ROOT_TMPDIR
39 else:44 else:
40 tdir = os.environ.get('TMPDIR', '/tmp')45 tdir = os.environ.get('TMPDIR', '/tmp')
diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py
index 6f88a5b..0080c72 100644
--- a/cloudinit/tests/helpers.py
+++ b/cloudinit/tests/helpers.py
@@ -3,7 +3,6 @@
3from __future__ import print_function3from __future__ import print_function
44
5import functools5import functools
6import json
7import logging6import logging
8import os7import os
9import shutil8import shutil
@@ -20,6 +19,11 @@ try:
20except ImportError:19except ImportError:
21 from contextlib2 import ExitStack20 from contextlib2 import ExitStack
2221
22try:
23 from configparser import ConfigParser
24except ImportError:
25 from ConfigParser import ConfigParser
26
23from cloudinit import helpers as ch27from cloudinit import helpers as ch
24from cloudinit import util28from cloudinit import util
2529
@@ -114,6 +118,16 @@ class TestCase(unittest2.TestCase):
114 self.addCleanup(m.stop)118 self.addCleanup(m.stop)
115 setattr(self, attr, p)119 setattr(self, attr, p)
116120
121 # prefer python3 read_file over readfp but allow fallback
122 def parse_and_read(self, contents):
123 parser = ConfigParser()
124 if hasattr(parser, 'read_file'):
125 parser.read_file(contents)
126 elif hasattr(parser, 'readfp'):
127 # pylint: disable=W1505
128 parser.readfp(contents)
129 return parser
130
117131
118class CiTestCase(TestCase):132class CiTestCase(TestCase):
119 """This is the preferred test case base class unless user133 """This is the preferred test case base class unless user
@@ -159,6 +173,18 @@ class CiTestCase(TestCase):
159 dir = self.tmp_dir()173 dir = self.tmp_dir()
160 return os.path.normpath(os.path.abspath(os.path.join(dir, path)))174 return os.path.normpath(os.path.abspath(os.path.join(dir, path)))
161175
176 def assertRaisesCodeEqual(self, expected, found):
177 """Handle centos6 having different context manager for assertRaises.
178 with assertRaises(Exception) as e:
179 raise Exception("BOO")
180
181 centos6 will have e.exception as an integer.
 182 anything newer will have it as something with a '.code'"""
183 if isinstance(found, int):
184 self.assertEqual(expected, found)
185 else:
186 self.assertEqual(expected, found.code)
187
162188
163class ResourceUsingTestCase(CiTestCase):189class ResourceUsingTestCase(CiTestCase):
164190
@@ -337,12 +363,6 @@ def dir2dict(startdir, prefix=None):
337 return flist363 return flist
338364
339365
340def json_dumps(data):
341 # print data in nicely formatted json.
342 return json.dumps(data, indent=1, sort_keys=True,
343 separators=(',', ': '))
344
345
346def wrap_and_call(prefix, mocks, func, *args, **kwargs):366def wrap_and_call(prefix, mocks, func, *args, **kwargs):
347 """367 """
348 call func(args, **kwargs) with mocks applied, then unapplies mocks368 call func(args, **kwargs) with mocks applied, then unapplies mocks
@@ -402,4 +422,12 @@ if not hasattr(mock.Mock, 'assert_not_called'):
402 mock.Mock.assert_not_called = __mock_assert_not_called422 mock.Mock.assert_not_called = __mock_assert_not_called
403423
404424
425# older unittest2.TestCase (centos6) do not have assertRaisesRegex
426# And setting assertRaisesRegex to assertRaisesRegexp causes
427# https://github.com/PyCQA/pylint/issues/1653 . So the workaround.
428if not hasattr(unittest2.TestCase, 'assertRaisesRegex'):
429 def _tricky(*args, **kwargs):
430 return unittest2.TestCase.assertRaisesRegexp
431 unittest2.TestCase.assertRaisesRegex = _tricky
432
405# vi: ts=4 expandtab433# vi: ts=4 expandtab
diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py
406new file mode 100644434new file mode 100644
index 0000000..ba6bf69
--- /dev/null
+++ b/cloudinit/tests/test_util.py
@@ -0,0 +1,46 @@
1# This file is part of cloud-init. See LICENSE file for license information.
2
3"""Tests for cloudinit.util"""
4
5import logging
6
7import cloudinit.util as util
8
9from cloudinit.tests.helpers import CiTestCase, mock
10
11LOG = logging.getLogger(__name__)
12
13MOUNT_INFO = [
14 '68 0 8:3 / / ro,relatime shared:1 - btrfs /dev/sda1 ro,attr2,inode64',
15 '153 68 254:0 / /home rw,relatime shared:101 - xfs /dev/sda2 rw,attr2'
16]
17
18
19class TestUtil(CiTestCase):
20
21 def test_parse_mount_info_no_opts_no_arg(self):
22 result = util.parse_mount_info('/home', MOUNT_INFO, LOG)
23 self.assertEqual(('/dev/sda2', 'xfs', '/home'), result)
24
25 def test_parse_mount_info_no_opts_arg(self):
26 result = util.parse_mount_info('/home', MOUNT_INFO, LOG, False)
27 self.assertEqual(('/dev/sda2', 'xfs', '/home'), result)
28
29 def test_parse_mount_info_with_opts(self):
30 result = util.parse_mount_info('/', MOUNT_INFO, LOG, True)
31 self.assertEqual(
32 ('/dev/sda1', 'btrfs', '/', 'ro,relatime'),
33 result
34 )
35
36 @mock.patch('cloudinit.util.get_mount_info')
37 def test_mount_is_rw(self, m_mount_info):
38 m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'rw,relatime')
39 is_rw = util.mount_is_read_write('/')
40 self.assertEqual(is_rw, True)
41
42 @mock.patch('cloudinit.util.get_mount_info')
43 def test_mount_is_ro(self, m_mount_info):
44 m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'ro,relatime')
45 is_rw = util.mount_is_read_write('/')
46 self.assertEqual(is_rw, False)
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 0e0f5b4..0a5be0b 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -273,7 +273,7 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
273273
274def wait_for_url(urls, max_wait=None, timeout=None,274def wait_for_url(urls, max_wait=None, timeout=None,
275 status_cb=None, headers_cb=None, sleep_time=1,275 status_cb=None, headers_cb=None, sleep_time=1,
276 exception_cb=None):276 exception_cb=None, sleep_time_cb=None):
277 """277 """
278 urls: a list of urls to try278 urls: a list of urls to try
279 max_wait: roughly the maximum time to wait before giving up279 max_wait: roughly the maximum time to wait before giving up
@@ -286,6 +286,8 @@ def wait_for_url(urls, max_wait=None, timeout=None,
286 for request.286 for request.
287 exception_cb: call method with 2 arguments 'msg' (per status_cb) and287 exception_cb: call method with 2 arguments 'msg' (per status_cb) and
288 'exception', the exception that occurred.288 'exception', the exception that occurred.
289 sleep_time_cb: call method with 2 arguments (response, loop_n) that
290 generates the next sleep time.
289291
290 the idea of this routine is to wait for the EC2 metdata service to292 the idea of this routine is to wait for the EC2 metdata service to
291 come up. On both Eucalyptus and EC2 we have seen the case where293 come up. On both Eucalyptus and EC2 we have seen the case where
@@ -301,6 +303,8 @@ def wait_for_url(urls, max_wait=None, timeout=None,
301 service but is not going to find one. It is possible that the instance303 service but is not going to find one. It is possible that the instance
302 data host (169.254.169.254) may be firewalled off Entirely for a sytem,304 data host (169.254.169.254) may be firewalled off Entirely for a sytem,
303 meaning that the connection will block forever unless a timeout is set.305 meaning that the connection will block forever unless a timeout is set.
306
307 A value of None for max_wait will retry indefinitely.
304 """308 """
305 start_time = time.time()309 start_time = time.time()
306310
@@ -311,18 +315,24 @@ def wait_for_url(urls, max_wait=None, timeout=None,
311 status_cb = log_status_cb315 status_cb = log_status_cb
312316
313 def timeup(max_wait, start_time):317 def timeup(max_wait, start_time):
314 return ((max_wait <= 0 or max_wait is None) or318 if (max_wait is None):
315 (time.time() - start_time > max_wait))319 return False
320 return ((max_wait <= 0) or (time.time() - start_time > max_wait))
316321
317 loop_n = 0322 loop_n = 0
323 response = None
318 while True:324 while True:
319 sleep_time = int(loop_n / 5) + 1325 if sleep_time_cb is not None:
326 sleep_time = sleep_time_cb(response, loop_n)
327 else:
328 sleep_time = int(loop_n / 5) + 1
320 for url in urls:329 for url in urls:
321 now = time.time()330 now = time.time()
322 if loop_n != 0:331 if loop_n != 0:
323 if timeup(max_wait, start_time):332 if timeup(max_wait, start_time):
324 break333 break
325 if timeout and (now + timeout > (start_time + max_wait)):334 if (max_wait is not None and
335 timeout and (now + timeout > (start_time + max_wait))):
326 # shorten timeout to not run way over max_time336 # shorten timeout to not run way over max_time
327 timeout = int((start_time + max_wait) - now)337 timeout = int((start_time + max_wait) - now)
328338
@@ -354,10 +364,11 @@ def wait_for_url(urls, max_wait=None, timeout=None,
354 url_exc = e364 url_exc = e
355365
356 time_taken = int(time.time() - start_time)366 time_taken = int(time.time() - start_time)
357 status_msg = "Calling '%s' failed [%s/%ss]: %s" % (url,367 max_wait_str = "%ss" % max_wait if max_wait else "unlimited"
358 time_taken,368 status_msg = "Calling '%s' failed [%s/%s]: %s" % (url,
359 max_wait,369 time_taken,
360 reason)370 max_wait_str,
371 reason)
361 status_cb(status_msg)372 status_cb(status_msg)
362 if exception_cb:373 if exception_cb:
363 # This can be used to alter the headers that will be sent374 # This can be used to alter the headers that will be sent
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 6c014ba..338fb97 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -253,12 +253,18 @@ class ProcessExecutionError(IOError):
253 self.exit_code = exit_code253 self.exit_code = exit_code
254254
255 if not stderr:255 if not stderr:
256 self.stderr = self.empty_attr256 if stderr is None:
257 self.stderr = self.empty_attr
258 else:
259 self.stderr = stderr
257 else:260 else:
258 self.stderr = self._indent_text(stderr)261 self.stderr = self._indent_text(stderr)
259262
260 if not stdout:263 if not stdout:
261 self.stdout = self.empty_attr264 if stdout is None:
265 self.stdout = self.empty_attr
266 else:
267 self.stdout = stdout
262 else:268 else:
263 self.stdout = self._indent_text(stdout)269 self.stdout = self._indent_text(stdout)
264270
@@ -533,15 +539,6 @@ def multi_log(text, console=True, stderr=True,
533 log.log(log_level, text)539 log.log(log_level, text)
534540
535541
536def load_json(text, root_types=(dict,)):
537 decoded = json.loads(decode_binary(text))
538 if not isinstance(decoded, tuple(root_types)):
539 expected_types = ", ".join([str(t) for t in root_types])
540 raise TypeError("(%s) root types expected, got %s instead"
541 % (expected_types, type(decoded)))
542 return decoded
543
544
545def is_ipv4(instr):542def is_ipv4(instr):
546 """determine if input string is a ipv4 address. return boolean."""543 """determine if input string is a ipv4 address. return boolean."""
547 toks = instr.split('.')544 toks = instr.split('.')
@@ -900,17 +897,17 @@ def load_yaml(blob, default=None, allowed=(dict,)):
900 "of length %s with allowed root types %s",897 "of length %s with allowed root types %s",
901 len(blob), allowed)898 len(blob), allowed)
902 converted = safeyaml.load(blob)899 converted = safeyaml.load(blob)
903 if not isinstance(converted, allowed):900 if converted is None:
901 LOG.debug("loaded blob returned None, returning default.")
902 converted = default
903 elif not isinstance(converted, allowed):
904 # Yes this will just be caught, but thats ok for now...904 # Yes this will just be caught, but thats ok for now...
905 raise TypeError(("Yaml load allows %s root types,"905 raise TypeError(("Yaml load allows %s root types,"
906 " but got %s instead") %906 " but got %s instead") %
907 (allowed, type_utils.obj_name(converted)))907 (allowed, type_utils.obj_name(converted)))
908 loaded = converted908 loaded = converted
909 except (yaml.YAMLError, TypeError, ValueError):909 except (yaml.YAMLError, TypeError, ValueError):
910 if len(blob) == 0:910 logexc(LOG, "Failed loading yaml blob")
911 LOG.debug("load_yaml given empty string, returning default")
912 else:
913 logexc(LOG, "Failed loading yaml blob")
914 return loaded911 return loaded
915912
916913
@@ -1398,6 +1395,32 @@ def get_output_cfg(cfg, mode):
1398 return ret1395 return ret
13991396
14001397
1398def get_config_logfiles(cfg):
1399 """Return a list of log file paths from the configuration dictionary.
1400
1401 @param cfg: The cloud-init merged configuration dictionary.
1402 """
1403 logs = []
1404 if not cfg or not isinstance(cfg, dict):
1405 return logs
1406 default_log = cfg.get('def_log_file')
1407 if default_log:
1408 logs.append(default_log)
1409 for fmt in get_output_cfg(cfg, None):
1410 if not fmt:
1411 continue
1412 match = re.match('(?P<type>\||>+)\s*(?P<target>.*)', fmt)
1413 if not match:
1414 continue
1415 target = match.group('target')
1416 parts = target.split()
1417 if len(parts) == 1:
1418 logs.append(target)
1419 elif ['tee', '-a'] == parts[:2]:
1420 logs.append(parts[2])
1421 return list(set(logs))
1422
1423
1401def logexc(log, msg, *args):1424def logexc(log, msg, *args):
1402 # Setting this here allows this to change1425 # Setting this here allows this to change
1403 # levels easily (not always error level)1426 # levels easily (not always error level)
@@ -1454,7 +1477,31 @@ def ensure_dirs(dirlist, mode=0o755):
1454 ensure_dir(d, mode)1477 ensure_dir(d, mode)
14551478
14561479
1480def load_json(text, root_types=(dict,)):
1481 decoded = json.loads(decode_binary(text))
1482 if not isinstance(decoded, tuple(root_types)):
1483 expected_types = ", ".join([str(t) for t in root_types])
1484 raise TypeError("(%s) root types expected, got %s instead"
1485 % (expected_types, type(decoded)))
1486 return decoded
1487
1488
1489def json_serialize_default(_obj):
1490 """Handler for types which aren't json serializable."""
1491 try:
1492 return 'ci-b64:{0}'.format(b64e(_obj))
1493 except AttributeError:
1494 return 'Warning: redacted unserializable type {0}'.format(type(_obj))
1495
1496
1497def json_dumps(data):
1498 """Return data in nicely formatted json."""
1499 return json.dumps(data, indent=1, sort_keys=True,
1500 separators=(',', ': '), default=json_serialize_default)
1501
1502
1457def yaml_dumps(obj, explicit_start=True, explicit_end=True):1503def yaml_dumps(obj, explicit_start=True, explicit_end=True):
1504 """Return data in nicely formatted yaml."""
1458 return yaml.safe_dump(obj,1505 return yaml.safe_dump(obj,
1459 line_break="\n",1506 line_break="\n",
1460 indent=4,1507 indent=4,
@@ -1540,6 +1587,10 @@ def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True):
1540 mtypes = list(mtype)1587 mtypes = list(mtype)
1541 elif mtype is None:1588 elif mtype is None:
1542 mtypes = None1589 mtypes = None
1590 else:
1591 raise TypeError(
1592 'Unsupported type provided for mtype parameter: {_type}'.format(
1593 _type=type(mtype)))
15431594
1544 # clean up 'mtype' input a bit based on platform.1595 # clean up 'mtype' input a bit based on platform.
1545 platsys = platform.system().lower()1596 platsys = platform.system().lower()
@@ -1788,58 +1839,60 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
1788 env = env.copy()1839 env = env.copy()
1789 env.update(update_env)1840 env.update(update_env)
17901841
1791 try:1842 if target_path(target) != "/":
1792 if target_path(target) != "/":1843 args = ['chroot', target] + list(args)
1793 args = ['chroot', target] + list(args)
17941844
1795 if not logstring:1845 if not logstring:
1796 LOG.debug(("Running command %s with allowed return codes %s"1846 LOG.debug(("Running command %s with allowed return codes %s"
1797 " (shell=%s, capture=%s)"), args, rcs, shell, capture)1847 " (shell=%s, capture=%s)"), args, rcs, shell, capture)
1798 else:1848 else:
1799 LOG.debug(("Running hidden command to protect sensitive "1849 LOG.debug(("Running hidden command to protect sensitive "
1800 "input/output logstring: %s"), logstring)1850 "input/output logstring: %s"), logstring)
18011851
1802 stdin = None1852 stdin = None
1803 stdout = None1853 stdout = None
1804 stderr = None1854 stderr = None
1805 if capture:1855 if capture:
1806 stdout = subprocess.PIPE1856 stdout = subprocess.PIPE
1807 stderr = subprocess.PIPE1857 stderr = subprocess.PIPE
1808 if data is None:1858 if data is None:
1809 # using devnull assures any reads get null, rather1859 # using devnull assures any reads get null, rather
1810 # than possibly waiting on input.1860 # than possibly waiting on input.
1811 devnull_fp = open(os.devnull)1861 devnull_fp = open(os.devnull)
1812 stdin = devnull_fp1862 stdin = devnull_fp
1813 else:1863 else:
1814 stdin = subprocess.PIPE1864 stdin = subprocess.PIPE
1815 if not isinstance(data, bytes):1865 if not isinstance(data, bytes):
1816 data = data.encode()1866 data = data.encode()
18171867
1868 try:
1818 sp = subprocess.Popen(args, stdout=stdout,1869 sp = subprocess.Popen(args, stdout=stdout,
1819 stderr=stderr, stdin=stdin,1870 stderr=stderr, stdin=stdin,
1820 env=env, shell=shell)1871 env=env, shell=shell)
1821 (out, err) = sp.communicate(data)1872 (out, err) = sp.communicate(data)
1822
1823 # Just ensure blank instead of none.
1824 if not out and capture:
1825 out = b''
1826 if not err and capture:
1827 err = b''
1828 if decode:
1829 def ldecode(data, m='utf-8'):
1830 if not isinstance(data, bytes):
1831 return data
1832 return data.decode(m, decode)
1833
1834 out = ldecode(out)
1835 err = ldecode(err)
1836 except OSError as e:1873 except OSError as e:
1837 raise ProcessExecutionError(cmd=args, reason=e,1874 raise ProcessExecutionError(
1838 errno=e.errno)1875 cmd=args, reason=e, errno=e.errno,
1876 stdout="-" if decode else b"-",
1877 stderr="-" if decode else b"-")
1839 finally:1878 finally:
1840 if devnull_fp:1879 if devnull_fp:
1841 devnull_fp.close()1880 devnull_fp.close()
18421881
1882 # Just ensure blank instead of none.
1883 if not out and capture:
1884 out = b''
1885 if not err and capture:
1886 err = b''
1887 if decode:
1888 def ldecode(data, m='utf-8'):
1889 if not isinstance(data, bytes):
1890 return data
1891 return data.decode(m, decode)
1892
1893 out = ldecode(out)
1894 err = ldecode(err)
1895
1843 rc = sp.returncode1896 rc = sp.returncode
1844 if rc not in rcs:1897 if rc not in rcs:
1845 raise ProcessExecutionError(stdout=out, stderr=err,1898 raise ProcessExecutionError(stdout=out, stderr=err,
@@ -2010,7 +2063,7 @@ def expand_package_list(version_fmt, pkgs):
2010 return pkglist2063 return pkglist
20112064
20122065
2013def parse_mount_info(path, mountinfo_lines, log=LOG):2066def parse_mount_info(path, mountinfo_lines, log=LOG, get_mnt_opts=False):
2014 """Return the mount information for PATH given the lines from2067 """Return the mount information for PATH given the lines from
2015 /proc/$$/mountinfo."""2068 /proc/$$/mountinfo."""
20162069
@@ -2072,11 +2125,16 @@ def parse_mount_info(path, mountinfo_lines, log=LOG):
20722125
2073 match_mount_point = mount_point2126 match_mount_point = mount_point
2074 match_mount_point_elements = mount_point_elements2127 match_mount_point_elements = mount_point_elements
2128 mount_options = parts[5]
20752129
2076 if devpth and fs_type and match_mount_point:2130 if get_mnt_opts:
2077 return (devpth, fs_type, match_mount_point)2131 if devpth and fs_type and match_mount_point and mount_options:
2132 return (devpth, fs_type, match_mount_point, mount_options)
2078 else:2133 else:
2079 return None2134 if devpth and fs_type and match_mount_point:
2135 return (devpth, fs_type, match_mount_point)
2136
2137 return None
20802138
20812139
2082def parse_mtab(path):2140def parse_mtab(path):
@@ -2146,7 +2204,7 @@ def parse_mount(path):
2146 return None2204 return None
21472205
21482206
2149def get_mount_info(path, log=LOG):2207def get_mount_info(path, log=LOG, get_mnt_opts=False):
2150 # Use /proc/$$/mountinfo to find the device where path is mounted.2208 # Use /proc/$$/mountinfo to find the device where path is mounted.
2151 # This is done because with a btrfs filesystem using os.stat(path)2209 # This is done because with a btrfs filesystem using os.stat(path)
2152 # does not return the ID of the device.2210 # does not return the ID of the device.
@@ -2178,7 +2236,7 @@ def get_mount_info(path, log=LOG):
2178 mountinfo_path = '/proc/%s/mountinfo' % os.getpid()2236 mountinfo_path = '/proc/%s/mountinfo' % os.getpid()
2179 if os.path.exists(mountinfo_path):2237 if os.path.exists(mountinfo_path):
2180 lines = load_file(mountinfo_path).splitlines()2238 lines = load_file(mountinfo_path).splitlines()
2181 return parse_mount_info(path, lines, log)2239 return parse_mount_info(path, lines, log, get_mnt_opts)
2182 elif os.path.exists("/etc/mtab"):2240 elif os.path.exists("/etc/mtab"):
2183 return parse_mtab(path)2241 return parse_mtab(path)
2184 else:2242 else:
@@ -2286,7 +2344,8 @@ def pathprefix2dict(base, required=None, optional=None, delim=os.path.sep):
2286 missing.append(f)2344 missing.append(f)
22872345
2288 if len(missing):2346 if len(missing):
2289 raise ValueError("Missing required files: %s", ','.join(missing))2347 raise ValueError(
2348 'Missing required files: {files}'.format(files=','.join(missing)))
22902349
2291 return ret2350 return ret
22922351
@@ -2563,4 +2622,10 @@ def wait_for_files(flist, maxwait, naplen=.5, log_pre=""):
2563 return need2622 return need
25642623
25652624
def mount_is_read_write(mount_point):
    """Return True when the given mount point is mounted read-write.

    Relies on get_mount_info(get_mnt_opts=True) returning the mount
    options string as its last element; in /proc/<pid>/mountinfo the
    first option is always 'rw' or 'ro'.
    """
    mount_info = get_mount_info(mount_point, get_mnt_opts=True)
    first_option = mount_info[-1].split(',')[0]
    return first_option == 'rw'
2630
2566# vi: ts=4 expandtab2631# vi: ts=4 expandtab
diff --git a/cloudinit/version.py b/cloudinit/version.py
index 3255f39..be6262d 100644
--- a/cloudinit/version.py
+++ b/cloudinit/version.py
@@ -4,7 +4,7 @@
4#4#
5# This file is part of cloud-init. See LICENSE file for license information.5# This file is part of cloud-init. See LICENSE file for license information.
66
7__VERSION__ = "17.1"7__VERSION__ = "17.2"
88
9FEATURES = [9FEATURES = [
10 # supports network config version 110 # supports network config version 1
diff --git a/debian/changelog b/debian/changelog
index 03308d7..474c9ed 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,10 +1,62 @@
1cloud-init (17.1-46-g7acc9e68-0ubuntu1~16.04.2) UNRELEASED; urgency=medium1cloud-init (17.2-30-gf7deaf15-0ubuntu1~16.04.1) xenial-proposed; urgency=medium
22
3 * debian/patches/ds-identify-behavior-xenial.patch: refresh patch.3 * debian/patches/ds-identify-behavior-xenial.patch: refresh patch.
4 * debian/grub-legacy-ec2.install: install post(inst|rm) files correctly.4 * debian/grub-legacy-ec2.install: install post(inst|rm) files correctly.
5 [Simon Deziel] (LP: #1581416)5 [Simon Deziel] (LP: #1581416)
66 * New upstream snapshot (LP: #1747059)
7 -- Scott Moser <smoser@ubuntu.com> Tue, 12 Dec 2017 14:29:46 -05007 - docs: Update RTD content for cloud-init subcommands.
8 - OVF: Extend well-known labels to include OVFENV.
9 - Fix potential cases of uninitialized variables.
10 - tests: Collect script output as binary, collect systemd journal, fix lxd.
11 - HACKING.rst: mention setting user name and email via git config.
12 - Azure VM Preprovisioning support. [Douglas Jordan]
13 - tools/read-version: Fix read-version when in a git worktree.
14 - docs: Fix typos in docs and one debug message. [Florian Grignon]
15 - btrfs: support resizing if root is mounted ro.
16 [Robert Schweikert]
17 - OpenNebula: Improve network configuration support.
18 [Akihiko Ota]
19 - tests: Fix EC2 Platform to return console output as bytes.
20 - tests: Fix attempted use of /run in a test case.
21 - GCE: Improvements and changes to ssh key behavior for default user.
22 [Max Illfelder]
23 - subp: make ProcessExecutionError have expected types in stderr, stdout.
24 - tests: when querying ntp server, do not do dns resolution.
25 - Recognize uppercase vfat disk labels [James Penick]
26 - tests: remove zesty as supported OS to test
27 - Do not log warning on config files that represent None.
28 - tests: Use git hash pip dependency format for pylxd.
29 - tests: add integration requirements text file
30 - MAAS: add check_instance_id based off oauth tokens.
31 - tests: update apt sources list test
32 - tests: clean up image properties
33 - tests: rename test ssh keys to avoid appearance of leaking private keys.
34 - tests: Enable AWS EC2 Integration Testing
35 - cli: cloud-init clean handles symlinks
36 - SUSE: Add a basic test of network config rendering. [Robert Schweikert]
37 - Azure: Only bounce network when necessary.
38 - lint: Fix lints seen by pylint version 1.8.1.
39 - cli: Fix error in cloud-init modules --mode=init.
40 - release 17.2
41 - ds-identify: failure in NoCloud due to unset variable usage.
42 - tests: fix collect_console when not implemented
43 - ec2: Use instance-identity doc for region and instance-id
44 [Andrew Jorgensen]
45 - tests: remove leaked tmp files in config drive tests.
46 - setup.py: Do not include rendered files in SOURCES.txt
47 - SUSE: remove delta in systemd local template for SUSE [Robert Schweikert]
48 - tests: move to using tox 1.7.5
49 - OVF: improve ds-identify to support finding OVF iso transport.
50 - VMware: Support for user provided pre and post-customization scripts
51 [Maitreyee Saikia]
52 - citest: In NoCloudKVM provide keys via metadata not userdata.
53 - pylint: Update pylint to 1.7.1, run on tests/ and tools and fix
54 complaints.
55 - Datasources: Formalize DataSource get_data and related properties.
56 - cli: Add clean and status subcommands
57 - tests: consolidate platforms into specific dirs
58
59 -- Chad Smith <chad.smith@canonical.com> Fri, 02 Feb 2018 12:37:30 -0700
860
9cloud-init (17.1-46-g7acc9e68-0ubuntu1~16.04.1) xenial-proposed; urgency=medium61cloud-init (17.1-46-g7acc9e68-0ubuntu1~16.04.1) xenial-proposed; urgency=medium
1062
diff --git a/doc/rtd/topics/boot.rst b/doc/rtd/topics/boot.rst
index 859409a..f2976fd 100644
--- a/doc/rtd/topics/boot.rst
+++ b/doc/rtd/topics/boot.rst
@@ -1,3 +1,5 @@
1.. _boot_stages:
2
1***********3***********
2Boot Stages4Boot Stages
3***********5***********
@@ -74,7 +76,7 @@ Network
74 * **systemd service**: ``cloud-init.service``76 * **systemd service**: ``cloud-init.service``
75 * **runs**: After local stage and configured networking is up.77 * **runs**: After local stage and configured networking is up.
76 * **blocks**: As much of remaining boot as possible.78 * **blocks**: As much of remaining boot as possible.
77 * **modules**: ``init_modules``79 * **modules**: ``cloud_init_modules`` in **/etc/cloud/cloud.cfg**
7880
79This stage requires all configured networking to be online, as it will fully81This stage requires all configured networking to be online, as it will fully
80process any user-data that is found. Here, processing means:82process any user-data that is found. Here, processing means:
@@ -104,7 +106,7 @@ Config
104 * **systemd service**: ``cloud-config.service``106 * **systemd service**: ``cloud-config.service``
105 * **runs**: After network stage.107 * **runs**: After network stage.
106 * **blocks**: None.108 * **blocks**: None.
107 * **modules**: ``config_modules``109 * **modules**: ``cloud_config_modules`` in **/etc/cloud/cloud.cfg**
108110
109This stage runs config modules only. Modules that do not really have an111This stage runs config modules only. Modules that do not really have an
110effect on other stages of boot are run here.112effect on other stages of boot are run here.
@@ -115,7 +117,7 @@ Final
115 * **systemd service**: ``cloud-final.service``117 * **systemd service**: ``cloud-final.service``
116 * **runs**: As final part of boot (traditional "rc.local")118 * **runs**: As final part of boot (traditional "rc.local")
117 * **blocks**: None.119 * **blocks**: None.
118 * **modules**: ``final_modules``120 * **modules**: ``cloud_final_modules`` in **/etc/cloud/cloud.cfg**
119121
120This stage runs as late in boot as possible. Any scripts that a user is122This stage runs as late in boot as possible. Any scripts that a user is
121accustomed to running after logging into a system should run correctly here.123accustomed to running after logging into a system should run correctly here.
@@ -125,4 +127,9 @@ Things that run here include
125 * configuration management plugins (puppet, chef, salt-minion)127 * configuration management plugins (puppet, chef, salt-minion)
126 * user-scripts (including ``runcmd``).128 * user-scripts (including ``runcmd``).
127129
130For scripts external to cloud-init looking to wait until cloud-init
131finished, the ``cloud-init status`` subcommand can help block external
132scripts until cloud-init is done without having to write your own systemd
133units dependency chains. See :ref:`cli_status` for more info.
134
128.. vi: textwidth=78135.. vi: textwidth=78
diff --git a/doc/rtd/topics/capabilities.rst b/doc/rtd/topics/capabilities.rst
index 31eaba5..ae3a0c7 100644
--- a/doc/rtd/topics/capabilities.rst
+++ b/doc/rtd/topics/capabilities.rst
@@ -1,3 +1,5 @@
1.. _capabilities:
2
1************3************
2Capabilities4Capabilities
3************5************
@@ -39,17 +41,19 @@ Currently defined feature names include:
39 see :ref:`network_config_v2` documentation for examples.41 see :ref:`network_config_v2` documentation for examples.
4042
4143
42CLI Interface :44CLI Interface
45=============
4346
44``cloud-init features`` will print out each feature supported. If cloud-init47 The command line documentation is accessible on any cloud-init
45does not have the features subcommand, it also does not support any features48installed system:
46described in this document.
4749
48.. code-block:: bash50.. code-block:: bash
4951
50 % cloud-init --help52 % cloud-init --help
51 usage: cloud-init [-h] [--version] [--file FILES] [--debug] [--force]53 usage: cloud-init [-h] [--version] [--file FILES]
52 {init,modules,query,single,dhclient-hook,features} ...54 [--debug] [--force]
55 {init,modules,single,dhclient-hook,features,analyze,devel,collect-logs,clean,status}
56 ...
5357
54 optional arguments:58 optional arguments:
55 -h, --help show this help message and exit59 -h, --help show this help message and exit
@@ -61,7 +65,7 @@ described in this document.
61 your own risk)65 your own risk)
6266
63 Subcommands:67 Subcommands:
64 {init,modules,single,dhclient-hook,features,analyze,devel}68 {init,modules,single,dhclient-hook,features,analyze,devel,collect-logs,clean,status}
65 init initializes cloud-init and performs initial modules69 init initializes cloud-init and performs initial modules
66 modules activates modules using a given configuration key70 modules activates modules using a given configuration key
67 single run a single module71 single run a single module
@@ -69,11 +73,153 @@ described in this document.
69 features list defined features73 features list defined features
70 analyze Devel tool: Analyze cloud-init logs and data74 analyze Devel tool: Analyze cloud-init logs and data
71 devel Run development tools75 devel Run development tools
76 collect-logs Collect and tar all cloud-init debug info
77 clean Remove logs and artifacts so cloud-init can re-run.
78 status Report cloud-init status or wait on completion.
79
80CLI Subcommand details
81======================
82
83.. _cli_features:
84
85cloud-init features
86-------------------
87Print out each feature supported. If cloud-init does not have the
88features subcommand, it also does not support any features described in
89this document.
90
91.. code-block:: bash
7292
73 % cloud-init features93 % cloud-init features
74 NETWORK_CONFIG_V194 NETWORK_CONFIG_V1
75 NETWORK_CONFIG_V295 NETWORK_CONFIG_V2
7696
97.. _cli_status:
98
99cloud-init status
100-----------------
101Report whether cloud-init is running, done, disabled or errored. Exits
102non-zero if an error is detected in cloud-init.
103 * **--long**: Detailed status information.
104 * **--wait**: Block until cloud-init completes.
105
106.. code-block:: bash
107
108 % cloud-init status --long
109 status: done
110 time: Wed, 17 Jan 2018 20:41:59 +0000
111 detail:
112 DataSourceNoCloud [seed=/var/lib/cloud/seed/nocloud-net][dsmode=net]
113
114 # Cloud-init running still short versus long options
115 % cloud-init status
116 status: running
117 % cloud-init status --long
118 status: running
119 time: Fri, 26 Jan 2018 21:39:43 +0000
120 detail:
121 Running in stage: init-local
122
123.. _cli_collect_logs:
124
125cloud-init collect-logs
126-----------------------
127Collect and tar cloud-init generated logs, data files and system
128information for triage. This subcommand is integrated with apport.
129
130**Note**: Ubuntu users can file bugs with `ubuntu-bug cloud-init` to
131automatically attach these logs to a bug report.
132
133Logs collected are:
134
135 * /var/log/cloud-init*log
136 * /run/cloud-init
137 * cloud-init package version
138 * dmesg output
139 * journalctl output
140 * /var/lib/cloud/instance/user-data.txt
141
142.. _cli_analyze:
143
144cloud-init analyze
145------------------
146Get detailed reports of where cloud-init spends most of its time. See
147:ref:`boot_time_analysis` for more info.
148
149 * **blame** Report ordered by most costly operations.
150 * **dump** Machine-readable JSON dump of all cloud-init tracked events.
151 * **show** show time-ordered report of the cost of operations during each
152 boot stage.
153
154.. _cli_devel:
155
156cloud-init devel
157----------------
158Collection of development tools under active development. These tools will
159likely be promoted to top-level subcommands when stable.
160
161 * ``cloud-init devel schema``: A **#cloud-config** format and schema
162 validator. It accepts a cloud-config yaml file and annotates potential
163 schema errors locally without the need for deployment. Schema
164 validation is work in progress and supports a subset of cloud-config
165 modules.
166
167.. _cli_clean:
168
169cloud-init clean
170----------------
171Remove cloud-init artifacts from /var/lib/cloud and optionally reboot the
172machine so cloud-init re-runs all stages as it did on first boot.
173
174 * **--logs**: Optionally remove /var/log/cloud-init*log files.
175 * **--reboot**: Reboot the system after removing artifacts.
176
177.. _cli_init:
178
179cloud-init init
180---------------
181Generally run by OS init systems to execute cloud-init's stages
182*init* and *init-local*. See :ref:`boot_stages` for more info.
183Can be run on the commandline, but is generally gated to run only once
184due to semaphores in **/var/lib/cloud/instance/sem/** and
185**/var/lib/cloud/sem**.
186
187 * **--local**: Run *init-local* stage instead of *init*.
188
189.. _cli_modules:
190
191cloud-init modules
192------------------
193Generally run by OS init systems to execute *modules:config* and
194*modules:final* boot stages. This executes cloud config :ref:`modules`
195configured to run in the init, config and final stages. The modules are
196declared to run in various boot stages in the file
197**/etc/cloud/cloud.cfg** under keys **cloud_init_modules**,
198**cloud_config_modules** and **cloud_final_modules**. Can be run on the
199commandline, but each module is gated to run only once due to semaphores
200in ``/var/lib/cloud/``.
201
202 * **--mode (init|config|final)**: Run *modules:init*, *modules:config* or
203 *modules:final* cloud-init stages. See :ref:`boot_stages` for more info.
204
205.. _cli_single:
206
207cloud-init single
208-----------------
209Attempt to run a single named cloud config module. The following example
210re-runs the cc_set_hostname module ignoring the module default frequency
211of once-per-instance:
212
213 * **--name**: The cloud-config module name to run
214 * **--frequency**: Optionally override the declared module frequency
215 with one of (always|once-per-instance|once)
216
217.. code-block:: bash
218
219 % cloud-init single --name set_hostname --frequency always
220
221**Note**: Mileage may vary trying to re-run each cloud-config module, as
222some are not idempotent.
77223
78.. _Cloud-init: https://launchpad.net/cloud-init224.. _Cloud-init: https://launchpad.net/cloud-init
79.. vi: textwidth=78225.. vi: textwidth=78
diff --git a/doc/rtd/topics/debugging.rst b/doc/rtd/topics/debugging.rst
index 4e43dd5..c2b47ed 100644
--- a/doc/rtd/topics/debugging.rst
+++ b/doc/rtd/topics/debugging.rst
@@ -7,6 +7,7 @@ Overview
7This topic will discuss general approaches for test and debug of cloud-init on7This topic will discuss general approaches for test and debug of cloud-init on
8deployed instances.8deployed instances.
99
10.. _boot_time_analysis:
1011
11Boot Time Analysis - cloud-init analyze12Boot Time Analysis - cloud-init analyze
12======================================13======================================
diff --git a/doc/rtd/topics/modules.rst b/doc/rtd/topics/modules.rst
index cdb0f41..7b14675 100644
--- a/doc/rtd/topics/modules.rst
+++ b/doc/rtd/topics/modules.rst
@@ -1,3 +1,5 @@
1.. _modules:
2
1*******3*******
2Modules4Modules
3*******5*******
diff --git a/doc/rtd/topics/network-config-format-v1.rst b/doc/rtd/topics/network-config-format-v1.rst
index ce3a1bd..2f8ab54 100644
--- a/doc/rtd/topics/network-config-format-v1.rst
+++ b/doc/rtd/topics/network-config-format-v1.rst
@@ -349,7 +349,7 @@ For any network device (one of the Config Types) users can define a list of
349entries will create interface alias allowing a single interface to use349entries will create interface alias allowing a single interface to use
350different ip configurations.350different ip configurations.
351351
352Valid keys for for ``subnets`` include the following:352Valid keys for ``subnets`` include the following:
353353
354- ``type``: Specify the subnet type.354- ``type``: Specify the subnet type.
355- ``control``: Specify manual, auto or hotplug. Indicates how the interface355- ``control``: Specify manual, auto or hotplug. Indicates how the interface
diff --git a/doc/rtd/topics/tests.rst b/doc/rtd/topics/tests.rst
index d668e3f..bf04bb3 100644
--- a/doc/rtd/topics/tests.rst
+++ b/doc/rtd/topics/tests.rst
@@ -118,19 +118,19 @@ TreeRun and TreeCollect
118118
119If working on a cloud-init feature or resolving a bug, it may be useful to119If working on a cloud-init feature or resolving a bug, it may be useful to
120run the current copy of cloud-init in the integration testing environment.120run the current copy of cloud-init in the integration testing environment.
121The integration testing suite can automatically build a deb based on the 121The integration testing suite can automatically build a deb based on the
122current working tree of cloud-init and run the test suite using this deb.122current working tree of cloud-init and run the test suite using this deb.
123123
124The ``tree_run`` and ``tree_collect`` commands take the same arguments as124The ``tree_run`` and ``tree_collect`` commands take the same arguments as
125the ``run`` and ``collect`` commands. These commands will build a deb and 125the ``run`` and ``collect`` commands. These commands will build a deb and
126write it into a temporary file, then start the test suite and pass that deb 126write it into a temporary file, then start the test suite and pass that deb
127in. To build a deb only, and not run the test suite, the ``bddeb`` command127in. To build a deb only, and not run the test suite, the ``bddeb`` command
128can be used.128can be used.
129129
130Note that code in the cloud-init working tree that has not been committed130Note that code in the cloud-init working tree that has not been committed
131when the cloud-init deb is built will still be included. To build a131when the cloud-init deb is built will still be included. To build a
132cloud-init deb from or use the ``tree_run`` command using a copy of132cloud-init deb from or use the ``tree_run`` command using a copy of
133cloud-init located in a different directory, use the option ``--cloud-init 133cloud-init located in a different directory, use the option ``--cloud-init
134/path/to/cloud-init``.134/path/to/cloud-init``.
135135
136.. code-block:: bash136.. code-block:: bash
@@ -383,7 +383,7 @@ Development Checklist
383 * Valid unit tests validating output collected383 * Valid unit tests validating output collected
384 * Passes pylint & pep8 checks384 * Passes pylint & pep8 checks
385 * Placed in the appropriate sub-folder in the test cases directory385 * Placed in the appropriate sub-folder in the test cases directory
386* Tested by running the test: 386* Tested by running the test:
387387
388 .. code-block:: bash388 .. code-block:: bash
389389
@@ -392,6 +392,32 @@ Development Checklist
392 --test modules/your_test.yaml \392 --test modules/your_test.yaml \
393 [--deb <build of cloud-init>]393 [--deb <build of cloud-init>]
394394
395
396Platforms
397=========
398
399EC2
400---
401To run on the EC2 platform it is required that the user has an AWS credentials
402configuration file specifying his or her access keys and a default region.
403These configuration files are the standard that the AWS cli and other AWS
404tools utilize for interacting directly with AWS itself and are normally
405generated when running ``aws configure``:
406
407.. code-block:: bash
408
409 $ cat $HOME/.aws/credentials
410 [default]
411 aws_access_key_id = <KEY HERE>
412 aws_secret_access_key = <KEY HERE>
413
414.. code-block:: bash
415
416 $ cat $HOME/.aws/config
417 [default]
418 region = us-west-2
419
420
395Architecture421Architecture
396============422============
397423
@@ -455,7 +481,7 @@ replace the default. If the data is a dictionary then the value will be the
455result of merging that dictionary from the default config and that481result of merging that dictionary from the default config and that
456dictionary from the overrides.482dictionary from the overrides.
457483
458Merging is done using the function 484Merging is done using the function
459``tests.cloud_tests.config.merge_config``, which can be examined for more485``tests.cloud_tests.config.merge_config``, which can be examined for more
460detail on config merging behavior.486detail on config merging behavior.
461487
diff --git a/integration-requirements.txt b/integration-requirements.txt
462new file mode 100644488new file mode 100644
index 0000000..45baac6
--- /dev/null
+++ b/integration-requirements.txt
@@ -0,0 +1,20 @@
1# PyPI requirements for cloud-init integration testing
2# https://cloudinit.readthedocs.io/en/latest/topics/tests.html
3#
4# Note: Changes to this requirements may require updates to
5# the packages/pkg-deps.json file as well.
6#
7
8# ec2 backend
9boto3==1.5.9
10
11# ssh communication
12paramiko==2.4.0
13
14# lxd backend
15# 01/10/2018: enables use of lxd as snap support
16git+https://github.com/lxc/pylxd.git@0722955260a6557e6d2ffde1896bfe0707bbca27
17
18
19# finds latest image information
20bzr+lp:simplestreams
diff --git a/setup.py b/setup.py
index bf697d7..bc3f52a 100755
--- a/setup.py
+++ b/setup.py
@@ -18,11 +18,14 @@ import tempfile
1818
19import setuptools19import setuptools
20from setuptools.command.install import install20from setuptools.command.install import install
21from setuptools.command.egg_info import egg_info
2122
22from distutils.errors import DistutilsArgError23from distutils.errors import DistutilsArgError
2324
24import subprocess25import subprocess
2526
27RENDERED_TMPD_PREFIX = "RENDERED_TEMPD"
28
2629
27def is_f(p):30def is_f(p):
28 return os.path.isfile(p)31 return os.path.isfile(p)
@@ -107,7 +110,7 @@ def render_tmpl(template):
107 return template110 return template
108111
109 topdir = os.path.dirname(sys.argv[0])112 topdir = os.path.dirname(sys.argv[0])
110 tmpd = tempfile.mkdtemp(dir=topdir)113 tmpd = tempfile.mkdtemp(dir=topdir, prefix=RENDERED_TMPD_PREFIX)
111 atexit.register(shutil.rmtree, tmpd)114 atexit.register(shutil.rmtree, tmpd)
112 bname = os.path.basename(template).rstrip(tmpl_ext)115 bname = os.path.basename(template).rstrip(tmpl_ext)
113 fpath = os.path.join(tmpd, bname)116 fpath = os.path.join(tmpd, bname)
@@ -156,6 +159,25 @@ elif os.path.isfile('/etc/redhat-release'):
156 USR_LIB_EXEC = "usr/libexec"159 USR_LIB_EXEC = "usr/libexec"
157160
158161
class MyEggInfo(egg_info):
    """egg_info command that keeps rendered template files out of SOURCES.txt."""

    def find_sources(self):
        ret = egg_info.find_sources(self)
        # Drop rendered-template tmpdir entries from the in-memory file list.
        self.filelist.exclude_pattern(RENDERED_TMPD_PREFIX + ".*",
                                      is_regex=True)
        # SOURCES.txt has already been written at this point, so it must be
        # rewritten without those entries as well.
        sources_txt = os.path.join(self.egg_info, "SOURCES.txt")
        if os.path.exists(sources_txt):
            with open(sources_txt) as fp:
                kept = [line for line in fp
                        if not line.startswith(RENDERED_TMPD_PREFIX)]
            with open(sources_txt, "w") as fp:
                fp.write(''.join(kept))
        return ret
179
180
159# TODO: Is there a better way to do this??181# TODO: Is there a better way to do this??
160class InitsysInstallData(install):182class InitsysInstallData(install):
161 init_system = None183 init_system = None
@@ -229,6 +251,7 @@ if os.uname()[0] != 'FreeBSD':
229# adding on the right init system configuration files251# adding on the right init system configuration files
230cmdclass = {252cmdclass = {
231 'install': InitsysInstallData,253 'install': InitsysInstallData,
254 'egg_info': MyEggInfo,
232}255}
233256
234requirements = read_requires()257requirements = read_requires()
diff --git a/systemd/cloud-init-local.service.tmpl b/systemd/cloud-init-local.service.tmpl
index bf6b296..ff9c644 100644
--- a/systemd/cloud-init-local.service.tmpl
+++ b/systemd/cloud-init-local.service.tmpl
@@ -13,12 +13,6 @@ Before=shutdown.target
13Before=sysinit.target13Before=sysinit.target
14Conflicts=shutdown.target14Conflicts=shutdown.target
15{% endif %}15{% endif %}
16{% if variant in ["suse"] %}
17# Other distros use Before=sysinit.target. There is not a clearly identified
18# reason for usage of basic.target instead.
19Before=basic.target
20Conflicts=shutdown.target
21{% endif %}
22RequiresMountsFor=/var/lib/cloud16RequiresMountsFor=/var/lib/cloud
2317
24[Service]18[Service]
diff --git a/tests/cloud_tests/__init__.py b/tests/cloud_tests/__init__.py
index 98c1d6c..dd43698 100644
--- a/tests/cloud_tests/__init__.py
+++ b/tests/cloud_tests/__init__.py
@@ -10,6 +10,12 @@ TESTCASES_DIR = os.path.join(BASE_DIR, 'testcases')
10TEST_CONF_DIR = os.path.join(BASE_DIR, 'testcases')10TEST_CONF_DIR = os.path.join(BASE_DIR, 'testcases')
11TREE_BASE = os.sep.join(BASE_DIR.split(os.sep)[:-2])11TREE_BASE = os.sep.join(BASE_DIR.split(os.sep)[:-2])
1212
13# This domain contains reverse lookups for hostnames that are used.
14# The primary reason is so sudo will return quickly when it attempts
15# to look up the hostname. i9n is just short for 'integration'.
16# see also bug 1730744 for why we had to do this.
17CI_DOMAIN = "i9n.cloud-init.io"
18
1319
14def _initialize_logging():20def _initialize_logging():
15 """Configure logging for cloud_tests."""21 """Configure logging for cloud_tests."""
diff --git a/tests/cloud_tests/bddeb.py b/tests/cloud_tests/bddeb.py
index fba8a0c..a6d5069 100644
--- a/tests/cloud_tests/bddeb.py
+++ b/tests/cloud_tests/bddeb.py
@@ -8,7 +8,7 @@ import tempfile
88
9from cloudinit import util as c_util9from cloudinit import util as c_util
10from tests.cloud_tests import (config, LOG)10from tests.cloud_tests import (config, LOG)
11from tests.cloud_tests import (platforms, images, snapshots, instances)11from tests.cloud_tests import platforms
12from tests.cloud_tests.stage import (PlatformComponent, run_stage, run_single)12from tests.cloud_tests.stage import (PlatformComponent, run_stage, run_single)
1313
14pre_reqs = ['devscripts', 'equivs', 'git', 'tar']14pre_reqs = ['devscripts', 'equivs', 'git', 'tar']
@@ -84,18 +84,18 @@ def setup_build(args):
84 # set up image84 # set up image
85 LOG.info('acquiring image for os: %s', args.build_os)85 LOG.info('acquiring image for os: %s', args.build_os)
86 img_conf = config.load_os_config(platform.platform_name, args.build_os)86 img_conf = config.load_os_config(platform.platform_name, args.build_os)
87 image_call = partial(images.get_image, platform, img_conf)87 image_call = partial(platforms.get_image, platform, img_conf)
88 with PlatformComponent(image_call) as image:88 with PlatformComponent(image_call) as image:
8989
90 # set up snapshot90 # set up snapshot
91 snapshot_call = partial(snapshots.get_snapshot, image)91 snapshot_call = partial(platforms.get_snapshot, image)
92 with PlatformComponent(snapshot_call) as snapshot:92 with PlatformComponent(snapshot_call) as snapshot:
9393
94 # create instance with cloud-config to set it up94 # create instance with cloud-config to set it up
95 LOG.info('creating instance to build deb in')95 LOG.info('creating instance to build deb in')
96 empty_cloud_config = "#cloud-config\n{}"96 empty_cloud_config = "#cloud-config\n{}"
97 instance_call = partial(97 instance_call = partial(
98 instances.get_instance, snapshot, empty_cloud_config,98 platforms.get_instance, snapshot, empty_cloud_config,
99 use_desc='build cloud-init deb')99 use_desc='build cloud-init deb')
100 with PlatformComponent(instance_call) as instance:100 with PlatformComponent(instance_call) as instance:
101101
diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py
index 71ee764..5ea88e5 100644
The diff has been truncated for viewing.

Subscribers

People subscribed via source and target branches