Merge ~chad.smith/cloud-init:ubuntu/xenial into cloud-init:ubuntu/xenial

Proposed by Chad Smith
Status: Merged
Merged at revision: d489374c5a9e7f81649f87c48401bf12e795a7e7
Proposed branch: ~chad.smith/cloud-init:ubuntu/xenial
Merge into: cloud-init:ubuntu/xenial
Diff against target: 15710 lines (+9573/-1295)
271 files modified
ChangeLog (+422/-0)
Makefile (+3/-3)
cloudinit/analyze/__init__.py (+0/-0)
cloudinit/analyze/__main__.py (+155/-0)
cloudinit/analyze/dump.py (+176/-0)
cloudinit/analyze/show.py (+207/-0)
cloudinit/analyze/tests/test_dump.py (+210/-0)
cloudinit/apport.py (+105/-0)
cloudinit/cmd/devel/__init__.py (+0/-0)
cloudinit/cmd/devel/logs.py (+101/-0)
cloudinit/cmd/devel/parser.py (+26/-0)
cloudinit/cmd/devel/tests/__init__.py (+0/-0)
cloudinit/cmd/devel/tests/test_logs.py (+120/-0)
cloudinit/cmd/main.py (+35/-34)
cloudinit/config/cc_bootcmd.py (+60/-30)
cloudinit/config/cc_chef.py (+33/-11)
cloudinit/config/cc_landscape.py (+2/-2)
cloudinit/config/cc_ntp.py (+57/-49)
cloudinit/config/cc_puppet.py (+18/-15)
cloudinit/config/cc_resizefs.py (+87/-70)
cloudinit/config/cc_resolv_conf.py (+1/-1)
cloudinit/config/cc_runcmd.py (+57/-27)
cloudinit/config/cc_snappy.py (+2/-2)
cloudinit/config/cc_ssh_authkey_fingerprints.py (+2/-2)
cloudinit/config/cc_zypper_add_repo.py (+218/-0)
cloudinit/config/schema.py (+181/-43)
cloudinit/distros/__init__.py (+8/-1)
cloudinit/distros/arch.py (+59/-31)
cloudinit/distros/debian.py (+71/-23)
cloudinit/distros/opensuse.py (+212/-0)
cloudinit/distros/sles.py (+5/-155)
cloudinit/helpers.py (+7/-7)
cloudinit/log.py (+5/-0)
cloudinit/net/__init__.py (+21/-30)
cloudinit/net/dhcp.py (+163/-0)
cloudinit/net/eni.py (+3/-0)
cloudinit/net/netplan.py (+12/-28)
cloudinit/net/network_state.py (+84/-18)
cloudinit/net/sysconfig.py (+5/-1)
cloudinit/net/tests/test_dhcp.py (+260/-0)
cloudinit/net/tests/test_init.py (+2/-2)
cloudinit/netinfo.py (+4/-4)
cloudinit/simpletable.py (+62/-0)
cloudinit/sources/DataSourceAliYun.py (+6/-3)
cloudinit/sources/DataSourceAltCloud.py (+2/-2)
cloudinit/sources/DataSourceAzure.py (+7/-3)
cloudinit/sources/DataSourceCloudStack.py (+37/-14)
cloudinit/sources/DataSourceEc2.py (+164/-22)
cloudinit/sources/DataSourceGCE.py (+126/-72)
cloudinit/sources/DataSourceOVF.py (+169/-51)
cloudinit/sources/__init__.py (+8/-1)
cloudinit/sources/helpers/azure.py (+16/-8)
cloudinit/sources/helpers/vmware/imc/config.py (+21/-3)
cloudinit/sources/helpers/vmware/imc/config_nic.py (+130/-71)
cloudinit/sources/helpers/vmware/imc/config_passwd.py (+67/-0)
cloudinit/sources/helpers/vmware/imc/guestcust_util.py (+7/-5)
cloudinit/stages.py (+20/-13)
cloudinit/temp_utils.py (+101/-0)
cloudinit/tests/__init__.py (+0/-0)
cloudinit/tests/helpers.py (+16/-2)
cloudinit/tests/test_simpletable.py (+100/-0)
cloudinit/tests/test_temp_utils.py (+101/-0)
cloudinit/tests/test_url_helper.py (+40/-0)
cloudinit/url_helper.py (+5/-1)
cloudinit/util.py (+41/-42)
cloudinit/version.py (+1/-1)
config/cloud.cfg.tmpl (+9/-5)
debian/changelog (+115/-2)
dev/null (+0/-2)
doc/examples/cloud-config-chef.txt (+4/-0)
doc/rtd/index.rst (+1/-0)
doc/rtd/topics/capabilities.rst (+40/-10)
doc/rtd/topics/datasources.rst (+1/-0)
doc/rtd/topics/datasources/gce.rst (+20/-0)
doc/rtd/topics/debugging.rst (+146/-0)
doc/rtd/topics/format.rst (+1/-0)
doc/rtd/topics/modules.rst (+0/-1)
packages/bddeb (+4/-4)
packages/debian/copyright (+10/-15)
packages/debian/dirs (+0/-1)
packages/debian/rules.in (+2/-1)
packages/pkg-deps.json (+0/-3)
packages/redhat/cloud-init.spec.in (+0/-6)
requirements.txt (+0/-3)
setup.py (+4/-4)
systemd/cloud-final.service.tmpl (+3/-1)
systemd/cloud-init-local.service.tmpl (+6/-0)
systemd/cloud-init.service.tmpl (+10/-0)
sysvinit/suse/cloud-config (+113/-0)
sysvinit/suse/cloud-final (+113/-0)
sysvinit/suse/cloud-init (+114/-0)
sysvinit/suse/cloud-init-local (+113/-0)
templates/hosts.opensuse.tmpl (+26/-0)
templates/hosts.suse.tmpl (+0/-3)
templates/sources.list.debian.tmpl (+8/-8)
templates/timesyncd.conf.tmpl (+8/-0)
tests/cloud_tests/__init__.py (+1/-1)
tests/cloud_tests/__main__.py (+4/-1)
tests/cloud_tests/args.py (+2/-2)
tests/cloud_tests/bddeb.py (+10/-9)
tests/cloud_tests/collect.py (+3/-0)
tests/cloud_tests/config.py (+1/-0)
tests/cloud_tests/images/nocloudkvm.py (+88/-0)
tests/cloud_tests/instances/base.py (+7/-5)
tests/cloud_tests/instances/lxd.py (+9/-1)
tests/cloud_tests/instances/nocloudkvm.py (+217/-0)
tests/cloud_tests/platforms.yaml (+4/-0)
tests/cloud_tests/platforms/__init__.py (+2/-0)
tests/cloud_tests/platforms/nocloudkvm.py (+90/-0)
tests/cloud_tests/releases.yaml (+18/-1)
tests/cloud_tests/setup_image.py (+24/-8)
tests/cloud_tests/snapshots/nocloudkvm.py (+74/-0)
tests/cloud_tests/testcases/bugs/README.md (+0/-0)
tests/cloud_tests/testcases/bugs/lp1511485.yaml (+0/-0)
tests/cloud_tests/testcases/bugs/lp1611074.yaml (+0/-0)
tests/cloud_tests/testcases/bugs/lp1628337.yaml (+0/-0)
tests/cloud_tests/testcases/examples/README.md (+0/-0)
tests/cloud_tests/testcases/examples/TODO.md (+0/-0)
tests/cloud_tests/testcases/examples/add_apt_repositories.yaml (+0/-0)
tests/cloud_tests/testcases/examples/alter_completion_message.yaml (+0/-0)
tests/cloud_tests/testcases/examples/configure_instance_trusted_ca_certificates.yaml (+0/-0)
tests/cloud_tests/testcases/examples/configure_instances_ssh_keys.yaml (+0/-0)
tests/cloud_tests/testcases/examples/including_user_groups.yaml (+0/-0)
tests/cloud_tests/testcases/examples/install_arbitrary_packages.yaml (+0/-0)
tests/cloud_tests/testcases/examples/install_run_chef_recipes.yaml (+0/-0)
tests/cloud_tests/testcases/examples/run_apt_upgrade.yaml (+0/-0)
tests/cloud_tests/testcases/examples/run_commands.yaml (+0/-0)
tests/cloud_tests/testcases/examples/run_commands_first_boot.yaml (+0/-0)
tests/cloud_tests/testcases/examples/setup_run_puppet.yaml (+0/-0)
tests/cloud_tests/testcases/examples/writing_out_arbitrary_files.yaml (+0/-0)
tests/cloud_tests/testcases/main/README.md (+0/-0)
tests/cloud_tests/testcases/main/command_output_simple.yaml (+0/-0)
tests/cloud_tests/testcases/modules/README.md (+0/-0)
tests/cloud_tests/testcases/modules/TODO.md (+0/-2)
tests/cloud_tests/testcases/modules/apt_configure_conf.yaml (+0/-0)
tests/cloud_tests/testcases/modules/apt_configure_disable_suites.yaml (+0/-0)
tests/cloud_tests/testcases/modules/apt_configure_primary.yaml (+0/-0)
tests/cloud_tests/testcases/modules/apt_configure_proxy.yaml (+0/-0)
tests/cloud_tests/testcases/modules/apt_configure_security.yaml (+0/-0)
tests/cloud_tests/testcases/modules/apt_configure_sources_key.yaml (+0/-0)
tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.yaml (+0/-0)
tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml (+0/-0)
tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.yaml (+0/-0)
tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml (+0/-0)
tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml (+0/-0)
tests/cloud_tests/testcases/modules/bootcmd.yaml (+0/-0)
tests/cloud_tests/testcases/modules/byobu.yaml (+0/-0)
tests/cloud_tests/testcases/modules/ca_certs.yaml (+0/-0)
tests/cloud_tests/testcases/modules/debug_disable.yaml (+0/-0)
tests/cloud_tests/testcases/modules/debug_enable.yaml (+0/-0)
tests/cloud_tests/testcases/modules/final_message.yaml (+0/-0)
tests/cloud_tests/testcases/modules/keys_to_console.yaml (+0/-0)
tests/cloud_tests/testcases/modules/landscape.yaml (+0/-0)
tests/cloud_tests/testcases/modules/locale.yaml (+0/-0)
tests/cloud_tests/testcases/modules/lxd_bridge.yaml (+0/-0)
tests/cloud_tests/testcases/modules/lxd_dir.yaml (+0/-0)
tests/cloud_tests/testcases/modules/ntp.yaml (+0/-0)
tests/cloud_tests/testcases/modules/ntp_pools.yaml (+0/-0)
tests/cloud_tests/testcases/modules/ntp_servers.yaml (+0/-0)
tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml (+0/-0)
tests/cloud_tests/testcases/modules/runcmd.yaml (+0/-0)
tests/cloud_tests/testcases/modules/salt_minion.yaml (+0/-0)
tests/cloud_tests/testcases/modules/seed_random_command.yaml (+0/-0)
tests/cloud_tests/testcases/modules/seed_random_data.yaml (+0/-0)
tests/cloud_tests/testcases/modules/set_hostname.yaml (+0/-0)
tests/cloud_tests/testcases/modules/set_hostname_fqdn.yaml (+0/-0)
tests/cloud_tests/testcases/modules/set_password.yaml (+0/-0)
tests/cloud_tests/testcases/modules/set_password_expire.yaml (+0/-0)
tests/cloud_tests/testcases/modules/set_password_list.yaml (+0/-0)
tests/cloud_tests/testcases/modules/set_password_list_string.yaml (+0/-0)
tests/cloud_tests/testcases/modules/snappy.yaml (+0/-0)
tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.yaml (+0/-0)
tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.yaml (+0/-0)
tests/cloud_tests/testcases/modules/ssh_import_id.yaml (+0/-0)
tests/cloud_tests/testcases/modules/ssh_keys_generate.yaml (+0/-0)
tests/cloud_tests/testcases/modules/ssh_keys_provided.yaml (+0/-0)
tests/cloud_tests/testcases/modules/timezone.yaml (+0/-0)
tests/cloud_tests/testcases/modules/user_groups.yaml (+0/-0)
tests/cloud_tests/testcases/modules/write_files.yaml (+0/-0)
tests/cloud_tests/util.py (+43/-0)
tests/unittests/test__init__.py (+1/-1)
tests/unittests/test_atomic_helper.py (+1/-1)
tests/unittests/test_builtin_handlers.py (+1/-1)
tests/unittests/test_cli.py (+146/-4)
tests/unittests/test_cs_util.py (+1/-1)
tests/unittests/test_data.py (+1/-1)
tests/unittests/test_datasource/test_aliyun.py (+7/-6)
tests/unittests/test_datasource/test_altcloud.py (+3/-3)
tests/unittests/test_datasource/test_azure.py (+4/-2)
tests/unittests/test_datasource/test_azure_helper.py (+97/-50)
tests/unittests/test_datasource/test_cloudsigma.py (+1/-1)
tests/unittests/test_datasource/test_cloudstack.py (+83/-7)
tests/unittests/test_datasource/test_common.py (+2/-1)
tests/unittests/test_datasource/test_configdrive.py (+1/-1)
tests/unittests/test_datasource/test_digitalocean.py (+1/-1)
tests/unittests/test_datasource/test_ec2.py (+255/-35)
tests/unittests/test_datasource/test_gce.py (+3/-2)
tests/unittests/test_datasource/test_maas.py (+1/-1)
tests/unittests/test_datasource/test_nocloud.py (+1/-1)
tests/unittests/test_datasource/test_opennebula.py (+1/-1)
tests/unittests/test_datasource/test_openstack.py (+4/-1)
tests/unittests/test_datasource/test_ovf.py (+165/-1)
tests/unittests/test_datasource/test_scaleway.py (+1/-1)
tests/unittests/test_datasource/test_smartos.py (+1/-1)
tests/unittests/test_distros/__init__.py (+21/-0)
tests/unittests/test_distros/test_arch.py (+45/-0)
tests/unittests/test_distros/test_create_users.py (+1/-1)
tests/unittests/test_distros/test_debian.py (+42/-24)
tests/unittests/test_distros/test_generic.py (+17/-1)
tests/unittests/test_distros/test_netconfig.py (+3/-3)
tests/unittests/test_distros/test_opensuse.py (+12/-0)
tests/unittests/test_distros/test_resolv.py (+1/-1)
tests/unittests/test_distros/test_sles.py (+12/-0)
tests/unittests/test_distros/test_sysconfig.py (+1/-1)
tests/unittests/test_distros/test_user_data_normalize.py (+1/-1)
tests/unittests/test_ds_identify.py (+47/-4)
tests/unittests/test_ec2_util.py (+1/-1)
tests/unittests/test_filters/test_launch_index.py (+1/-1)
tests/unittests/test_handler/test_handler_apt_conf_v1.py (+1/-1)
tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py (+1/-1)
tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py (+1/-1)
tests/unittests/test_handler/test_handler_apt_source_v1.py (+1/-1)
tests/unittests/test_handler/test_handler_apt_source_v3.py (+1/-1)
tests/unittests/test_handler/test_handler_bootcmd.py (+146/-0)
tests/unittests/test_handler/test_handler_ca_certs.py (+1/-1)
tests/unittests/test_handler/test_handler_chef.py (+76/-12)
tests/unittests/test_handler/test_handler_debug.py (+7/-4)
tests/unittests/test_handler/test_handler_disk_setup.py (+1/-1)
tests/unittests/test_handler/test_handler_growpart.py (+1/-1)
tests/unittests/test_handler/test_handler_landscape.py (+130/-0)
tests/unittests/test_handler/test_handler_locale.py (+57/-3)
tests/unittests/test_handler/test_handler_lxd.py (+1/-1)
tests/unittests/test_handler/test_handler_mcollective.py (+1/-1)
tests/unittests/test_handler/test_handler_mounts.py (+1/-1)
tests/unittests/test_handler/test_handler_ntp.py (+102/-5)
tests/unittests/test_handler/test_handler_power_state.py (+2/-2)
tests/unittests/test_handler/test_handler_puppet.py (+142/-0)
tests/unittests/test_handler/test_handler_resizefs.py (+222/-7)
tests/unittests/test_handler/test_handler_rsyslog.py (+1/-1)
tests/unittests/test_handler/test_handler_runcmd.py (+108/-0)
tests/unittests/test_handler/test_handler_seed_random.py (+1/-1)
tests/unittests/test_handler/test_handler_set_hostname.py (+4/-3)
tests/unittests/test_handler/test_handler_snappy.py (+2/-2)
tests/unittests/test_handler/test_handler_spacewalk.py (+1/-1)
tests/unittests/test_handler/test_handler_timezone.py (+1/-1)
tests/unittests/test_handler/test_handler_write_files.py (+1/-1)
tests/unittests/test_handler/test_handler_yum_add_repo.py (+1/-1)
tests/unittests/test_handler/test_handler_zypper_add_repo.py (+237/-0)
tests/unittests/test_handler/test_schema.py (+151/-16)
tests/unittests/test_helpers.py (+1/-1)
tests/unittests/test_log.py (+58/-0)
tests/unittests/test_merging.py (+1/-1)
tests/unittests/test_net.py (+128/-8)
tests/unittests/test_pathprefix2dict.py (+1/-1)
tests/unittests/test_registry.py (+1/-1)
tests/unittests/test_reporting.py (+1/-1)
tests/unittests/test_rh_subscription.py (+1/-1)
tests/unittests/test_runs/test_merge_run.py (+1/-1)
tests/unittests/test_runs/test_simple_run.py (+106/-21)
tests/unittests/test_sshutil.py (+2/-1)
tests/unittests/test_templating.py (+1/-1)
tests/unittests/test_util.py (+13/-2)
tests/unittests/test_version.py (+1/-1)
tests/unittests/test_vmware_config_file.py (+247/-2)
tools/build-on-freebsd (+0/-1)
tools/ds-identify (+6/-0)
tools/make-tarball (+1/-1)
tools/read-version (+1/-1)
tools/render-cloudcfg (+3/-2)
tools/xkvm (+664/-0)
tox.ini (+31/-11)
Reviewer Review Type Date Requested Status
Server Team CI bot continuous-integration Approve
cloud-init Committers Pending
Review via email: mp+331973@code.launchpad.net

Description of the change

Merge upstream into Xenial for SRU

To post a comment you must log in.
Revision history for this message
Server Team CI bot (server-team-bot) wrote :

PASSED: Continuous integration, rev:9612b7bdf7b57244c5612c3c12136467bbd217e1
https://jenkins.ubuntu.com/server/job/cloud-init-ci/393/
Executed test runs:
    SUCCESS: Checkout
    SUCCESS: Unit & Style Tests
    SUCCESS: Ubuntu LTS: Build
    SUCCESS: Ubuntu LTS: Integration
    SUCCESS: MAAS Compatibility Testing
    IN_PROGRESS: Declarative: Post Actions

Click here to trigger a rebuild:
https://jenkins.ubuntu.com/server/job/cloud-init-ci/393/rebuild

review: Approve (continuous-integration)
Revision history for this message
Scott Moser (smoser) wrote :

Merged with these 2 commits
http://paste.ubuntu.com/25688323/

There was an error fetching revisions from git servers. Please try again in a few minutes. If the problem persists, contact Launchpad support.

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
diff --git a/ChangeLog b/ChangeLog
index 80405bc..0260c57 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,425 @@
1 17.1:
2 - doc: document GCE datasource. [Arnd Hannemann]
3 - suse: updates to templates to support openSUSE and SLES.
4 [Robert Schweikert] (LP: #1718640)
5 - suse: Copy sysvinit files from redhat with slight changes.
6 [Robert Schweikert] (LP: #1718649)
7 - docs: fix sphinx module schema documentation [Chad Smith]
8 - tests: Add cloudinit package to all test targets [Chad Smith]
9 - Makefile: No longer look for yaml files in obsolete ./bin/.
10 - tests: fix ds-identify unit tests to set EC2_STRICT_ID_DEFAULT.
11 - ec2: Fix maybe_perform_dhcp_discovery to use /var/tmp as a tmpdir
12 [Chad Smith] (LP: #1717627)
13 - Azure: wait longer for SSH pub keys to arrive.
14 [Paul Meyer] (LP: #1717611)
15 - GCE: Fix usage of user-data. (LP: #1717598)
16 - cmdline: add collect-logs subcommand. [Chad Smith] (LP: #1607345)
17 - CloudStack: consider dhclient lease files named with a hyphen.
18 (LP: #1717147)
19 - resizefs: Drop check for read-only device file, do not warn on
20 overlayroot. [Chad Smith]
21 - Do not provide systemd-fsck drop-in which could cause ordering cycles.
22 [Balint Reczey] (LP: #1717477)
23 - tests: Enable the NoCloud KVM platform [Joshua Powers]
24 - resizefs: pass mount point to xfs_growfs [Dusty Mabe]
25 - vmware: Enable nics before sending the SUCCESS event. [Sankar Tanguturi]
26 - cloud-config modules: honor distros definitions in each module
27 [Chad Smith] (LP: #1715738, #1715690)
28 - chef: Add option to pin chef omnibus install version
29 [Ethan Apodaca] (LP: #1462693)
30 - tests: execute: support command as string [Joshua Powers]
31 - schema and docs: Add jsonschema to resizefs and bootcmd modules
32 [Chad Smith]
33 - tools: Add xkvm script, wrapper around qemu-system [Joshua Powers]
34 - vmware customization: return network config format
35 [Sankar Tanguturi] (LP: #1675063)
36 - Ec2: only attempt to operate at local mode on known platforms.
37 (LP: #1715128)
38 - Use /run/cloud-init for tempfile operations. (LP: #1707222)
39 - ds-identify: Make OpenStack return maybe on arch other than intel.
40 (LP: #1715241)
41 - tests: mock missed openstack metadata uri network_data.json
42 [Chad Smith] (LP: #1714376)
43 - relocate tests/unittests/helpers.py to cloudinit/tests
44 [Lars Kellogg-Stedman]
45 - tox: add nose timer output [Joshua Powers]
46 - upstart: do not package upstart jobs, drop ubuntu-init-switch module.
47 - tests: Stop leaking calls through unmocked metadata addresses
48 [Chad Smith] (LP: #1714117)
49 - distro: allow distro to specify a default locale [Ryan Harper]
50 - tests: fix two recently added tests for sles distro.
51 - url_helper: dynamically import oauthlib import from inside oauth_headers
52 [Chad Smith]
53 - tox: make xenial environment run with python3.6
54 - suse: Add support for openSUSE and return SLES to a working state.
55 [Robert Schweikert]
56 - GCE: Add a main to the GCE Datasource.
57 - ec2: Add IPv6 dhcp support to Ec2DataSource. [Chad Smith] (LP: #1639030)
58 - url_helper: fail gracefully if oauthlib is not available
59 [Lars Kellogg-Stedman] (LP: #1713760)
60 - cloud-init analyze: fix issues running under python 2. [Andrew Jorgensen]
61 - Configure logging module to always use UTC time.
62 [Ryan Harper] (LP: #1713158)
63 - Log a helpful message if a user script does not include shebang.
64 [Andrew Jorgensen]
65 - cli: Fix command line parsing of conditionally loaded subcommands.
66 [Chad Smith] (LP: #1712676)
67 - doc: Explain error behavior in user data include file format.
68 [Jason Butz]
69 - cc_landscape & cc_puppet: Fix six.StringIO use in writing configs
70 [Chad Smith] (LP: #1699282, #1710932)
71 - schema cli: Add schema subcommand to cloud-init cli and cc_runcmd schema
72 [Chad Smith]
73 - Debian: Remove non-free repositories from apt sources template.
74 [Joonas Kylmälä] (LP: #1700091)
75 - tools: Add tooling for basic cloud-init performance analysis.
76 [Chad Smith] (LP: #1709761)
77 - network: add v2 passthrough and fix parsing v2 config with bonds/bridge
78 params [Ryan Harper] (LP: #1709180)
79 - doc: update capabilities with features available, link doc reference,
80 cli example [Ryan Harper]
81 - vcloud directory: Guest Customization support for passwords
82 [Maitreyee Saikia]
83 - ec2: Allow Ec2 to run in init-local using dhclient in a sandbox.
84 [Chad Smith] (LP: #1709772)
85 - cc_ntp: fallback on timesyncd configuration if ntp is not installable
86 [Ryan Harper] (LP: #1686485)
87 - net: Reduce duplicate code. Have get_interfaces_by_mac use
88 get_interfaces.
89 - tests: Fix build tree integration tests [Joshua Powers]
90 - sysconfig: Dont repeat header when rendering resolv.conf
91 [Ryan Harper] (LP: #1701420)
92 - archlinux: Fix bug with empty dns, do not render 'lo' devices.
93 (LP: #1663045, #1706593)
94 - cloudinit.net: add initialize_network_device function and tests
95 [Chad Smith]
96 - makefile: fix ci-deps-ubuntu target [Chad Smith]
97 - tests: adjust locale integration test to parse default locale.
98 - tests: remove 'yakkety' from releases as it is EOL.
99 - tests: Add initial tests for EC2 and improve a docstring.
100 - locale: Do not re-run locale-gen if provided locale is system default.
101 - archlinux: fix set hostname usage of write_file.
102 [Joshua Powers] (LP: #1705306)
103 - sysconfig: support subnet type of 'manual'.
104 - tools/run-centos: make running with no argument show help.
105 - Drop rand_str() usage in DNS redirection detection
106 [Bob Aman] (LP: #1088611)
107 - sysconfig: use MACADDR on bonds/bridges to configure mac_address
108 [Ryan Harper] (LP: #1701417)
109 - net: eni route rendering missed ipv6 default route config
110 [Ryan Harper] (LP: #1701097)
111 - sysconfig: enable mtu set per subnet, including ipv6 mtu
112 [Ryan Harper] (LP: #1702513)
113 - sysconfig: handle manual type subnets [Ryan Harper] (LP: #1687725)
114 - sysconfig: fix ipv6 gateway routes [Ryan Harper] (LP: #1694801)
115 - sysconfig: fix rendering of bond, bridge and vlan types.
116 [Ryan Harper] (LP: #1695092)
117 - Templatize systemd unit files for cross distro deltas. [Ryan Harper]
118 - sysconfig: ipv6 and default gateway fixes. [Ryan Harper] (LP: #1704872)
119 - net: fix renaming of nics to support mac addresses written in upper
120 case. (LP: #1705147)
121 - tests: fixes for issues uncovered when moving to python 3.6.
122 (LP: #1703697)
123 - sysconfig: include GATEWAY value if set in subnet
124 [Ryan Harper] (LP: #1686856)
125 - Scaleway: add datasource with user and vendor data for Scaleway.
126 [Julien Castets]
127 - Support comments in content read by load_shell_content.
128 - cloudinitlocal fail to run during boot [Hongjiang Zhang]
129 - doc: fix disk setup example table_type options
130 [Sandor Zeestraten] (LP: #1703789)
131 - tools: Fix exception handling. [Joonas Kylmälä] (LP: #1701527)
132 - tests: fix usage of mock in GCE test.
133 - test_gce: Fix invalid mock of platform_reports_gce to return False
134 [Chad Smith]
135 - test: fix incorrect keyid for apt repository.
136 [Joshua Powers] (LP: #1702717)
137 - tests: Update version of pylxd [Joshua Powers]
138 - write_files: Remove log from helper function signatures.
139 [Andrew Jorgensen]
140 - doc: document the cmdline options to NoCloud [Brian Candler]
141 - read_dmi_data: always return None when inside a container. (LP: #1701325)
142 - requirements.txt: remove trailing white space.
143 - Azure: Add network-config, Refactor net layer to handle duplicate macs.
144 [Ryan Harper]
145 - Tests: Simplify the check on ssh-import-id [Joshua Powers]
146 - tests: update ntp tests after sntp added [Joshua Powers]
147 - FreeBSD: Make freebsd a variant, fix unittests and
148 tools/build-on-freebsd.
149 - FreeBSD: fix test failure
150 - FreeBSD: replace ifdown/ifup with "ifconfig down" and "ifconfig up".
151 [Hongjiang Zhang] (LP: #1697815)
152 - FreeBSD: fix cdrom mounting failure if /mnt/cdrom/secure did not exist.
153 [Hongjiang Zhang] (LP: #1696295)
154 - main: Don't use templater to format the welcome message
155 [Andrew Jorgensen]
156 - docs: Automatically generate module docs form schema if present.
157 [Chad Smith]
158 - debian: fix path comment in /etc/hosts template.
159 [Jens Sandmann] (LP: #1606406)
160 - suse: add hostname and fully qualified domain to template.
161 [Jens Sandmann]
162 - write_file(s): Print permissions as octal, not decimal [Andrew Jorgensen]
163 - ci deps: Add --test-distro to read-dependencies to install all deps
164 [Chad Smith]
165 - tools/run-centos: cleanups and move to using read-dependencies
166 - pkg build ci: Add make ci-deps-<distro> target to install pkgs
167 [Chad Smith]
168 - systemd: make cloud-final.service run before apt daily services.
169 (LP: #1693361)
170 - selinux: Allow restorecon to be non-fatal. [Ryan Harper] (LP: #1686751)
171 - net: Allow netinfo subprocesses to return 0 or 1.
172 [Ryan Harper] (LP: #1686751)
173 - net: Allow for NetworkManager configuration [Ryan McCabe] (LP: #1693251)
174 - Use distro release version to determine if we use systemd in redhat spec
175 [Ryan Harper]
176 - net: normalize data in network_state object
177 - Integration Testing: tox env, pylxd 2.2.3, and revamp framework
178 [Wesley Wiedenmeier]
179 - Chef: Update omnibus url to chef.io, minor doc changes. [JJ Asghar]
180 - tools: add centos scripts to build and test [Joshua Powers]
181 - Drop cheetah python module as it is not needed by trunk [Ryan Harper]
182 - rhel/centos spec cleanups.
183 - cloud.cfg: move to a template. setup.py changes along the way.
184 - Makefile: add deb-src and srpm targets. use PYVER more places.
185 - makefile: fix python 2/3 detection in the Makefile [Chad Smith]
186 - snap: Removing snapcraft plug line [Joshua Powers] (LP: #1695333)
187 - RHEL/CentOS: Fix default routes for IPv4/IPv6 configuration.
188 [Andreas Karis] (LP: #1696176)
189 - test: Fix pyflakes complaint of unused import.
190 [Joshua Powers] (LP: #1695918)
191 - NoCloud: support seed of nocloud from smbios information
192 [Vladimir Pouzanov] (LP: #1691772)
193 - net: when selecting a network device, use natural sort order
194 [Marc-Aurèle Brothier]
195 - fix typos and remove whitespace in various docs [Stephan Telling]
196 - systemd: Fix typo in comment in cloud-init.target. [Chen-Han Hsiao]
197 - Tests: Skip jsonschema related unit tests when dependency is absent.
198 [Chad Smith] (LP: #1695318)
199 - azure: remove accidental duplicate line in merge.
200 - azure: identify platform by well known value in chassis asset tag.
201 [Chad Smith] (LP: #1693939)
202 - tools/net-convert.py: support old cloudinit versions by using kwargs.
203 - ntp: Add schema definition and passive schema validation.
204 [Chad Smith] (LP: #1692916)
205 - Fix eni rendering for bridge params that require repeated key for
206 values. [Ryan Harper]
207 - net: remove systemd link file writing from eni renderer [Ryan Harper]
208 - AliYun: Enable platform identification and enable by default.
209 [Junjie Wang] (LP: #1638931)
210 - net: fix reading and rendering addresses in cidr format.
211 [Dimitri John Ledkov] (LP: #1689346, #1684349)
212 - disk_setup: udev settle before attempting partitioning or fs creation.
213 (LP: #1692093)
214 - GCE: Update the attribute used to find instance SSH keys.
215 [Daniel Watkins] (LP: #1693582)
216 - nplan: For bonds, allow dashed or underscore names of keys.
217 [Dimitri John Ledkov] (LP: #1690480)
218 - python2.6: fix unit tests usage of assertNone and format.
219 - test: update docstring on test_configured_list_with_none
220 - fix tools/ds-identify to not write None twice.
221 - tox/build: do not package depend on style requirements.
222 - cc_ntp: Restructure cc_ntp unit tests. [Chad Smith] (LP: #1692794)
223 - flake8: move the pinned version of flake8 up to 3.3.0
224 - tests: Apply workaround for snapd bug in test case. [Joshua Powers]
225 - RHEL/CentOS: Fix dual stack IPv4/IPv6 configuration.
226 [Andreas Karis] (LP: #1679817, #1685534, #1685532)
227 - disk_setup: fix several issues with gpt disk partitions. (LP: #1692087)
228 - function spelling & docstring update [Joshua Powers]
229 - Fixing wrong file name regression. [Joshua Powers]
230 - tox: move pylint target to 1.7.1
231 - Fix get_interfaces_by_mac for empty macs (LP: #1692028)
232 - DigitalOcean: remove routes except for the public interface.
233 [Ben Howard] (LP: #1681531)
234 - netplan: pass macaddress, when specified, for vlans
235 [Dimitri John Ledkov] (LP: #1690388)
236 - doc: various improvements for the docs on cc_users_groups.
237 [Felix Dreissig]
238 - cc_ntp: write template before installing and add service restart
239 [Ryan Harper] (LP: #1645644)
240 - cloudstack: fix tests to avoid accessing /var/lib/NetworkManager
241 [Lars Kellogg-Stedman]
242 - tests: fix hardcoded path to mkfs.ext4 [Joshua Powers] (LP: #1691517)
243 - Actually skip warnings when .skip file is present.
244 [Chris Brinker] (LP: #1691551)
245 - netplan: fix netplan render_network_state signature.
246 [Dimitri John Ledkov] (LP: #1685944)
247 - Azure: fix reformatting of ephemeral disks on resize to large types.
248 (LP: #1686514)
249 - Revert "tools/net-convert: fix argument order for render_network_state"
250 - make deb: Add devscripts dependency for make deb. Cleanup
251 packages/bddeb. [Chad Smith] (LP: #1685935)
252 - tools/net-convert: fix argument order for render_network_state
253 [Ryan Harper] (LP: #1685944)
254 - openstack: fix log message copy/paste typo in _get_url_settings
255 [Lars Kellogg-Stedman]
256 - unittests: fix unittests run on centos [Joshua Powers]
257 - Improve detection of snappy to include os-release and kernel cmdline.
258 (LP: #1689944)
259 - Add address to config entry generated by _klibc_to_config_entry.
260 [Julien Castets] (LP: #1691135)
261 - sysconfig: Raise ValueError when multiple default gateways are present.
262 [Chad Smith] (LP: #1687485)
263 - FreeBSD: improvements and fixes for use on Azure
264 [Hongjiang Zhang] (LP: #1636345)
265 - Add unit tests for ds-identify, fix Ec2 bug found.
266 - fs_setup: if cmd is specified, use shell interpretation.
267 [Paul Meyer] (LP: #1687712)
268 - doc: document network configuration defaults policy and formats.
269 [Ryan Harper]
270 - Fix name of "uri" key in docs for "cc_apt_configure" module
271 [Felix Dreissig]
272 - tests: Enable artful [Joshua Powers]
273 - nova-lxd: read product_name from environment, not platform.
274 (LP: #1685810)
275 - Fix yum repo config where keys contain array values
276 [Dylan Perry] (LP: #1592150)
277 - template: Update debian backports template [Joshua Powers] (LP: #1627293)
278 - rsyslog: replace ~ with stop [Joshua Powers] (LP: #1367899)
279 - Doc: add additional RTD examples [Joshua Powers] (LP: #1459604)
280 - Fix growpart for some cases when booted with root=PARTUUID.
281 (LP: #1684869)
282 - pylint: update output style to parseable [Joshua Powers]
283 - pylint: fix all logging warnings [Joshua Powers]
284 - CloudStack: Add NetworkManager to list of supported DHCP lease dirs.
285 [Syed]
286 - net: kernel lies about vlans not stealing mac addresses, when they do
287 [Dimitri John Ledkov] (LP: #1682871)
288 - ds-identify: Check correct path for "latest" config drive
289 [Daniel Watkins] (LP: #1673637)
290 - doc: Fix example for resolve.conf configuration.
291 [Jon Grimm] (LP: #1531582)
292 - Fix examples that reference upstream chef repository.
293 [Jon Grimm] (LP: #1678145)
294 - doc: correct grammar and improve clarity in merging documentation.
295 [David Tagatac]
296 - doc: Add missing doc link to snap-config module. [Ryan Harper]
297 - snap: allows for creating cloud-init snap [Joshua Powers]
298 - DigitalOcean: assign IPv4ll address to lowest indexed interface.
299 [Ben Howard]
300 - DigitalOcean: configure all NICs presented in meta-data. [Ben Howard]
301 - Remove (and/or fix) URL shortener references [Jon Grimm] (LP: #1669727)
302 - HACKING.rst: more info on filling out contributors agreement.
303 - util: teach write_file about copy_mode option
304 [Lars Kellogg-Stedman] (LP: #1644064)
305 - DigitalOcean: bind resolvers to loopback interface. [Ben Howard]
306 - tests: fix AltCloud tests to not rely on blkid (LP: #1636531)
307 - OpenStack: add 'dvs' to the list of physical link types. (LP: #1674946)
308 - Fix bug that resulted in an attempt to rename bonds or vlans.
309 (LP: #1669860)
310 - tests: update OpenNebula and Digital Ocean to not rely on host
311 interfaces.
312 - net: in netplan renderer delete known image-builtin content.
313 (LP: #1675576)
314 - doc: correct grammar in capabilities.rst [David Tagatac]
315 - ds-identify: fix detecting of maas datasource. (LP: #1677710)
316 - netplan: remove debugging prints, add debug logging [Ryan Harper]
317 - ds-identify: do not write None twice to datasource_list.
318 - support resizing partition and rootfs on system booted without
319 initramfs. [Steve Langasek] (LP: #1677376)
320 - apt_configure: run only when needed. (LP: #1675185)
321 - OpenStack: identify OpenStack by product 'OpenStack Compute'.
322 (LP: #1675349)
323 - GCE: Search GCE in ds-identify, consider serial number in check.
324 (LP: #1674861)
325 - Add support for setting hashed passwords [Tore S. Lonoy] (LP: #1570325)
326 - Fix filesystem creation when using "partition: auto"
327 [Jonathan Ballet] (LP: #1634678)
328 - ConfigDrive: support reading config drive data from /config-drive.
329 (LP: #1673411)
330 - ds-identify: fix detection of Bigstep datasource. (LP: #1674766)
331 - test: add running of pylint [Joshua Powers]
332 - ds-identify: fix bug where filename expansion was left on.
333 - advertise network config v2 support (NETWORK_CONFIG_V2) in features.
334 - Bigstep: fix bug when executing in python3. [root]
335 - Fix unit test when running in a system deployed with cloud-init.
336 - Bounce network interface for Azure when using the built-in path.
337 [Brent Baude] (LP: #1674685)
338 - cloudinit.net: add network config v2 parsing and rendering [Ryan Harper]
339 - net: Fix incorrect call to isfile [Joshua Powers] (LP: #1674317)
340 - net: add renderers for automatically selecting the renderer.
341 - doc: fix config drive doc with regard to unpartitioned disks.
342 (LP: #1673818)
343 - test: Adding integratiron test for password as list [Joshua Powers]
344 - render_network_state: switch arguments around, do not require target
345 - support 'loopback' as a device type.
346 - Integration Testing: improve testcase subclassing [Wesley Wiedenmeier]
347 - gitignore: adding doc/rtd_html [Joshua Powers]
348 - doc: add instructions for running integration tests via tox.
349 [Joshua Powers]
350 - test: avoid differences in 'date' output due to daylight savings.
351 - Fix chef config module in omnibus install. [Jeremy Melvin] (LP: #1583837)
352 - Add feature flags to cloudinit.version. [Wesley Wiedenmeier]
353 - tox: add a citest environment
354 - Further fix regression to support 'password' for default user.
355 - fix regression when no chpasswd/list was provided.
356 - Support chpasswd/list being a list in addition to a string.
357 [Sergio Lystopad] (LP: #1665694)
358 - doc: Fix configuration example for cc_set_passwords module.
359 [Sergio Lystopad] (LP: #1665773)
360 - net: support both ipv4 and ipv6 gateways in sysconfig.
361 [Lars Kellogg-Stedman] (LP: #1669504)
362 - net: do not raise exception for > 3 nameservers
363 [Lars Kellogg-Stedman] (LP: #1670052)
364 - ds-identify: report cleanups for config and exit value. (LP: #1669949)
365 - ds-identify: move default setting for Ec2/strict_id to a global.
366 - ds-identify: record not found in cloud.cfg and always add None.
367 - Support warning if the used datasource is not in ds-identify's list.
368 - tools/ds-identify: make report mode write namespaced results.
369 - Move warning functionality to cloudinit/warnings.py
370 - Add profile.d script for showing warnings on login.
371 - Z99-cloud-locale-test.sh: install and make consistent.
372 - tools/ds-identify: look at cloud.cfg when looking for ec2 strict_id.
373 - tools/ds-identify: disable vmware_guest_customization by default.
374 - tools/ds-identify: ovf identify vmware guest customization.
375 - Identify Brightbox as an Ec2 datasource user. (LP: #1661693)
376 - DatasourceEc2: add warning message when not on AWS.
377 - ds-identify: add reading of datasource/Ec2/strict_id
378 - tools/ds-identify: add support for found or maybe contributing config.
379 - tools/ds-identify: read the seed directory on Ec2
380 - tools/ds-identify: use quotes in local declarations.
381 - tools/ds-identify: fix documentation of policy setting in a comment.
382 - ds-identify: only run once per boot unless --force is given.
383 - flake8: fix flake8 complaints in previous commit.
384 - net: correct errors in cloudinit/net/sysconfig.py
385 [Lars Kellogg-Stedman] (LP: #1665441)
386 - ec2_utils: fix MetadataLeafDecoder that returned bytes on empty
387 - apply the runtime configuration written by ds-identify.
388 - ds-identify: fix checking for filesystem label (LP: #1663735)
389 - ds-identify: read ds=nocloud properly (LP: #1663723)
390 - support nova-lxd by reading platform from environment of pid 1.
391 (LP: #1661797)
392 - ds-identify: change aarch64 to use the default for non-dmi systems.
393 - Remove style checking during build and add latest style checks to tox
394 [Joshua Powers] (LP: #1652329)
395 - code-style: make master pass pycodestyle (2.3.1) cleanly, currently:
396 [Joshua Powers]
397 - manual_cache_clean: When manually cleaning touch a file in instance dir.
398 - Add tools/ds-identify to identify datasources available.
399 - Fix small typo and change iso-filename for consistency [Robin Naundorf]
400 - Fix eni rendering of multiple IPs per interface
401 [Ryan Harper] (LP: #1657940)
402 - tools/mock-meta: support python2 or python3 and ipv6 in both.
403 - tests: remove executable bit on test_net, so it runs, and fix it.
404 - tests: No longer monkey patch httpretty for python 3.4.2
405 - Add 3 ecdsa-sha2-nistp* ssh key types now that they are standardized
406 [Lars Kellogg-Stedman] (LP: #1658174)
407 - reset httppretty for each test [Lars Kellogg-Stedman] (LP: #1658200)
408 - build: fix running Make on a branch with tags other than master
409 - EC2: Do not cache security credentials on disk
410 [Andrew Jorgensen] (LP: #1638312)
411 - doc: Fix typos and clarify some aspects of the part-handler
412 [Erik M. Bray]
413 - doc: add some documentation on OpenStack datasource.
414 - OpenStack: Use timeout and retries from config in get_data.
415 [Lars Kellogg-Stedman] (LP: #1657130)
416 - Fixed Misc issues related to VMware customization. [Sankar Tanguturi]
417 - Fix minor docs typo: perserve > preserve [Jeremy Bicha]
418 - Use dnf instead of yum when available
419 [Lars Kellogg-Stedman] (LP: #1647118)
420 - validate-yaml: use python rather than explicitly python3
421 - Get early logging logged, including failures of cmdline url.
422
0.7.9:
 - doc: adjust headers in tests documentation for consistency.
 - pep8: fix issue found in zesty build with pycodestyle.
diff --git a/Makefile b/Makefile
index f280911..4ace227 100644
--- a/Makefile
+++ b/Makefile
@@ -4,7 +4,7 @@ PYVER ?= $(shell for p in python3 python2; do \
44
5noseopts ?= -v5noseopts ?= -v
66
7YAML_FILES=$(shell find cloudinit bin tests tools -name "*.yaml" -type f )7YAML_FILES=$(shell find cloudinit tests tools -name "*.yaml" -type f )
8YAML_FILES+=$(shell find doc/examples -name "cloud-config*.txt" -type f )8YAML_FILES+=$(shell find doc/examples -name "cloud-config*.txt" -type f )
99
10PIP_INSTALL := pip install10PIP_INSTALL := pip install
@@ -48,10 +48,10 @@ pyflakes3:
48 @$(CWD)/tools/run-pyflakes348 @$(CWD)/tools/run-pyflakes3
4949
50unittest: clean_pyc50unittest: clean_pyc
51 nosetests $(noseopts) tests/unittests51 nosetests $(noseopts) tests/unittests cloudinit
5252
53unittest3: clean_pyc53unittest3: clean_pyc
54 nosetests3 $(noseopts) tests/unittests54 nosetests3 $(noseopts) tests/unittests cloudinit
5555
56ci-deps-ubuntu:56ci-deps-ubuntu:
57 @$(PYVER) $(CWD)/tools/read-dependencies --distro ubuntu --test-distro57 @$(PYVER) $(CWD)/tools/read-dependencies --distro ubuntu --test-distro
diff --git a/cloudinit/analyze/__init__.py b/cloudinit/analyze/__init__.py
58new file mode 10064458new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/cloudinit/analyze/__init__.py
diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py
59new file mode 10064459new file mode 100644
index 0000000..69b9e43
--- /dev/null
+++ b/cloudinit/analyze/__main__.py
@@ -0,0 +1,155 @@
1# Copyright (C) 2017 Canonical Ltd.
2#
3# This file is part of cloud-init. See LICENSE file for license information.
4
5import argparse
6import re
7import sys
8
9from . import dump
10from . import show
11
12
def get_parser(parser=None):
    """Build/return the argument parser for the cloudinit-analyze tool.

    If *parser* is supplied, the analyze subcommands are attached to it;
    otherwise a standalone ArgumentParser is created. Each subcommand
    stores an (name, handler) pair in the 'action' default.
    """
    if not parser:
        parser = argparse.ArgumentParser(
            prog='cloudinit-analyze',
            description='Devel tool: Analyze cloud-init logs and data')
    subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand')
    subparsers.required = True

    blame = subparsers.add_parser(
        'blame', help='Print list of executed stages ordered by time to init')
    blame.add_argument(
        '-i', '--infile', action='store', dest='infile',
        default='/var/log/cloud-init.log',
        help='specify where to read input.')
    blame.add_argument(
        '-o', '--outfile', action='store', dest='outfile', default='-',
        help='specify where to write output. ')
    blame.set_defaults(action=('blame', analyze_blame))

    showp = subparsers.add_parser(
        'show', help='Print list of in-order events during execution')
    showp.add_argument('-f', '--format', action='store',
                       dest='print_format', default='%I%D @%Es +%ds',
                       help='specify formatting of output.')
    showp.add_argument('-i', '--infile', action='store',
                       dest='infile', default='/var/log/cloud-init.log',
                       help='specify where to read input.')
    showp.add_argument('-o', '--outfile', action='store',
                       dest='outfile', default='-',
                       help='specify where to write output.')
    showp.set_defaults(action=('show', analyze_show))

    dumpp = subparsers.add_parser(
        'dump', help='Dump cloud-init events in JSON format')
    dumpp.add_argument('-i', '--infile', action='store',
                       dest='infile', default='/var/log/cloud-init.log',
                       help='specify where to read input. ')
    dumpp.add_argument('-o', '--outfile', action='store',
                       dest='outfile', default='-',
                       help='specify where to write output. ')
    dumpp.set_defaults(action=('dump', analyze_dump))
    return parser
54
55
def analyze_blame(name, args):
    """Report a list of records sorted by largest time delta.

    For example:
      30.210s (init-local) searching for datasource
       8.706s (init-network) reading and applying user-data
      166ms (modules-config) ....
      807us (modules-final) ...

    We generate event records parsing cloud-init logs, formatting the output
    and sorting by record data ('delta')
    """
    (infh, outfh) = configure_io(args)
    blame_format = ' %ds (%n)'
    # Fix: use a raw string -- '\s' and '\d' in a plain literal are invalid
    # escape sequences (DeprecationWarning in py3.6+, SyntaxError later).
    r = re.compile(r'(^\s+\d+\.\d+)', re.MULTILINE)
    # Fix: initialize idx so the summary line works (instead of raising
    # NameError) when no boot records were parsed at all.
    idx = -1
    for idx, record in enumerate(show.show_events(_get_events(infh),
                                                  blame_format)):
        srecs = sorted(filter(r.match, record), reverse=True)
        outfh.write('-- Boot Record %02d --\n' % (idx + 1))
        outfh.write('\n'.join(srecs) + '\n')
        outfh.write('\n')
    outfh.write('%d boot records analyzed\n' % (idx + 1))
78
79
def analyze_show(name, args):
    """Generate output records using the 'standard' format to printing events.

    Example output follows:
        Starting stage: (init-local)
        ...
        Finished stage: (init-local) 0.105195 seconds

        Starting stage: (init-network)
        ...
        Finished stage: (init-network) 0.339024 seconds

        Starting stage: (modules-config)
        ...
        Finished stage: (modules-config) 0.NNN seconds

        Starting stage: (modules-final)
        ...
        Finished stage: (modules-final) 0.NNN seconds
    """
    (infh, outfh) = configure_io(args)
    boot_records = show.show_events(_get_events(infh), args.print_format)
    for idx, record in enumerate(boot_records):
        chunk = [
            '-- Boot Record %02d --\n' % (idx + 1),
            'The total time elapsed since completing an event is'
            ' printed after the "@" character.\n',
            'The time the event takes is printed after the "+" '
            'character.\n\n',
            '\n'.join(record) + '\n',
        ]
        outfh.write(''.join(chunk))
    outfh.write('%d boot records analyzed\n' % (idx + 1))
110
111
def analyze_dump(name, args):
    """Dump cloud-init events in json format"""
    (infh, outfh) = configure_io(args)
    events = _get_events(infh)
    outfh.write(dump.json_dumps(events) + '\n')
116
117
def _get_events(infile):
    """Return events from *infile*: JSON if possible, else parsed log text."""
    events, rawdata = show.load_events(infile, None)
    if events:
        return events
    # Not pre-dumped JSON; fall back to parsing raw cloud-init log lines.
    events, _ = dump.dump_events(rawdata=rawdata)
    return events
124
125
def configure_io(args):
    """Common parsing and setup of input/output files.

    '-' selects stdin (input) or stdout (output); any other value is
    opened as a file path. An unopenable path writes a message to stderr
    and exits with status 1.
    """
    def _open_or_std(path, mode, std_stream):
        # Helper: map '-' to the standard stream, otherwise open the path.
        if path == '-':
            return std_stream
        try:
            return open(path, mode)
        except OSError:
            sys.stderr.write('Cannot open file %s\n' % path)
            sys.exit(1)

    infh = _open_or_std(args.infile, 'r', sys.stdin)
    outfh = _open_or_std(args.outfile, 'w', sys.stdout)
    return (infh, outfh)
147
148
# Script entry point: parse CLI args, then dispatch to the (name, handler)
# pair that the selected subcommand stored via set_defaults(action=...).
if __name__ == '__main__':
    parser = get_parser()
    args = parser.parse_args()
    (name, action_functor) = args.action
    action_functor(name, args)
154
155# vi: ts=4 expandtab
diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py
0new file mode 100644156new file mode 100644
index 0000000..ca4da49
--- /dev/null
+++ b/cloudinit/analyze/dump.py
@@ -0,0 +1,176 @@
1# This file is part of cloud-init. See LICENSE file for license information.
2
3import calendar
4from datetime import datetime
5import json
6import sys
7
8from cloudinit import util
9
# Map each stage name, as it appears in the cloud-init log banner, to the
# human-readable description emitted on synthesized 'start' events.
stage_to_description = {
    'finished': 'finished running cloud-init',
    'init-local': 'starting search for local datasources',
    'init-network': 'searching for network datasources',
    'init': 'searching for network datasources',
    'modules-config': 'running config modules',
    'modules-final': 'finalizing modules',
    'modules': 'running modules for',
    'single': 'running single module ',
}

# logger's asctime format, e.g. '2016-09-12 14:39:20,839'
CLOUD_INIT_ASCTIME_FMT = "%Y-%m-%d %H:%M:%S,%f"

# journalctl -o short-precise, e.g. 'Nov 03 06:51:06.074410' (year appended)
CLOUD_INIT_JOURNALCTL_FMT = "%b %d %H:%M:%S.%f %Y"

# plain syslog, e.g. 'Aug 29 22:55:26' (year appended)
DEFAULT_FMT = "%b %d %H:%M:%S %Y"
29
30
def parse_timestamp(timestampstr):
    """Convert a log timestamp string into a float unix timestamp.

    Recognizes syslog and journalctl short-precise stamps (which lack a
    year -- the current year is assumed) and cloud-init's asctime format;
    anything else is delegated to the date(1) utility.
    """
    # default syslog time does not include the current year
    month_names = [calendar.month_abbr[m] for m in range(1, 13)]
    if timestampstr.split()[0] in month_names:
        # e.g. 'Aug 29 22:55:26' or journalctl 'Nov 03 06:51:06.074410'
        fmt = CLOUD_INIT_JOURNALCTL_FMT if '.' in timestampstr else DEFAULT_FMT
        parsed = datetime.strptime(
            timestampstr + " " + str(datetime.now().year), fmt)
        timestamp = parsed.strftime("%s.%f")
    elif "," in timestampstr:
        # e.g. '2016-09-12 14:39:20,839'
        parsed = datetime.strptime(timestampstr, CLOUD_INIT_ASCTIME_FMT)
        timestamp = parsed.strftime("%s.%f")
    else:
        # allow date(1) to handle other formats we don't expect
        timestamp = parse_timestamp_from_date(timestampstr)
    return float(timestamp)
52
53
def parse_timestamp_from_date(timestampstr):
    """Use the date(1) utility to parse an arbitrary timestamp string."""
    stdout, _ = util.subp(['date', '+%s.%3N', '-d', timestampstr])
    return float(stdout.strip())
58
59
def parse_ci_logline(line):
    """Parse one cloud-init log line into an event dict, or return None.

    Returns None when the line contains no known separator, or when it is
    the 'Cloud-init ... finished at' banner (no start event is generated
    for it). May raise ValueError from timestamp parsing on malformed
    timestamps.
    """
    # Stage Starts:
    # Cloud-init v. 0.7.7 running 'init-local' at \
    #   Fri, 02 Sep 2016 19:28:07 +0000. Up 1.0 seconds.
    # Cloud-init v. 0.7.7 running 'init' at \
    #   Fri, 02 Sep 2016 19:28:08 +0000. Up 2.0 seconds.
    # Cloud-init v. 0.7.7 finished at
    # Aug 29 22:55:26 test1 [CLOUDINIT] handlers.py[DEBUG]: \
    #   finish: modules-final: SUCCESS: running modules for final
    # 2016-08-30T21:53:25.972325+00:00 y1 [CLOUDINIT] handlers.py[DEBUG]: \
    #   finish: modules-final: SUCCESS: running modules for final
    #
    # Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT] util.py[DEBUG]: \
    #   Cloud-init v. 0.7.8 running 'init-local' at \
    #   Thu, 03 Nov 2016 06:51:06 +0000. Up 1.0 seconds.
    #
    # 2017-05-22 18:02:01,088 - util.py[DEBUG]: Cloud-init v. 0.7.9 running \
    #   'init-local' at Mon, 22 May 2017 18:02:01 +0000. Up 2.0 seconds.

    separators = [' - ', ' [CLOUDINIT] ']
    found = False
    for sep in separators:
        if sep in line:
            found = True
            break

    if not found:
        return None

    # 'sep' deliberately leaks from the loop: split on whichever matched.
    (timehost, eventstr) = line.split(sep)

    # journalctl -o short-precise
    if timehost.endswith(":"):
        # Drop the trailing 'cloud-init[pid]:' token from the time/host part.
        timehost = " ".join(timehost.split()[0:-1])

    if "," in timehost:
        # asctime format: re-join the ',msec' fragment onto the timestamp.
        timestampstr, extra = timehost.split(",")
        timestampstr += ",%s" % extra.split()[0]
        if ' ' in extra:
            hostname = extra.split()[-1]
    else:
        # syslog/journalctl: last token is the hostname, rest is the stamp.
        hostname = timehost.split()[-1]
        timestampstr = timehost.split(hostname)[0].strip()
    # NOTE(review): 'hostname' is extracted but never used below.
    if 'Cloud-init v.' in eventstr:
        event_type = 'start'
        if 'running' in eventstr:
            stage_and_timestamp = eventstr.split('running')[1].lstrip()
            event_name, _ = stage_and_timestamp.split(' at ')
            event_name = event_name.replace("'", "").replace(":", "-")
            if event_name == "init":
                # 'init' in the banner means the network init stage.
                event_name = "init-network"
        else:
            # don't generate a start for the 'finished at' banner
            return None
        event_description = stage_to_description[event_name]
    else:
        # handlers line: '<level> <start|finish>: <name>: <description>'
        (pymodloglvl, event_type, event_name) = eventstr.split()[0:3]
        event_description = eventstr.split(event_name)[1].strip()

    event = {
        'name': event_name.rstrip(":"),
        'description': event_description,
        'timestamp': parse_timestamp(timestampstr),
        'origin': 'cloudinit',
        'event_type': event_type.rstrip(":"),
    }
    if event['event_type'] == "finish":
        # finish descriptions carry 'RESULT: description'; split them apart.
        result = event_description.split(":")[0]
        desc = event_description.split(result)[1].lstrip(':').strip()
        event['result'] = result
        event['description'] = desc.strip()

    return event
133
134
def json_dumps(data):
    """Serialize *data* as compact, key-sorted, 1-space-indented JSON."""
    return json.dumps(
        data, indent=1, sort_keys=True, separators=(',', ': '))
138
139
def dump_events(cisource=None, rawdata=None):
    """Parse cloud-init log content into a list of event dicts.

    :param cisource: file-like object of log lines to read.
    :param rawdata: log content as one string; takes precedence.
    :return: tuple (events, data) of parsed event dicts and raw lines.
    :raises ValueError: when neither cisource nor rawdata is given.
    """
    events = []
    CI_EVENT_MATCHES = ['start:', 'finish:', 'Cloud-init v.']

    if not any([cisource, rawdata]):
        raise ValueError('Either cisource or rawdata parameters are required')

    if rawdata:
        data = rawdata.splitlines()
    else:
        data = cisource.readlines()

    for line in data:
        for match in CI_EVENT_MATCHES:
            if match in line:
                # Fix: reset event for every candidate line. Previously a
                # line whose parse raised ValueError left 'event' holding
                # the prior line's event, appending it a second time.
                event = None
                try:
                    event = parse_ci_logline(line)
                except ValueError:
                    sys.stderr.write('Skipping invalid entry\n')
                if event:
                    events.append(event)

    return events, data
164
165
def main():
    """Read a log from argv[1] (or stdin) and return its events as JSON."""
    if len(sys.argv) > 1:
        cisource = open(sys.argv[1])
    else:
        cisource = sys.stdin

    # Fix: dump_events returns (events, data); serializing the whole tuple
    # also emitted every raw log line. Dump only the parsed events, matching
    # what analyze_dump in __main__.py produces.
    events, _ = dump_events(cisource)
    return json_dumps(events)
173
174
# Script entry point: print the JSON dump of events parsed from the log.
if __name__ == "__main__":
    print(main())
diff --git a/cloudinit/analyze/show.py b/cloudinit/analyze/show.py
0new file mode 100644177new file mode 100644
index 0000000..3e778b8
--- /dev/null
+++ b/cloudinit/analyze/show.py
@@ -0,0 +1,207 @@
1# Copyright (C) 2016 Canonical Ltd.
2#
3# Author: Ryan Harper <ryan.harper@canonical.com>
4#
5# This file is part of cloud-init. See LICENSE file for license information.
6
7import base64
8import datetime
9import json
10import os
11
12from cloudinit import util
13
14# An event:
15'''
16{
17 "description": "executing late commands",
18 "event_type": "start",
19 "level": "INFO",
20 "name": "cmd-install/stage-late"
21 "origin": "cloudinit",
22 "timestamp": 1461164249.1590767,
23},
24
25 {
26 "description": "executing late commands",
27 "event_type": "finish",
28 "level": "INFO",
29 "name": "cmd-install/stage-late",
30 "origin": "cloudinit",
31 "result": "SUCCESS",
32 "timestamp": 1461164249.1590767
33 }
34
35'''
# %-token to event-field mapping used by format_record and the CLI help.
format_key = {
    '%d': 'delta',
    '%D': 'description',
    '%E': 'elapsed',
    '%e': 'event_type',
    '%I': 'indent',
    '%l': 'level',
    '%n': 'name',
    '%o': 'origin',
    '%r': 'result',
    '%t': 'timestamp',
    '%T': 'total_time',
}

formatting_help = " ".join(
    "{0}: {1}".format(token.replace('%', '%%'), field)
    for token, field in format_key.items())


def format_record(msg, event):
    """Expand the %-tokens in *msg* using values from the *event* dict.

    Time-valued fields (delta, elapsed, timestamp) are rendered with a
    fixed zero-padded width so columns line up in the output.
    """
    for token, field in format_key.items():
        if token not in msg:
            continue
        # ensure consistent formatting of time values
        if field in ('delta', 'elapsed', 'timestamp'):
            placeholder = "{%s:08.5f}" % field
        else:
            placeholder = "{%s}" % field
        msg = msg.replace(token, placeholder)
    return msg.format(**event)
63
64
def dump_event_files(event):
    """Write each file embedded in *event*['files'] to the current directory.

    File contents are base64-decoded as ascii. Returns the list of local
    file names written.
    """
    saved = []
    for entry in event['files']:
        local_name = os.path.basename(entry['path'])
        decoded = base64.b64decode(entry['content']).decode('ascii')
        util.write_file(local_name, decoded)
        saved.append(local_name)

    return saved
77
78
def event_name(event):
    """Return the event's 'name' field, or None for a falsy event."""
    return event.get('name') if event else None
83
84
def event_type(event):
    """Return the event's 'event_type' field, or None for a falsy event."""
    return event.get('event_type') if event else None
89
90
def event_parent(event):
    """Return the top-level stage of a name like 'stage/substage'."""
    if not event:
        return None
    return event_name(event).split("/")[0]
95
96
def event_timestamp(event):
    """Return the event's 'timestamp' field coerced to float."""
    stamp = event.get('timestamp')
    return float(stamp)
99
100
def event_datetime(event):
    """Return the event timestamp as a naive UTC datetime object."""
    stamp = event_timestamp(event)
    return datetime.datetime.utcfromtimestamp(stamp)
103
104
def delta_seconds(t1, t2):
    """Return the number of seconds, as a float, from t1 to t2."""
    elapsed = t2 - t1
    return elapsed.total_seconds()
107
108
def event_duration(start, finish):
    """Return the seconds between a start event and its paired finish."""
    begin = event_datetime(start)
    end = event_datetime(finish)
    return delta_seconds(begin, end)
111
112
def event_record(start_time, start, finish):
    """Build a display record from a start/finish event pair.

    The record is a copy of the finish event augmented with:
      delta   - duration of this event,
      elapsed - seconds from boot-record start to this event's start,
      indent  - tree-drawing prefix derived from the name's '/' depth.
    """
    record = dict(finish)
    depth = event_name(start).count('/')
    record['delta'] = event_duration(start, finish)
    record['elapsed'] = delta_seconds(start_time, event_datetime(start))
    record['indent'] = '|' + ' ' * (depth - 1) + '`->'

    return record
122
123
def total_time_record(total_time):
    """Render the summary line reporting a boot record's total time."""
    summary = 'Total Time: %3.5f seconds\n' % total_time
    return summary
126
127
def generate_records(events, blame_sort=False,
                     print_format="(%n) %d seconds in %I%D",
                     dump_files=False, log_datafiles=False):
    """Pair start/finish events into formatted per-boot records.

    :param events: list of event dicts (see module docstring for shape).
    :param blame_sort: unused; retained for interface compatibility.
    :param print_format: format string of %-tokens (see format_key).
    :param dump_files: unused; retained for interface compatibility.
    :param log_datafiles: unused; retained for interface compatibility.
    :return: list of boot records, each a list of formatted strings
        terminated by a total-time summary line.
    """
    sorted_events = sorted(events, key=lambda x: x['timestamp'])
    records = []
    start_time = None
    total_time = 0.0
    stage_start_time = {}
    stages_seen = []
    boot_records = []

    unprocessed = []
    # Fix: iterate the time-sorted list. The original indexed the unsorted
    # 'events' argument here, so the sort had no effect and start/finish
    # pairing broke whenever the input was out of order.
    for e in range(0, len(sorted_events)):
        event = sorted_events[e]
        try:
            next_evt = sorted_events[e + 1]
        except IndexError:
            next_evt = None

        if event_type(event) == 'start':
            if event.get('name') in stages_seen:
                # A repeated stage marks a new boot; flush current record.
                records.append(total_time_record(total_time))
                boot_records.append(records)
                records = []
                start_time = None
                total_time = 0.0

            if start_time is None:
                stages_seen = []
                start_time = event_datetime(event)
                stage_start_time[event_parent(event)] = start_time

            # see if we have a pair
            if event_name(event) == event_name(next_evt):
                if event_type(next_evt) == 'finish':
                    records.append(format_record(print_format,
                                                 event_record(start_time,
                                                              event,
                                                              next_evt)))
            else:
                # This is a parent event
                records.append("Starting stage: %s" % event.get('name'))
                unprocessed.append(event)
                stages_seen.append(event.get('name'))
                continue
        else:
            prev_evt = unprocessed.pop()
            if event_name(event) == event_name(prev_evt):
                record = event_record(start_time, prev_evt, event)
                records.append(format_record("Finished stage: "
                                             "(%n) %d seconds ",
                                             record) + "\n")
                total_time += record.get('delta')
            else:
                # not a match, put it back
                unprocessed.append(prev_evt)

    records.append(total_time_record(total_time))
    boot_records.append(records)
    return boot_records
189
190
def show_events(events, print_format):
    """Return per-boot formatted records for *events* (see generate_records)."""
    return generate_records(events, print_format=print_format)
193
194
def load_events(infile, rawdata=None):
    """Read event data and attempt to decode it as JSON.

    Returns (parsed, data): parsed is the decoded JSON value, or None if
    the text is not valid JSON; data is the raw text that was read.
    """
    data = rawdata.read() if rawdata else infile.read()
    try:
        parsed = json.loads(data)
    except ValueError:
        parsed = None
    return parsed, data
diff --git a/cloudinit/analyze/tests/test_dump.py b/cloudinit/analyze/tests/test_dump.py
0new file mode 100644208new file mode 100644
index 0000000..f4c4284
--- /dev/null
+++ b/cloudinit/analyze/tests/test_dump.py
@@ -0,0 +1,210 @@
1# This file is part of cloud-init. See LICENSE file for license information.
2
3from datetime import datetime
4from textwrap import dedent
5
6from cloudinit.analyze.dump import (
7 dump_events, parse_ci_logline, parse_timestamp)
8from cloudinit.util import subp, write_file
9from cloudinit.tests.helpers import CiTestCase
10
11
class TestParseTimestamp(CiTestCase):
    """Tests for dump.parse_timestamp across supported log timestamp formats.

    Each test cross-checks the parsed value against both a manual
    datetime.strptime conversion and the date(1) utility.
    """

    def test_parse_timestamp_handles_cloud_init_default_format(self):
        """Logs with cloud-init detailed formats will be properly parsed."""
        trusty_fmt = '%Y-%m-%d %H:%M:%S,%f'
        trusty_stamp = '2016-09-12 14:39:20,839'

        parsed = parse_timestamp(trusty_stamp)

        # convert ourselves
        dt = datetime.strptime(trusty_stamp, trusty_fmt)
        expected = float(dt.strftime('%s.%f'))

        # use date(1)
        out, _err = subp(['date', '+%s.%3N', '-d', trusty_stamp])
        timestamp = out.strip()
        date_ts = float(timestamp)

        self.assertEqual(expected, parsed)
        self.assertEqual(expected, date_ts)
        self.assertEqual(date_ts, parsed)

    def test_parse_timestamp_handles_syslog_adding_year(self):
        """Syslog timestamps lack a year. Add year and properly parse."""
        syslog_fmt = '%b %d %H:%M:%S %Y'
        syslog_stamp = 'Aug 08 15:12:51'

        # convert stamp ourselves by adding the missing year value
        year = datetime.now().year
        dt = datetime.strptime(syslog_stamp + " " + str(year), syslog_fmt)
        expected = float(dt.strftime('%s.%f'))
        parsed = parse_timestamp(syslog_stamp)

        # use date(1)
        out, _ = subp(['date', '+%s.%3N', '-d', syslog_stamp])
        timestamp = out.strip()
        date_ts = float(timestamp)

        self.assertEqual(expected, parsed)
        self.assertEqual(expected, date_ts)
        self.assertEqual(date_ts, parsed)

    def test_parse_timestamp_handles_journalctl_format_adding_year(self):
        """Journalctl precise timestamps lack a year. Add year and parse."""
        journal_fmt = '%b %d %H:%M:%S.%f %Y'
        journal_stamp = 'Aug 08 17:15:50.606811'

        # convert stamp ourselves by adding the missing year value
        year = datetime.now().year
        dt = datetime.strptime(journal_stamp + " " + str(year), journal_fmt)
        expected = float(dt.strftime('%s.%f'))
        parsed = parse_timestamp(journal_stamp)

        # use date(1)
        out, _ = subp(['date', '+%s.%6N', '-d', journal_stamp])
        timestamp = out.strip()
        date_ts = float(timestamp)

        self.assertEqual(expected, parsed)
        self.assertEqual(expected, date_ts)
        self.assertEqual(date_ts, parsed)

    def test_parse_unexpected_timestamp_format_with_date_command(self):
        """Dump sends unexpected timestamp formats to date(1) for processing."""
        new_fmt = '%H:%M %m/%d %Y'
        new_stamp = '17:15 08/08'

        # convert stamp ourselves by adding the missing year value
        year = datetime.now().year
        dt = datetime.strptime(new_stamp + " " + str(year), new_fmt)
        expected = float(dt.strftime('%s.%f'))
        parsed = parse_timestamp(new_stamp)

        # use date(1)
        out, _ = subp(['date', '+%s.%6N', '-d', new_stamp])
        timestamp = out.strip()
        date_ts = float(timestamp)

        self.assertEqual(expected, parsed)
        self.assertEqual(expected, date_ts)
        self.assertEqual(date_ts, parsed)
93
94
class TestParseCILogLine(CiTestCase):
    """Tests for dump.parse_ci_logline across the recognized line shapes."""

    def test_parse_logline_returns_none_without_separators(self):
        """When no separators are found, parse_ci_logline returns None."""
        expected_parse_ignores = [
            '', '-', 'adsf-asdf', '2017-05-22 18:02:01,088', 'CLOUDINIT']
        for parse_ignores in expected_parse_ignores:
            self.assertIsNone(parse_ci_logline(parse_ignores))

    def test_parse_logline_returns_event_for_cloud_init_logs(self):
        """parse_ci_logline returns an event parse from cloud-init format."""
        line = (
            "2017-08-08 20:05:07,147 - util.py[DEBUG]: Cloud-init v. 0.7.9"
            " running 'init-local' at Tue, 08 Aug 2017 20:05:07 +0000. Up"
            " 6.26 seconds.")
        dt = datetime.strptime(
            '2017-08-08 20:05:07,147', '%Y-%m-%d %H:%M:%S,%f')
        timestamp = float(dt.strftime('%s.%f'))
        expected = {
            'description': 'starting search for local datasources',
            'event_type': 'start',
            'name': 'init-local',
            'origin': 'cloudinit',
            'timestamp': timestamp}
        self.assertEqual(expected, parse_ci_logline(line))

    def test_parse_logline_returns_event_for_journalctl_logs(self):
        """parse_ci_logline returns an event parse from journalctl format."""
        line = ("Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT]"
                " util.py[DEBUG]: Cloud-init v. 0.7.8 running 'init-local' at"
                " Thu, 03 Nov 2016 06:51:06 +0000. Up 1.0 seconds.")
        # journalctl stamps carry no year; the parser assumes the current one.
        year = datetime.now().year
        dt = datetime.strptime(
            'Nov 03 06:51:06.074410 %d' % year, '%b %d %H:%M:%S.%f %Y')
        timestamp = float(dt.strftime('%s.%f'))
        expected = {
            'description': 'starting search for local datasources',
            'event_type': 'start',
            'name': 'init-local',
            'origin': 'cloudinit',
            'timestamp': timestamp}
        self.assertEqual(expected, parse_ci_logline(line))

    def test_parse_logline_returns_event_for_finish_events(self):
        """parse_ci_logline returns a finish event for a parsed log line."""
        line = ('2016-08-30 21:53:25.972325+00:00 y1 [CLOUDINIT]'
                ' handlers.py[DEBUG]: finish: modules-final: SUCCESS: running'
                ' modules for final')
        expected = {
            'description': 'running modules for final',
            'event_type': 'finish',
            'name': 'modules-final',
            'origin': 'cloudinit',
            'result': 'SUCCESS',
            'timestamp': 1472594005.972}
        self.assertEqual(expected, parse_ci_logline(line))
152
# Two-line log sample: a journalctl-format 'start' banner followed by a
# handlers.py 'finish' line; exercises both parse paths in dump_events.
SAMPLE_LOGS = dedent("""\
Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT] util.py[DEBUG]:\
 Cloud-init v. 0.7.8 running 'init-local' at Thu, 03 Nov 2016\
 06:51:06 +0000. Up 1.0 seconds.
2016-08-30 21:53:25.972325+00:00 y1 [CLOUDINIT] handlers.py[DEBUG]: finish:\
 modules-final: SUCCESS: running modules for final
""")
160
161
class TestDumpEvents(CiTestCase):
    """Tests for dump.dump_events from raw strings and from open files."""

    maxDiff = None

    def test_dump_events_with_rawdata(self):
        """Rawdata is split and parsed into a tuple of events and data"""
        events, data = dump_events(rawdata=SAMPLE_LOGS)
        expected_data = SAMPLE_LOGS.splitlines()
        # the journalctl stamp has no year; the parser assumes the current one
        year = datetime.now().year
        dt1 = datetime.strptime(
            'Nov 03 06:51:06.074410 %d' % year, '%b %d %H:%M:%S.%f %Y')
        timestamp1 = float(dt1.strftime('%s.%f'))
        expected_events = [{
            'description': 'starting search for local datasources',
            'event_type': 'start',
            'name': 'init-local',
            'origin': 'cloudinit',
            'timestamp': timestamp1}, {
            'description': 'running modules for final',
            'event_type': 'finish',
            'name': 'modules-final',
            'origin': 'cloudinit',
            'result': 'SUCCESS',
            'timestamp': 1472594005.972}]
        self.assertEqual(expected_events, events)
        self.assertEqual(expected_data, data)

    def test_dump_events_with_cisource(self):
        """Cisource file is read and parsed into a tuple of events and data."""
        tmpfile = self.tmp_path('logfile')
        write_file(tmpfile, SAMPLE_LOGS)
        events, data = dump_events(cisource=open(tmpfile))
        year = datetime.now().year
        dt1 = datetime.strptime(
            'Nov 03 06:51:06.074410 %d' % year, '%b %d %H:%M:%S.%f %Y')
        timestamp1 = float(dt1.strftime('%s.%f'))
        expected_events = [{
            'description': 'starting search for local datasources',
            'event_type': 'start',
            'name': 'init-local',
            'origin': 'cloudinit',
            'timestamp': timestamp1}, {
            'description': 'running modules for final',
            'event_type': 'finish',
            'name': 'modules-final',
            'origin': 'cloudinit',
            'result': 'SUCCESS',
            'timestamp': 1472594005.972}]
        self.assertEqual(expected_events, events)
        # readlines() keeps newlines; strip before comparing to splitlines()
        self.assertEqual(SAMPLE_LOGS.splitlines(), [d.strip() for d in data])
diff --git a/cloudinit/apport.py b/cloudinit/apport.py
0new file mode 100644211new file mode 100644
index 0000000..221f341
--- /dev/null
+++ b/cloudinit/apport.py
@@ -0,0 +1,105 @@
1# Copyright (C) 2017 Canonical Ltd.
2#
3# This file is part of cloud-init. See LICENSE file for license information.
4
5'''Cloud-init apport interface'''
6
7try:
8 from apport.hookutils import (
9 attach_file, attach_root_command_outputs, root_command_output)
10 has_apport = True
11except ImportError:
12 has_apport = False
13
14
15KNOWN_CLOUD_NAMES = [
16 'Amazon - Ec2', 'AliYun', 'AltCloud', 'Azure', 'Bigstep', 'CloudSigma',
17 'CloudStack', 'DigitalOcean', 'GCE - Google Compute Engine', 'MAAS',
18 'NoCloud', 'OpenNebula', 'OpenStack', 'OVF', 'Scaleway', 'SmartOS',
19 'VMware', 'Other']
20
21# Potentially clear text collected logs
22CLOUDINIT_LOG = '/var/log/cloud-init.log'
23CLOUDINIT_OUTPUT_LOG = '/var/log/cloud-init-output.log'
24USER_DATA_FILE = '/var/lib/cloud/instance/user-data.txt' # Optional
25
26
27def attach_cloud_init_logs(report, ui=None):
28 '''Attach cloud-init logs and tarfile from 'cloud-init collect-logs'.'''
29 attach_root_command_outputs(report, {
30 'cloud-init-log-warnings':
31 'egrep -i "warn|error" /var/log/cloud-init.log',
32 'cloud-init-output.log.txt': 'cat /var/log/cloud-init-output.log'})
33 root_command_output(
34 ['cloud-init', 'collect-logs', '-t', '/tmp/cloud-init-logs.tgz'])
35 attach_file(report, '/tmp/cloud-init-logs.tgz', 'logs.tgz')
36
37
38def attach_hwinfo(report, ui=None):
39 '''Optionally attach hardware info from lshw.'''
40 prompt = (
41 'Your device details (lshw) may be useful to developers when'
42 ' addressing this bug, but gathering it requires admin privileges.'
43 ' Would you like to include this info?')
44 if ui and ui.yesno(prompt):
45 attach_root_command_outputs(report, {'lshw.txt': 'lshw'})
46
47
48def attach_cloud_info(report, ui=None):
49 '''Prompt for cloud details if available.'''
50 if ui:
51 prompt = 'Is this machine running in a cloud environment?'
52 response = ui.yesno(prompt)
53 if response is None:
54 raise StopIteration # User cancelled
55 if response:
56 prompt = ('Please select the cloud vendor or environment in which'
57 ' this instance is running')
58 response = ui.choice(prompt, KNOWN_CLOUD_NAMES)
59 if response:
60 report['CloudName'] = KNOWN_CLOUD_NAMES[response[0]]
61 else:
62 report['CloudName'] = 'None'
63
64
65def attach_user_data(report, ui=None):
66 '''Optionally provide user-data if desired.'''
67 if ui:
68 prompt = (
69 'Your user-data or cloud-config file can optionally be provided'
70 ' from {0} and could be useful to developers when addressing this'
71 ' bug. Do you wish to attach user-data to this bug?'.format(
72 USER_DATA_FILE))
73 response = ui.yesno(prompt)
74 if response is None:
75 raise StopIteration # User cancelled
76 if response:
77 attach_file(report, USER_DATA_FILE, 'user_data.txt')
78
79
80def add_bug_tags(report):
81 '''Add any appropriate tags to the bug.'''
82 if 'JournalErrors' in report.keys():
83 errors = report['JournalErrors']
84 if 'Breaking ordering cycle' in errors:
85 report['Tags'] = 'systemd-ordering'
86
87
88def add_info(report, ui):
89 '''This is an entry point to run cloud-init's apport functionality.
90
91 Distros which want apport support will have a cloud-init package-hook at
92 /usr/share/apport/package-hooks/cloud-init.py which defines an add_info
93 function and returns the result of cloudinit.apport.add_info(report, ui).
94 '''
95 if not has_apport:
96 raise RuntimeError(
97 'No apport imports discovered. Apport functionality disabled')
98 attach_cloud_init_logs(report, ui)
99 attach_hwinfo(report, ui)
100 attach_cloud_info(report, ui)
101 attach_user_data(report, ui)
102 add_bug_tags(report)
103 return True
104
105# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/devel/__init__.py b/cloudinit/cmd/devel/__init__.py
0new file mode 100644106new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/cloudinit/cmd/devel/__init__.py
diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py
1new file mode 100644107new file mode 100644
index 0000000..35ca478
--- /dev/null
+++ b/cloudinit/cmd/devel/logs.py
@@ -0,0 +1,101 @@
1# Copyright (C) 2017 Canonical Ltd.
2#
3# This file is part of cloud-init. See LICENSE file for license information.
4
5"""Define 'collect-logs' utility and handler to include in cloud-init cmd."""
6
7import argparse
8from cloudinit.util import (
9 ProcessExecutionError, chdir, copy, ensure_dir, subp, write_file)
10from cloudinit.temp_utils import tempdir
11from datetime import datetime
12import os
13import shutil
14
15
16CLOUDINIT_LOGS = ['/var/log/cloud-init.log', '/var/log/cloud-init-output.log']
17CLOUDINIT_RUN_DIR = '/run/cloud-init'
18USER_DATA_FILE = '/var/lib/cloud/instance/user-data.txt' # Optional
19
20
21def get_parser(parser=None):
22 """Build or extend and arg parser for collect-logs utility.
23
24 @param parser: Optional existing ArgumentParser instance representing the
25 collect-logs subcommand which will be extended to support the args of
26 this utility.
27
28 @returns: ArgumentParser with proper argument configuration.
29 """
30 if not parser:
31 parser = argparse.ArgumentParser(
32 prog='collect-logs',
33 description='Collect and tar all cloud-init debug info')
34 parser.add_argument(
35 "--tarfile", '-t', default='cloud-init.tar.gz',
36 help=('The tarfile to create containing all collected logs.'
37 ' Default: cloud-init.tar.gz'))
38 parser.add_argument(
39 "--include-userdata", '-u', default=False, action='store_true',
40 dest='userdata', help=(
41 'Optionally include user-data from {0} which could contain'
42 ' sensitive information.'.format(USER_DATA_FILE)))
43 return parser
44
45
46def _write_command_output_to_file(cmd, filename):
47 """Helper which runs a command and writes output or error to filename."""
48 try:
49 out, _ = subp(cmd)
50 except ProcessExecutionError as e:
51 write_file(filename, str(e))
52 else:
53 write_file(filename, out)
54
55
56def collect_logs(tarfile, include_userdata):
57 """Collect all cloud-init logs and tar them up into the provided tarfile.
58
59 @param tarfile: The path of the tar-gzipped file to create.
60 @param include_userdata: Boolean, true means include user-data.
61 """
62 tarfile = os.path.abspath(tarfile)
63 date = datetime.utcnow().date().strftime('%Y-%m-%d')
64 log_dir = 'cloud-init-logs-{0}'.format(date)
65 with tempdir(dir='/tmp') as tmp_dir:
66 log_dir = os.path.join(tmp_dir, log_dir)
67 _write_command_output_to_file(
68 ['dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'],
69 os.path.join(log_dir, 'version'))
70 _write_command_output_to_file(
71 ['dmesg'], os.path.join(log_dir, 'dmesg.txt'))
72 _write_command_output_to_file(
73 ['journalctl', '-o', 'short-precise'],
74 os.path.join(log_dir, 'journal.txt'))
75 for log in CLOUDINIT_LOGS:
76 copy(log, log_dir)
77 if include_userdata:
78 copy(USER_DATA_FILE, log_dir)
79 run_dir = os.path.join(log_dir, 'run')
80 ensure_dir(run_dir)
81 shutil.copytree(CLOUDINIT_RUN_DIR, os.path.join(run_dir, 'cloud-init'))
82 with chdir(tmp_dir):
83 subp(['tar', 'czvf', tarfile, log_dir.replace(tmp_dir + '/', '')])
84
85
86def handle_collect_logs_args(name, args):
87 """Handle calls to 'cloud-init collect-logs' as a subcommand."""
88 collect_logs(args.tarfile, args.userdata)
89
90
91def main():
92 """Tool to collect and tar all cloud-init related logs."""
93 parser = get_parser()
94 handle_collect_logs_args('collect-logs', parser.parse_args())
95 return 0
96
97
98if __name__ == '__main__':
99 main()
100
101# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/devel/parser.py b/cloudinit/cmd/devel/parser.py
0new file mode 100644102new file mode 100644
index 0000000..acacc4e
--- /dev/null
+++ b/cloudinit/cmd/devel/parser.py
@@ -0,0 +1,26 @@
1# Copyright (C) 2017 Canonical Ltd.
2#
3# This file is part of cloud-init. See LICENSE file for license information.
4
5"""Define 'devel' subcommand argument parsers to include in cloud-init cmd."""
6
7import argparse
8from cloudinit.config.schema import (
9 get_parser as schema_parser, handle_schema_args)
10
11
12def get_parser(parser=None):
13 if not parser:
14 parser = argparse.ArgumentParser(
15 prog='cloudinit-devel',
16 description='Run development cloud-init tools')
17 subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand')
18 subparsers.required = True
19
20 parser_schema = subparsers.add_parser(
21 'schema', help='Validate cloud-config files or document schema')
22 # Construct schema subcommand parser
23 schema_parser(parser_schema)
24 parser_schema.set_defaults(action=('schema', handle_schema_args))
25
26 return parser
diff --git a/cloudinit/cmd/devel/tests/__init__.py b/cloudinit/cmd/devel/tests/__init__.py
0new file mode 10064427new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/cloudinit/cmd/devel/tests/__init__.py
diff --git a/cloudinit/cmd/devel/tests/test_logs.py b/cloudinit/cmd/devel/tests/test_logs.py
1new file mode 10064428new file mode 100644
index 0000000..dc4947c
--- /dev/null
+++ b/cloudinit/cmd/devel/tests/test_logs.py
@@ -0,0 +1,120 @@
1# This file is part of cloud-init. See LICENSE file for license information.
2
3from cloudinit.cmd.devel import logs
4from cloudinit.util import ensure_dir, load_file, subp, write_file
5from cloudinit.tests.helpers import FilesystemMockingTestCase, wrap_and_call
6from datetime import datetime
7import os
8
9
10class TestCollectLogs(FilesystemMockingTestCase):
11
12 def setUp(self):
13 super(TestCollectLogs, self).setUp()
14 self.new_root = self.tmp_dir()
15 self.run_dir = self.tmp_path('run', self.new_root)
16
17 def test_collect_logs_creates_tarfile(self):
18 """collect-logs creates a tarfile with all related cloud-init info."""
19 log1 = self.tmp_path('cloud-init.log', self.new_root)
20 write_file(log1, 'cloud-init-log')
21 log2 = self.tmp_path('cloud-init-output.log', self.new_root)
22 write_file(log2, 'cloud-init-output-log')
23 ensure_dir(self.run_dir)
24 write_file(self.tmp_path('results.json', self.run_dir), 'results')
25 output_tarfile = self.tmp_path('logs.tgz')
26
27 date = datetime.utcnow().date().strftime('%Y-%m-%d')
28 date_logdir = 'cloud-init-logs-{0}'.format(date)
29
30 expected_subp = {
31 ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'):
32 '0.7fake\n',
33 ('dmesg',): 'dmesg-out\n',
34 ('journalctl', '-o', 'short-precise'): 'journal-out\n',
35 ('tar', 'czvf', output_tarfile, date_logdir): ''
36 }
37
38 def fake_subp(cmd):
39 cmd_tuple = tuple(cmd)
40 if cmd_tuple not in expected_subp:
41 raise AssertionError(
42 'Unexpected command provided to subp: {0}'.format(cmd))
43 if cmd == ['tar', 'czvf', output_tarfile, date_logdir]:
44 subp(cmd) # Pass through tar cmd so we can check output
45 return expected_subp[cmd_tuple], ''
46
47 wrap_and_call(
48 'cloudinit.cmd.devel.logs',
49 {'subp': {'side_effect': fake_subp},
50 'CLOUDINIT_LOGS': {'new': [log1, log2]},
51 'CLOUDINIT_RUN_DIR': {'new': self.run_dir}},
52 logs.collect_logs, output_tarfile, include_userdata=False)
53 # unpack the tarfile and check file contents
54 subp(['tar', 'zxvf', output_tarfile, '-C', self.new_root])
55 out_logdir = self.tmp_path(date_logdir, self.new_root)
56 self.assertEqual(
57 '0.7fake\n',
58 load_file(os.path.join(out_logdir, 'version')))
59 self.assertEqual(
60 'cloud-init-log',
61 load_file(os.path.join(out_logdir, 'cloud-init.log')))
62 self.assertEqual(
63 'cloud-init-output-log',
64 load_file(os.path.join(out_logdir, 'cloud-init-output.log')))
65 self.assertEqual(
66 'dmesg-out\n',
67 load_file(os.path.join(out_logdir, 'dmesg.txt')))
68 self.assertEqual(
69 'journal-out\n',
70 load_file(os.path.join(out_logdir, 'journal.txt')))
71 self.assertEqual(
72 'results',
73 load_file(
74 os.path.join(out_logdir, 'run', 'cloud-init', 'results.json')))
75
76 def test_collect_logs_includes_optional_userdata(self):
77 """collect-logs include userdata when --include-userdata is set."""
78 log1 = self.tmp_path('cloud-init.log', self.new_root)
79 write_file(log1, 'cloud-init-log')
80 log2 = self.tmp_path('cloud-init-output.log', self.new_root)
81 write_file(log2, 'cloud-init-output-log')
82 userdata = self.tmp_path('user-data.txt', self.new_root)
83 write_file(userdata, 'user-data')
84 ensure_dir(self.run_dir)
85 write_file(self.tmp_path('results.json', self.run_dir), 'results')
86 output_tarfile = self.tmp_path('logs.tgz')
87
88 date = datetime.utcnow().date().strftime('%Y-%m-%d')
89 date_logdir = 'cloud-init-logs-{0}'.format(date)
90
91 expected_subp = {
92 ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'):
93 '0.7fake',
94 ('dmesg',): 'dmesg-out\n',
95 ('journalctl', '-o', 'short-precise'): 'journal-out\n',
96 ('tar', 'czvf', output_tarfile, date_logdir): ''
97 }
98
99 def fake_subp(cmd):
100 cmd_tuple = tuple(cmd)
101 if cmd_tuple not in expected_subp:
102 raise AssertionError(
103 'Unexpected command provided to subp: {0}'.format(cmd))
104 if cmd == ['tar', 'czvf', output_tarfile, date_logdir]:
105 subp(cmd) # Pass through tar cmd so we can check output
106 return expected_subp[cmd_tuple], ''
107
108 wrap_and_call(
109 'cloudinit.cmd.devel.logs',
110 {'subp': {'side_effect': fake_subp},
111 'CLOUDINIT_LOGS': {'new': [log1, log2]},
112 'CLOUDINIT_RUN_DIR': {'new': self.run_dir},
113 'USER_DATA_FILE': {'new': userdata}},
114 logs.collect_logs, output_tarfile, include_userdata=True)
115 # unpack the tarfile and check file contents
116 subp(['tar', 'zxvf', output_tarfile, '-C', self.new_root])
117 out_logdir = self.tmp_path(date_logdir, self.new_root)
118 self.assertEqual(
119 'user-data',
120 load_file(os.path.join(out_logdir, 'user-data.txt')))
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index 139e03b..6fb9d9e 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -50,13 +50,6 @@ WELCOME_MSG_TPL = ("Cloud-init v. {version} running '{action}' at "
50# Module section template50# Module section template
51MOD_SECTION_TPL = "cloud_%s_modules"51MOD_SECTION_TPL = "cloud_%s_modules"
5252
53# Things u can query on
54QUERY_DATA_TYPES = [
55 'data',
56 'data_raw',
57 'instance_id',
58]
59
60# Frequency shortname to full name53# Frequency shortname to full name
61# (so users don't have to remember the full name...)54# (so users don't have to remember the full name...)
62FREQ_SHORT_NAMES = {55FREQ_SHORT_NAMES = {
@@ -510,11 +503,6 @@ def main_modules(action_name, args):
510 return run_module_section(mods, name, name)503 return run_module_section(mods, name, name)
511504
512505
513def main_query(name, _args):
514 raise NotImplementedError(("Action '%s' is not"
515 " currently implemented") % (name))
516
517
518def main_single(name, args):506def main_single(name, args):
519 # Cloud-init single stage is broken up into the following sub-stages507 # Cloud-init single stage is broken up into the following sub-stages
520 # 1. Ensure that the init object fetches its config without errors508 # 1. Ensure that the init object fetches its config without errors
@@ -688,11 +676,10 @@ def main_features(name, args):
688676
689677
690def main(sysv_args=None):678def main(sysv_args=None):
691 if sysv_args is not None:679 if not sysv_args:
692 parser = argparse.ArgumentParser(prog=sysv_args[0])680 sysv_args = sys.argv
693 sysv_args = sysv_args[1:]681 parser = argparse.ArgumentParser(prog=sysv_args[0])
694 else:682 sysv_args = sysv_args[1:]
695 parser = argparse.ArgumentParser()
696683
697 # Top level args684 # Top level args
698 parser.add_argument('--version', '-v', action='version',685 parser.add_argument('--version', '-v', action='version',
@@ -713,7 +700,8 @@ def main(sysv_args=None):
713 default=False)700 default=False)
714701
715 parser.set_defaults(reporter=None)702 parser.set_defaults(reporter=None)
716 subparsers = parser.add_subparsers()703 subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand')
704 subparsers.required = True
717705
718 # Each action and its sub-options (if any)706 # Each action and its sub-options (if any)
719 parser_init = subparsers.add_parser('init',707 parser_init = subparsers.add_parser('init',
@@ -737,17 +725,6 @@ def main(sysv_args=None):
737 choices=('init', 'config', 'final'))725 choices=('init', 'config', 'final'))
738 parser_mod.set_defaults(action=('modules', main_modules))726 parser_mod.set_defaults(action=('modules', main_modules))
739727
740 # These settings are used when you want to query information
741 # stored in the cloud-init data objects/directories/files
742 parser_query = subparsers.add_parser('query',
743 help=('query information stored '
744 'in cloud-init'))
745 parser_query.add_argument("--name", '-n', action="store",
746 help="item name to query on",
747 required=True,
748 choices=QUERY_DATA_TYPES)
749 parser_query.set_defaults(action=('query', main_query))
750
751 # This subcommand allows you to run a single module728 # This subcommand allows you to run a single module
752 parser_single = subparsers.add_parser('single',729 parser_single = subparsers.add_parser('single',
753 help=('run a single module '))730 help=('run a single module '))
@@ -781,15 +758,39 @@ def main(sysv_args=None):
781 help=('list defined features'))758 help=('list defined features'))
782 parser_features.set_defaults(action=('features', main_features))759 parser_features.set_defaults(action=('features', main_features))
783760
761 parser_analyze = subparsers.add_parser(
762 'analyze', help='Devel tool: Analyze cloud-init logs and data')
763
764 parser_devel = subparsers.add_parser(
765 'devel', help='Run development tools')
766
767 parser_collect_logs = subparsers.add_parser(
768 'collect-logs', help='Collect and tar all cloud-init debug info')
769
770 if sysv_args:
771 # Only load subparsers if subcommand is specified to avoid load cost
772 if sysv_args[0] == 'analyze':
773 from cloudinit.analyze.__main__ import get_parser as analyze_parser
774 # Construct analyze subcommand parser
775 analyze_parser(parser_analyze)
776 elif sysv_args[0] == 'devel':
777 from cloudinit.cmd.devel.parser import get_parser as devel_parser
778 # Construct devel subcommand parser
779 devel_parser(parser_devel)
780 elif sysv_args[0] == 'collect-logs':
781 from cloudinit.cmd.devel.logs import (
782 get_parser as logs_parser, handle_collect_logs_args)
783 logs_parser(parser_collect_logs)
784 parser_collect_logs.set_defaults(
785 action=('collect-logs', handle_collect_logs_args))
786
784 args = parser.parse_args(args=sysv_args)787 args = parser.parse_args(args=sysv_args)
785788
786 try:789 # Subparsers.required = True and each subparser sets action=(name, functor)
787 (name, functor) = args.action790 (name, functor) = args.action
788 except AttributeError:
789 parser.error('too few arguments')
790791
791 # Setup basic logging to start (until reinitialized)792 # Setup basic logging to start (until reinitialized)
792 # iff in debug mode...793 # iff in debug mode.
793 if args.debug:794 if args.debug:
794 logging.setupBasicLogging()795 logging.setupBasicLogging()
795796
diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
index 604f93b..233da1e 100644
--- a/cloudinit/config/cc_bootcmd.py
+++ b/cloudinit/config/cc_bootcmd.py
@@ -3,44 +3,73 @@
3#3#
4# Author: Scott Moser <scott.moser@canonical.com>4# Author: Scott Moser <scott.moser@canonical.com>
5# Author: Juerg Haefliger <juerg.haefliger@hp.com>5# Author: Juerg Haefliger <juerg.haefliger@hp.com>
6# Author: Chad Smith <chad.smith@canonical.com>
6#7#
7# This file is part of cloud-init. See LICENSE file for license information.8# This file is part of cloud-init. See LICENSE file for license information.
89
9"""10"""Bootcmd: run arbitrary commands early in the boot process."""
10Bootcmd
11-------
12**Summary:** run commands early in boot process
13
14This module runs arbitrary commands very early in the boot process,
15only slightly after a boothook would run. This is very similar to a
16boothook, but more user friendly. The environment variable ``INSTANCE_ID``
17will be set to the current instance id for all run commands. Commands can be
18specified either as lists or strings. For invocation details, see ``runcmd``.
19
20.. note::
21 bootcmd should only be used for things that could not be done later in the
22 boot process.
23
24**Internal name:** ``cc_bootcmd``
25
26**Module frequency:** per always
27
28**Supported distros:** all
29
30**Config keys**::
31
32 bootcmd:
33 - echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts
34 - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ]
35"""
3611
37import os12import os
13from textwrap import dedent
3814
15from cloudinit.config.schema import (
16 get_schema_doc, validate_cloudconfig_schema)
39from cloudinit.settings import PER_ALWAYS17from cloudinit.settings import PER_ALWAYS
18from cloudinit import temp_utils
40from cloudinit import util19from cloudinit import util
4120
42frequency = PER_ALWAYS21frequency = PER_ALWAYS
4322
23# The schema definition for each cloud-config module is a strict contract for
24# describing supported configuration parameters for each cloud-config section.
25# It allows cloud-config to validate and alert users to invalid or ignored
26# configuration options before actually attempting to deploy with said
27# configuration.
28
29distros = ['all']
30
31schema = {
32 'id': 'cc_bootcmd',
33 'name': 'Bootcmd',
34 'title': 'Run arbitrary commands early in the boot process',
35 'description': dedent("""\
36 This module runs arbitrary commands very early in the boot process,
37 only slightly after a boothook would run. This is very similar to a
38 boothook, but more user friendly. The environment variable
39 ``INSTANCE_ID`` will be set to the current instance id for all run
40 commands. Commands can be specified either as lists or strings. For
41 invocation details, see ``runcmd``.
42
43 .. note::
44 bootcmd should only be used for things that could not be done later
45 in the boot process."""),
46 'distros': distros,
47 'examples': [dedent("""\
48 bootcmd:
49 - echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts
50 - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ]
51 """)],
52 'frequency': PER_ALWAYS,
53 'type': 'object',
54 'properties': {
55 'bootcmd': {
56 'type': 'array',
57 'items': {
58 'oneOf': [
59 {'type': 'array', 'items': {'type': 'string'}},
60 {'type': 'string'}]
61 },
62 'additionalItems': False, # Reject items of non-string non-list
63 'additionalProperties': False,
64 'minItems': 1,
65 'required': [],
66 'uniqueItems': True
67 }
68 }
69}
70
71__doc__ = get_schema_doc(schema) # Supplement python help()
72
4473
45def handle(name, cfg, cloud, log, _args):74def handle(name, cfg, cloud, log, _args):
4675
@@ -49,13 +78,14 @@ def handle(name, cfg, cloud, log, _args):
49 " no 'bootcmd' key in configuration"), name)78 " no 'bootcmd' key in configuration"), name)
50 return79 return
5180
52 with util.ExtendedTemporaryFile(suffix=".sh") as tmpf:81 validate_cloudconfig_schema(cfg, schema)
82 with temp_utils.ExtendedTemporaryFile(suffix=".sh") as tmpf:
53 try:83 try:
54 content = util.shellify(cfg["bootcmd"])84 content = util.shellify(cfg["bootcmd"])
55 tmpf.write(util.encode_text(content))85 tmpf.write(util.encode_text(content))
56 tmpf.flush()86 tmpf.flush()
57 except Exception:87 except Exception as e:
58 util.logexc(log, "Failed to shellify bootcmd")88 util.logexc(log, "Failed to shellify bootcmd: %s", str(e))
59 raise89 raise
6090
61 try:91 try:
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
index 02c70b1..46abedd 100644
--- a/cloudinit/config/cc_chef.py
+++ b/cloudinit/config/cc_chef.py
@@ -58,6 +58,9 @@ file).
58 log_level:58 log_level:
59 log_location:59 log_location:
60 node_name:60 node_name:
61 omnibus_url:
62 omnibus_url_retries:
63 omnibus_version:
61 pid_file:64 pid_file:
62 server_url:65 server_url:
63 show_time:66 show_time:
@@ -279,6 +282,31 @@ def run_chef(chef_cfg, log):
279 util.subp(cmd, capture=False)282 util.subp(cmd, capture=False)
280283
281284
285def install_chef_from_omnibus(url=None, retries=None, omnibus_version=None):
286 """Install an omnibus unified package from url.
287
288 @param url: URL where blob of chef content may be downloaded. Defaults to
289 OMNIBUS_URL.
290 @param retries: Number of retries to perform when attempting to read url.
291 Defaults to OMNIBUS_URL_RETRIES
292 @param omnibus_version: Optional version string to require for omnibus
293 install.
294 """
295 if url is None:
296 url = OMNIBUS_URL
297 if retries is None:
298 retries = OMNIBUS_URL_RETRIES
299
300 if omnibus_version is None:
301 args = []
302 else:
303 args = ['-v', omnibus_version]
304 content = url_helper.readurl(url=url, retries=retries).contents
305 return util.subp_blob_in_tempfile(
306 blob=content, args=args,
307 basename='chef-omnibus-install', capture=False)
308
309
282def install_chef(cloud, chef_cfg, log):310def install_chef(cloud, chef_cfg, log):
283 # If chef is not installed, we install chef based on 'install_type'311 # If chef is not installed, we install chef based on 'install_type'
284 install_type = util.get_cfg_option_str(chef_cfg, 'install_type',312 install_type = util.get_cfg_option_str(chef_cfg, 'install_type',
@@ -297,17 +325,11 @@ def install_chef(cloud, chef_cfg, log):
297 # This will install and run the chef-client from packages325 # This will install and run the chef-client from packages
298 cloud.distro.install_packages(('chef',))326 cloud.distro.install_packages(('chef',))
299 elif install_type == 'omnibus':327 elif install_type == 'omnibus':
300 # This will install as a omnibus unified package328 omnibus_version = util.get_cfg_option_str(chef_cfg, "omnibus_version")
301 url = util.get_cfg_option_str(chef_cfg, "omnibus_url", OMNIBUS_URL)329 install_chef_from_omnibus(
302 retries = max(0, util.get_cfg_option_int(chef_cfg,330 url=util.get_cfg_option_str(chef_cfg, "omnibus_url"),
303 "omnibus_url_retries",331 retries=util.get_cfg_option_int(chef_cfg, "omnibus_url_retries"),
304 default=OMNIBUS_URL_RETRIES))332 omnibus_version=omnibus_version)
305 content = url_helper.readurl(url=url, retries=retries).contents
306 with util.tempdir() as tmpd:
307 # Use tmpdir over tmpfile to avoid 'text file busy' on execute
308 tmpf = "%s/chef-omnibus-install" % tmpd
309 util.write_file(tmpf, content, mode=0o700)
310 util.subp([tmpf], capture=False)
311 else:333 else:
312 log.warn("Unknown chef install type '%s'", install_type)334 log.warn("Unknown chef install type '%s'", install_type)
313 run = False335 run = False
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
index 86b7138..8f9f1ab 100644
--- a/cloudinit/config/cc_landscape.py
+++ b/cloudinit/config/cc_landscape.py
@@ -57,7 +57,7 @@ The following default client config is provided, but can be overridden::
5757
58import os58import os
5959
60from six import StringIO60from six import BytesIO
6161
62from configobj import ConfigObj62from configobj import ConfigObj
6363
@@ -109,7 +109,7 @@ def handle(_name, cfg, cloud, log, _args):
109 ls_cloudcfg,109 ls_cloudcfg,
110 ]110 ]
111 merged = merge_together(merge_data)111 merged = merge_together(merge_data)
112 contents = StringIO()112 contents = BytesIO()
113 merged.write(contents)113 merged.write(contents)
114114
115 util.ensure_dir(os.path.dirname(LSC_CLIENT_CFG_FILE))115 util.ensure_dir(os.path.dirname(LSC_CLIENT_CFG_FILE))
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index 31ed64e..15ae1ec 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -4,39 +4,10 @@
4#4#
5# This file is part of cloud-init. See LICENSE file for license information.5# This file is part of cloud-init. See LICENSE file for license information.
66
7"""7"""NTP: enable and configure ntp"""
8NTP
9---
10**Summary:** enable and configure ntp
11
12Handle ntp configuration. If ntp is not installed on the system and ntp
13configuration is specified, ntp will be installed. If there is a default ntp
14config file in the image or one is present in the distro's ntp package, it will
15be copied to ``/etc/ntp.conf.dist`` before any changes are made. A list of ntp
16pools and ntp servers can be provided under the ``ntp`` config key. If no ntp
17servers or pools are provided, 4 pools will be used in the format
18``{0-3}.{distro}.pool.ntp.org``.
19
20**Internal name:** ``cc_ntp``
21
22**Module frequency:** per instance
23
24**Supported distros:** centos, debian, fedora, opensuse, ubuntu
25
26**Config keys**::
27
28 ntp:
29 pools:
30 - 0.company.pool.ntp.org
31 - 1.company.pool.ntp.org
32 - ntp.myorg.org
33 servers:
34 - my.ntp.server.local
35 - ntp.ubuntu.com
36 - 192.168.23.2
37"""
388
39from cloudinit.config.schema import validate_cloudconfig_schema9from cloudinit.config.schema import (
10 get_schema_doc, validate_cloudconfig_schema)
40from cloudinit import log as logging11from cloudinit import log as logging
41from cloudinit.settings import PER_INSTANCE12from cloudinit.settings import PER_INSTANCE
42from cloudinit import templater13from cloudinit import templater
@@ -50,6 +21,7 @@ LOG = logging.getLogger(__name__)
5021
51frequency = PER_INSTANCE22frequency = PER_INSTANCE
52NTP_CONF = '/etc/ntp.conf'23NTP_CONF = '/etc/ntp.conf'
24TIMESYNCD_CONF = '/etc/systemd/timesyncd.conf.d/cloud-init.conf'
53NR_POOL_SERVERS = 425NR_POOL_SERVERS = 4
54distros = ['centos', 'debian', 'fedora', 'opensuse', 'ubuntu']26distros = ['centos', 'debian', 'fedora', 'opensuse', 'ubuntu']
5527
@@ -75,10 +47,13 @@ schema = {
75 ``{0-3}.{distro}.pool.ntp.org``."""),47 ``{0-3}.{distro}.pool.ntp.org``."""),
76 'distros': distros,48 'distros': distros,
77 'examples': [49 'examples': [
78 {'ntp': {'pools': ['0.company.pool.ntp.org', '1.company.pool.ntp.org',50 dedent("""\
79 'ntp.myorg.org'],51 ntp:
80 'servers': ['my.ntp.server.local', 'ntp.ubuntu.com',52 pools: [0.int.pool.ntp.org, 1.int.pool.ntp.org, ntp.myorg.org]
81 '192.168.23.2']}}],53 servers:
54 - ntp.server.local
55 - ntp.ubuntu.com
56 - 192.168.23.2""")],
82 'frequency': PER_INSTANCE,57 'frequency': PER_INSTANCE,
83 'type': 'object',58 'type': 'object',
84 'properties': {59 'properties': {
@@ -116,6 +91,8 @@ schema = {
116 }91 }
117}92}
11893
94__doc__ = get_schema_doc(schema) # Supplement python help()
95
11996
120def handle(name, cfg, cloud, log, _args):97def handle(name, cfg, cloud, log, _args):
121 """Enable and configure ntp."""98 """Enable and configure ntp."""
@@ -132,20 +109,50 @@ def handle(name, cfg, cloud, log, _args):
132 " is a %s %instead"), type_utils.obj_name(ntp_cfg))109 " is a %s %instead"), type_utils.obj_name(ntp_cfg))
133110
134 validate_cloudconfig_schema(cfg, schema)111 validate_cloudconfig_schema(cfg, schema)
112 if ntp_installable():
113 service_name = 'ntp'
114 confpath = NTP_CONF
115 template_name = None
116 packages = ['ntp']
117 check_exe = 'ntpd'
118 else:
119 service_name = 'systemd-timesyncd'
120 confpath = TIMESYNCD_CONF
121 template_name = 'timesyncd.conf'
122 packages = []
123 check_exe = '/lib/systemd/systemd-timesyncd'
124
135 rename_ntp_conf()125 rename_ntp_conf()
136 # ensure when ntp is installed it has a configuration file126 # ensure when ntp is installed it has a configuration file
137 # to use instead of starting up with packaged defaults127 # to use instead of starting up with packaged defaults
138 write_ntp_config_template(ntp_cfg, cloud)128 write_ntp_config_template(ntp_cfg, cloud, confpath, template=template_name)
139 install_ntp(cloud.distro.install_packages, packages=['ntp'],129 install_ntp(cloud.distro.install_packages, packages=packages,
140 check_exe="ntpd")130 check_exe=check_exe)
141 # if ntp was already installed, it may not have started131
142 try:132 try:
143 reload_ntp(systemd=cloud.distro.uses_systemd())133 reload_ntp(service_name, systemd=cloud.distro.uses_systemd())
144 except util.ProcessExecutionError as e:134 except util.ProcessExecutionError as e:
145 LOG.exception("Failed to reload/start ntp service: %s", e)135 LOG.exception("Failed to reload/start ntp service: %s", e)
146 raise136 raise
147137
148138
139def ntp_installable():
140 """Check if we can install ntp package
141
142 Ubuntu-Core systems do not have an ntp package available, so
143 we always return False. Other systems require package managers to install
144 the ntp package If we fail to find one of the package managers, then we
145 cannot install ntp.
146 """
147 if util.system_is_snappy():
148 return False
149
150 if any(map(util.which, ['apt-get', 'dnf', 'yum', 'zypper'])):
151 return True
152
153 return False
154
155
149def install_ntp(install_func, packages=None, check_exe="ntpd"):156def install_ntp(install_func, packages=None, check_exe="ntpd"):
150 if util.which(check_exe):157 if util.which(check_exe):
151 return158 return
@@ -156,7 +163,7 @@ def install_ntp(install_func, packages=None, check_exe="ntpd"):
156163
157164
158def rename_ntp_conf(config=None):165def rename_ntp_conf(config=None):
159 """Rename any existing ntp.conf file and render from template"""166 """Rename any existing ntp.conf file"""
160 if config is None: # For testing167 if config is None: # For testing
161 config = NTP_CONF168 config = NTP_CONF
162 if os.path.exists(config):169 if os.path.exists(config):
@@ -171,7 +178,7 @@ def generate_server_names(distro):
171 return names178 return names
172179
173180
174def write_ntp_config_template(cfg, cloud):181def write_ntp_config_template(cfg, cloud, path, template=None):
175 servers = cfg.get('servers', [])182 servers = cfg.get('servers', [])
176 pools = cfg.get('pools', [])183 pools = cfg.get('pools', [])
177184
@@ -185,19 +192,20 @@ def write_ntp_config_template(cfg, cloud):
185 'pools': pools,192 'pools': pools,
186 }193 }
187194
188 template_fn = cloud.get_template_filename('ntp.conf.%s' %195 if template is None:
189 (cloud.distro.name))196 template = 'ntp.conf.%s' % cloud.distro.name
197
198 template_fn = cloud.get_template_filename(template)
190 if not template_fn:199 if not template_fn:
191 template_fn = cloud.get_template_filename('ntp.conf')200 template_fn = cloud.get_template_filename('ntp.conf')
192 if not template_fn:201 if not template_fn:
193 raise RuntimeError(("No template found, "202 raise RuntimeError(("No template found, "
194 "not rendering %s"), NTP_CONF)203 "not rendering %s"), path)
195204
196 templater.render_to_file(template_fn, NTP_CONF, params)205 templater.render_to_file(template_fn, path, params)
197206
198207
199def reload_ntp(systemd=False):208def reload_ntp(service, systemd=False):
200 service = 'ntp'
201 if systemd:209 if systemd:
202 cmd = ['systemctl', 'reload-or-restart', service]210 cmd = ['systemctl', 'reload-or-restart', service]
203 else:211 else:
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index dc11561..28b1d56 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -15,21 +15,23 @@ This module handles puppet installation and configuration. If the ``puppet``
15key does not exist in global configuration, no action will be taken. If a15key does not exist in global configuration, no action will be taken. If a
16config entry for ``puppet`` is present, then by default the latest version of16config entry for ``puppet`` is present, then by default the latest version of
17puppet will be installed. If ``install`` is set to ``false``, puppet will not17puppet will be installed. If ``install`` is set to ``false``, puppet will not
18be installed. However, this may result in an error if puppet is not already18be installed. However, this will result in an error if puppet is not already
19present on the system. The version of puppet to be installed can be specified19present on the system. The version of puppet to be installed can be specified
20under ``version``, and defaults to ``none``, which selects the latest version20under ``version``, and defaults to ``none``, which selects the latest version
21in the repos. If the ``puppet`` config key exists in the config archive, this21in the repos. If the ``puppet`` config key exists in the config archive, this
22module will attempt to start puppet even if no installation was performed.22module will attempt to start puppet even if no installation was performed.
2323
24Puppet configuration can be specified under the ``conf`` key. The configuration24Puppet configuration can be specified under the ``conf`` key. The
25is specified as a dictionary which is converted into ``<key>=<value>`` format25configuration is specified as a dictionary containing high-level ``<section>``
26and appended to ``puppet.conf`` under the ``[puppetd]`` section. The26keys and lists of ``<key>=<value>`` pairs within each section. Each section
27name and ``<key>=<value>`` pair is written directly to ``puppet.conf``. As
28such, section names should be one of: ``main``, ``master``, ``agent`` or
29``user`` and keys should be valid puppet configuration options. The
27``certname`` key supports string substitutions for ``%i`` and ``%f``,30``certname`` key supports string substitutions for ``%i`` and ``%f``,
28corresponding to the instance id and fqdn of the machine respectively.31corresponding to the instance id and fqdn of the machine respectively.
29If ``ca_cert`` is present under ``conf``, it will not be written to32If ``ca_cert`` is present, it will not be written to ``puppet.conf``, but
30``puppet.conf``, but instead will be used as the puppermaster certificate.33instead will be used as the puppermaster certificate. It should be specified
31It should be specified in pem format as a multi-line string (using the ``|``34in pem format as a multi-line string (using the ``|`` yaml notation).
32yaml notation).
3335
34**Internal name:** ``cc_puppet``36**Internal name:** ``cc_puppet``
3537
@@ -43,12 +45,13 @@ yaml notation).
43 install: <true/false>45 install: <true/false>
44 version: <version>46 version: <version>
45 conf:47 conf:
46 server: "puppetmaster.example.org"48 agent:
47 certname: "%i.%f"49 server: "puppetmaster.example.org"
48 ca_cert: |50 certname: "%i.%f"
49 -------BEGIN CERTIFICATE-------51 ca_cert: |
50 <cert data>52 -------BEGIN CERTIFICATE-------
51 -------END CERTIFICATE-------53 <cert data>
54 -------END CERTIFICATE-------
52"""55"""
5356
54from six import StringIO57from six import StringIO
@@ -127,7 +130,7 @@ def handle(name, cfg, cloud, log, _args):
127 util.write_file(PUPPET_SSL_CERT_PATH, cfg)130 util.write_file(PUPPET_SSL_CERT_PATH, cfg)
128 util.chownbyname(PUPPET_SSL_CERT_PATH, 'puppet', 'root')131 util.chownbyname(PUPPET_SSL_CERT_PATH, 'puppet', 'root')
129 else:132 else:
130 # Iterate throug the config items, we'll use ConfigParser.set133 # Iterate through the config items, we'll use ConfigParser.set
131 # to overwrite or create new items as needed134 # to overwrite or create new items as needed
132 for (o, v) in cfg.items():135 for (o, v) in cfg.items():
133 if o == 'certname':136 if o == 'certname':
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index ceee952..f774baa 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -6,31 +6,8 @@
6#6#
7# This file is part of cloud-init. See LICENSE file for license information.7# This file is part of cloud-init. See LICENSE file for license information.
88
9"""9"""Resizefs: cloud-config module which resizes the filesystem"""
10Resizefs
11--------
12**Summary:** resize filesystem
1310
14Resize a filesystem to use all avaliable space on partition. This module is
15useful along with ``cc_growpart`` and will ensure that if the root partition
16has been resized the root filesystem will be resized along with it. By default,
17``cc_resizefs`` will resize the root partition and will block the boot process
18while the resize command is running. Optionally, the resize operation can be
19performed in the background while cloud-init continues running modules. This
20can be enabled by setting ``resize_rootfs`` to ``true``. This module can be
21disabled altogether by setting ``resize_rootfs`` to ``false``.
22
23**Internal name:** ``cc_resizefs``
24
25**Module frequency:** per always
26
27**Supported distros:** all
28
29**Config keys**::
30
31 resize_rootfs: <true/false/"noblock">
32 resize_rootfs_tmp: <directory>
33"""
3411
35import errno12import errno
36import getopt13import getopt
@@ -38,11 +15,47 @@ import os
38import re15import re
39import shlex16import shlex
40import stat17import stat
18from textwrap import dedent
4119
20from cloudinit.config.schema import (
21 get_schema_doc, validate_cloudconfig_schema)
42from cloudinit.settings import PER_ALWAYS22from cloudinit.settings import PER_ALWAYS
43from cloudinit import util23from cloudinit import util
4424
25NOBLOCK = "noblock"
26
45frequency = PER_ALWAYS27frequency = PER_ALWAYS
28distros = ['all']
29
30schema = {
31 'id': 'cc_resizefs',
32 'name': 'Resizefs',
33 'title': 'Resize filesystem',
34 'description': dedent("""\
35 Resize a filesystem to use all avaliable space on partition. This
36 module is useful along with ``cc_growpart`` and will ensure that if the
37 root partition has been resized the root filesystem will be resized
38 along with it. By default, ``cc_resizefs`` will resize the root
39 partition and will block the boot process while the resize command is
40 running. Optionally, the resize operation can be performed in the
41 background while cloud-init continues running modules. This can be
42 enabled by setting ``resize_rootfs`` to ``true``. This module can be
43 disabled altogether by setting ``resize_rootfs`` to ``false``."""),
44 'distros': distros,
45 'examples': [
46 'resize_rootfs: false # disable root filesystem resize operation'],
47 'frequency': PER_ALWAYS,
48 'type': 'object',
49 'properties': {
50 'resize_rootfs': {
51 'enum': [True, False, NOBLOCK],
52 'description': dedent("""\
53 Whether to resize the root partition. Default: 'true'""")
54 }
55 }
56}
57
58__doc__ = get_schema_doc(schema) # Supplement python help()
4659
4760
48def _resize_btrfs(mount_point, devpth):61def _resize_btrfs(mount_point, devpth):
@@ -54,7 +67,7 @@ def _resize_ext(mount_point, devpth):
5467
5568
56def _resize_xfs(mount_point, devpth):69def _resize_xfs(mount_point, devpth):
57 return ('xfs_growfs', devpth)70 return ('xfs_growfs', mount_point)
5871
5972
60def _resize_ufs(mount_point, devpth):73def _resize_ufs(mount_point, devpth):
@@ -131,8 +144,6 @@ RESIZE_FS_PRECHECK_CMDS = {
131 'ufs': _can_skip_resize_ufs144 'ufs': _can_skip_resize_ufs
132}145}
133146
134NOBLOCK = "noblock"
135
136147
137def rootdev_from_cmdline(cmdline):148def rootdev_from_cmdline(cmdline):
138 found = None149 found = None
@@ -161,71 +172,77 @@ def can_skip_resize(fs_type, resize_what, devpth):
161 return False172 return False
162173
163174
164def handle(name, cfg, _cloud, log, args):175def is_device_path_writable_block(devpath, info, log):
165 if len(args) != 0:176 """Return True if devpath is a writable block device.
166 resize_root = args[0]
167 else:
168 resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True)
169177
170 if not util.translate_bool(resize_root, addons=[NOBLOCK]):178 @param devpath: Path to the root device we want to resize.
171 log.debug("Skipping module named %s, resizing disabled", name)179 @param info: String representing information about the requested device.
172 return180 @param log: Logger to which logs will be added upon error.
173
174 # TODO(harlowja) is the directory ok to be used??
175 resize_root_d = util.get_cfg_option_str(cfg, "resize_rootfs_tmp", "/run")
176 util.ensure_dir(resize_root_d)
177
178 # TODO(harlowja): allow what is to be resized to be configurable??
179 resize_what = "/"
180 result = util.get_mount_info(resize_what, log)
181 if not result:
182 log.warn("Could not determine filesystem type of %s", resize_what)
183 return
184
185 (devpth, fs_type, mount_point) = result
186
187 info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what)
188 log.debug("resize_info: %s" % info)
189181
182 @returns Boolean True if block device is writable
183 """
190 container = util.is_container()184 container = util.is_container()
191185
192 # Ensure the path is a block device.186 # Ensure the path is a block device.
193 if (devpth == "/dev/root" and not os.path.exists(devpth) and187 if (devpath == "/dev/root" and not os.path.exists(devpath) and
194 not container):188 not container):
195 devpth = util.rootdev_from_cmdline(util.get_cmdline())189 devpath = util.rootdev_from_cmdline(util.get_cmdline())
196 if devpth is None:190 if devpath is None:
197 log.warn("Unable to find device '/dev/root'")191 log.warn("Unable to find device '/dev/root'")
198 return192 return False
199 log.debug("Converted /dev/root to '%s' per kernel cmdline", devpth)193 log.debug("Converted /dev/root to '%s' per kernel cmdline", devpath)
194
195 if devpath == 'overlayroot':
196 log.debug("Not attempting to resize devpath '%s': %s", devpath, info)
197 return False
200198
201 try:199 try:
202 statret = os.stat(devpth)200 statret = os.stat(devpath)
203 except OSError as exc:201 except OSError as exc:
204 if container and exc.errno == errno.ENOENT:202 if container and exc.errno == errno.ENOENT:
205 log.debug("Device '%s' did not exist in container. "203 log.debug("Device '%s' did not exist in container. "
206 "cannot resize: %s", devpth, info)204 "cannot resize: %s", devpath, info)
207 elif exc.errno == errno.ENOENT:205 elif exc.errno == errno.ENOENT:
208 log.warn("Device '%s' did not exist. cannot resize: %s",206 log.warn("Device '%s' did not exist. cannot resize: %s",
209 devpth, info)207 devpath, info)
210 else:208 else:
211 raise exc209 raise exc
212 return210 return False
213
214 if not os.access(devpth, os.W_OK):
215 if container:
216 log.debug("'%s' not writable in container. cannot resize: %s",
217 devpth, info)
218 else:
219 log.warn("'%s' not writable. cannot resize: %s", devpth, info)
220 return
221211
222 if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode):212 if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode):
223 if container:213 if container:
224 log.debug("device '%s' not a block device in container."214 log.debug("device '%s' not a block device in container."
225 " cannot resize: %s" % (devpth, info))215 " cannot resize: %s" % (devpath, info))
226 else:216 else:
227 log.warn("device '%s' not a block device. cannot resize: %s" %217 log.warn("device '%s' not a block device. cannot resize: %s" %
228 (devpth, info))218 (devpath, info))
219 return False
220 return True
221
222
223def handle(name, cfg, _cloud, log, args):
224 if len(args) != 0:
225 resize_root = args[0]
226 else:
227 resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True)
228 validate_cloudconfig_schema(cfg, schema)
229 if not util.translate_bool(resize_root, addons=[NOBLOCK]):
230 log.debug("Skipping module named %s, resizing disabled", name)
231 return
232
233 # TODO(harlowja): allow what is to be resized to be configurable??
234 resize_what = "/"
235 result = util.get_mount_info(resize_what, log)
236 if not result:
237 log.warn("Could not determine filesystem type of %s", resize_what)
238 return
239
240 (devpth, fs_type, mount_point) = result
241
242 info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what)
243 log.debug("resize_info: %s" % info)
244
245 if not is_device_path_writable_block(devpth, info, log):
229 return246 return
230247
231 resizer = None248 resizer = None
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
index 2548d1f..9812562 100644
--- a/cloudinit/config/cc_resolv_conf.py
+++ b/cloudinit/config/cc_resolv_conf.py
@@ -55,7 +55,7 @@ LOG = logging.getLogger(__name__)
5555
56frequency = PER_INSTANCE56frequency = PER_INSTANCE
5757
58distros = ['fedora', 'rhel', 'sles']58distros = ['fedora', 'opensuse', 'rhel', 'sles']
5959
6060
61def generate_resolv_conf(template_fn, params, target_fname="/etc/resolv.conf"):61def generate_resolv_conf(template_fn, params, target_fname="/etc/resolv.conf"):
diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py
index dfa8cb3..449872f 100644
--- a/cloudinit/config/cc_runcmd.py
+++ b/cloudinit/config/cc_runcmd.py
@@ -6,41 +6,70 @@
6#6#
7# This file is part of cloud-init. See LICENSE file for license information.7# This file is part of cloud-init. See LICENSE file for license information.
88
9"""9"""Runcmd: run arbitrary commands at rc.local with output to the console"""
10Runcmd
11------
12**Summary:** run commands
1310
14Run arbitrary commands at a rc.local like level with output to the console.11from cloudinit.config.schema import (
15Each item can be either a list or a string. If the item is a list, it will be12 get_schema_doc, validate_cloudconfig_schema)
16properly executed as if passed to ``execve()`` (with the first arg as the13from cloudinit.distros import ALL_DISTROS
17command). If the item is a string, it will be written to a file and interpreted14from cloudinit.settings import PER_INSTANCE
18using ``sh``.15from cloudinit import util
19
20.. note::
21 all commands must be proper yaml, so you have to quote any characters yaml
22 would eat (':' can be problematic)
23
24**Internal name:** ``cc_runcmd``
2516
26**Module frequency:** per instance17import os
18from textwrap import dedent
2719
28**Supported distros:** all
2920
30**Config keys**::21# The schema definition for each cloud-config module is a strict contract for
22# describing supported configuration parameters for each cloud-config section.
23# It allows cloud-config to validate and alert users to invalid or ignored
24# configuration options before actually attempting to deploy with said
25# configuration.
3126
32 runcmd:27distros = [ALL_DISTROS]
33 - [ ls, -l, / ]
34 - [ sh, -xc, "echo $(date) ': hello world!'" ]
35 - [ sh, -c, echo "=========hello world'=========" ]
36 - ls -l /root
37 - [ wget, "http://example.org", -O, /tmp/index.html ]
38"""
3928
29schema = {
30 'id': 'cc_runcmd',
31 'name': 'Runcmd',
32 'title': 'Run arbitrary commands',
33 'description': dedent("""\
34 Run arbitrary commands at a rc.local like level with output to the
35 console. Each item can be either a list or a string. If the item is a
36 list, it will be properly executed as if passed to ``execve()`` (with
37 the first arg as the command). If the item is a string, it will be
38 written to a file and interpreted
39 using ``sh``.
4040
41import os41 .. note::
42 all commands must be proper yaml, so you have to quote any characters
43 yaml would eat (':' can be problematic)"""),
44 'distros': distros,
45 'examples': [dedent("""\
46 runcmd:
47 - [ ls, -l, / ]
48 - [ sh, -xc, "echo $(date) ': hello world!'" ]
49 - [ sh, -c, echo "=========hello world'=========" ]
50 - ls -l /root
51 - [ wget, "http://example.org", -O, /tmp/index.html ]
52 """)],
53 'frequency': PER_INSTANCE,
54 'type': 'object',
55 'properties': {
56 'runcmd': {
57 'type': 'array',
58 'items': {
59 'oneOf': [
60 {'type': 'array', 'items': {'type': 'string'}},
61 {'type': 'string'}]
62 },
63 'additionalItems': False, # Reject items of non-string non-list
64 'additionalProperties': False,
65 'minItems': 1,
66 'required': [],
67 'uniqueItems': True
68 }
69 }
70}
4271
43from cloudinit import util72__doc__ = get_schema_doc(schema) # Supplement python help()
4473
4574
46def handle(name, cfg, cloud, log, _args):75def handle(name, cfg, cloud, log, _args):
@@ -49,6 +78,7 @@ def handle(name, cfg, cloud, log, _args):
49 " no 'runcmd' key in configuration"), name)78 " no 'runcmd' key in configuration"), name)
50 return79 return
5180
81 validate_cloudconfig_schema(cfg, schema)
52 out_fn = os.path.join(cloud.get_ipath('scripts'), "runcmd")82 out_fn = os.path.join(cloud.get_ipath('scripts'), "runcmd")
53 cmd = cfg["runcmd"]83 cmd = cfg["runcmd"]
54 try:84 try:
diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index a9682f1..eecb817 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -63,11 +63,11 @@ is ``auto``. Options are:
6363
64from cloudinit import log as logging64from cloudinit import log as logging
65from cloudinit.settings import PER_INSTANCE65from cloudinit.settings import PER_INSTANCE
66from cloudinit import temp_utils
66from cloudinit import util67from cloudinit import util
6768
68import glob69import glob
69import os70import os
70import tempfile
7171
72LOG = logging.getLogger(__name__)72LOG = logging.getLogger(__name__)
7373
@@ -183,7 +183,7 @@ def render_snap_op(op, name, path=None, cfgfile=None, config=None):
183 # config183 # config
184 # Note, however, we do not touch config files on disk.184 # Note, however, we do not touch config files on disk.
185 nested_cfg = {'config': {shortname: config}}185 nested_cfg = {'config': {shortname: config}}
186 (fd, cfg_tmpf) = tempfile.mkstemp()186 (fd, cfg_tmpf) = temp_utils.mkstemp()
187 os.write(fd, util.yaml_dumps(nested_cfg).encode())187 os.write(fd, util.yaml_dumps(nested_cfg).encode())
188 os.close(fd)188 os.close(fd)
189 cfgfile = cfg_tmpf189 cfgfile = cfg_tmpf
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
index 0066e97..35d8c57 100755
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -28,7 +28,7 @@ the keys can be specified, but defaults to ``md5``.
28import base6428import base64
29import hashlib29import hashlib
3030
31from prettytable import PrettyTable31from cloudinit.simpletable import SimpleTable
3232
33from cloudinit.distros import ug_util33from cloudinit.distros import ug_util
34from cloudinit import ssh_util34from cloudinit import ssh_util
@@ -74,7 +74,7 @@ def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5',
74 return74 return
75 tbl_fields = ['Keytype', 'Fingerprint (%s)' % (hash_meth), 'Options',75 tbl_fields = ['Keytype', 'Fingerprint (%s)' % (hash_meth), 'Options',
76 'Comment']76 'Comment']
77 tbl = PrettyTable(tbl_fields)77 tbl = SimpleTable(tbl_fields)
78 for entry in key_entries:78 for entry in key_entries:
79 if _is_printable_key(entry):79 if _is_printable_key(entry):
80 row = []80 row = []
diff --git a/cloudinit/config/cc_ubuntu_init_switch.py b/cloudinit/config/cc_ubuntu_init_switch.py
81deleted file mode 10064481deleted file mode 100644
index 5dd2690..0000000
--- a/cloudinit/config/cc_ubuntu_init_switch.py
+++ /dev/null
@@ -1,160 +0,0 @@
1# Copyright (C) 2014 Canonical Ltd.
2#
3# Author: Scott Moser <scott.moser@canonical.com>
4#
5# This file is part of cloud-init. See LICENSE file for license information.
6
7"""
8Ubuntu Init Switch
9------------------
10**Summary:** reboot system into another init.
11
12This module provides a way for the user to boot with systemd even if the image
13is set to boot with upstart. It should be run as one of the first
14``cloud_init_modules``, and will switch the init system and then issue a
15reboot. The next boot will come up in the target init system and no action
16will be taken. This should be inert on non-ubuntu systems, and also
17exit quickly.
18
19.. note::
20 best effort is made, but it's possible this system will break, and probably
21 won't interact well with any other mechanism you've used to switch the init
22 system.
23
24**Internal name:** ``cc_ubuntu_init_switch``
25
26**Module frequency:** once per instance
27
28**Supported distros:** ubuntu
29
30**Config keys**::
31
32 init_switch:
33 target: systemd (can be 'systemd' or 'upstart')
34 reboot: true (reboot if a change was made, or false to not reboot)
35"""
36
37from cloudinit.distros import ubuntu
38from cloudinit import log as logging
39from cloudinit.settings import PER_INSTANCE
40from cloudinit import util
41
42import os
43import time
44
45frequency = PER_INSTANCE
46REBOOT_CMD = ["/sbin/reboot", "--force"]
47
48DEFAULT_CONFIG = {
49 'init_switch': {'target': None, 'reboot': True}
50}
51
52SWITCH_INIT = """
53#!/bin/sh
54# switch_init: [upstart | systemd]
55
56is_systemd() {
57 [ "$(dpkg-divert --listpackage /sbin/init)" = "systemd-sysv" ]
58}
59debug() { echo "$@" 1>&2; }
60fail() { echo "$@" 1>&2; exit 1; }
61
62if [ "$1" = "systemd" ]; then
63 if is_systemd; then
64 debug "already systemd, nothing to do"
65 else
66 [ -f /lib/systemd/systemd ] || fail "no systemd available";
67 dpkg-divert --package systemd-sysv --divert /sbin/init.diverted \\
68 --rename /sbin/init
69 fi
70 [ -f /sbin/init ] || ln /lib/systemd/systemd /sbin/init
71elif [ "$1" = "upstart" ]; then
72 if is_systemd; then
73 rm -f /sbin/init
74 dpkg-divert --package systemd-sysv --rename --remove /sbin/init
75 else
76 debug "already upstart, nothing to do."
77 fi
78else
79 fail "Error. expect 'upstart' or 'systemd'"
80fi
81"""
82
83distros = ['ubuntu']
84
85
86def handle(name, cfg, cloud, log, args):
87 """Handler method activated by cloud-init."""
88
89 if not isinstance(cloud.distro, ubuntu.Distro):
90 log.debug("%s: distro is '%s', not ubuntu. returning",
91 name, cloud.distro.__class__)
92 return
93
94 cfg = util.mergemanydict([cfg, DEFAULT_CONFIG])
95 target = cfg['init_switch']['target']
96 reboot = cfg['init_switch']['reboot']
97
98 if len(args) != 0:
99 target = args[0]
100 if len(args) > 1:
101 reboot = util.is_true(args[1])
102
103 if not target:
104 log.debug("%s: target=%s. nothing to do", name, target)
105 return
106
107 if not util.which('dpkg'):
108 log.warn("%s: 'dpkg' not available. Assuming not ubuntu", name)
109 return
110
111 supported = ('upstart', 'systemd')
112 if target not in supported:
113 log.warn("%s: target set to %s, expected one of: %s",
114 name, target, str(supported))
115
116 if os.path.exists("/run/systemd/system"):
117 current = "systemd"
118 else:
119 current = "upstart"
120
121 if current == target:
122 log.debug("%s: current = target = %s. nothing to do", name, target)
123 return
124
125 try:
126 util.subp(['sh', '-s', target], data=SWITCH_INIT)
127 except util.ProcessExecutionError as e:
128 log.warn("%s: Failed to switch to init '%s'. %s", name, target, e)
129 return
130
131 if util.is_false(reboot):
132 log.info("%s: switched '%s' to '%s'. reboot=false, not rebooting.",
133 name, current, target)
134 return
135
136 try:
137 log.warn("%s: switched '%s' to '%s'. rebooting.",
138 name, current, target)
139 logging.flushLoggers(log)
140 _fire_reboot(log, wait_attempts=4, initial_sleep=4)
141 except Exception as e:
142 util.logexc(log, "Requested reboot did not happen!")
143 raise
144
145
146def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2):
147 util.subp(REBOOT_CMD)
148 start = time.time()
149 wait_time = initial_sleep
150 for _i in range(0, wait_attempts):
151 time.sleep(wait_time)
152 wait_time *= backoff
153 elapsed = time.time() - start
154 log.debug("Rebooted, but still running after %s seconds", int(elapsed))
155 # If we got here, not good
156 elapsed = time.time() - start
157 raise RuntimeError(("Reboot did not happen"
158 " after %s seconds!") % (int(elapsed)))
159
160# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_zypper_add_repo.py b/cloudinit/config/cc_zypper_add_repo.py
161new file mode 1006440new file mode 100644
index 0000000..aba2695
--- /dev/null
+++ b/cloudinit/config/cc_zypper_add_repo.py
@@ -0,0 +1,218 @@
1#
2# Copyright (C) 2017 SUSE LLC.
3#
4# This file is part of cloud-init. See LICENSE file for license information.
5
6"""zypper_add_repo: Add zyper repositories to the system"""
7
8import configobj
9import os
10from six import string_types
11from textwrap import dedent
12
13from cloudinit.config.schema import get_schema_doc
14from cloudinit import log as logging
15from cloudinit.settings import PER_ALWAYS
16from cloudinit import util
17
18distros = ['opensuse', 'sles']
19
20schema = {
21 'id': 'cc_zypper_add_repo',
22 'name': 'ZypperAddRepo',
23 'title': 'Configure zypper behavior and add zypper repositories',
24 'description': dedent("""\
25 Configure zypper behavior by modifying /etc/zypp/zypp.conf. The
26 configuration writer is "dumb" and will simply append the provided
27 configuration options to the configuration file. Option settings
28 that may be duplicate will be resolved by the way the zypp.conf file
29 is parsed. The file is in INI format.
30 Add repositories to the system. No validation is performed on the
31 repository file entries, it is assumed the user is familiar with
32 the zypper repository file format."""),
33 'distros': distros,
34 'examples': [dedent("""\
35 zypper:
36 repos:
37 - id: opensuse-oss
38 name: os-oss
39 baseurl: http://dl.opensuse.org/dist/leap/v/repo/oss/
40 enabled: 1
41 autorefresh: 1
42 - id: opensuse-oss-update
43 name: os-oss-up
44 baseurl: http://dl.opensuse.org/dist/leap/v/update
45 # any setting per
46 # https://en.opensuse.org/openSUSE:Standards_RepoInfo
47 # enable and autorefresh are on by default
48 config:
49 reposdir: /etc/zypp/repos.dir
50 servicesdir: /etc/zypp/services.d
51 download.use_deltarpm: true
52 # any setting in /etc/zypp/zypp.conf
53 """)],
54 'frequency': PER_ALWAYS,
55 'type': 'object',
56 'properties': {
57 'zypper': {
58 'type': 'object',
59 'properties': {
60 'repos': {
61 'type': 'array',
62 'items': {
63 'type': 'object',
64 'properties': {
65 'id': {
66 'type': 'string',
67 'description': dedent("""\
68 The unique id of the repo, used when
69 writing
70 /etc/zypp/repos.d/<id>.repo.""")
71 },
72 'baseurl': {
73 'type': 'string',
74 'format': 'uri', # built-in format type
75 'description': 'The base repositoy URL'
76 }
77 },
78 'required': ['id', 'baseurl'],
79 'additionalProperties': True
80 },
81 'minItems': 1
82 },
83 'config': {
84 'type': 'object',
85 'description': dedent("""\
86 Any supported zypo.conf key is written to
87 /etc/zypp/zypp.conf'""")
88 }
89 },
90 'required': [],
91 'minProperties': 1, # Either config or repo must be provided
92 'additionalProperties': False, # only repos and config allowed
93 }
94 }
95}
96
97__doc__ = get_schema_doc(schema) # Supplement python help()
98
99LOG = logging.getLogger(__name__)
100
101
102def _canonicalize_id(repo_id):
103 repo_id = repo_id.replace(" ", "_")
104 return repo_id
105
106
107def _format_repo_value(val):
108 if isinstance(val, bool):
109 # zypp prefers 1/0
110 return 1 if val else 0
111 if isinstance(val, (list, tuple)):
112 return "\n ".join([_format_repo_value(v) for v in val])
113 if not isinstance(val, string_types):
114 return str(val)
115 return val
116
117
118def _format_repository_config(repo_id, repo_config):
119 to_be = configobj.ConfigObj()
120 to_be[repo_id] = {}
121 # Do basic translation of the items -> values
122 for (k, v) in repo_config.items():
123 # For now assume that people using this know the format
124 # of zypper repos and don't verify keys/values further
125 to_be[repo_id][k] = _format_repo_value(v)
126 lines = to_be.write()
127 return "\n".join(lines)
128
129
130def _write_repos(repos, repo_base_path):
131 """Write the user-provided repo definition files
132 @param repos: A list of repo dictionary objects provided by the user's
133 cloud config.
134 @param repo_base_path: The directory path to which repo definitions are
135 written.
136 """
137
138 if not repos:
139 return
140 valid_repos = {}
141 for index, user_repo_config in enumerate(repos):
142 # Skip on absent required keys
143 missing_keys = set(['id', 'baseurl']).difference(set(user_repo_config))
144 if missing_keys:
145 LOG.warning(
146 "Repo config at index %d is missing required config keys: %s",
147 index, ",".join(missing_keys))
148 continue
149 repo_id = user_repo_config.get('id')
150 canon_repo_id = _canonicalize_id(repo_id)
151 repo_fn_pth = os.path.join(repo_base_path, "%s.repo" % (canon_repo_id))
152 if os.path.exists(repo_fn_pth):
153 LOG.info("Skipping repo %s, file %s already exists!",
154 repo_id, repo_fn_pth)
155 continue
156 elif repo_id in valid_repos:
157 LOG.info("Skipping repo %s, file %s already pending!",
158 repo_id, repo_fn_pth)
159 continue
160
161 # Do some basic key formatting
162 repo_config = dict(
163 (k.lower().strip().replace("-", "_"), v)
164 for k, v in user_repo_config.items()
165 if k and k != 'id')
166
167 # Set defaults if not present
168 for field in ['enabled', 'autorefresh']:
169 if field not in repo_config:
170 repo_config[field] = '1'
171
172 valid_repos[repo_id] = (repo_fn_pth, repo_config)
173
174 for (repo_id, repo_data) in valid_repos.items():
175 repo_blob = _format_repository_config(repo_id, repo_data[-1])
176 util.write_file(repo_data[0], repo_blob)
177
178
179def _write_zypp_config(zypper_config):
180 """Write to the default zypp configuration file /etc/zypp/zypp.conf"""
181 if not zypper_config:
182 return
183 zypp_config = '/etc/zypp/zypp.conf'
184 zypp_conf_content = util.load_file(zypp_config)
185 new_settings = ['# Added via cloud.cfg']
186 for setting, value in zypper_config.items():
187 if setting == 'configdir':
188 msg = 'Changing the location of the zypper configuration is '
189 msg += 'not supported, skipping "configdir" setting'
190 LOG.warning(msg)
191 continue
192 if value:
193 new_settings.append('%s=%s' % (setting, value))
194 if len(new_settings) > 1:
195 new_config = zypp_conf_content + '\n'.join(new_settings)
196 else:
197 new_config = zypp_conf_content
198 util.write_file(zypp_config, new_config)
199
200
201def handle(name, cfg, _cloud, log, _args):
202 zypper_section = cfg.get('zypper')
203 if not zypper_section:
204 LOG.debug(("Skipping module named %s,"
205 " no 'zypper' relevant configuration found"), name)
206 return
207 repos = zypper_section.get('repos')
208 if not repos:
209 LOG.debug(("Skipping module named %s,"
210 " no 'repos' configuration found"), name)
211 return
212 zypper_config = zypper_section.get('config', {})
213 repo_base_path = zypper_config.get('reposdir', '/etc/zypp/repos.d/')
214
215 _write_zypp_config(zypper_config)
216 _write_repos(repos, repo_base_path)
217
218# vi: ts=4 expandtab
diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py
index 6400f00..bb291ff 100644
--- a/cloudinit/config/schema.py
+++ b/cloudinit/config/schema.py
@@ -3,19 +3,24 @@
33
4from __future__ import print_function4from __future__ import print_function
55
6from cloudinit.util import read_file_or_url6from cloudinit import importer
7from cloudinit.util import find_modules, read_file_or_url
78
8import argparse9import argparse
10from collections import defaultdict
11from copy import deepcopy
9import logging12import logging
10import os13import os
14import re
11import sys15import sys
12import yaml16import yaml
1317
18_YAML_MAP = {True: 'true', False: 'false', None: 'null'}
14SCHEMA_UNDEFINED = b'UNDEFINED'19SCHEMA_UNDEFINED = b'UNDEFINED'
15CLOUD_CONFIG_HEADER = b'#cloud-config'20CLOUD_CONFIG_HEADER = b'#cloud-config'
16SCHEMA_DOC_TMPL = """21SCHEMA_DOC_TMPL = """
17{name}22{name}
18---23{title_underbar}
19**Summary:** {title}24**Summary:** {title}
2025
21{description}26{description}
@@ -31,6 +36,8 @@ SCHEMA_DOC_TMPL = """
31{examples}36{examples}
32"""37"""
33SCHEMA_PROPERTY_TMPL = '{prefix}**{prop_name}:** ({type}) {description}'38SCHEMA_PROPERTY_TMPL = '{prefix}**{prop_name}:** ({type}) {description}'
39SCHEMA_EXAMPLES_HEADER = '\n**Examples**::\n\n'
40SCHEMA_EXAMPLES_SPACER_TEMPLATE = '\n # --- Example{0} ---'
3441
3542
36class SchemaValidationError(ValueError):43class SchemaValidationError(ValueError):
@@ -83,11 +90,49 @@ def validate_cloudconfig_schema(config, schema, strict=False):
83 logging.warning('Invalid config:\n%s', '\n'.join(messages))90 logging.warning('Invalid config:\n%s', '\n'.join(messages))
8491
8592
86def validate_cloudconfig_file(config_path, schema):93def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
94 """Return contents of the cloud-config file annotated with schema errors.
95
96 @param cloudconfig: YAML-loaded object from the original_content.
97 @param original_content: The contents of a cloud-config file
98 @param schema_errors: List of tuples from a JSONSchemaValidationError. The
99 tuples consist of (schemapath, error_message).
100 """
101 if not schema_errors:
102 return original_content
103 schemapaths = _schemapath_for_cloudconfig(cloudconfig, original_content)
104 errors_by_line = defaultdict(list)
105 error_count = 1
106 error_footer = []
107 annotated_content = []
108 for path, msg in schema_errors:
109 errors_by_line[schemapaths[path]].append(msg)
110 error_footer.append('# E{0}: {1}'.format(error_count, msg))
111 error_count += 1
112 lines = original_content.decode().split('\n')
113 error_count = 1
114 for line_number, line in enumerate(lines):
115 errors = errors_by_line[line_number + 1]
116 if errors:
117 error_label = ','.join(
118 ['E{0}'.format(count + error_count)
119 for count in range(0, len(errors))])
120 error_count += len(errors)
121 annotated_content.append(line + '\t\t# ' + error_label)
122 else:
123 annotated_content.append(line)
124 annotated_content.append(
125 '# Errors: -------------\n{0}\n\n'.format('\n'.join(error_footer)))
126 return '\n'.join(annotated_content)
127
128
129def validate_cloudconfig_file(config_path, schema, annotate=False):
87 """Validate cloudconfig file adheres to a specific jsonschema.130 """Validate cloudconfig file adheres to a specific jsonschema.
88131
89 @param config_path: Path to the yaml cloud-config file to parse.132 @param config_path: Path to the yaml cloud-config file to parse.
90 @param schema: Dict describing a valid jsonschema to validate against.133 @param schema: Dict describing a valid jsonschema to validate against.
134 @param annotate: Boolean set True to print original config file with error
135 annotations on the offending lines.
91136
92 @raises SchemaValidationError containing any of schema_errors encountered.137 @raises SchemaValidationError containing any of schema_errors encountered.
93 @raises RuntimeError when config_path does not exist.138 @raises RuntimeError when config_path does not exist.
@@ -108,18 +153,83 @@ def validate_cloudconfig_file(config_path, schema):
108 ('format', 'File {0} is not valid yaml. {1}'.format(153 ('format', 'File {0} is not valid yaml. {1}'.format(
109 config_path, str(e))),)154 config_path, str(e))),)
110 raise SchemaValidationError(errors)155 raise SchemaValidationError(errors)
111 validate_cloudconfig_schema(156
112 cloudconfig, schema, strict=True)157 try:
158 validate_cloudconfig_schema(
159 cloudconfig, schema, strict=True)
160 except SchemaValidationError as e:
161 if annotate:
162 print(annotated_cloudconfig_file(
163 cloudconfig, content, e.schema_errors))
164 raise
165
166
167def _schemapath_for_cloudconfig(config, original_content):
168 """Return a dictionary mapping schemapath to original_content line number.
169
170 @param config: The yaml.loaded config dictionary of a cloud-config file.
171 @param original_content: The simple file content of the cloud-config file
172 """
173 # FIXME Doesn't handle multi-line lists or multi-line strings
174 content_lines = original_content.decode().split('\n')
175 schema_line_numbers = {}
176 list_index = 0
177 RE_YAML_INDENT = r'^(\s*)'
178 scopes = []
179 for line_number, line in enumerate(content_lines):
180 indent_depth = len(re.match(RE_YAML_INDENT, line).groups()[0])
181 line = line.strip()
182 if not line or line.startswith('#'):
183 continue
184 if scopes:
185 previous_depth, path_prefix = scopes[-1]
186 else:
187 previous_depth = -1
188 path_prefix = ''
189 if line.startswith('- '):
190 key = str(list_index)
191 value = line[1:]
192 list_index += 1
193 else:
194 list_index = 0
195 key, value = line.split(':', 1)
196 while indent_depth <= previous_depth:
197 if scopes:
198 previous_depth, path_prefix = scopes.pop()
199 else:
200 previous_depth = -1
201 path_prefix = ''
202 if path_prefix:
203 key = path_prefix + '.' + key
204 scopes.append((indent_depth, key))
205 if value:
206 value = value.strip()
207 if value.startswith('['):
208 scopes.append((indent_depth + 2, key + '.0'))
209 for inner_list_index in range(0, len(yaml.safe_load(value))):
210 list_key = key + '.' + str(inner_list_index)
211 schema_line_numbers[list_key] = line_number + 1
212 schema_line_numbers[key] = line_number + 1
213 return schema_line_numbers
113214
114215
115def _get_property_type(property_dict):216def _get_property_type(property_dict):
116 """Return a string representing a property type from a given jsonschema."""217 """Return a string representing a property type from a given jsonschema."""
117 property_type = property_dict.get('type', SCHEMA_UNDEFINED)218 property_type = property_dict.get('type', SCHEMA_UNDEFINED)
219 if property_type == SCHEMA_UNDEFINED and property_dict.get('enum'):
220 property_type = [
221 str(_YAML_MAP.get(k, k)) for k in property_dict['enum']]
118 if isinstance(property_type, list):222 if isinstance(property_type, list):
119 property_type = '/'.join(property_type)223 property_type = '/'.join(property_type)
120 item_type = property_dict.get('items', {}).get('type')224 items = property_dict.get('items', {})
121 if item_type:225 sub_property_type = items.get('type', '')
122 property_type = '{0} of {1}'.format(property_type, item_type)226 # Collect each item type
227 for sub_item in items.get('oneOf', {}):
228 if sub_property_type:
229 sub_property_type += '/'
230 sub_property_type += '(' + _get_property_type(sub_item) + ')'
231 if sub_property_type:
232 return '{0} of {1}'.format(property_type, sub_property_type)
123 return property_type233 return property_type
124234
125235
@@ -146,12 +256,14 @@ def _get_schema_examples(schema, prefix=''):
146 examples = schema.get('examples')256 examples = schema.get('examples')
147 if not examples:257 if not examples:
148 return ''258 return ''
149 rst_content = '\n**Examples**::\n\n'259 rst_content = SCHEMA_EXAMPLES_HEADER
150 for example in examples:260 for count, example in enumerate(examples):
151 example_yaml = yaml.dump(example, default_flow_style=False)
152 # Python2.6 is missing textwrapper.indent261 # Python2.6 is missing textwrapper.indent
153 lines = example_yaml.split('\n')262 lines = example.split('\n')
154 indented_lines = [' {0}'.format(line) for line in lines]263 indented_lines = [' {0}'.format(line) for line in lines]
264 if rst_content != SCHEMA_EXAMPLES_HEADER:
265 indented_lines.insert(
266 0, SCHEMA_EXAMPLES_SPACER_TEMPLATE.format(count + 1))
155 rst_content += '\n'.join(indented_lines)267 rst_content += '\n'.join(indented_lines)
156 return rst_content268 return rst_content
157269
@@ -162,61 +274,87 @@ def get_schema_doc(schema):
162 @param schema: Dict of jsonschema to render.274 @param schema: Dict of jsonschema to render.
163 @raise KeyError: If schema lacks an expected key.275 @raise KeyError: If schema lacks an expected key.
164 """276 """
165 schema['property_doc'] = _get_property_doc(schema)277 schema_copy = deepcopy(schema)
166 schema['examples'] = _get_schema_examples(schema)278 schema_copy['property_doc'] = _get_property_doc(schema)
167 schema['distros'] = ', '.join(schema['distros'])279 schema_copy['examples'] = _get_schema_examples(schema)
168 return SCHEMA_DOC_TMPL.format(**schema)280 schema_copy['distros'] = ', '.join(schema['distros'])
169281 # Need an underbar of the same length as the name
170282 schema_copy['title_underbar'] = re.sub(r'.', '-', schema['name'])
171def get_schema(section_key=None):283 return SCHEMA_DOC_TMPL.format(**schema_copy)
172 """Return a dict of jsonschema defined in any cc_* module.284
173285
174 @param: section_key: Optionally limit schema to a specific top-level key.286FULL_SCHEMA = None
175 """287
176 # TODO use util.find_modules in subsequent branch288
177 from cloudinit.config.cc_ntp import schema289def get_schema():
178 return schema290 """Return jsonschema coalesced from all cc_* cloud-config module."""
291 global FULL_SCHEMA
292 if FULL_SCHEMA:
293 return FULL_SCHEMA
294 full_schema = {
295 '$schema': 'http://json-schema.org/draft-04/schema#',
296 'id': 'cloud-config-schema', 'allOf': []}
297
298 configs_dir = os.path.dirname(os.path.abspath(__file__))
299 potential_handlers = find_modules(configs_dir)
300 for (fname, mod_name) in potential_handlers.items():
301 mod_locs, looked_locs = importer.find_module(
302 mod_name, ['cloudinit.config'], ['schema'])
303 if mod_locs:
304 mod = importer.import_module(mod_locs[0])
305 full_schema['allOf'].append(mod.schema)
306 FULL_SCHEMA = full_schema
307 return full_schema
179308
180309
181def error(message):310def error(message):
182 print(message, file=sys.stderr)311 print(message, file=sys.stderr)
183 return 1312 sys.exit(1)
184313
185314
186def get_parser():315def get_parser(parser=None):
187 """Return a parser for supported cmdline arguments."""316 """Return a parser for supported cmdline arguments."""
188 parser = argparse.ArgumentParser()317 if not parser:
318 parser = argparse.ArgumentParser(
319 prog='cloudconfig-schema',
320 description='Validate cloud-config files or document schema')
189 parser.add_argument('-c', '--config-file',321 parser.add_argument('-c', '--config-file',
190 help='Path of the cloud-config yaml file to validate')322 help='Path of the cloud-config yaml file to validate')
191 parser.add_argument('-d', '--doc', action="store_true", default=False,323 parser.add_argument('-d', '--doc', action="store_true", default=False,
192 help='Print schema documentation')324 help='Print schema documentation')
193 parser.add_argument('-k', '--key',325 parser.add_argument('--annotate', action="store_true", default=False,
194 help='Limit validation or docs to a section key')326 help='Annotate existing cloud-config file with errors')
195 return parser327 return parser
196328
197329
198def main():330def handle_schema_args(name, args):
199 """Tool to validate schema of a cloud-config file or print schema docs."""331 """Handle provided schema args and perform the appropriate actions."""
200 parser = get_parser()
201 args = parser.parse_args()
202 exclusive_args = [args.config_file, args.doc]332 exclusive_args = [args.config_file, args.doc]
203 if not any(exclusive_args) or all(exclusive_args):333 if not any(exclusive_args) or all(exclusive_args):
204 return error('Expected either --config-file argument or --doc')334 error('Expected either --config-file argument or --doc')
205335 full_schema = get_schema()
206 schema = get_schema()
207 if args.config_file:336 if args.config_file:
208 try:337 try:
209 validate_cloudconfig_file(args.config_file, schema)338 validate_cloudconfig_file(
339 args.config_file, full_schema, args.annotate)
210 except (SchemaValidationError, RuntimeError) as e:340 except (SchemaValidationError, RuntimeError) as e:
211 return error(str(e))341 if not args.annotate:
212 print("Valid cloud-config file {0}".format(args.config_file))342 error(str(e))
343 else:
344 print("Valid cloud-config file {0}".format(args.config_file))
213 if args.doc:345 if args.doc:
214 print(get_schema_doc(schema))346 for subschema in full_schema['allOf']:
347 print(get_schema_doc(subschema))
348
349
350def main():
351 """Tool to validate schema of a cloud-config file or print schema docs."""
352 parser = get_parser()
353 handle_schema_args('cloudconfig-schema', parser.parse_args())
215 return 0354 return 0
216355
217356
218if __name__ == '__main__':357if __name__ == '__main__':
219 sys.exit(main())358 sys.exit(main())
220359
221
222# vi: ts=4 expandtab360# vi: ts=4 expandtab
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 1fd48a7..d5becd1 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -30,12 +30,16 @@ from cloudinit import util
30from cloudinit.distros.parsers import hosts30from cloudinit.distros.parsers import hosts
3131
3232
33# Used when a cloud-config module can be run on all cloud-init distibutions.
34# The value 'all' is surfaced in module documentation for distro support.
35ALL_DISTROS = 'all'
36
33OSFAMILIES = {37OSFAMILIES = {
34 'debian': ['debian', 'ubuntu'],38 'debian': ['debian', 'ubuntu'],
35 'redhat': ['centos', 'fedora', 'rhel'],39 'redhat': ['centos', 'fedora', 'rhel'],
36 'gentoo': ['gentoo'],40 'gentoo': ['gentoo'],
37 'freebsd': ['freebsd'],41 'freebsd': ['freebsd'],
38 'suse': ['sles'],42 'suse': ['opensuse', 'sles'],
39 'arch': ['arch'],43 'arch': ['arch'],
40}44}
4145
@@ -188,6 +192,9 @@ class Distro(object):
188 def _get_localhost_ip(self):192 def _get_localhost_ip(self):
189 return "127.0.0.1"193 return "127.0.0.1"
190194
195 def get_locale(self):
196 raise NotImplementedError()
197
191 @abc.abstractmethod198 @abc.abstractmethod
192 def _read_hostname(self, filename, default=None):199 def _read_hostname(self, filename, default=None):
193 raise NotImplementedError()200 raise NotImplementedError()
diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py
index b4c0ba7..f87a343 100644
--- a/cloudinit/distros/arch.py
+++ b/cloudinit/distros/arch.py
@@ -14,6 +14,8 @@ from cloudinit.distros.parsers.hostname import HostnameConf
1414
15from cloudinit.settings import PER_INSTANCE15from cloudinit.settings import PER_INSTANCE
1616
17import os
18
17LOG = logging.getLogger(__name__)19LOG = logging.getLogger(__name__)
1820
1921
@@ -52,31 +54,10 @@ class Distro(distros.Distro):
52 entries = net_util.translate_network(settings)54 entries = net_util.translate_network(settings)
53 LOG.debug("Translated ubuntu style network settings %s into %s",55 LOG.debug("Translated ubuntu style network settings %s into %s",
54 settings, entries)56 settings, entries)
55 dev_names = entries.keys()57 return _render_network(
56 # Format for netctl58 entries, resolv_conf=self.resolve_conf_fn,
57 for (dev, info) in entries.items():59 conf_dir=self.network_conf_dir,
58 nameservers = []60 enable_func=self._enable_interface)
59 net_fn = self.network_conf_dir + dev
60 net_cfg = {
61 'Connection': 'ethernet',
62 'Interface': dev,
63 'IP': info.get('bootproto'),
64 'Address': "('%s/%s')" % (info.get('address'),
65 info.get('netmask')),
66 'Gateway': info.get('gateway'),
67 'DNS': str(tuple(info.get('dns-nameservers'))).replace(',', '')
68 }
69 util.write_file(net_fn, convert_netctl(net_cfg))
70 if info.get('auto'):
71 self._enable_interface(dev)
72 if 'dns-nameservers' in info:
73 nameservers.extend(info['dns-nameservers'])
74
75 if nameservers:
76 util.write_file(self.resolve_conf_fn,
77 convert_resolv_conf(nameservers))
78
79 return dev_names
8061
81 def _enable_interface(self, device_name):62 def _enable_interface(self, device_name):
82 cmd = ['netctl', 'reenable', device_name]63 cmd = ['netctl', 'reenable', device_name]
@@ -173,13 +154,60 @@ class Distro(distros.Distro):
173 ["-y"], freq=PER_INSTANCE)154 ["-y"], freq=PER_INSTANCE)
174155
175156
def _render_network(entries, target="/", conf_dir="etc/netctl",
                    resolv_conf="etc/resolv.conf", enable_func=None):
    """Render the translate_network format into netctl files in target.
    Paths will be rendered under target.

    @param entries: dict of device name -> settings (translate_network form).
    @param target: root under which conf_dir and resolv_conf are resolved.
    @param enable_func: optional callable invoked with each auto device.
    @return: list of device names that were configured.
    """
    resolv_conf = util.target_path(target, resolv_conf)
    conf_dir = util.target_path(target, conf_dir)

    configured = []
    dns_servers = []
    for dev, info in entries.items():
        # no configuration should be rendered for 'lo'
        if dev == 'lo':
            continue
        configured.append(dev)
        profile = {
            'Connection': 'ethernet',
            'Interface': dev,
            'IP': info.get('bootproto'),
            'Address': "%s/%s" % (info.get('address'),
                                  info.get('netmask')),
            'Gateway': info.get('gateway'),
            'DNS': info.get('dns-nameservers', []),
        }
        util.write_file(os.path.join(conf_dir, dev), convert_netctl(profile))
        if enable_func and info.get('auto'):
            enable_func(dev)
        dns_servers.extend(info.get('dns-nameservers', []))

    if dns_servers:
        util.write_file(resolv_conf, convert_resolv_conf(dns_servers))
    return configured
193
194
def convert_netctl(settings):
    """Given a dictionary, returns a string in netctl profile format.

    netctl profile is described at:
    https://git.archlinux.org/netctl.git/tree/docs/netctl.profile.5.txt

    Note that the 'Special Quoting Rules' are not handled here.

    @param settings: dict of netctl profile keys to values; None renders
        as an empty value, and lists/tuples render as a quoted word array.
    @return: profile text, one 'KEY=value' line per setting, sorted by key.
    """
    result = []
    for key in sorted(settings):
        val = settings[key]
        if val is None:
            val = ""
        elif isinstance(val, (tuple, list)):
            val = "(" + ' '.join("'%s'" % v for v in val) + ")"
        result.append("%s=%s\n" % (key, val))
    return ''.join(result)
183211
184212
185def convert_resolv_conf(settings):213def convert_resolv_conf(settings):
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index abfb81f..33cc0bf 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -61,11 +61,49 @@ class Distro(distros.Distro):
61 # should only happen say once per instance...)61 # should only happen say once per instance...)
62 self._runner = helpers.Runners(paths)62 self._runner = helpers.Runners(paths)
63 self.osfamily = 'debian'63 self.osfamily = 'debian'
64 self.default_locale = 'en_US.UTF-8'
65 self.system_locale = None
6466
65 def apply_locale(self, locale, out_fn=None):67 def get_locale(self):
68 """Return the default locale if set, else use default locale"""
69
70 # read system locale value
71 if not self.system_locale:
72 self.system_locale = read_system_locale()
73
74 # Return system_locale setting if valid, else use default locale
75 return (self.system_locale if self.system_locale else
76 self.default_locale)
77
78 def apply_locale(self, locale, out_fn=None, keyname='LANG'):
79 """Apply specified locale to system, regenerate if specified locale
80 differs from system default."""
66 if not out_fn:81 if not out_fn:
67 out_fn = LOCALE_CONF_FN82 out_fn = LOCALE_CONF_FN
68 apply_locale(locale, out_fn)83
84 if not locale:
85 raise ValueError('Failed to provide locale value.')
86
87 # Only call locale regeneration if needed
88 # Update system locale config with specified locale if needed
89 distro_locale = self.get_locale()
90 conf_fn_exists = os.path.exists(out_fn)
91 sys_locale_unset = False if self.system_locale else True
92 need_regen = (locale.lower() != distro_locale.lower() or
93 not conf_fn_exists or sys_locale_unset)
94 need_conf = not conf_fn_exists or need_regen or sys_locale_unset
95
96 if need_regen:
97 regenerate_locale(locale, out_fn, keyname=keyname)
98 else:
99 LOG.debug(
100 "System has '%s=%s' requested '%s', skipping regeneration.",
101 keyname, self.system_locale, locale)
102
103 if need_conf:
104 update_locale_conf(locale, out_fn, keyname=keyname)
105 # once we've updated the system config, invalidate cache
106 self.system_locale = None
69107
70 def install_packages(self, pkglist):108 def install_packages(self, pkglist):
71 self.update_package_sources()109 self.update_package_sources()
@@ -218,37 +256,47 @@ def _maybe_remove_legacy_eth0(path="/etc/network/interfaces.d/eth0.cfg"):
218 LOG.warning(msg)256 LOG.warning(msg)
219257
220258
221def apply_locale(locale, sys_path=LOCALE_CONF_FN, keyname='LANG'):259def read_system_locale(sys_path=LOCALE_CONF_FN, keyname='LANG'):
222 """Apply the locale.260 """Read system default locale setting, if present"""
223261 sys_val = ""
224 Run locale-gen for the provided locale and set the default
225 system variable `keyname` appropriately in the provided `sys_path`.
226
227 If sys_path indicates that `keyname` is already set to `locale`
228 then no changes will be made and locale-gen not called.
229 This allows images built with a locale already generated to not re-run
230 locale-gen which can be very heavy.
231 """
232 if not locale:
233 raise ValueError('Failed to provide locale value.')
234
235 if not sys_path:262 if not sys_path:
236 raise ValueError('Invalid path: %s' % sys_path)263 raise ValueError('Invalid path: %s' % sys_path)
237264
238 if os.path.exists(sys_path):265 if os.path.exists(sys_path):
239 locale_content = util.load_file(sys_path)266 locale_content = util.load_file(sys_path)
240 # if LANG isn't present, regen
241 sys_defaults = util.load_shell_content(locale_content)267 sys_defaults = util.load_shell_content(locale_content)
242 sys_val = sys_defaults.get(keyname, "")268 sys_val = sys_defaults.get(keyname, "")
243 if sys_val.lower() == locale.lower():
244 LOG.debug(
245 "System has '%s=%s' requested '%s', skipping regeneration.",
246 keyname, sys_val, locale)
247 return
248269
249 util.subp(['locale-gen', locale], capture=False)270 return sys_val
271
272
273def update_locale_conf(locale, sys_path, keyname='LANG'):
274 """Update system locale config"""
275 LOG.debug('Updating %s with locale setting %s=%s',
276 sys_path, keyname, locale)
250 util.subp(277 util.subp(
251 ['update-locale', '--locale-file=' + sys_path,278 ['update-locale', '--locale-file=' + sys_path,
252 '%s=%s' % (keyname, locale)], capture=False)279 '%s=%s' % (keyname, locale)], capture=False)
253280
281
def regenerate_locale(locale, sys_path, keyname='LANG'):
    """
    Run locale-gen for the provided locale and set the default
    system variable `keyname` appropriately in the provided `sys_path`.

    @param locale: locale name to generate (e.g. 'en_US.UTF-8').
    @param sys_path: unused here; kept to mirror update_locale_conf's
        signature.
    @param keyname: environment key being configured, defaults to 'LANG'.
    """
    # special case for locales which do not require regen
    # % locale -a
    # C
    # C.UTF-8
    # POSIX
    if locale.lower() in ['c', 'c.utf-8', 'posix']:
        # Fix: corrected typo 'rengeneration' in the debug message
        LOG.debug('%s=%s does not require regeneration', keyname, locale)
        return

    # finally, trigger regeneration
    LOG.debug('Generating locales for %s', locale)
    util.subp(['locale-gen', locale], capture=False)
300
301
254# vi: ts=4 expandtab302# vi: ts=4 expandtab
diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py
255new file mode 100644303new file mode 100644
index 0000000..a219e9f
--- /dev/null
+++ b/cloudinit/distros/opensuse.py
@@ -0,0 +1,212 @@
1# Copyright (C) 2017 SUSE LLC
2# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
3#
4# Author: Robert Schweikert <rjschwei@suse.com>
5# Author: Juerg Haefliger <juerg.haefliger@hp.com>
6#
7# Leaning very heavily on the RHEL and Debian implementation
8#
9# This file is part of cloud-init. See LICENSE file for license information.
10
11from cloudinit import distros
12
13from cloudinit.distros.parsers.hostname import HostnameConf
14
15from cloudinit import helpers
16from cloudinit import log as logging
17from cloudinit import util
18
19from cloudinit.distros import net_util
20from cloudinit.distros import rhel_util as rhutil
21from cloudinit.settings import PER_INSTANCE
22
23LOG = logging.getLogger(__name__)
24
25
class Distro(distros.Distro):
    """openSUSE distribution support; also serves as the base for SLES."""

    clock_conf_fn = '/etc/sysconfig/clock'
    hostname_conf_fn = '/etc/HOSTNAME'
    init_cmd = ['service']
    locale_conf_fn = '/etc/sysconfig/language'
    network_conf_fn = '/etc/sysconfig/network'
    network_script_tpl = '/etc/sysconfig/network/ifcfg-%s'
    resolve_conf_fn = '/etc/resolv.conf'
    route_conf_tpl = '/etc/sysconfig/network/ifroute-%s'
    systemd_hostname_conf_fn = '/etc/hostname'
    systemd_locale_conf_fn = '/etc/locale.conf'
    tz_local_fn = '/etc/localtime'

    def __init__(self, name, cfg, paths):
        distros.Distro.__init__(self, name, cfg, paths)
        # Runners restrict certain calls from repeatedly happening (when
        # they should only happen, say, once per instance).
        self._runner = helpers.Runners(paths)
        self.osfamily = 'suse'
        cfg['ssh_svcname'] = 'sshd'
        if self.uses_systemd():
            self.init_cmd = ['systemctl']
            cfg['ssh_svcname'] = 'sshd.service'

    def apply_locale(self, locale, out_fn=None):
        """Write the requested locale to the distro's locale config file.

        Writes LANG to /etc/locale.conf under systemd, otherwise RC_LANG
        to /etc/sysconfig/language.
        """
        if self.uses_systemd():
            if not out_fn:
                out_fn = self.systemd_locale_conf_fn
            locale_cfg = {'LANG': locale}
        else:
            if not out_fn:
                out_fn = self.locale_conf_fn
            locale_cfg = {'RC_LANG': locale}
        rhutil.update_sysconfig_file(out_fn, locale_cfg)

    def install_packages(self, pkglist):
        """Install the given packages with zypper, auto-accepting licenses."""
        self.package_command(
            'install',
            args='--auto-agree-with-licenses',
            pkgs=pkglist
        )

    def package_command(self, command, args=None, pkgs=None):
        """Run a zypper operation non-interactively.

        @param command: zypper operation such as 'install'; 'upgrade' is
            mapped to zypper's 'update'.
        @param args: str or list of arguments to the operation (not global
            zypper options).
        @param pkgs: list of packages, expanded via '%s-%s' when a
            (name, version) pair is given.
        """
        if pkgs is None:
            pkgs = []

        cmd = ['zypper']
        # No user interaction possible, enable non-interactive mode
        cmd.append('--non-interactive')

        # Command is the operation, such as install
        if command == 'upgrade':
            command = 'update'
        cmd.append(command)

        # args are the arguments to the command, not global options
        if args and isinstance(args, str):
            cmd.append(args)
        elif args and isinstance(args, list):
            cmd.extend(args)

        pkglist = util.expand_package_list('%s-%s', pkgs)
        cmd.extend(pkglist)

        # Allow the output of this to flow outwards (ie not be captured)
        util.subp(cmd, capture=False)

    def set_timezone(self, tz):
        """Point the system timezone at tz (e.g. 'Europe/Berlin')."""
        tz_file = self._find_tz_file(tz)
        if self.uses_systemd():
            # Currently, timedatectl complains if invoked during startup
            # so for compatibility, create the link manually.
            util.del_file(self.tz_local_fn)
            util.sym_link(tz_file, self.tz_local_fn)
        else:
            # Adjust the sysconfig clock zone setting
            clock_cfg = {
                'TIMEZONE': str(tz),
            }
            rhutil.update_sysconfig_file(self.clock_conf_fn, clock_cfg)
            # This ensures that the correct tz will be used for the system
            util.copy(tz_file, self.tz_local_fn)

    def update_package_sources(self):
        """Refresh zypper repositories (at most once per instance)."""
        self._runner.run("update-sources", self.package_command,
                         ['refresh'], freq=PER_INSTANCE)

    def _bring_up_interfaces(self, device_names):
        """Bring up named interfaces; 'all' cannot be translated here."""
        if device_names and 'all' in device_names:
            raise RuntimeError(('Distro %s can not translate '
                                'the device name "all"') % (self.name))
        return distros.Distro._bring_up_interfaces(self, device_names)

    def _read_hostname(self, filename, default=None):
        """Read the hostname from filename, falling back to default.

        Under systemd the live hostname is queried via the 'hostname'
        command unless a previous-hostname file is requested.
        """
        if self.uses_systemd() and filename.endswith('/previous-hostname'):
            return util.load_file(filename).strip()
        elif self.uses_systemd():
            (out, _err) = util.subp(['hostname'])
            # NOTE(review): 'out' keeps any trailing newline here; confirm
            # callers expect that before changing it.
            if len(out):
                return out
            else:
                return default
        else:
            # Fix: initialize so an IOError from reading the conf file no
            # longer raises UnboundLocalError at the check below.
            hostname = None
            try:
                conf = self._read_hostname_conf(filename)
                hostname = conf.hostname
            except IOError:
                pass
            if not hostname:
                return default
            return hostname

    def _read_hostname_conf(self, filename):
        """Parse filename into a HostnameConf and return the parsed object."""
        conf = HostnameConf(util.load_file(filename))
        conf.parse()
        return conf

    def _read_system_hostname(self):
        """Return (config-file path, current hostname) for this system."""
        if self.uses_systemd():
            host_fn = self.systemd_hostname_conf_fn
        else:
            host_fn = self.hostname_conf_fn
        return (host_fn, self._read_hostname(host_fn))

    def _write_hostname(self, hostname, out_fn):
        """Persist hostname to out_fn (or via hostnamectl under systemd)."""
        if self.uses_systemd() and out_fn.endswith('/previous-hostname'):
            util.write_file(out_fn, hostname)
        elif self.uses_systemd():
            util.subp(['hostnamectl', 'set-hostname', str(hostname)])
        else:
            conf = None
            try:
                # Try to update the previous one
                # so lets see if we can read it first.
                conf = self._read_hostname_conf(out_fn)
            except IOError:
                pass
            if not conf:
                conf = HostnameConf('')
            conf.set_hostname(hostname)
            util.write_file(out_fn, str(conf), 0o644)

    def _write_network(self, settings):
        """Render legacy (ENI-style) network settings into sysconfig files.

        Writes an ifcfg-* (and possibly ifroute-*) file per device and
        updates resolv.conf; returns the configured device names.
        """
        # Convert debian settings to ifcfg format
        entries = net_util.translate_network(settings)
        LOG.debug("Translated ubuntu style network settings %s into %s",
                  settings, entries)
        # Make the intermediate format as the suse format...
        nameservers = []
        searchservers = []
        dev_names = entries.keys()
        for (dev, info) in entries.items():
            net_fn = self.network_script_tpl % (dev)
            route_fn = self.route_conf_tpl % (dev)
            mode = None
            if info.get('auto', None):
                mode = 'auto'
            else:
                mode = 'manual'
            bootproto = info.get('bootproto', None)
            gateway = info.get('gateway', None)
            net_cfg = {
                'BOOTPROTO': bootproto,
                'BROADCAST': info.get('broadcast'),
                'GATEWAY': gateway,
                'IPADDR': info.get('address'),
                'LLADDR': info.get('hwaddress'),
                'NETMASK': info.get('netmask'),
                'STARTMODE': mode,
                'USERCONTROL': 'no'
            }
            if dev != 'lo':
                net_cfg['ETHTOOL_OPTIONS'] = ''
            else:
                net_cfg['FIREWALL'] = 'no'
            rhutil.update_sysconfig_file(net_fn, net_cfg, True)
            if gateway and bootproto == 'static':
                default_route = 'default %s' % gateway
                util.write_file(route_fn, default_route, 0o644)
            if 'dns-nameservers' in info:
                nameservers.extend(info['dns-nameservers'])
            if 'dns-search' in info:
                searchservers.extend(info['dns-search'])
        if nameservers or searchservers:
            rhutil.update_resolve_conf_file(self.resolve_conf_fn,
                                            nameservers, searchservers)
        return dev_names
211
212# vi: ts=4 expandtab
diff --git a/cloudinit/distros/sles.py b/cloudinit/distros/sles.py
index dbec2ed..6e336cb 100644
--- a/cloudinit/distros/sles.py
+++ b/cloudinit/distros/sles.py
@@ -1,167 +1,17 @@
1# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.1# Copyright (C) 2017 SUSE LLC
2#2#
3# Author: Juerg Haefliger <juerg.haefliger@hp.com>3# Author: Robert Schweikert <rjschwei@suse.com>
4#4#
5# This file is part of cloud-init. See LICENSE file for license information.5# This file is part of cloud-init. See LICENSE file for license information.
66
7from cloudinit import distros7from cloudinit.distros import opensuse
88
9from cloudinit.distros.parsers.hostname import HostnameConf
10
11from cloudinit import helpers
12from cloudinit import log as logging9from cloudinit import log as logging
13from cloudinit import util
14
15from cloudinit.distros import net_util
16from cloudinit.distros import rhel_util
17from cloudinit.settings import PER_INSTANCE
1810
19LOG = logging.getLogger(__name__)11LOG = logging.getLogger(__name__)
2012
2113
22class Distro(distros.Distro):14class Distro(opensuse.Distro):
23 clock_conf_fn = '/etc/sysconfig/clock'15 pass
24 locale_conf_fn = '/etc/sysconfig/language'
25 network_conf_fn = '/etc/sysconfig/network'
26 hostname_conf_fn = '/etc/HOSTNAME'
27 network_script_tpl = '/etc/sysconfig/network/ifcfg-%s'
28 resolve_conf_fn = '/etc/resolv.conf'
29 tz_local_fn = '/etc/localtime'
30
31 def __init__(self, name, cfg, paths):
32 distros.Distro.__init__(self, name, cfg, paths)
33 # This will be used to restrict certain
34 # calls from repeatly happening (when they
35 # should only happen say once per instance...)
36 self._runner = helpers.Runners(paths)
37 self.osfamily = 'suse'
38
39 def install_packages(self, pkglist):
40 self.package_command('install', args='-l', pkgs=pkglist)
41
42 def _write_network(self, settings):
43 # Convert debian settings to ifcfg format
44 entries = net_util.translate_network(settings)
45 LOG.debug("Translated ubuntu style network settings %s into %s",
46 settings, entries)
47 # Make the intermediate format as the suse format...
48 nameservers = []
49 searchservers = []
50 dev_names = entries.keys()
51 for (dev, info) in entries.items():
52 net_fn = self.network_script_tpl % (dev)
53 mode = info.get('auto')
54 if mode and mode.lower() == 'true':
55 mode = 'auto'
56 else:
57 mode = 'manual'
58 net_cfg = {
59 'BOOTPROTO': info.get('bootproto'),
60 'BROADCAST': info.get('broadcast'),
61 'GATEWAY': info.get('gateway'),
62 'IPADDR': info.get('address'),
63 'LLADDR': info.get('hwaddress'),
64 'NETMASK': info.get('netmask'),
65 'STARTMODE': mode,
66 'USERCONTROL': 'no'
67 }
68 if dev != 'lo':
69 net_cfg['ETHERDEVICE'] = dev
70 net_cfg['ETHTOOL_OPTIONS'] = ''
71 else:
72 net_cfg['FIREWALL'] = 'no'
73 rhel_util.update_sysconfig_file(net_fn, net_cfg, True)
74 if 'dns-nameservers' in info:
75 nameservers.extend(info['dns-nameservers'])
76 if 'dns-search' in info:
77 searchservers.extend(info['dns-search'])
78 if nameservers or searchservers:
79 rhel_util.update_resolve_conf_file(self.resolve_conf_fn,
80 nameservers, searchservers)
81 return dev_names
82
83 def apply_locale(self, locale, out_fn=None):
84 if not out_fn:
85 out_fn = self.locale_conf_fn
86 locale_cfg = {
87 'RC_LANG': locale,
88 }
89 rhel_util.update_sysconfig_file(out_fn, locale_cfg)
90
91 def _write_hostname(self, hostname, out_fn):
92 conf = None
93 try:
94 # Try to update the previous one
95 # so lets see if we can read it first.
96 conf = self._read_hostname_conf(out_fn)
97 except IOError:
98 pass
99 if not conf:
100 conf = HostnameConf('')
101 conf.set_hostname(hostname)
102 util.write_file(out_fn, str(conf), 0o644)
103
104 def _read_system_hostname(self):
105 host_fn = self.hostname_conf_fn
106 return (host_fn, self._read_hostname(host_fn))
107
108 def _read_hostname_conf(self, filename):
109 conf = HostnameConf(util.load_file(filename))
110 conf.parse()
111 return conf
112
113 def _read_hostname(self, filename, default=None):
114 hostname = None
115 try:
116 conf = self._read_hostname_conf(filename)
117 hostname = conf.hostname
118 except IOError:
119 pass
120 if not hostname:
121 return default
122 return hostname
123
124 def _bring_up_interfaces(self, device_names):
125 if device_names and 'all' in device_names:
126 raise RuntimeError(('Distro %s can not translate '
127 'the device name "all"') % (self.name))
128 return distros.Distro._bring_up_interfaces(self, device_names)
129
130 def set_timezone(self, tz):
131 tz_file = self._find_tz_file(tz)
132 # Adjust the sysconfig clock zone setting
133 clock_cfg = {
134 'TIMEZONE': str(tz),
135 }
136 rhel_util.update_sysconfig_file(self.clock_conf_fn, clock_cfg)
137 # This ensures that the correct tz will be used for the system
138 util.copy(tz_file, self.tz_local_fn)
139
140 def package_command(self, command, args=None, pkgs=None):
141 if pkgs is None:
142 pkgs = []
143
144 cmd = ['zypper']
145 # No user interaction possible, enable non-interactive mode
146 cmd.append('--non-interactive')
147
148 # Comand is the operation, such as install
149 cmd.append(command)
150
151 # args are the arguments to the command, not global options
152 if args and isinstance(args, str):
153 cmd.append(args)
154 elif args and isinstance(args, list):
155 cmd.extend(args)
156
157 pkglist = util.expand_package_list('%s-%s', pkgs)
158 cmd.extend(pkglist)
159
160 # Allow the output of this to flow outwards (ie not be captured)
161 util.subp(cmd, capture=False)
162
163 def update_package_sources(self):
164 self._runner.run("update-sources", self.package_command,
165 ['refresh'], freq=PER_INSTANCE)
16616
167# vi: ts=4 expandtab17# vi: ts=4 expandtab
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index f01021a..1979cd9 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -13,7 +13,7 @@ from time import time
13import contextlib13import contextlib
14import os14import os
1515
16import six16from six import StringIO
17from six.moves.configparser import (17from six.moves.configparser import (
18 NoSectionError, NoOptionError, RawConfigParser)18 NoSectionError, NoOptionError, RawConfigParser)
1919
@@ -441,12 +441,12 @@ class DefaultingConfigParser(RawConfigParser):
441441
442 def stringify(self, header=None):442 def stringify(self, header=None):
443 contents = ''443 contents = ''
444 with six.StringIO() as outputstream:444 outputstream = StringIO()
445 self.write(outputstream)445 self.write(outputstream)
446 outputstream.flush()446 outputstream.flush()
447 contents = outputstream.getvalue()447 contents = outputstream.getvalue()
448 if header:448 if header:
449 contents = "\n".join([header, contents])449 contents = '\n'.join([header, contents, ''])
450 return contents450 return contents
451451
452# vi: ts=4 expandtab452# vi: ts=4 expandtab
diff --git a/cloudinit/log.py b/cloudinit/log.py
index 3861709..1d75c9f 100644
--- a/cloudinit/log.py
+++ b/cloudinit/log.py
@@ -19,6 +19,8 @@ import sys
19import six19import six
20from six import StringIO20from six import StringIO
2121
22import time
23
22# Logging levels for easy access24# Logging levels for easy access
23CRITICAL = logging.CRITICAL25CRITICAL = logging.CRITICAL
24FATAL = logging.FATAL26FATAL = logging.FATAL
@@ -32,6 +34,9 @@ NOTSET = logging.NOTSET
32# Default basic format34# Default basic format
33DEF_CON_FORMAT = '%(asctime)s - %(filename)s[%(levelname)s]: %(message)s'35DEF_CON_FORMAT = '%(asctime)s - %(filename)s[%(levelname)s]: %(message)s'
3436
37# Always format logging timestamps as UTC time
38logging.Formatter.converter = time.gmtime
39
3540
36def setupBasicLogging(level=DEBUG):41def setupBasicLogging(level=DEBUG):
37 root = logging.getLogger()42 root = logging.getLogger()
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index 46cb9c8..a1b0db1 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -175,13 +175,8 @@ def is_disabled_cfg(cfg):
175 return cfg.get('config') == "disabled"175 return cfg.get('config') == "disabled"
176176
177177
178def generate_fallback_config(blacklist_drivers=None, config_driver=None):178def find_fallback_nic(blacklist_drivers=None):
179 """Determine which attached net dev is most likely to have a connection and179 """Return the name of the 'fallback' network device."""
180 generate network state to run dhcp on that interface"""
181
182 if not config_driver:
183 config_driver = False
184
185 if not blacklist_drivers:180 if not blacklist_drivers:
186 blacklist_drivers = []181 blacklist_drivers = []
187182
@@ -233,15 +228,24 @@ def generate_fallback_config(blacklist_drivers=None, config_driver=None):
233 if DEFAULT_PRIMARY_INTERFACE in names:228 if DEFAULT_PRIMARY_INTERFACE in names:
234 names.remove(DEFAULT_PRIMARY_INTERFACE)229 names.remove(DEFAULT_PRIMARY_INTERFACE)
235 names.insert(0, DEFAULT_PRIMARY_INTERFACE)230 names.insert(0, DEFAULT_PRIMARY_INTERFACE)
236 target_name = None231
237 target_mac = None232 # pick the first that has a mac-address
238 for name in names:233 for name in names:
239 mac = read_sys_net_safe(name, 'address')234 if read_sys_net_safe(name, 'address'):
240 if mac:235 return name
241 target_name = name236 return None
242 target_mac = mac237
243 break238
244 if target_mac and target_name:239def generate_fallback_config(blacklist_drivers=None, config_driver=None):
240 """Determine which attached net dev is most likely to have a connection and
241 generate network state to run dhcp on that interface"""
242
243 if not config_driver:
244 config_driver = False
245
246 target_name = find_fallback_nic(blacklist_drivers=blacklist_drivers)
247 if target_name:
248 target_mac = read_sys_net_safe(target_name, 'address')
245 nconf = {'config': [], 'version': 1}249 nconf = {'config': [], 'version': 1}
246 cfg = {'type': 'physical', 'name': target_name,250 cfg = {'type': 'physical', 'name': target_name,
247 'mac_address': target_mac, 'subnets': [{'type': 'dhcp'}]}251 'mac_address': target_mac, 'subnets': [{'type': 'dhcp'}]}
@@ -511,21 +515,7 @@ def get_interfaces_by_mac():
511515
512 Bridges and any devices that have a 'stolen' mac are excluded."""516 Bridges and any devices that have a 'stolen' mac are excluded."""
513 ret = {}517 ret = {}
514 devs = get_devicelist()518 for name, mac, _driver, _devid in get_interfaces():
515 empty_mac = '00:00:00:00:00:00'
516 for name in devs:
517 if not interface_has_own_mac(name):
518 continue
519 if is_bridge(name):
520 continue
521 if is_vlan(name):
522 continue
523 mac = get_interface_mac(name)
524 # some devices may not have a mac (tun0)
525 if not mac:
526 continue
527 if mac == empty_mac and name != 'lo':
528 continue
529 if mac in ret:519 if mac in ret:
530 raise RuntimeError(520 raise RuntimeError(
531 "duplicate mac found! both '%s' and '%s' have mac '%s'" %521 "duplicate mac found! both '%s' and '%s' have mac '%s'" %
@@ -599,6 +589,7 @@ class EphemeralIPv4Network(object):
599 self._bringup_router()589 self._bringup_router()
600590
601 def __exit__(self, excp_type, excp_value, excp_traceback):591 def __exit__(self, excp_type, excp_value, excp_traceback):
592 """Teardown anything we set up."""
602 for cmd in self.cleanup_cmds:593 for cmd in self.cleanup_cmds:
603 util.subp(cmd, capture=True)594 util.subp(cmd, capture=True)
604595
diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py
605new file mode 100644596new file mode 100644
index 0000000..0cba703
--- /dev/null
+++ b/cloudinit/net/dhcp.py
@@ -0,0 +1,163 @@
1# Copyright (C) 2017 Canonical Ltd.
2#
3# Author: Chad Smith <chad.smith@canonical.com>
4#
5# This file is part of cloud-init. See LICENSE file for license information.
6
7import configobj
8import logging
9import os
10import re
11
12from cloudinit.net import find_fallback_nic, get_devicelist
13from cloudinit import temp_utils
14from cloudinit import util
15from six import StringIO
16
17LOG = logging.getLogger(__name__)
18
19NETWORKD_LEASES_DIR = '/run/systemd/netif/leases'
20
21
class InvalidDHCPLeaseFileError(Exception):
    """Raised when parsing an empty or invalid dhcp.leases file.

    Signals that no usable lease block could be extracted, so the caller
    cannot obtain dhcp options. Current uses are DataSourceAzure and
    DataSourceEc2 during ephemeral boot to scrape metadata.
    """
    pass
29
30
def maybe_perform_dhcp_discovery(nic=None):
    """Perform dhcp discovery if nic valid and dhclient command exists.

    If the nic is invalid or undiscoverable or dhclient command is not found,
    skip dhcp_discovery and return an empty dict.

    @param nic: Name of the network interface we want to run dhclient on.
    @return: A dict of dhcp options from the dhclient discovery if run,
        otherwise an empty dict is returned.
    """
    if nic is None:
        nic = find_fallback_nic()
    # Guard clauses: bail out with {} on any precondition failure.
    if nic is None:
        LOG.debug(
            'Skip dhcp_discovery: Unable to find fallback nic.')
        return {}
    if nic not in get_devicelist():
        LOG.debug(
            'Skip dhcp_discovery: nic %s not found in get_devicelist.', nic)
        return {}
    dhclient_path = util.which('dhclient')
    if not dhclient_path:
        LOG.debug('Skip dhclient configuration: No dhclient command found.')
        return {}
    # Use /var/tmp because /run/cloud-init/tmp is mounted noexec
    with temp_utils.tempdir(prefix='cloud-init-dhcp-', needs_exe=True) as tdir:
        return dhcp_discovery(dhclient_path, nic, tdir)
58
59
def parse_dhcp_lease_file(lease_file):
    """Parse the given dhcp lease file, returning all leases found.

    Each ``lease { ... }`` block in the file becomes a dict mapping dhcp
    option name to value. Leases are returned in file order, so the most
    recent lease is the last element.

    @param lease_file: Path of the dhcp.leases file to parse.
    @return: A list of dicts, one per lease block found.
    @raises: InvalidDHCPLeaseFileError on empty or unparseable leasefile
        content.
    """
    lease_regex = re.compile(r"lease {(?P<lease>[^}]*)}\n")
    dhcp_leases = []
    lease_content = util.load_file(lease_file)
    if not lease_content:
        raise InvalidDHCPLeaseFileError(
            'Cannot parse empty dhcp lease file {0}'.format(lease_file))
    for lease in lease_regex.findall(lease_content):
        lease_options = []
        for line in lease.split(';'):
            # Strip newlines, double-quotes and option prefix
            line = line.strip().replace('"', '').replace('option ', '')
            if not line:
                continue
            lease_options.append(line.split(' ', 1))
        dhcp_leases.append(dict(lease_options))
    if not dhcp_leases:
        raise InvalidDHCPLeaseFileError(
            'Cannot parse dhcp lease file {0}. No leases found'.format(
                lease_file))
    return dhcp_leases
89
90
def dhcp_discovery(dhclient_cmd_path, interface, cleandir):
    """Run dhclient on the interface without scripts or filesystem artifacts.

    @param dhclient_cmd_path: Full path to the dhclient used.
    @param interface: Name of the network interface on which to dhclient.
    @param cleandir: The directory from which to run dhclient as well as store
        dhcp leases.

    @return: A list of dicts of dhcp options parsed from the dhcp.leases file
        written by dhclient.
    @raises: InvalidDHCPLeaseFileError (from parse_dhcp_lease_file) when the
        resulting lease file is empty or has no parseable leases.
    """
    LOG.debug('Performing a dhcp discovery on %s', interface)

    # XXX We copy dhclient out of /sbin/dhclient to avoid dealing with strict
    # app armor profiles which disallow running dhclient -sf <our-script-file>.
    # We want to avoid running /sbin/dhclient-script because of side-effects in
    # /etc/resolv.conf and any other vendor specific scripts in
    # /etc/dhcp/dhclient*hooks.d.
    sandbox_dhclient_cmd = os.path.join(cleandir, 'dhclient')
    util.copy(dhclient_cmd_path, sandbox_dhclient_cmd)
    # Keep pid/lease artifacts inside cleandir so no system dhclient state
    # is touched or left behind.
    pid_file = os.path.join(cleandir, 'dhclient.pid')
    lease_file = os.path.join(cleandir, 'dhcp.leases')

    # ISC dhclient needs the interface up to send initial discovery packets.
    # Generally dhclient relies on dhclient-script PREINIT action to bring the
    # link up before attempting discovery. Since we are using -sf /bin/true,
    # we need to do that "link up" ourselves first.
    util.subp(['ip', 'link', 'set', 'dev', interface, 'up'], capture=True)
    # -1: only try one lease then exit; -sf /bin/true neuters dhclient-script.
    cmd = [sandbox_dhclient_cmd, '-1', '-v', '-lf', lease_file,
           '-pf', pid_file, interface, '-sf', '/bin/true']
    util.subp(cmd, capture=True)
    return parse_dhcp_lease_file(lease_file)
123
124
def networkd_parse_lease(content):
    """Parse one systemd networkd lease file's content into a dict.

    The files under /run/systemd/netif/leases are (almost) ini style,
    even though their header says:
    # This is private data. Do not parse.

    Simply return their key/value pairs as a dictionary.
    """
    parsed = configobj.ConfigObj(StringIO(content), list_values=False)
    return dict(parsed)
134
135
def networkd_load_leases(leases_d=None):
    """Return a dict of parsed leases found under leases_d.

    @param leases_d: Directory to scan; defaults to NETWORKD_LEASES_DIR
        when None.
    @return: A dict keyed by lease filename (typically the ifindex),
        each value being that lease's parsed key/value pairs. Empty when
        the directory does not exist.
    """
    if leases_d is None:
        leases_d = NETWORKD_LEASES_DIR
    if not os.path.isdir(leases_d):
        return {}
    return dict(
        (fname,
         networkd_parse_lease(util.load_file(os.path.join(leases_d, fname))))
        for fname in os.listdir(leases_d))
152
153
def networkd_get_option_from_leases(keyname, leases_d=None):
    """Return the first value found for keyname across networkd leases.

    Leases are scanned in sorted filename (ifindex) order so the result
    is deterministic when multiple leases carry the option.

    @param keyname: The lease option name to look up.
    @param leases_d: Directory holding networkd leases; defaults to
        NETWORKD_LEASES_DIR when None.
    @return: The option value from the first lease containing it, or
        None when no lease provides a truthy value for keyname.
    """
    if leases_d is None:
        leases_d = NETWORKD_LEASES_DIR
    leases = networkd_load_leases(leases_d=leases_d)
    # _ifindex (the filename) is only used for the deterministic sort order.
    for _ifindex, data in sorted(leases.items()):
        if data.get(keyname):
            return data[keyname]
    return None
162
163# vi: ts=4 expandtab
diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
index bb80ec0..c6a71d1 100644
--- a/cloudinit/net/eni.py
+++ b/cloudinit/net/eni.py
@@ -95,6 +95,9 @@ def _iface_add_attrs(iface, index):
95 ignore_map.append('mac_address')95 ignore_map.append('mac_address')
9696
97 for key, value in iface.items():97 for key, value in iface.items():
98 # convert bool to string for eni
99 if type(value) == bool:
100 value = 'on' if iface[key] else 'off'
98 if not value or key in ignore_map:101 if not value or key in ignore_map:
99 continue102 continue
100 if key in multiline_keys:103 if key in multiline_keys:
diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py
index 9f35b72..d3788af 100644
--- a/cloudinit/net/netplan.py
+++ b/cloudinit/net/netplan.py
@@ -4,7 +4,7 @@ import copy
4import os4import os
55
6from . import renderer6from . import renderer
7from .network_state import subnet_is_ipv67from .network_state import subnet_is_ipv6, NET_CONFIG_TO_V2
88
9from cloudinit import log as logging9from cloudinit import log as logging
10from cloudinit import util10from cloudinit import util
@@ -27,31 +27,6 @@ network:
27"""27"""
2828
29LOG = logging.getLogger(__name__)29LOG = logging.getLogger(__name__)
30NET_CONFIG_TO_V2 = {
31 'bond': {'bond-ad-select': 'ad-select',
32 'bond-arp-interval': 'arp-interval',
33 'bond-arp-ip-target': 'arp-ip-target',
34 'bond-arp-validate': 'arp-validate',
35 'bond-downdelay': 'down-delay',
36 'bond-fail-over-mac': 'fail-over-mac-policy',
37 'bond-lacp-rate': 'lacp-rate',
38 'bond-miimon': 'mii-monitor-interval',
39 'bond-min-links': 'min-links',
40 'bond-mode': 'mode',
41 'bond-num-grat-arp': 'gratuitious-arp',
42 'bond-primary-reselect': 'primary-reselect-policy',
43 'bond-updelay': 'up-delay',
44 'bond-xmit-hash-policy': 'transmit-hash-policy'},
45 'bridge': {'bridge_ageing': 'ageing-time',
46 'bridge_bridgeprio': 'priority',
47 'bridge_fd': 'forward-delay',
48 'bridge_gcint': None,
49 'bridge_hello': 'hello-time',
50 'bridge_maxage': 'max-age',
51 'bridge_maxwait': None,
52 'bridge_pathcost': 'path-cost',
53 'bridge_portprio': None,
54 'bridge_waitport': None}}
5530
5631
57def _get_params_dict_by_match(config, match):32def _get_params_dict_by_match(config, match):
@@ -247,6 +222,14 @@ class Renderer(renderer.Renderer):
247 util.subp(cmd, capture=True)222 util.subp(cmd, capture=True)
248223
249 def _render_content(self, network_state):224 def _render_content(self, network_state):
225
226 # if content already in netplan format, pass it back
227 if network_state.version == 2:
228 LOG.debug('V2 to V2 passthrough')
229 return util.yaml_dumps({'network': network_state.config},
230 explicit_start=False,
231 explicit_end=False)
232
250 ethernets = {}233 ethernets = {}
251 wifis = {}234 wifis = {}
252 bridges = {}235 bridges = {}
@@ -261,9 +244,9 @@ class Renderer(renderer.Renderer):
261244
262 for config in network_state.iter_interfaces():245 for config in network_state.iter_interfaces():
263 ifname = config.get('name')246 ifname = config.get('name')
264 # filter None entries up front so we can do simple if key in dict247 # filter None (but not False) entries up front
265 ifcfg = dict((key, value) for (key, value) in config.items()248 ifcfg = dict((key, value) for (key, value) in config.items()
266 if value)249 if value is not None)
267250
268 if_type = ifcfg.get('type')251 if_type = ifcfg.get('type')
269 if if_type == 'physical':252 if if_type == 'physical':
@@ -335,6 +318,7 @@ class Renderer(renderer.Renderer):
335 (port, cost) = costval.split()318 (port, cost) = costval.split()
336 newvalue[port] = int(cost)319 newvalue[port] = int(cost)
337 br_config.update({newname: newvalue})320 br_config.update({newname: newvalue})
321
338 if len(br_config) > 0:322 if len(br_config) > 0:
339 bridge.update({'parameters': br_config})323 bridge.update({'parameters': br_config})
340 _extract_addresses(ifcfg, bridge)324 _extract_addresses(ifcfg, bridge)
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index 87a7222..0e830ee 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -23,6 +23,34 @@ NETWORK_V2_KEY_FILTER = [
23 'match', 'mtu', 'nameservers', 'renderer', 'set-name', 'wakeonlan'23 'match', 'mtu', 'nameservers', 'renderer', 'set-name', 'wakeonlan'
24]24]
2525
26NET_CONFIG_TO_V2 = {
27 'bond': {'bond-ad-select': 'ad-select',
28 'bond-arp-interval': 'arp-interval',
29 'bond-arp-ip-target': 'arp-ip-target',
30 'bond-arp-validate': 'arp-validate',
31 'bond-downdelay': 'down-delay',
32 'bond-fail-over-mac': 'fail-over-mac-policy',
33 'bond-lacp-rate': 'lacp-rate',
34 'bond-miimon': 'mii-monitor-interval',
35 'bond-min-links': 'min-links',
36 'bond-mode': 'mode',
37 'bond-num-grat-arp': 'gratuitious-arp',
38 'bond-primary': 'primary',
39 'bond-primary-reselect': 'primary-reselect-policy',
40 'bond-updelay': 'up-delay',
41 'bond-xmit-hash-policy': 'transmit-hash-policy'},
42 'bridge': {'bridge_ageing': 'ageing-time',
43 'bridge_bridgeprio': 'priority',
44 'bridge_fd': 'forward-delay',
45 'bridge_gcint': None,
46 'bridge_hello': 'hello-time',
47 'bridge_maxage': 'max-age',
48 'bridge_maxwait': None,
49 'bridge_pathcost': 'path-cost',
50 'bridge_portprio': None,
51 'bridge_stp': 'stp',
52 'bridge_waitport': None}}
53
2654
27def parse_net_config_data(net_config, skip_broken=True):55def parse_net_config_data(net_config, skip_broken=True):
28 """Parses the config, returns NetworkState object56 """Parses the config, returns NetworkState object
@@ -120,6 +148,10 @@ class NetworkState(object):
120 self.use_ipv6 = network_state.get('use_ipv6', False)148 self.use_ipv6 = network_state.get('use_ipv6', False)
121149
122 @property150 @property
151 def config(self):
152 return self._network_state['config']
153
154 @property
123 def version(self):155 def version(self):
124 return self._version156 return self._version
125157
@@ -166,12 +198,14 @@ class NetworkStateInterpreter(object):
166 'search': [],198 'search': [],
167 },199 },
168 'use_ipv6': False,200 'use_ipv6': False,
201 'config': None,
169 }202 }
170203
171 def __init__(self, version=NETWORK_STATE_VERSION, config=None):204 def __init__(self, version=NETWORK_STATE_VERSION, config=None):
172 self._version = version205 self._version = version
173 self._config = config206 self._config = config
174 self._network_state = copy.deepcopy(self.initial_network_state)207 self._network_state = copy.deepcopy(self.initial_network_state)
208 self._network_state['config'] = config
175 self._parsed = False209 self._parsed = False
176210
177 @property211 @property
@@ -432,6 +466,18 @@ class NetworkStateInterpreter(object):
432 for param, val in command.get('params', {}).items():466 for param, val in command.get('params', {}).items():
433 iface.update({param: val})467 iface.update({param: val})
434468
469 # convert value to boolean
470 bridge_stp = iface.get('bridge_stp')
471 if bridge_stp is not None and type(bridge_stp) != bool:
472 if bridge_stp in ['on', '1', 1]:
473 bridge_stp = True
474 elif bridge_stp in ['off', '0', 0]:
475 bridge_stp = False
476 else:
477 raise ValueError("Cannot convert bridge_stp value"
478 "(%s) to boolean", bridge_stp)
479 iface.update({'bridge_stp': bridge_stp})
480
435 interfaces.update({iface['name']: iface})481 interfaces.update({iface['name']: iface})
436482
437 @ensure_command_keys(['address'])483 @ensure_command_keys(['address'])
@@ -460,12 +506,15 @@ class NetworkStateInterpreter(object):
460 v2_command = {506 v2_command = {
461 bond0: {507 bond0: {
462 'interfaces': ['interface0', 'interface1'],508 'interfaces': ['interface0', 'interface1'],
463 'miimon': 100,509 'parameters': {
464 'mode': '802.3ad',510 'mii-monitor-interval': 100,
465 'xmit_hash_policy': 'layer3+4'},511 'mode': '802.3ad',
512 'xmit_hash_policy': 'layer3+4'}},
466 bond1: {513 bond1: {
467 'bond-slaves': ['interface2', 'interface7'],514 'bond-slaves': ['interface2', 'interface7'],
468 'mode': 1515 'parameters': {
516 'mode': 1,
517 }
469 }518 }
470 }519 }
471520
@@ -489,8 +538,8 @@ class NetworkStateInterpreter(object):
489 v2_command = {538 v2_command = {
490 br0: {539 br0: {
491 'interfaces': ['interface0', 'interface1'],540 'interfaces': ['interface0', 'interface1'],
492 'fd': 0,541 'forward-delay': 0,
493 'stp': 'off',542 'stp': False,
494 'maxwait': 0,543 'maxwait': 0,
495 }544 }
496 }545 }
@@ -554,6 +603,7 @@ class NetworkStateInterpreter(object):
554 if not mac_address:603 if not mac_address:
555 LOG.debug('NetworkState Version2: missing "macaddress" info '604 LOG.debug('NetworkState Version2: missing "macaddress" info '
556 'in config entry: %s: %s', eth, str(cfg))605 'in config entry: %s: %s', eth, str(cfg))
606 phy_cmd.update({'mac_address': mac_address})
557607
558 for key in ['mtu', 'match', 'wakeonlan']:608 for key in ['mtu', 'match', 'wakeonlan']:
559 if key in cfg:609 if key in cfg:
@@ -598,8 +648,8 @@ class NetworkStateInterpreter(object):
598 self.handle_vlan(vlan_cmd)648 self.handle_vlan(vlan_cmd)
599649
600 def handle_wifis(self, command):650 def handle_wifis(self, command):
601 raise NotImplementedError("NetworkState V2: "651 LOG.warning('Wifi configuration is only available to distros with'
602 "Skipping wifi configuration")652 'netplan rendering support.')
603653
604 def _v2_common(self, cfg):654 def _v2_common(self, cfg):
605 LOG.debug('v2_common: handling config:\n%s', cfg)655 LOG.debug('v2_common: handling config:\n%s', cfg)
@@ -616,6 +666,11 @@ class NetworkStateInterpreter(object):
616666
617 def _handle_bond_bridge(self, command, cmd_type=None):667 def _handle_bond_bridge(self, command, cmd_type=None):
618 """Common handler for bond and bridge types"""668 """Common handler for bond and bridge types"""
669
670 # inverse mapping for v2 keynames to v1 keynames
671 v2key_to_v1 = dict((v, k) for k, v in
672 NET_CONFIG_TO_V2.get(cmd_type).items())
673
619 for item_name, item_cfg in command.items():674 for item_name, item_cfg in command.items():
620 item_params = dict((key, value) for (key, value) in675 item_params = dict((key, value) for (key, value) in
621 item_cfg.items() if key not in676 item_cfg.items() if key not in
@@ -624,14 +679,20 @@ class NetworkStateInterpreter(object):
624 'type': cmd_type,679 'type': cmd_type,
625 'name': item_name,680 'name': item_name,
626 cmd_type + '_interfaces': item_cfg.get('interfaces'),681 cmd_type + '_interfaces': item_cfg.get('interfaces'),
627 'params': item_params,682 'params': dict((v2key_to_v1[k], v) for k, v in
683 item_params.get('parameters', {}).items())
628 }684 }
629 subnets = self._v2_to_v1_ipcfg(item_cfg)685 subnets = self._v2_to_v1_ipcfg(item_cfg)
630 if len(subnets) > 0:686 if len(subnets) > 0:
631 v1_cmd.update({'subnets': subnets})687 v1_cmd.update({'subnets': subnets})
632688
633 LOG.debug('v2(%ss) -> v1(%s):\n%s', cmd_type, cmd_type, v1_cmd)689 LOG.debug('v2(%s) -> v1(%s):\n%s', cmd_type, cmd_type, v1_cmd)
634 self.handle_bridge(v1_cmd)690 if cmd_type == "bridge":
691 self.handle_bridge(v1_cmd)
692 elif cmd_type == "bond":
693 self.handle_bond(v1_cmd)
694 else:
695 raise ValueError('Unknown command type: %s', cmd_type)
635696
636 def _v2_to_v1_ipcfg(self, cfg):697 def _v2_to_v1_ipcfg(self, cfg):
637 """Common ipconfig extraction from v2 to v1 subnets array."""698 """Common ipconfig extraction from v2 to v1 subnets array."""
@@ -651,12 +712,6 @@ class NetworkStateInterpreter(object):
651 'address': address,712 'address': address,
652 }713 }
653714
654 routes = []
655 for route in cfg.get('routes', []):
656 routes.append(_normalize_route(
657 {'address': route.get('to'), 'gateway': route.get('via')}))
658 subnet['routes'] = routes
659
660 if ":" in address:715 if ":" in address:
661 if 'gateway6' in cfg and gateway6 is None:716 if 'gateway6' in cfg and gateway6 is None:
662 gateway6 = cfg.get('gateway6')717 gateway6 = cfg.get('gateway6')
@@ -667,6 +722,17 @@ class NetworkStateInterpreter(object):
667 subnet.update({'gateway': gateway4})722 subnet.update({'gateway': gateway4})
668723
669 subnets.append(subnet)724 subnets.append(subnet)
725
726 routes = []
727 for route in cfg.get('routes', []):
728 routes.append(_normalize_route(
729 {'destination': route.get('to'), 'gateway': route.get('via')}))
730
731 # v2 routes are bound to the interface, in v1 we add them under
732 # the first subnet since there isn't an equivalent interface level.
733 if len(subnets) and len(routes):
734 subnets[0]['routes'] = routes
735
670 return subnets736 return subnets
671737
672738
@@ -721,7 +787,7 @@ def _normalize_net_keys(network, address_keys=()):
721 elif netmask:787 elif netmask:
722 prefix = mask_to_net_prefix(netmask)788 prefix = mask_to_net_prefix(netmask)
723 elif 'prefix' in net:789 elif 'prefix' in net:
724 prefix = int(prefix)790 prefix = int(net['prefix'])
725 else:791 else:
726 prefix = 64 if ipv6 else 24792 prefix = 64 if ipv6 else 24
727793
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index a550f97..f572796 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -484,7 +484,11 @@ class Renderer(renderer.Renderer):
484 content.add_nameserver(nameserver)484 content.add_nameserver(nameserver)
485 for searchdomain in network_state.dns_searchdomains:485 for searchdomain in network_state.dns_searchdomains:
486 content.add_search_domain(searchdomain)486 content.add_search_domain(searchdomain)
487 return "\n".join([_make_header(';'), str(content)])487 header = _make_header(';')
488 content_str = str(content)
489 if not content_str.startswith(header):
490 content_str = header + '\n' + content_str
491 return content_str
488492
489 @staticmethod493 @staticmethod
490 def _render_networkmanager_conf(network_state):494 def _render_networkmanager_conf(network_state):
diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py
491new file mode 100644495new file mode 100644
index 0000000..1c1f504
--- /dev/null
+++ b/cloudinit/net/tests/test_dhcp.py
@@ -0,0 +1,260 @@
1# This file is part of cloud-init. See LICENSE file for license information.
2
3import mock
4import os
5from textwrap import dedent
6
7from cloudinit.net.dhcp import (
8 InvalidDHCPLeaseFileError, maybe_perform_dhcp_discovery,
9 parse_dhcp_lease_file, dhcp_discovery, networkd_load_leases)
10from cloudinit.util import ensure_file, write_file
11from cloudinit.tests.helpers import CiTestCase, wrap_and_call, populate_dir
12
13
class TestParseDHCPLeasesFile(CiTestCase):
    """Tests for cloudinit.net.dhcp.parse_dhcp_lease_file."""

    def test_parse_empty_lease_file_errors(self):
        """An empty lease file raises InvalidDHCPLeaseFileError."""
        lease_path = self.tmp_path('leases')
        ensure_file(lease_path)
        with self.assertRaises(InvalidDHCPLeaseFileError) as cm:
            parse_dhcp_lease_file(lease_path)
        self.assertIn(
            'Cannot parse empty dhcp lease file', str(cm.exception))

    def test_parse_malformed_lease_file_content_errors(self):
        """Content that is not dhclient lease syntax raises an error."""
        lease_path = self.tmp_path('leases')
        write_file(lease_path, 'hi mom.')
        with self.assertRaises(InvalidDHCPLeaseFileError) as cm:
            parse_dhcp_lease_file(lease_path)
        self.assertIn('Cannot parse dhcp lease file', str(cm.exception))

    def test_parse_multiple_leases(self):
        """Every lease stanza in the file is returned as its own dict."""
        lease_path = self.tmp_path('leases')
        write_file(lease_path, dedent("""
            lease {
              interface "wlp3s0";
              fixed-address 192.168.2.74;
              option subnet-mask 255.255.255.0;
              option routers 192.168.2.1;
              renew 4 2017/07/27 18:02:30;
              expire 5 2017/07/28 07:08:15;
            }
            lease {
              interface "wlp3s0";
              fixed-address 192.168.2.74;
              option subnet-mask 255.255.255.0;
              option routers 192.168.2.1;
            }
        """))
        first_lease = {
            'interface': 'wlp3s0', 'fixed-address': '192.168.2.74',
            'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1',
            'renew': '4 2017/07/27 18:02:30',
            'expire': '5 2017/07/28 07:08:15'}
        second_lease = {
            'interface': 'wlp3s0', 'fixed-address': '192.168.2.74',
            'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}
        self.assertItemsEqual(
            [first_lease, second_lease],
            parse_dhcp_lease_file(lease_path))
63
class TestDHCPDiscoveryClean(CiTestCase):
    """Tests for maybe_perform_dhcp_discovery and dhcp_discovery."""

    with_logs = True  # have CiTestCase capture log output in self.logs

    @mock.patch('cloudinit.net.dhcp.find_fallback_nic')
    def test_no_fallback_nic_found(self, m_fallback_nic):
        """Log and do nothing when nic is absent and no fallback is found."""
        m_fallback_nic.return_value = None  # No fallback nic found
        self.assertEqual({}, maybe_perform_dhcp_discovery())
        self.assertIn(
            'Skip dhcp_discovery: Unable to find fallback nic.',
            self.logs.getvalue())

    def test_provided_nic_does_not_exist(self):
        """When the provided nic doesn't exist, log a message and no-op."""
        self.assertEqual({}, maybe_perform_dhcp_discovery('idontexist'))
        self.assertIn(
            'Skip dhcp_discovery: nic idontexist not found in get_devicelist.',
            self.logs.getvalue())

    @mock.patch('cloudinit.net.dhcp.util.which')
    @mock.patch('cloudinit.net.dhcp.find_fallback_nic')
    def test_absent_dhclient_command(self, m_fallback, m_which):
        """When dhclient doesn't exist in the OS, log the issue and no-op."""
        m_fallback.return_value = 'eth9'
        m_which.return_value = None  # dhclient isn't found
        self.assertEqual({}, maybe_perform_dhcp_discovery())
        self.assertIn(
            'Skip dhclient configuration: No dhclient command found.',
            self.logs.getvalue())

    @mock.patch('cloudinit.temp_utils.os.getuid')
    @mock.patch('cloudinit.net.dhcp.dhcp_discovery')
    @mock.patch('cloudinit.net.dhcp.util.which')
    @mock.patch('cloudinit.net.dhcp.find_fallback_nic')
    def test_dhclient_run_with_tmpdir(self, m_fback, m_which, m_dhcp, m_uid):
        """maybe_perform_dhcp_discovery passes tmpdir to dhcp_discovery."""
        m_uid.return_value = 0  # Fake root user for tmpdir
        m_fback.return_value = 'eth9'
        m_which.return_value = '/sbin/dhclient'
        m_dhcp.return_value = {'address': '192.168.2.2'}
        # wrap_and_call patches cloudinit.temp_utils._TMPDIR to None and
        # os.getuid to 0 only for the duration of this call.
        retval = wrap_and_call(
            'cloudinit.temp_utils',
            {'_TMPDIR': {'new': None},
             'os.getuid': 0},
            maybe_perform_dhcp_discovery)
        self.assertEqual({'address': '192.168.2.2'}, retval)
        self.assertEqual(
            1, m_dhcp.call_count, 'dhcp_discovery not called once')
        # Positional args observed: (dhclient_path, nic, tmpdir)
        call = m_dhcp.call_args_list[0]
        self.assertEqual('/sbin/dhclient', call[0][0])
        self.assertEqual('eth9', call[0][1])
        self.assertIn('/var/tmp/cloud-init/cloud-init-dhcp-', call[0][2])

    @mock.patch('cloudinit.net.dhcp.util.subp')
    def test_dhcp_discovery_run_in_sandbox(self, m_subp):
        """dhcp_discovery brings up the interface and runs dhclient.

        It also returns the parsed dhcp.leases file generated in the sandbox.
        """
        tmpdir = self.tmp_dir()
        dhclient_script = os.path.join(tmpdir, 'dhclient.orig')
        script_content = '#!/bin/bash\necho fake-dhclient'
        write_file(dhclient_script, script_content, mode=0o755)
        lease_content = dedent("""
            lease {
              interface "eth9";
              fixed-address 192.168.2.74;
              option subnet-mask 255.255.255.0;
              option routers 192.168.2.1;
            }
        """)
        lease_file = os.path.join(tmpdir, 'dhcp.leases')
        write_file(lease_file, lease_content)
        self.assertItemsEqual(
            [{'interface': 'eth9', 'fixed-address': '192.168.2.74',
              'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}],
            dhcp_discovery(dhclient_script, 'eth9', tmpdir))
        # dhclient script got copied
        with open(os.path.join(tmpdir, 'dhclient')) as stream:
            self.assertEqual(script_content, stream.read())
        # Interface was brought up before dhclient called from sandbox
        m_subp.assert_has_calls([
            mock.call(
                ['ip', 'link', 'set', 'dev', 'eth9', 'up'], capture=True),
            mock.call(
                [os.path.join(tmpdir, 'dhclient'), '-1', '-v', '-lf',
                 lease_file, '-pf', os.path.join(tmpdir, 'dhclient.pid'),
                 'eth9', '-sf', '/bin/true'], capture=True)])
153
class TestSystemdParseLeases(CiTestCase):
    """Tests for cloudinit.net.dhcp.networkd_load_leases."""

    # Fixture: a systemd-networkd lease file as written inside an LXD guest.
    lxd_lease = dedent("""\
        # This is private data. Do not parse.
        ADDRESS=10.75.205.242
        NETMASK=255.255.255.0
        ROUTER=10.75.205.1
        SERVER_ADDRESS=10.75.205.1
        NEXT_SERVER=10.75.205.1
        BROADCAST=10.75.205.255
        T1=1580
        T2=2930
        LIFETIME=3600
        DNS=10.75.205.1
        DOMAINNAME=lxd
        HOSTNAME=a1
        CLIENTID=ffe617693400020000ab110c65a6a0866931c2
        """)

    # Expected parse of lxd_lease: every KEY=VALUE line as string pairs,
    # the leading comment line dropped.
    lxd_parsed = {
        'ADDRESS': '10.75.205.242',
        'NETMASK': '255.255.255.0',
        'ROUTER': '10.75.205.1',
        'SERVER_ADDRESS': '10.75.205.1',
        'NEXT_SERVER': '10.75.205.1',
        'BROADCAST': '10.75.205.255',
        'T1': '1580',
        'T2': '2930',
        'LIFETIME': '3600',
        'DNS': '10.75.205.1',
        'DOMAINNAME': 'lxd',
        'HOSTNAME': 'a1',
        'CLIENTID': 'ffe617693400020000ab110c65a6a0866931c2',
    }

    # Fixture: a lease as seen on Azure, which includes OPTION_245.
    azure_lease = dedent("""\
        # This is private data. Do not parse.
        ADDRESS=10.132.0.5
        NETMASK=255.255.255.255
        ROUTER=10.132.0.1
        SERVER_ADDRESS=169.254.169.254
        NEXT_SERVER=10.132.0.1
        MTU=1460
        T1=43200
        T2=75600
        LIFETIME=86400
        DNS=169.254.169.254
        NTP=169.254.169.254
        DOMAINNAME=c.ubuntu-foundations.internal
        DOMAIN_SEARCH_LIST=c.ubuntu-foundations.internal google.internal
        HOSTNAME=tribaal-test-171002-1349.c.ubuntu-foundations.internal
        ROUTES=10.132.0.1/32,0.0.0.0 0.0.0.0/0,10.132.0.1
        CLIENTID=ff405663a200020000ab11332859494d7a8b4c
        OPTION_245=624c3620
        """)

    # Expected parse of azure_lease, including the OPTION_245 key.
    azure_parsed = {
        'ADDRESS': '10.132.0.5',
        'NETMASK': '255.255.255.255',
        'ROUTER': '10.132.0.1',
        'SERVER_ADDRESS': '169.254.169.254',
        'NEXT_SERVER': '10.132.0.1',
        'MTU': '1460',
        'T1': '43200',
        'T2': '75600',
        'LIFETIME': '86400',
        'DNS': '169.254.169.254',
        'NTP': '169.254.169.254',
        'DOMAINNAME': 'c.ubuntu-foundations.internal',
        'DOMAIN_SEARCH_LIST': 'c.ubuntu-foundations.internal google.internal',
        'HOSTNAME': 'tribaal-test-171002-1349.c.ubuntu-foundations.internal',
        'ROUTES': '10.132.0.1/32,0.0.0.0 0.0.0.0/0,10.132.0.1',
        'CLIENTID': 'ff405663a200020000ab11332859494d7a8b4c',
        'OPTION_245': '624c3620'}

    def setUp(self):
        super(TestSystemdParseLeases, self).setUp()
        # Fresh temporary leases directory per test.
        self.lease_d = self.tmp_dir()

    def test_no_leases_returns_empty_dict(self):
        """A leases dir with no lease files should return empty dictionary."""
        self.assertEqual({}, networkd_load_leases(self.lease_d))

    def test_no_leases_dir_returns_empty_dict(self):
        """A non-existing leases dir should return empty dict."""
        enodir = os.path.join(self.lease_d, 'does-not-exist')
        self.assertEqual({}, networkd_load_leases(enodir))

    def test_single_leases_file(self):
        """A leases dir with one leases file."""
        populate_dir(self.lease_d, {'2': self.lxd_lease})
        self.assertEqual(
            {'2': self.lxd_parsed}, networkd_load_leases(self.lease_d))

    def test_single_azure_leases_file(self):
        """On Azure, option 245 should be present, verify it specifically."""
        populate_dir(self.lease_d, {'1': self.azure_lease})
        self.assertEqual(
            {'1': self.azure_parsed}, networkd_load_leases(self.lease_d))

    def test_multiple_files(self):
        """Multiple leases files on azure with one found return that value."""
        self.maxDiff = None
        populate_dir(self.lease_d, {'1': self.azure_lease,
                                    '9': self.lxd_lease})
        self.assertEqual({'1': self.azure_parsed, '9': self.lxd_parsed},
                         networkd_load_leases(self.lease_d))
diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py
index 272a6eb..8cb4114 100644
--- a/cloudinit/net/tests/test_init.py
+++ b/cloudinit/net/tests/test_init.py
@@ -7,7 +7,7 @@ import os
77
8import cloudinit.net as net8import cloudinit.net as net
9from cloudinit.util import ensure_file, write_file, ProcessExecutionError9from cloudinit.util import ensure_file, write_file, ProcessExecutionError
10from tests.unittests.helpers import CiTestCase10from cloudinit.tests.helpers import CiTestCase
1111
1212
13class TestSysDevPath(CiTestCase):13class TestSysDevPath(CiTestCase):
@@ -414,7 +414,7 @@ class TestEphemeralIPV4Network(CiTestCase):
414 self.assertIn('Cannot init network on', str(error))414 self.assertIn('Cannot init network on', str(error))
415 self.assertEqual(0, m_subp.call_count)415 self.assertEqual(0, m_subp.call_count)
416416
417 def test_ephemeral_ipv4_network_errors_invalid_mask(self, m_subp):417 def test_ephemeral_ipv4_network_errors_invalid_mask_prefix(self, m_subp):
418 """Raise an error when prefix_or_mask is not a netmask or prefix."""418 """Raise an error when prefix_or_mask is not a netmask or prefix."""
419 params = {419 params = {
420 'interface': 'eth0', 'ip': '192.168.2.2',420 'interface': 'eth0', 'ip': '192.168.2.2',
diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
index 39c79de..8f99d99 100644
--- a/cloudinit/netinfo.py
+++ b/cloudinit/netinfo.py
@@ -13,7 +13,7 @@ import re
13from cloudinit import log as logging13from cloudinit import log as logging
14from cloudinit import util14from cloudinit import util
1515
16from prettytable import PrettyTable16from cloudinit.simpletable import SimpleTable
1717
18LOG = logging.getLogger()18LOG = logging.getLogger()
1919
@@ -170,7 +170,7 @@ def netdev_pformat():
170 lines.append(util.center("Net device info failed", '!', 80))170 lines.append(util.center("Net device info failed", '!', 80))
171 else:171 else:
172 fields = ['Device', 'Up', 'Address', 'Mask', 'Scope', 'Hw-Address']172 fields = ['Device', 'Up', 'Address', 'Mask', 'Scope', 'Hw-Address']
173 tbl = PrettyTable(fields)173 tbl = SimpleTable(fields)
174 for (dev, d) in netdev.items():174 for (dev, d) in netdev.items():
175 tbl.add_row([dev, d["up"], d["addr"], d["mask"], ".", d["hwaddr"]])175 tbl.add_row([dev, d["up"], d["addr"], d["mask"], ".", d["hwaddr"]])
176 if d.get('addr6'):176 if d.get('addr6'):
@@ -194,7 +194,7 @@ def route_pformat():
194 if routes.get('ipv4'):194 if routes.get('ipv4'):
195 fields_v4 = ['Route', 'Destination', 'Gateway',195 fields_v4 = ['Route', 'Destination', 'Gateway',
196 'Genmask', 'Interface', 'Flags']196 'Genmask', 'Interface', 'Flags']
197 tbl_v4 = PrettyTable(fields_v4)197 tbl_v4 = SimpleTable(fields_v4)
198 for (n, r) in enumerate(routes.get('ipv4')):198 for (n, r) in enumerate(routes.get('ipv4')):
199 route_id = str(n)199 route_id = str(n)
200 tbl_v4.add_row([route_id, r['destination'],200 tbl_v4.add_row([route_id, r['destination'],
@@ -207,7 +207,7 @@ def route_pformat():
207 if routes.get('ipv6'):207 if routes.get('ipv6'):
208 fields_v6 = ['Route', 'Proto', 'Recv-Q', 'Send-Q',208 fields_v6 = ['Route', 'Proto', 'Recv-Q', 'Send-Q',
209 'Local Address', 'Foreign Address', 'State']209 'Local Address', 'Foreign Address', 'State']
210 tbl_v6 = PrettyTable(fields_v6)210 tbl_v6 = SimpleTable(fields_v6)
211 for (n, r) in enumerate(routes.get('ipv6')):211 for (n, r) in enumerate(routes.get('ipv6')):
212 route_id = str(n)212 route_id = str(n)
213 tbl_v6.add_row([route_id, r['proto'],213 tbl_v6.add_row([route_id, r['proto'],
diff --git a/cloudinit/simpletable.py b/cloudinit/simpletable.py
214new file mode 100644214new file mode 100644
index 0000000..9060322
--- /dev/null
+++ b/cloudinit/simpletable.py
@@ -0,0 +1,62 @@
1# Copyright (C) 2017 Amazon.com, Inc. or its affiliates
2#
3# Author: Ethan Faust <efaust@amazon.com>
4# Author: Andrew Jorgensen <ajorgens@amazon.com>
5#
6# This file is part of cloud-init. See LICENSE file for license information.
7
8
9class SimpleTable(object):
10 """A minimal implementation of PrettyTable
11 for distribution with cloud-init.
12 """
13
14 def __init__(self, fields):
15 self.fields = fields
16 self.rows = []
17
18 # initialize list of 0s the same length
19 # as the number of fields
20 self.column_widths = [0] * len(self.fields)
21 self.update_column_widths(fields)
22
23 def update_column_widths(self, values):
24 for i, value in enumerate(values):
25 self.column_widths[i] = max(
26 len(value),
27 self.column_widths[i])
28
29 def add_row(self, values):
30 if len(values) > len(self.fields):
31 raise TypeError('too many values')
32 values = [str(value) for value in values]
33 self.rows.append(values)
34 self.update_column_widths(values)
35
36 def _hdiv(self):
37 """Returns a horizontal divider for the table."""
38 return '+' + '+'.join(
39 ['-' * (w + 2) for w in self.column_widths]) + '+'
40
41 def _row(self, row):
42 """Returns a formatted row."""
43 return '|' + '|'.join(
44 [col.center(self.column_widths[i] + 2)
45 for i, col in enumerate(row)]) + '|'
46
47 def __str__(self):
48 """Returns a string representation of the table with lines around.
49
50 +-----+-----+
51 | one | two |
52 +-----+-----+
53 | 1 | 2 |
54 | 01 | 10 |
55 +-----+-----+
56 """
57 lines = [self._hdiv(), self._row(self.fields), self._hdiv()]
58 lines += [self._row(r) for r in self.rows] + [self._hdiv()]
59 return '\n'.join(lines)
60
61 def get_string(self):
62 return repr(self)
diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py
index 380e27c..43a7e42 100644
--- a/cloudinit/sources/DataSourceAliYun.py
+++ b/cloudinit/sources/DataSourceAliYun.py
The diff has been truncated for viewing.

Subscribers

People subscribed via source and target branches