Merge ~chad.smith/cloud-init:ubuntu/xenial into cloud-init:ubuntu/xenial
- Git
- lp:~chad.smith/cloud-init
- ubuntu/xenial
- Merge into ubuntu/xenial
Proposed by
Chad Smith
Status: Merged
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Merged at revision: | d489374c5a9e7f81649f87c48401bf12e795a7e7 | ||||||||||||
Proposed branch: | ~chad.smith/cloud-init:ubuntu/xenial | ||||||||||||
Merge into: | cloud-init:ubuntu/xenial | ||||||||||||
Diff against target:
15710 lines (+9573/-1295) 271 files modified
ChangeLog (+422/-0) Makefile (+3/-3) cloudinit/analyze/__init__.py (+0/-0) cloudinit/analyze/__main__.py (+155/-0) cloudinit/analyze/dump.py (+176/-0) cloudinit/analyze/show.py (+207/-0) cloudinit/analyze/tests/test_dump.py (+210/-0) cloudinit/apport.py (+105/-0) cloudinit/cmd/devel/__init__.py (+0/-0) cloudinit/cmd/devel/logs.py (+101/-0) cloudinit/cmd/devel/parser.py (+26/-0) cloudinit/cmd/devel/tests/__init__.py (+0/-0) cloudinit/cmd/devel/tests/test_logs.py (+120/-0) cloudinit/cmd/main.py (+35/-34) cloudinit/config/cc_bootcmd.py (+60/-30) cloudinit/config/cc_chef.py (+33/-11) cloudinit/config/cc_landscape.py (+2/-2) cloudinit/config/cc_ntp.py (+57/-49) cloudinit/config/cc_puppet.py (+18/-15) cloudinit/config/cc_resizefs.py (+87/-70) cloudinit/config/cc_resolv_conf.py (+1/-1) cloudinit/config/cc_runcmd.py (+57/-27) cloudinit/config/cc_snappy.py (+2/-2) cloudinit/config/cc_ssh_authkey_fingerprints.py (+2/-2) cloudinit/config/cc_zypper_add_repo.py (+218/-0) cloudinit/config/schema.py (+181/-43) cloudinit/distros/__init__.py (+8/-1) cloudinit/distros/arch.py (+59/-31) cloudinit/distros/debian.py (+71/-23) cloudinit/distros/opensuse.py (+212/-0) cloudinit/distros/sles.py (+5/-155) cloudinit/helpers.py (+7/-7) cloudinit/log.py (+5/-0) cloudinit/net/__init__.py (+21/-30) cloudinit/net/dhcp.py (+163/-0) cloudinit/net/eni.py (+3/-0) cloudinit/net/netplan.py (+12/-28) cloudinit/net/network_state.py (+84/-18) cloudinit/net/sysconfig.py (+5/-1) cloudinit/net/tests/test_dhcp.py (+260/-0) cloudinit/net/tests/test_init.py (+2/-2) cloudinit/netinfo.py (+4/-4) cloudinit/simpletable.py (+62/-0) cloudinit/sources/DataSourceAliYun.py (+6/-3) cloudinit/sources/DataSourceAltCloud.py (+2/-2) cloudinit/sources/DataSourceAzure.py (+7/-3) cloudinit/sources/DataSourceCloudStack.py (+37/-14) cloudinit/sources/DataSourceEc2.py (+164/-22) cloudinit/sources/DataSourceGCE.py (+126/-72) cloudinit/sources/DataSourceOVF.py (+169/-51) cloudinit/sources/__init__.py (+8/-1) 
cloudinit/sources/helpers/azure.py (+16/-8) cloudinit/sources/helpers/vmware/imc/config.py (+21/-3) cloudinit/sources/helpers/vmware/imc/config_nic.py (+130/-71) cloudinit/sources/helpers/vmware/imc/config_passwd.py (+67/-0) cloudinit/sources/helpers/vmware/imc/guestcust_util.py (+7/-5) cloudinit/stages.py (+20/-13) cloudinit/temp_utils.py (+101/-0) cloudinit/tests/__init__.py (+0/-0) cloudinit/tests/helpers.py (+16/-2) cloudinit/tests/test_simpletable.py (+100/-0) cloudinit/tests/test_temp_utils.py (+101/-0) cloudinit/tests/test_url_helper.py (+40/-0) cloudinit/url_helper.py (+5/-1) cloudinit/util.py (+41/-42) cloudinit/version.py (+1/-1) config/cloud.cfg.tmpl (+9/-5) debian/changelog (+115/-2) dev/null (+0/-2) doc/examples/cloud-config-chef.txt (+4/-0) doc/rtd/index.rst (+1/-0) doc/rtd/topics/capabilities.rst (+40/-10) doc/rtd/topics/datasources.rst (+1/-0) doc/rtd/topics/datasources/gce.rst (+20/-0) doc/rtd/topics/debugging.rst (+146/-0) doc/rtd/topics/format.rst (+1/-0) doc/rtd/topics/modules.rst (+0/-1) packages/bddeb (+4/-4) packages/debian/copyright (+10/-15) packages/debian/dirs (+0/-1) packages/debian/rules.in (+2/-1) packages/pkg-deps.json (+0/-3) packages/redhat/cloud-init.spec.in (+0/-6) requirements.txt (+0/-3) setup.py (+4/-4) systemd/cloud-final.service.tmpl (+3/-1) systemd/cloud-init-local.service.tmpl (+6/-0) systemd/cloud-init.service.tmpl (+10/-0) sysvinit/suse/cloud-config (+113/-0) sysvinit/suse/cloud-final (+113/-0) sysvinit/suse/cloud-init (+114/-0) sysvinit/suse/cloud-init-local (+113/-0) templates/hosts.opensuse.tmpl (+26/-0) templates/hosts.suse.tmpl (+0/-3) templates/sources.list.debian.tmpl (+8/-8) templates/timesyncd.conf.tmpl (+8/-0) tests/cloud_tests/__init__.py (+1/-1) tests/cloud_tests/__main__.py (+4/-1) tests/cloud_tests/args.py (+2/-2) tests/cloud_tests/bddeb.py (+10/-9) tests/cloud_tests/collect.py (+3/-0) tests/cloud_tests/config.py (+1/-0) tests/cloud_tests/images/nocloudkvm.py (+88/-0) tests/cloud_tests/instances/base.py 
(+7/-5) tests/cloud_tests/instances/lxd.py (+9/-1) tests/cloud_tests/instances/nocloudkvm.py (+217/-0) tests/cloud_tests/platforms.yaml (+4/-0) tests/cloud_tests/platforms/__init__.py (+2/-0) tests/cloud_tests/platforms/nocloudkvm.py (+90/-0) tests/cloud_tests/releases.yaml (+18/-1) tests/cloud_tests/setup_image.py (+24/-8) tests/cloud_tests/snapshots/nocloudkvm.py (+74/-0) tests/cloud_tests/testcases/bugs/README.md (+0/-0) tests/cloud_tests/testcases/bugs/lp1511485.yaml (+0/-0) tests/cloud_tests/testcases/bugs/lp1611074.yaml (+0/-0) tests/cloud_tests/testcases/bugs/lp1628337.yaml (+0/-0) tests/cloud_tests/testcases/examples/README.md (+0/-0) tests/cloud_tests/testcases/examples/TODO.md (+0/-0) tests/cloud_tests/testcases/examples/add_apt_repositories.yaml (+0/-0) tests/cloud_tests/testcases/examples/alter_completion_message.yaml (+0/-0) tests/cloud_tests/testcases/examples/configure_instance_trusted_ca_certificates.yaml (+0/-0) tests/cloud_tests/testcases/examples/configure_instances_ssh_keys.yaml (+0/-0) tests/cloud_tests/testcases/examples/including_user_groups.yaml (+0/-0) tests/cloud_tests/testcases/examples/install_arbitrary_packages.yaml (+0/-0) tests/cloud_tests/testcases/examples/install_run_chef_recipes.yaml (+0/-0) tests/cloud_tests/testcases/examples/run_apt_upgrade.yaml (+0/-0) tests/cloud_tests/testcases/examples/run_commands.yaml (+0/-0) tests/cloud_tests/testcases/examples/run_commands_first_boot.yaml (+0/-0) tests/cloud_tests/testcases/examples/setup_run_puppet.yaml (+0/-0) tests/cloud_tests/testcases/examples/writing_out_arbitrary_files.yaml (+0/-0) tests/cloud_tests/testcases/main/README.md (+0/-0) tests/cloud_tests/testcases/main/command_output_simple.yaml (+0/-0) tests/cloud_tests/testcases/modules/README.md (+0/-0) tests/cloud_tests/testcases/modules/TODO.md (+0/-2) tests/cloud_tests/testcases/modules/apt_configure_conf.yaml (+0/-0) tests/cloud_tests/testcases/modules/apt_configure_disable_suites.yaml (+0/-0) 
tests/cloud_tests/testcases/modules/apt_configure_primary.yaml (+0/-0) tests/cloud_tests/testcases/modules/apt_configure_proxy.yaml (+0/-0) tests/cloud_tests/testcases/modules/apt_configure_security.yaml (+0/-0) tests/cloud_tests/testcases/modules/apt_configure_sources_key.yaml (+0/-0) tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.yaml (+0/-0) tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml (+0/-0) tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.yaml (+0/-0) tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml (+0/-0) tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml (+0/-0) tests/cloud_tests/testcases/modules/bootcmd.yaml (+0/-0) tests/cloud_tests/testcases/modules/byobu.yaml (+0/-0) tests/cloud_tests/testcases/modules/ca_certs.yaml (+0/-0) tests/cloud_tests/testcases/modules/debug_disable.yaml (+0/-0) tests/cloud_tests/testcases/modules/debug_enable.yaml (+0/-0) tests/cloud_tests/testcases/modules/final_message.yaml (+0/-0) tests/cloud_tests/testcases/modules/keys_to_console.yaml (+0/-0) tests/cloud_tests/testcases/modules/landscape.yaml (+0/-0) tests/cloud_tests/testcases/modules/locale.yaml (+0/-0) tests/cloud_tests/testcases/modules/lxd_bridge.yaml (+0/-0) tests/cloud_tests/testcases/modules/lxd_dir.yaml (+0/-0) tests/cloud_tests/testcases/modules/ntp.yaml (+0/-0) tests/cloud_tests/testcases/modules/ntp_pools.yaml (+0/-0) tests/cloud_tests/testcases/modules/ntp_servers.yaml (+0/-0) tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml (+0/-0) tests/cloud_tests/testcases/modules/runcmd.yaml (+0/-0) tests/cloud_tests/testcases/modules/salt_minion.yaml (+0/-0) tests/cloud_tests/testcases/modules/seed_random_command.yaml (+0/-0) tests/cloud_tests/testcases/modules/seed_random_data.yaml (+0/-0) tests/cloud_tests/testcases/modules/set_hostname.yaml (+0/-0) tests/cloud_tests/testcases/modules/set_hostname_fqdn.yaml (+0/-0) 
tests/cloud_tests/testcases/modules/set_password.yaml (+0/-0) tests/cloud_tests/testcases/modules/set_password_expire.yaml (+0/-0) tests/cloud_tests/testcases/modules/set_password_list.yaml (+0/-0) tests/cloud_tests/testcases/modules/set_password_list_string.yaml (+0/-0) tests/cloud_tests/testcases/modules/snappy.yaml (+0/-0) tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.yaml (+0/-0) tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.yaml (+0/-0) tests/cloud_tests/testcases/modules/ssh_import_id.yaml (+0/-0) tests/cloud_tests/testcases/modules/ssh_keys_generate.yaml (+0/-0) tests/cloud_tests/testcases/modules/ssh_keys_provided.yaml (+0/-0) tests/cloud_tests/testcases/modules/timezone.yaml (+0/-0) tests/cloud_tests/testcases/modules/user_groups.yaml (+0/-0) tests/cloud_tests/testcases/modules/write_files.yaml (+0/-0) tests/cloud_tests/util.py (+43/-0) tests/unittests/test__init__.py (+1/-1) tests/unittests/test_atomic_helper.py (+1/-1) tests/unittests/test_builtin_handlers.py (+1/-1) tests/unittests/test_cli.py (+146/-4) tests/unittests/test_cs_util.py (+1/-1) tests/unittests/test_data.py (+1/-1) tests/unittests/test_datasource/test_aliyun.py (+7/-6) tests/unittests/test_datasource/test_altcloud.py (+3/-3) tests/unittests/test_datasource/test_azure.py (+4/-2) tests/unittests/test_datasource/test_azure_helper.py (+97/-50) tests/unittests/test_datasource/test_cloudsigma.py (+1/-1) tests/unittests/test_datasource/test_cloudstack.py (+83/-7) tests/unittests/test_datasource/test_common.py (+2/-1) tests/unittests/test_datasource/test_configdrive.py (+1/-1) tests/unittests/test_datasource/test_digitalocean.py (+1/-1) tests/unittests/test_datasource/test_ec2.py (+255/-35) tests/unittests/test_datasource/test_gce.py (+3/-2) tests/unittests/test_datasource/test_maas.py (+1/-1) tests/unittests/test_datasource/test_nocloud.py (+1/-1) tests/unittests/test_datasource/test_opennebula.py (+1/-1) 
tests/unittests/test_datasource/test_openstack.py (+4/-1) tests/unittests/test_datasource/test_ovf.py (+165/-1) tests/unittests/test_datasource/test_scaleway.py (+1/-1) tests/unittests/test_datasource/test_smartos.py (+1/-1) tests/unittests/test_distros/__init__.py (+21/-0) tests/unittests/test_distros/test_arch.py (+45/-0) tests/unittests/test_distros/test_create_users.py (+1/-1) tests/unittests/test_distros/test_debian.py (+42/-24) tests/unittests/test_distros/test_generic.py (+17/-1) tests/unittests/test_distros/test_netconfig.py (+3/-3) tests/unittests/test_distros/test_opensuse.py (+12/-0) tests/unittests/test_distros/test_resolv.py (+1/-1) tests/unittests/test_distros/test_sles.py (+12/-0) tests/unittests/test_distros/test_sysconfig.py (+1/-1) tests/unittests/test_distros/test_user_data_normalize.py (+1/-1) tests/unittests/test_ds_identify.py (+47/-4) tests/unittests/test_ec2_util.py (+1/-1) tests/unittests/test_filters/test_launch_index.py (+1/-1) tests/unittests/test_handler/test_handler_apt_conf_v1.py (+1/-1) tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py (+1/-1) tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py (+1/-1) tests/unittests/test_handler/test_handler_apt_source_v1.py (+1/-1) tests/unittests/test_handler/test_handler_apt_source_v3.py (+1/-1) tests/unittests/test_handler/test_handler_bootcmd.py (+146/-0) tests/unittests/test_handler/test_handler_ca_certs.py (+1/-1) tests/unittests/test_handler/test_handler_chef.py (+76/-12) tests/unittests/test_handler/test_handler_debug.py (+7/-4) tests/unittests/test_handler/test_handler_disk_setup.py (+1/-1) tests/unittests/test_handler/test_handler_growpart.py (+1/-1) tests/unittests/test_handler/test_handler_landscape.py (+130/-0) tests/unittests/test_handler/test_handler_locale.py (+57/-3) tests/unittests/test_handler/test_handler_lxd.py (+1/-1) tests/unittests/test_handler/test_handler_mcollective.py (+1/-1) 
tests/unittests/test_handler/test_handler_mounts.py (+1/-1) tests/unittests/test_handler/test_handler_ntp.py (+102/-5) tests/unittests/test_handler/test_handler_power_state.py (+2/-2) tests/unittests/test_handler/test_handler_puppet.py (+142/-0) tests/unittests/test_handler/test_handler_resizefs.py (+222/-7) tests/unittests/test_handler/test_handler_rsyslog.py (+1/-1) tests/unittests/test_handler/test_handler_runcmd.py (+108/-0) tests/unittests/test_handler/test_handler_seed_random.py (+1/-1) tests/unittests/test_handler/test_handler_set_hostname.py (+4/-3) tests/unittests/test_handler/test_handler_snappy.py (+2/-2) tests/unittests/test_handler/test_handler_spacewalk.py (+1/-1) tests/unittests/test_handler/test_handler_timezone.py (+1/-1) tests/unittests/test_handler/test_handler_write_files.py (+1/-1) tests/unittests/test_handler/test_handler_yum_add_repo.py (+1/-1) tests/unittests/test_handler/test_handler_zypper_add_repo.py (+237/-0) tests/unittests/test_handler/test_schema.py (+151/-16) tests/unittests/test_helpers.py (+1/-1) tests/unittests/test_log.py (+58/-0) tests/unittests/test_merging.py (+1/-1) tests/unittests/test_net.py (+128/-8) tests/unittests/test_pathprefix2dict.py (+1/-1) tests/unittests/test_registry.py (+1/-1) tests/unittests/test_reporting.py (+1/-1) tests/unittests/test_rh_subscription.py (+1/-1) tests/unittests/test_runs/test_merge_run.py (+1/-1) tests/unittests/test_runs/test_simple_run.py (+106/-21) tests/unittests/test_sshutil.py (+2/-1) tests/unittests/test_templating.py (+1/-1) tests/unittests/test_util.py (+13/-2) tests/unittests/test_version.py (+1/-1) tests/unittests/test_vmware_config_file.py (+247/-2) tools/build-on-freebsd (+0/-1) tools/ds-identify (+6/-0) tools/make-tarball (+1/-1) tools/read-version (+1/-1) tools/render-cloudcfg (+3/-2) tools/xkvm (+664/-0) tox.ini (+31/-11) |
||||||||||||
Related bugs:
|
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Server Team CI bot | continuous-integration | Approve | |
cloud-init Commiters | Pending | ||
Review via email: mp+331973@code.launchpad.net
Commit message
Description of the change
Merge upstream into Xenial for SRU
To post a comment you must log in.
Revision history for this message
Server Team CI bot (server-team-bot) wrote : | # |
review:
Approve
(continuous-integration)
Revision history for this message
Scott Moser (smoser) wrote : | # |
Merged with these 2 commits
http://
There was an error fetching revisions from git servers. Please try again in a few minutes. If the problem persists, contact Launchpad support.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | diff --git a/ChangeLog b/ChangeLog | |||
2 | index 80405bc..0260c57 100644 | |||
3 | --- a/ChangeLog | |||
4 | +++ b/ChangeLog | |||
5 | @@ -1,3 +1,425 @@ | |||
6 | 1 | 17.1: | ||
7 | 2 | - doc: document GCE datasource. [Arnd Hannemann] | ||
8 | 3 | - suse: updates to templates to support openSUSE and SLES. | ||
9 | 4 | [Robert Schweikert] (LP: #1718640) | ||
10 | 5 | - suse: Copy sysvinit files from redhat with slight changes. | ||
11 | 6 | [Robert Schweikert] (LP: #1718649) | ||
12 | 7 | - docs: fix sphinx module schema documentation [Chad Smith] | ||
13 | 8 | - tests: Add cloudinit package to all test targets [Chad Smith] | ||
14 | 9 | - Makefile: No longer look for yaml files in obsolete ./bin/. | ||
15 | 10 | - tests: fix ds-identify unit tests to set EC2_STRICT_ID_DEFAULT. | ||
16 | 11 | - ec2: Fix maybe_perform_dhcp_discovery to use /var/tmp as a tmpdir | ||
17 | 12 | [Chad Smith] (LP: #1717627) | ||
18 | 13 | - Azure: wait longer for SSH pub keys to arrive. | ||
19 | 14 | [Paul Meyer] (LP: #1717611) | ||
20 | 15 | - GCE: Fix usage of user-data. (LP: #1717598) | ||
21 | 16 | - cmdline: add collect-logs subcommand. [Chad Smith] (LP: #1607345) | ||
22 | 17 | - CloudStack: consider dhclient lease files named with a hyphen. | ||
23 | 18 | (LP: #1717147) | ||
24 | 19 | - resizefs: Drop check for read-only device file, do not warn on | ||
25 | 20 | overlayroot. [Chad Smith] | ||
26 | 21 | - Do not provide systemd-fsck drop-in which could cause ordering cycles. | ||
27 | 22 | [Balint Reczey] (LP: #1717477) | ||
28 | 23 | - tests: Enable the NoCloud KVM platform [Joshua Powers] | ||
29 | 24 | - resizefs: pass mount point to xfs_growfs [Dusty Mabe] | ||
30 | 25 | - vmware: Enable nics before sending the SUCCESS event. [Sankar Tanguturi] | ||
31 | 26 | - cloud-config modules: honor distros definitions in each module | ||
32 | 27 | [Chad Smith] (LP: #1715738, #1715690) | ||
33 | 28 | - chef: Add option to pin chef omnibus install version | ||
34 | 29 | [Ethan Apodaca] (LP: #1462693) | ||
35 | 30 | - tests: execute: support command as string [Joshua Powers] | ||
36 | 31 | - schema and docs: Add jsonschema to resizefs and bootcmd modules | ||
37 | 32 | [Chad Smith] | ||
38 | 33 | - tools: Add xkvm script, wrapper around qemu-system [Joshua Powers] | ||
39 | 34 | - vmware customization: return network config format | ||
40 | 35 | [Sankar Tanguturi] (LP: #1675063) | ||
41 | 36 | - Ec2: only attempt to operate at local mode on known platforms. | ||
42 | 37 | (LP: #1715128) | ||
43 | 38 | - Use /run/cloud-init for tempfile operations. (LP: #1707222) | ||
44 | 39 | - ds-identify: Make OpenStack return maybe on arch other than intel. | ||
45 | 40 | (LP: #1715241) | ||
46 | 41 | - tests: mock missed openstack metadata uri network_data.json | ||
47 | 42 | [Chad Smith] (LP: #1714376) | ||
48 | 43 | - relocate tests/unittests/helpers.py to cloudinit/tests | ||
49 | 44 | [Lars Kellogg-Stedman] | ||
50 | 45 | - tox: add nose timer output [Joshua Powers] | ||
51 | 46 | - upstart: do not package upstart jobs, drop ubuntu-init-switch module. | ||
52 | 47 | - tests: Stop leaking calls through unmocked metadata addresses | ||
53 | 48 | [Chad Smith] (LP: #1714117) | ||
54 | 49 | - distro: allow distro to specify a default locale [Ryan Harper] | ||
55 | 50 | - tests: fix two recently added tests for sles distro. | ||
56 | 51 | - url_helper: dynamically import oauthlib import from inside oauth_headers | ||
57 | 52 | [Chad Smith] | ||
58 | 53 | - tox: make xenial environment run with python3.6 | ||
59 | 54 | - suse: Add support for openSUSE and return SLES to a working state. | ||
60 | 55 | [Robert Schweikert] | ||
61 | 56 | - GCE: Add a main to the GCE Datasource. | ||
62 | 57 | - ec2: Add IPv6 dhcp support to Ec2DataSource. [Chad Smith] (LP: #1639030) | ||
63 | 58 | - url_helper: fail gracefully if oauthlib is not available | ||
64 | 59 | [Lars Kellogg-Stedman] (LP: #1713760) | ||
65 | 60 | - cloud-init analyze: fix issues running under python 2. [Andrew Jorgensen] | ||
66 | 61 | - Configure logging module to always use UTC time. | ||
67 | 62 | [Ryan Harper] (LP: #1713158) | ||
68 | 63 | - Log a helpful message if a user script does not include shebang. | ||
69 | 64 | [Andrew Jorgensen] | ||
70 | 65 | - cli: Fix command line parsing of coniditionally loaded subcommands. | ||
71 | 66 | [Chad Smith] (LP: #1712676) | ||
72 | 67 | - doc: Explain error behavior in user data include file format. | ||
73 | 68 | [Jason Butz] | ||
74 | 69 | - cc_landscape & cc_puppet: Fix six.StringIO use in writing configs | ||
75 | 70 | [Chad Smith] (LP: #1699282, #1710932) | ||
76 | 71 | - schema cli: Add schema subcommand to cloud-init cli and cc_runcmd schema | ||
77 | 72 | [Chad Smith] | ||
78 | 73 | - Debian: Remove non-free repositories from apt sources template. | ||
79 | 74 | [Joonas Kylmälä] (LP: #1700091) | ||
80 | 75 | - tools: Add tooling for basic cloud-init performance analysis. | ||
81 | 76 | [Chad Smith] (LP: #1709761) | ||
82 | 77 | - network: add v2 passthrough and fix parsing v2 config with bonds/bridge | ||
83 | 78 | params [Ryan Harper] (LP: #1709180) | ||
84 | 79 | - doc: update capabilities with features available, link doc reference, | ||
85 | 80 | cli example [Ryan Harper] | ||
86 | 81 | - vcloud directory: Guest Customization support for passwords | ||
87 | 82 | [Maitreyee Saikia] | ||
88 | 83 | - ec2: Allow Ec2 to run in init-local using dhclient in a sandbox. | ||
89 | 84 | [Chad Smith] (LP: #1709772) | ||
90 | 85 | - cc_ntp: fallback on timesyncd configuration if ntp is not installable | ||
91 | 86 | [Ryan Harper] (LP: #1686485) | ||
92 | 87 | - net: Reduce duplicate code. Have get_interfaces_by_mac use | ||
93 | 88 | get_interfaces. | ||
94 | 89 | - tests: Fix build tree integration tests [Joshua Powers] | ||
95 | 90 | - sysconfig: Dont repeat header when rendering resolv.conf | ||
96 | 91 | [Ryan Harper] (LP: #1701420) | ||
97 | 92 | - archlinux: Fix bug with empty dns, do not render 'lo' devices. | ||
98 | 93 | (LP: #1663045, #1706593) | ||
99 | 94 | - cloudinit.net: add initialize_network_device function and tests | ||
100 | 95 | [Chad Smith] | ||
101 | 96 | - makefile: fix ci-deps-ubuntu target [Chad Smith] | ||
102 | 97 | - tests: adjust locale integration test to parse default locale. | ||
103 | 98 | - tests: remove 'yakkety' from releases as it is EOL. | ||
104 | 99 | - tests: Add initial tests for EC2 and improve a docstring. | ||
105 | 100 | - locale: Do not re-run locale-gen if provided locale is system default. | ||
106 | 101 | - archlinux: fix set hostname usage of write_file. | ||
107 | 102 | [Joshua Powers] (LP: #1705306) | ||
108 | 103 | - sysconfig: support subnet type of 'manual'. | ||
109 | 104 | - tools/run-centos: make running with no argument show help. | ||
110 | 105 | - Drop rand_str() usage in DNS redirection detection | ||
111 | 106 | [Bob Aman] (LP: #1088611) | ||
112 | 107 | - sysconfig: use MACADDR on bonds/bridges to configure mac_address | ||
113 | 108 | [Ryan Harper] (LP: #1701417) | ||
114 | 109 | - net: eni route rendering missed ipv6 default route config | ||
115 | 110 | [Ryan Harper] (LP: #1701097) | ||
116 | 111 | - sysconfig: enable mtu set per subnet, including ipv6 mtu | ||
117 | 112 | [Ryan Harper] (LP: #1702513) | ||
118 | 113 | - sysconfig: handle manual type subnets [Ryan Harper] (LP: #1687725) | ||
119 | 114 | - sysconfig: fix ipv6 gateway routes [Ryan Harper] (LP: #1694801) | ||
120 | 115 | - sysconfig: fix rendering of bond, bridge and vlan types. | ||
121 | 116 | [Ryan Harper] (LP: #1695092) | ||
122 | 117 | - Templatize systemd unit files for cross distro deltas. [Ryan Harper] | ||
123 | 118 | - sysconfig: ipv6 and default gateway fixes. [Ryan Harper] (LP: #1704872) | ||
124 | 119 | - net: fix renaming of nics to support mac addresses written in upper | ||
125 | 120 | case. (LP: #1705147) | ||
126 | 121 | - tests: fixes for issues uncovered when moving to python 3.6. | ||
127 | 122 | (LP: #1703697) | ||
128 | 123 | - sysconfig: include GATEWAY value if set in subnet | ||
129 | 124 | [Ryan Harper] (LP: #1686856) | ||
130 | 125 | - Scaleway: add datasource with user and vendor data for Scaleway. | ||
131 | 126 | [Julien Castets] | ||
132 | 127 | - Support comments in content read by load_shell_content. | ||
133 | 128 | - cloudinitlocal fail to run during boot [Hongjiang Zhang] | ||
134 | 129 | - doc: fix disk setup example table_type options | ||
135 | 130 | [Sandor Zeestraten] (LP: #1703789) | ||
136 | 131 | - tools: Fix exception handling. [Joonas Kylmälä] (LP: #1701527) | ||
137 | 132 | - tests: fix usage of mock in GCE test. | ||
138 | 133 | - test_gce: Fix invalid mock of platform_reports_gce to return False | ||
139 | 134 | [Chad Smith] | ||
140 | 135 | - test: fix incorrect keyid for apt repository. | ||
141 | 136 | [Joshua Powers] (LP: #1702717) | ||
142 | 137 | - tests: Update version of pylxd [Joshua Powers] | ||
143 | 138 | - write_files: Remove log from helper function signatures. | ||
144 | 139 | [Andrew Jorgensen] | ||
145 | 140 | - doc: document the cmdline options to NoCloud [Brian Candler] | ||
146 | 141 | - read_dmi_data: always return None when inside a container. (LP: #1701325) | ||
147 | 142 | - requirements.txt: remove trailing white space. | ||
148 | 143 | - Azure: Add network-config, Refactor net layer to handle duplicate macs. | ||
149 | 144 | [Ryan Harper] | ||
150 | 145 | - Tests: Simplify the check on ssh-import-id [Joshua Powers] | ||
151 | 146 | - tests: update ntp tests after sntp added [Joshua Powers] | ||
152 | 147 | - FreeBSD: Make freebsd a variant, fix unittests and | ||
153 | 148 | tools/build-on-freebsd. | ||
154 | 149 | - FreeBSD: fix test failure | ||
155 | 150 | - FreeBSD: replace ifdown/ifup with "ifconfig down" and "ifconfig up". | ||
156 | 151 | [Hongjiang Zhang] (LP: #1697815) | ||
157 | 152 | - FreeBSD: fix cdrom mounting failure if /mnt/cdrom/secure did not exist. | ||
158 | 153 | [Hongjiang Zhang] (LP: #1696295) | ||
159 | 154 | - main: Don't use templater to format the welcome message | ||
160 | 155 | [Andrew Jorgensen] | ||
161 | 156 | - docs: Automatically generate module docs form schema if present. | ||
162 | 157 | [Chad Smith] | ||
163 | 158 | - debian: fix path comment in /etc/hosts template. | ||
164 | 159 | [Jens Sandmann] (LP: #1606406) | ||
165 | 160 | - suse: add hostname and fully qualified domain to template. | ||
166 | 161 | [Jens Sandmann] | ||
167 | 162 | - write_file(s): Print permissions as octal, not decimal [Andrew Jorgensen] | ||
168 | 163 | - ci deps: Add --test-distro to read-dependencies to install all deps | ||
169 | 164 | [Chad Smith] | ||
170 | 165 | - tools/run-centos: cleanups and move to using read-dependencies | ||
171 | 166 | - pkg build ci: Add make ci-deps-<distro> target to install pkgs | ||
172 | 167 | [Chad Smith] | ||
173 | 168 | - systemd: make cloud-final.service run before apt daily services. | ||
174 | 169 | (LP: #1693361) | ||
175 | 170 | - selinux: Allow restorecon to be non-fatal. [Ryan Harper] (LP: #1686751) | ||
176 | 171 | - net: Allow netinfo subprocesses to return 0 or 1. | ||
177 | 172 | [Ryan Harper] (LP: #1686751) | ||
178 | 173 | - net: Allow for NetworkManager configuration [Ryan McCabe] (LP: #1693251) | ||
179 | 174 | - Use distro release version to determine if we use systemd in redhat spec | ||
180 | 175 | [Ryan Harper] | ||
181 | 176 | - net: normalize data in network_state object | ||
182 | 177 | - Integration Testing: tox env, pyxld 2.2.3, and revamp framework | ||
183 | 178 | [Wesley Wiedenmeier] | ||
184 | 179 | - Chef: Update omnibus url to chef.io, minor doc changes. [JJ Asghar] | ||
185 | 180 | - tools: add centos scripts to build and test [Joshua Powers] | ||
186 | 181 | - Drop cheetah python module as it is not needed by trunk [Ryan Harper] | ||
187 | 182 | - rhel/centos spec cleanups. | ||
188 | 183 | - cloud.cfg: move to a template. setup.py changes along the way. | ||
189 | 184 | - Makefile: add deb-src and srpm targets. use PYVER more places. | ||
190 | 185 | - makefile: fix python 2/3 detection in the Makefile [Chad Smith] | ||
191 | 186 | - snap: Removing snapcraft plug line [Joshua Powers] (LP: #1695333) | ||
192 | 187 | - RHEL/CentOS: Fix default routes for IPv4/IPv6 configuration. | ||
193 | 188 | [Andreas Karis] (LP: #1696176) | ||
194 | 189 | - test: Fix pyflakes complaint of unused import. | ||
195 | 190 | [Joshua Powers] (LP: #1695918) | ||
196 | 191 | - NoCloud: support seed of nocloud from smbios information | ||
197 | 192 | [Vladimir Pouzanov] (LP: #1691772) | ||
198 | 193 | - net: when selecting a network device, use natural sort order | ||
199 | 194 | [Marc-Aurèle Brothier] | ||
200 | 195 | - fix typos and remove whitespace in various docs [Stephan Telling] | ||
201 | 196 | - systemd: Fix typo in comment in cloud-init.target. [Chen-Han Hsiao] | ||
202 | 197 | - Tests: Skip jsonschema related unit tests when dependency is absent. | ||
203 | 198 | [Chad Smith] (LP: #1695318) | ||
204 | 199 | - azure: remove accidental duplicate line in merge. | ||
205 | 200 | - azure: identify platform by well known value in chassis asset tag. | ||
206 | 201 | [Chad Smith] (LP: #1693939) | ||
207 | 202 | - tools/net-convert.py: support old cloudinit versions by using kwargs. | ||
208 | 203 | - ntp: Add schema definition and passive schema validation. | ||
209 | 204 | [Chad Smith] (LP: #1692916) | ||
210 | 205 | - Fix eni rendering for bridge params that require repeated key for | ||
211 | 206 | values. [Ryan Harper] | ||
212 | 207 | - net: remove systemd link file writing from eni renderer [Ryan Harper] | ||
213 | 208 | - AliYun: Enable platform identification and enable by default. | ||
214 | 209 | [Junjie Wang] (LP: #1638931) | ||
215 | 210 | - net: fix reading and rendering addresses in cidr format. | ||
216 | 211 | [Dimitri John Ledkov] (LP: #1689346, #1684349) | ||
217 | 212 | - disk_setup: udev settle before attempting partitioning or fs creation. | ||
218 | 213 | (LP: #1692093) | ||
219 | 214 | - GCE: Update the attribute used to find instance SSH keys. | ||
220 | 215 | [Daniel Watkins] (LP: #1693582) | ||
221 | 216 | - nplan: For bonds, allow dashed or underscore names of keys. | ||
222 | 217 | [Dimitri John Ledkov] (LP: #1690480) | ||
223 | 218 | - python2.6: fix unit tests usage of assertNone and format. | ||
224 | 219 | - test: update docstring on test_configured_list_with_none | ||
225 | 220 | - fix tools/ds-identify to not write None twice. | ||
226 | 221 | - tox/build: do not package depend on style requirements. | ||
227 | 222 | - cc_ntp: Restructure cc_ntp unit tests. [Chad Smith] (LP: #1692794) | ||
228 | 223 | - flake8: move the pinned version of flake8 up to 3.3.0 | ||
229 | 224 | - tests: Apply workaround for snapd bug in test case. [Joshua Powers] | ||
230 | 225 | - RHEL/CentOS: Fix dual stack IPv4/IPv6 configuration. | ||
231 | 226 | [Andreas Karis] (LP: #1679817, #1685534, #1685532) | ||
232 | 227 | - disk_setup: fix several issues with gpt disk partitions. (LP: #1692087) | ||
233 | 228 | - function spelling & docstring update [Joshua Powers] | ||
234 | 229 | - Fixing wrong file name regression. [Joshua Powers] | ||
235 | 230 | - tox: move pylint target to 1.7.1 | ||
236 | 231 | - Fix get_interfaces_by_mac for empty macs (LP: #1692028) | ||
237 | 232 | - DigitalOcean: remove routes except for the public interface. | ||
238 | 233 | [Ben Howard] (LP: #1681531.) | ||
239 | 234 | - netplan: pass macaddress, when specified, for vlans | ||
240 | 235 | [Dimitri John Ledkov] (LP: #1690388) | ||
241 | 236 | - doc: various improvements for the docs on cc_users_groups. | ||
242 | 237 | [Felix Dreissig] | ||
243 | 238 | - cc_ntp: write template before installing and add service restart | ||
244 | 239 | [Ryan Harper] (LP: #1645644) | ||
245 | 240 | - cloudstack: fix tests to avoid accessing /var/lib/NetworkManager | ||
246 | 241 | [Lars Kellogg-Stedman] | ||
247 | 242 | - tests: fix hardcoded path to mkfs.ext4 [Joshua Powers] (LP: #1691517) | ||
248 | 243 | - Actually skip warnings when .skip file is present. | ||
249 | 244 | [Chris Brinker] (LP: #1691551) | ||
250 | 245 | - netplan: fix netplan render_network_state signature. | ||
251 | 246 | [Dimitri John Ledkov] (LP: #1685944) | ||
252 | 247 | - Azure: fix reformatting of ephemeral disks on resize to large types. | ||
253 | 248 | (LP: #1686514) | ||
254 | 249 | - Revert "tools/net-convert: fix argument order for render_network_state" | ||
255 | 250 | - make deb: Add devscripts dependency for make deb. Cleanup | ||
256 | 251 | packages/bddeb. [Chad Smith] (LP: #1685935) | ||
257 | 252 | - tools/net-convert: fix argument order for render_network_state | ||
258 | 253 | [Ryan Harper] (LP: #1685944) | ||
259 | 254 | - openstack: fix log message copy/paste typo in _get_url_settings | ||
260 | 255 | [Lars Kellogg-Stedman] | ||
261 | 256 | - unittests: fix unittests run on centos [Joshua Powers] | ||
262 | 257 | - Improve detection of snappy to include os-release and kernel cmdline. | ||
263 | 258 | (LP: #1689944) | ||
264 | 259 | - Add address to config entry generated by _klibc_to_config_entry. | ||
265 | 260 | [Julien Castets] (LP: #1691135) | ||
266 | 261 | - sysconfig: Raise ValueError when multiple default gateways are present. | ||
267 | 262 | [Chad Smith] (LP: #1687485) | ||
268 | 263 | - FreeBSD: improvements and fixes for use on Azure | ||
269 | 264 | [Hongjiang Zhang] (LP: #1636345) | ||
270 | 265 | - Add unit tests for ds-identify, fix Ec2 bug found. | ||
271 | 266 | - fs_setup: if cmd is specified, use shell interpretation. | ||
272 | 267 | [Paul Meyer] (LP: #1687712) | ||
273 | 268 | - doc: document network configuration defaults policy and formats. | ||
274 | 269 | [Ryan Harper] | ||
275 | 270 | - Fix name of "uri" key in docs for "cc_apt_configure" module | ||
276 | 271 | [Felix Dreissig] | ||
277 | 272 | - tests: Enable artful [Joshua Powers] | ||
278 | 273 | - nova-lxd: read product_name from environment, not platform. | ||
279 | 274 | (LP: #1685810) | ||
280 | 275 | - Fix yum repo config where keys contain array values | ||
281 | 276 | [Dylan Perry] (LP: #1592150) | ||
282 | 277 | - template: Update debian backports template [Joshua Powers] (LP: #1627293) | ||
283 | 278 | - rsyslog: replace ~ with stop [Joshua Powers] (LP: #1367899) | ||
284 | 279 | - Doc: add additional RTD examples [Joshua Powers] (LP: #1459604) | ||
285 | 280 | - Fix growpart for some cases when booted with root=PARTUUID. | ||
286 | 281 | (LP: #1684869) | ||
287 | 282 | - pylint: update output style to parseable [Joshua Powers] | ||
288 | 283 | - pylint: fix all logging warnings [Joshua Powers] | ||
289 | 284 | - CloudStack: Add NetworkManager to list of supported DHCP lease dirs. | ||
290 | 285 | [Syed] | ||
291 | 286 | - net: kernel lies about vlans not stealing mac addresses, when they do | ||
292 | 287 | [Dimitri John Ledkov] (LP: #1682871) | ||
293 | 288 | - ds-identify: Check correct path for "latest" config drive | ||
294 | 289 | [Daniel Watkins] (LP: #1673637) | ||
295 | 290 | - doc: Fix example for resolve.conf configuration. | ||
296 | 291 | [Jon Grimm] (LP: #1531582) | ||
297 | 292 | - Fix examples that reference upstream chef repository. | ||
298 | 293 | [Jon Grimm] (LP: #1678145) | ||
299 | 294 | - doc: correct grammar and improve clarity in merging documentation. | ||
300 | 295 | [David Tagatac] | ||
301 | 296 | - doc: Add missing doc link to snap-config module. [Ryan Harper] | ||
302 | 297 | - snap: allows for creating cloud-init snap [Joshua Powers] | ||
303 | 298 | - DigitalOcean: assign IPv4ll address to lowest indexed interface. | ||
304 | 299 | [Ben Howard] | ||
305 | 300 | - DigitalOcean: configure all NICs presented in meta-data. [Ben Howard] | ||
306 | 301 | - Remove (and/or fix) URL shortener references [Jon Grimm] (LP: #1669727) | ||
307 | 302 | - HACKING.rst: more info on filling out contributors agreement. | ||
308 | 303 | - util: teach write_file about copy_mode option | ||
309 | 304 | [Lars Kellogg-Stedman] (LP: #1644064) | ||
310 | 305 | - DigitalOcean: bind resolvers to loopback interface. [Ben Howard] | ||
311 | 306 | - tests: fix AltCloud tests to not rely on blkid (LP: #1636531) | ||
312 | 307 | - OpenStack: add 'dvs' to the list of physical link types. (LP: #1674946) | ||
313 | 308 | - Fix bug that resulted in an attempt to rename bonds or vlans. | ||
314 | 309 | (LP: #1669860) | ||
315 | 310 | - tests: update OpenNebula and Digital Ocean to not rely on host | ||
316 | 311 | interfaces. | ||
317 | 312 | - net: in netplan renderer delete known image-builtin content. | ||
318 | 313 | (LP: #1675576) | ||
319 | 314 | - doc: correct grammar in capabilities.rst [David Tagatac] | ||
320 | 315 | - ds-identify: fix detecting of maas datasource. (LP: #1677710) | ||
321 | 316 | - netplan: remove debugging prints, add debug logging [Ryan Harper] | ||
322 | 317 | - ds-identify: do not write None twice to datasource_list. | ||
323 | 318 | - support resizing partition and rootfs on system booted without | ||
324 | 319 | initramfs. [Steve Langasek] (LP: #1677376) | ||
325 | 320 | - apt_configure: run only when needed. (LP: #1675185) | ||
326 | 321 | - OpenStack: identify OpenStack by product 'OpenStack Compute'. | ||
327 | 322 | (LP: #1675349) | ||
328 | 323 | - GCE: Search GCE in ds-identify, consider serial number in check. | ||
329 | 324 | (LP: #1674861) | ||
330 | 325 | - Add support for setting hashed passwords [Tore S. Lonoy] (LP: #1570325) | ||
331 | 326 | - Fix filesystem creation when using "partition: auto" | ||
332 | 327 | [Jonathan Ballet] (LP: #1634678) | ||
333 | 328 | - ConfigDrive: support reading config drive data from /config-drive. | ||
334 | 329 | (LP: #1673411) | ||
335 | 330 | - ds-identify: fix detection of Bigstep datasource. (LP: #1674766) | ||
336 | 331 | - test: add running of pylint [Joshua Powers] | ||
337 | 332 | - ds-identify: fix bug where filename expansion was left on. | ||
338 | 333 | - advertise network config v2 support (NETWORK_CONFIG_V2) in features. | ||
339 | 334 | - Bigstep: fix bug when executing in python3. [root] | ||
340 | 335 | - Fix unit test when running in a system deployed with cloud-init. | ||
341 | 336 | - Bounce network interface for Azure when using the built-in path. | ||
342 | 337 | [Brent Baude] (LP: #1674685) | ||
343 | 338 | - cloudinit.net: add network config v2 parsing and rendering [Ryan Harper] | ||
344 | 339 | - net: Fix incorrect call to isfile [Joshua Powers] (LP: #1674317) | ||
345 | 340 | - net: add renderers for automatically selecting the renderer. | ||
346 | 341 | - doc: fix config drive doc with regard to unpartitioned disks. | ||
347 | 342 | (LP: #1673818) | ||
348 | 343 | - test: Adding integration test for password as list [Joshua Powers] | ||
349 | 344 | - render_network_state: switch arguments around, do not require target | ||
350 | 345 | - support 'loopback' as a device type. | ||
351 | 346 | - Integration Testing: improve testcase subclassing [Wesley Wiedenmeier] | ||
352 | 347 | - gitignore: adding doc/rtd_html [Joshua Powers] | ||
353 | 348 | - doc: add instructions for running integration tests via tox. | ||
354 | 349 | [Joshua Powers] | ||
355 | 350 | - test: avoid differences in 'date' output due to daylight savings. | ||
356 | 351 | - Fix chef config module in omnibus install. [Jeremy Melvin] (LP: #1583837) | ||
357 | 352 | - Add feature flags to cloudinit.version. [Wesley Wiedenmeier] | ||
358 | 353 | - tox: add a citest environment | ||
359 | 354 | - Further fix regression to support 'password' for default user. | ||
360 | 355 | - fix regression when no chpasswd/list was provided. | ||
361 | 356 | - Support chpasswd/list being a list in addition to a string. | ||
362 | 357 | [Sergio Lystopad] (LP: #1665694) | ||
363 | 358 | - doc: Fix configuration example for cc_set_passwords module. | ||
364 | 359 | [Sergio Lystopad] (LP: #1665773) | ||
365 | 360 | - net: support both ipv4 and ipv6 gateways in sysconfig. | ||
366 | 361 | [Lars Kellogg-Stedman] (LP: #1669504) | ||
367 | 362 | - net: do not raise exception for > 3 nameservers | ||
368 | 363 | [Lars Kellogg-Stedman] (LP: #1670052) | ||
369 | 364 | - ds-identify: report cleanups for config and exit value. (LP: #1669949) | ||
370 | 365 | - ds-identify: move default setting for Ec2/strict_id to a global. | ||
371 | 366 | - ds-identify: record not found in cloud.cfg and always add None. | ||
372 | 367 | - Support warning if the used datasource is not in ds-identify's list. | ||
373 | 368 | - tools/ds-identify: make report mode write namespaced results. | ||
374 | 369 | - Move warning functionality to cloudinit/warnings.py | ||
375 | 370 | - Add profile.d script for showing warnings on login. | ||
376 | 371 | - Z99-cloud-locale-test.sh: install and make consistent. | ||
377 | 372 | - tools/ds-identify: look at cloud.cfg when looking for ec2 strict_id. | ||
378 | 373 | - tools/ds-identify: disable vmware_guest_customization by default. | ||
379 | 374 | - tools/ds-identify: ovf identify vmware guest customization. | ||
380 | 375 | - Identify Brightbox as an Ec2 datasource user. (LP: #1661693) | ||
381 | 376 | - DatasourceEc2: add warning message when not on AWS. | ||
382 | 377 | - ds-identify: add reading of datasource/Ec2/strict_id | ||
383 | 378 | - tools/ds-identify: add support for found or maybe contributing config. | ||
384 | 379 | - tools/ds-identify: read the seed directory on Ec2 | ||
385 | 380 | - tools/ds-identify: use quotes in local declarations. | ||
386 | 381 | - tools/ds-identify: fix documentation of policy setting in a comment. | ||
387 | 382 | - ds-identify: only run once per boot unless --force is given. | ||
388 | 383 | - flake8: fix flake8 complaints in previous commit. | ||
389 | 384 | - net: correct errors in cloudinit/net/sysconfig.py | ||
390 | 385 | [Lars Kellogg-Stedman] (LP: #1665441) | ||
391 | 386 | - ec2_utils: fix MetadataLeafDecoder that returned bytes on empty | ||
392 | 387 | - apply the runtime configuration written by ds-identify. | ||
393 | 388 | - ds-identify: fix checking for filesystem label (LP: #1663735) | ||
394 | 389 | - ds-identify: read ds=nocloud properly (LP: #1663723) | ||
395 | 390 | - support nova-lxd by reading platform from environment of pid 1. | ||
396 | 391 | (LP: #1661797) | ||
397 | 392 | - ds-identify: change aarch64 to use the default for non-dmi systems. | ||
398 | 393 | - Remove style checking during build and add latest style checks to tox | ||
399 | 394 | [Joshua Powers] (LP: #1652329) | ||
400 | 395 | - code-style: make master pass pycodestyle (2.3.1) cleanly, currently: | ||
401 | 396 | [Joshua Powers] | ||
402 | 397 | - manual_cache_clean: When manually cleaning touch a file in instance dir. | ||
403 | 398 | - Add tools/ds-identify to identify datasources available. | ||
404 | 399 | - Fix small typo and change iso-filename for consistency [Robin Naundorf] | ||
405 | 400 | - Fix eni rendering of multiple IPs per interface | ||
406 | 401 | [Ryan Harper] (LP: #1657940) | ||
407 | 402 | - tools/mock-meta: support python2 or python3 and ipv6 in both. | ||
408 | 403 | - tests: remove executable bit on test_net, so it runs, and fix it. | ||
409 | 404 | - tests: No longer monkey patch httpretty for python 3.4.2 | ||
410 | 405 | - Add 3 ecdsa-sha2-nistp* ssh key types now that they are standardized | ||
411 | 406 | [Lars Kellogg-Stedman] (LP: #1658174) | ||
412 | 407 | - reset httpretty for each test [Lars Kellogg-Stedman] (LP: #1658200) | ||
413 | 408 | - build: fix running Make on a branch with tags other than master | ||
414 | 409 | - EC2: Do not cache security credentials on disk | ||
415 | 410 | [Andrew Jorgensen] (LP: #1638312) | ||
416 | 411 | - doc: Fix typos and clarify some aspects of the part-handler | ||
417 | 412 | [Erik M. Bray] | ||
418 | 413 | - doc: add some documentation on OpenStack datasource. | ||
419 | 414 | - OpenStack: Use timeout and retries from config in get_data. | ||
420 | 415 | [Lars Kellogg-Stedman] (LP: #1657130) | ||
421 | 416 | - Fixed Misc issues related to VMware customization. [Sankar Tanguturi] | ||
422 | 417 | - Fix minor docs typo: perserve > preserve [Jeremy Bicha] | ||
423 | 418 | - Use dnf instead of yum when available | ||
424 | 419 | [Lars Kellogg-Stedman] (LP: #1647118) | ||
425 | 420 | - validate-yaml: use python rather than explicitly python3 | ||
426 | 421 | - Get early logging logged, including failures of cmdline url. | ||
427 | 422 | |||
428 | 1 | 0.7.9: | 423 | 0.7.9: |
429 | 2 | - doc: adjust headers in tests documentation for consistency. | 424 | - doc: adjust headers in tests documentation for consistency. |
430 | 3 | - pep8: fix issue found in zesty build with pycodestyle. | 425 | - pep8: fix issue found in zesty build with pycodestyle. |
431 | diff --git a/Makefile b/Makefile | |||
432 | index f280911..4ace227 100644 | |||
433 | --- a/Makefile | |||
434 | +++ b/Makefile | |||
435 | @@ -4,7 +4,7 @@ PYVER ?= $(shell for p in python3 python2; do \ | |||
436 | 4 | 4 | ||
437 | 5 | noseopts ?= -v | 5 | noseopts ?= -v |
438 | 6 | 6 | ||
440 | 7 | YAML_FILES=$(shell find cloudinit bin tests tools -name "*.yaml" -type f ) | 7 | YAML_FILES=$(shell find cloudinit tests tools -name "*.yaml" -type f ) |
441 | 8 | YAML_FILES+=$(shell find doc/examples -name "cloud-config*.txt" -type f ) | 8 | YAML_FILES+=$(shell find doc/examples -name "cloud-config*.txt" -type f ) |
442 | 9 | 9 | ||
443 | 10 | PIP_INSTALL := pip install | 10 | PIP_INSTALL := pip install |
444 | @@ -48,10 +48,10 @@ pyflakes3: | |||
445 | 48 | @$(CWD)/tools/run-pyflakes3 | 48 | @$(CWD)/tools/run-pyflakes3 |
446 | 49 | 49 | ||
447 | 50 | unittest: clean_pyc | 50 | unittest: clean_pyc |
449 | 51 | nosetests $(noseopts) tests/unittests | 51 | nosetests $(noseopts) tests/unittests cloudinit |
450 | 52 | 52 | ||
451 | 53 | unittest3: clean_pyc | 53 | unittest3: clean_pyc |
453 | 54 | nosetests3 $(noseopts) tests/unittests | 54 | nosetests3 $(noseopts) tests/unittests cloudinit |
454 | 55 | 55 | ||
455 | 56 | ci-deps-ubuntu: | 56 | ci-deps-ubuntu: |
456 | 57 | @$(PYVER) $(CWD)/tools/read-dependencies --distro ubuntu --test-distro | 57 | @$(PYVER) $(CWD)/tools/read-dependencies --distro ubuntu --test-distro |
457 | diff --git a/cloudinit/analyze/__init__.py b/cloudinit/analyze/__init__.py | |||
458 | 58 | new file mode 100644 | 58 | new file mode 100644 |
459 | index 0000000..e69de29 | |||
460 | --- /dev/null | |||
461 | +++ b/cloudinit/analyze/__init__.py | |||
462 | diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py | |||
463 | 59 | new file mode 100644 | 59 | new file mode 100644 |
464 | index 0000000..69b9e43 | |||
465 | --- /dev/null | |||
466 | +++ b/cloudinit/analyze/__main__.py | |||
467 | @@ -0,0 +1,155 @@ | |||
468 | 1 | # Copyright (C) 2017 Canonical Ltd. | ||
469 | 2 | # | ||
470 | 3 | # This file is part of cloud-init. See LICENSE file for license information. | ||
471 | 4 | |||
472 | 5 | import argparse | ||
473 | 6 | import re | ||
474 | 7 | import sys | ||
475 | 8 | |||
476 | 9 | from . import dump | ||
477 | 10 | from . import show | ||
478 | 11 | |||
479 | 12 | |||
480 | 13 | def get_parser(parser=None): | ||
481 | 14 | if not parser: | ||
482 | 15 | parser = argparse.ArgumentParser( | ||
483 | 16 | prog='cloudinit-analyze', | ||
484 | 17 | description='Devel tool: Analyze cloud-init logs and data') | ||
485 | 18 | subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand') | ||
486 | 19 | subparsers.required = True | ||
487 | 20 | |||
488 | 21 | parser_blame = subparsers.add_parser( | ||
489 | 22 | 'blame', help='Print list of executed stages ordered by time to init') | ||
490 | 23 | parser_blame.add_argument( | ||
491 | 24 | '-i', '--infile', action='store', dest='infile', | ||
492 | 25 | default='/var/log/cloud-init.log', | ||
493 | 26 | help='specify where to read input.') | ||
494 | 27 | parser_blame.add_argument( | ||
495 | 28 | '-o', '--outfile', action='store', dest='outfile', default='-', | ||
496 | 29 | help='specify where to write output. ') | ||
497 | 30 | parser_blame.set_defaults(action=('blame', analyze_blame)) | ||
498 | 31 | |||
499 | 32 | parser_show = subparsers.add_parser( | ||
500 | 33 | 'show', help='Print list of in-order events during execution') | ||
501 | 34 | parser_show.add_argument('-f', '--format', action='store', | ||
502 | 35 | dest='print_format', default='%I%D @%Es +%ds', | ||
503 | 36 | help='specify formatting of output.') | ||
504 | 37 | parser_show.add_argument('-i', '--infile', action='store', | ||
505 | 38 | dest='infile', default='/var/log/cloud-init.log', | ||
506 | 39 | help='specify where to read input.') | ||
507 | 40 | parser_show.add_argument('-o', '--outfile', action='store', | ||
508 | 41 | dest='outfile', default='-', | ||
509 | 42 | help='specify where to write output.') | ||
510 | 43 | parser_show.set_defaults(action=('show', analyze_show)) | ||
511 | 44 | parser_dump = subparsers.add_parser( | ||
512 | 45 | 'dump', help='Dump cloud-init events in JSON format') | ||
513 | 46 | parser_dump.add_argument('-i', '--infile', action='store', | ||
514 | 47 | dest='infile', default='/var/log/cloud-init.log', | ||
515 | 48 | help='specify where to read input. ') | ||
516 | 49 | parser_dump.add_argument('-o', '--outfile', action='store', | ||
517 | 50 | dest='outfile', default='-', | ||
518 | 51 | help='specify where to write output. ') | ||
519 | 52 | parser_dump.set_defaults(action=('dump', analyze_dump)) | ||
520 | 53 | return parser | ||
521 | 54 | |||
522 | 55 | |||
523 | 56 | def analyze_blame(name, args): | ||
524 | 57 | """Report a list of records sorted by largest time delta. | ||
525 | 58 | |||
526 | 59 | For example: | ||
527 | 60 | 30.210s (init-local) searching for datasource | ||
528 | 61 | 8.706s (init-network) reading and applying user-data | ||
529 | 62 | 166ms (modules-config) .... | ||
530 | 63 | 807us (modules-final) ... | ||
531 | 64 | |||
532 | 65 | We generate event records parsing cloud-init logs, formatting the output | ||
533 | 66 | and sorting by record data ('delta') | ||
534 | 67 | """ | ||
535 | 68 | (infh, outfh) = configure_io(args) | ||
536 | 69 | blame_format = ' %ds (%n)' | ||
537 | 70 | r = re.compile('(^\s+\d+\.\d+)', re.MULTILINE) | ||
538 | 71 | for idx, record in enumerate(show.show_events(_get_events(infh), | ||
539 | 72 | blame_format)): | ||
540 | 73 | srecs = sorted(filter(r.match, record), reverse=True) | ||
541 | 74 | outfh.write('-- Boot Record %02d --\n' % (idx + 1)) | ||
542 | 75 | outfh.write('\n'.join(srecs) + '\n') | ||
543 | 76 | outfh.write('\n') | ||
544 | 77 | outfh.write('%d boot records analyzed\n' % (idx + 1)) | ||
545 | 78 | |||
546 | 79 | |||
547 | 80 | def analyze_show(name, args): | ||
548 | 81 | """Generate output records using the 'standard' format to printing events. | ||
549 | 82 | |||
550 | 83 | Example output follows: | ||
551 | 84 | Starting stage: (init-local) | ||
552 | 85 | ... | ||
553 | 86 | Finished stage: (init-local) 0.105195 seconds | ||
554 | 87 | |||
555 | 88 | Starting stage: (init-network) | ||
556 | 89 | ... | ||
557 | 90 | Finished stage: (init-network) 0.339024 seconds | ||
558 | 91 | |||
559 | 92 | Starting stage: (modules-config) | ||
560 | 93 | ... | ||
561 | 94 | Finished stage: (modules-config) 0.NNN seconds | ||
562 | 95 | |||
563 | 96 | Starting stage: (modules-final) | ||
564 | 97 | ... | ||
565 | 98 | Finished stage: (modules-final) 0.NNN seconds | ||
566 | 99 | """ | ||
567 | 100 | (infh, outfh) = configure_io(args) | ||
568 | 101 | for idx, record in enumerate(show.show_events(_get_events(infh), | ||
569 | 102 | args.print_format)): | ||
570 | 103 | outfh.write('-- Boot Record %02d --\n' % (idx + 1)) | ||
571 | 104 | outfh.write('The total time elapsed since completing an event is' | ||
572 | 105 | ' printed after the "@" character.\n') | ||
573 | 106 | outfh.write('The time the event takes is printed after the "+" ' | ||
574 | 107 | 'character.\n\n') | ||
575 | 108 | outfh.write('\n'.join(record) + '\n') | ||
576 | 109 | outfh.write('%d boot records analyzed\n' % (idx + 1)) | ||
577 | 110 | |||
578 | 111 | |||
579 | 112 | def analyze_dump(name, args): | ||
580 | 113 | """Dump cloud-init events in json format""" | ||
581 | 114 | (infh, outfh) = configure_io(args) | ||
582 | 115 | outfh.write(dump.json_dumps(_get_events(infh)) + '\n') | ||
583 | 116 | |||
584 | 117 | |||
585 | 118 | def _get_events(infile): | ||
586 | 119 | rawdata = None | ||
587 | 120 | events, rawdata = show.load_events(infile, None) | ||
588 | 121 | if not events: | ||
589 | 122 | events, _ = dump.dump_events(rawdata=rawdata) | ||
590 | 123 | return events | ||
591 | 124 | |||
592 | 125 | |||
593 | 126 | def configure_io(args): | ||
594 | 127 | """Common parsing and setup of input/output files""" | ||
595 | 128 | if args.infile == '-': | ||
596 | 129 | infh = sys.stdin | ||
597 | 130 | else: | ||
598 | 131 | try: | ||
599 | 132 | infh = open(args.infile, 'r') | ||
600 | 133 | except OSError: | ||
601 | 134 | sys.stderr.write('Cannot open file %s\n' % args.infile) | ||
602 | 135 | sys.exit(1) | ||
603 | 136 | |||
604 | 137 | if args.outfile == '-': | ||
605 | 138 | outfh = sys.stdout | ||
606 | 139 | else: | ||
607 | 140 | try: | ||
608 | 141 | outfh = open(args.outfile, 'w') | ||
609 | 142 | except OSError: | ||
610 | 143 | sys.stderr.write('Cannot open file %s\n' % args.outfile) | ||
611 | 144 | sys.exit(1) | ||
612 | 145 | |||
613 | 146 | return (infh, outfh) | ||
614 | 147 | |||
615 | 148 | |||
616 | 149 | if __name__ == '__main__': | ||
617 | 150 | parser = get_parser() | ||
618 | 151 | args = parser.parse_args() | ||
619 | 152 | (name, action_functor) = args.action | ||
620 | 153 | action_functor(name, args) | ||
621 | 154 | |||
622 | 155 | # vi: ts=4 expandtab | ||
623 | diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py | |||
624 | 0 | new file mode 100644 | 156 | new file mode 100644 |
625 | index 0000000..ca4da49 | |||
626 | --- /dev/null | |||
627 | +++ b/cloudinit/analyze/dump.py | |||
628 | @@ -0,0 +1,176 @@ | |||
629 | 1 | # This file is part of cloud-init. See LICENSE file for license information. | ||
630 | 2 | |||
631 | 3 | import calendar | ||
632 | 4 | from datetime import datetime | ||
633 | 5 | import json | ||
634 | 6 | import sys | ||
635 | 7 | |||
636 | 8 | from cloudinit import util | ||
637 | 9 | |||
638 | 10 | stage_to_description = { | ||
639 | 11 | 'finished': 'finished running cloud-init', | ||
640 | 12 | 'init-local': 'starting search for local datasources', | ||
641 | 13 | 'init-network': 'searching for network datasources', | ||
642 | 14 | 'init': 'searching for network datasources', | ||
643 | 15 | 'modules-config': 'running config modules', | ||
644 | 16 | 'modules-final': 'finalizing modules', | ||
645 | 17 | 'modules': 'running modules for', | ||
646 | 18 | 'single': 'running single module ', | ||
647 | 19 | } | ||
648 | 20 | |||
649 | 21 | # logger's asctime format | ||
650 | 22 | CLOUD_INIT_ASCTIME_FMT = "%Y-%m-%d %H:%M:%S,%f" | ||
651 | 23 | |||
652 | 24 | # journctl -o short-precise | ||
653 | 25 | CLOUD_INIT_JOURNALCTL_FMT = "%b %d %H:%M:%S.%f %Y" | ||
654 | 26 | |||
655 | 27 | # other | ||
656 | 28 | DEFAULT_FMT = "%b %d %H:%M:%S %Y" | ||
657 | 29 | |||
658 | 30 | |||
659 | 31 | def parse_timestamp(timestampstr): | ||
660 | 32 | # default syslog time does not include the current year | ||
661 | 33 | months = [calendar.month_abbr[m] for m in range(1, 13)] | ||
662 | 34 | if timestampstr.split()[0] in months: | ||
663 | 35 | # Aug 29 22:55:26 | ||
664 | 36 | FMT = DEFAULT_FMT | ||
665 | 37 | if '.' in timestampstr: | ||
666 | 38 | FMT = CLOUD_INIT_JOURNALCTL_FMT | ||
667 | 39 | dt = datetime.strptime(timestampstr + " " + | ||
668 | 40 | str(datetime.now().year), | ||
669 | 41 | FMT) | ||
670 | 42 | timestamp = dt.strftime("%s.%f") | ||
671 | 43 | elif "," in timestampstr: | ||
672 | 44 | # 2016-09-12 14:39:20,839 | ||
673 | 45 | dt = datetime.strptime(timestampstr, CLOUD_INIT_ASCTIME_FMT) | ||
674 | 46 | timestamp = dt.strftime("%s.%f") | ||
675 | 47 | else: | ||
676 | 48 | # allow date(1) to handle other formats we don't expect | ||
677 | 49 | timestamp = parse_timestamp_from_date(timestampstr) | ||
678 | 50 | |||
679 | 51 | return float(timestamp) | ||
680 | 52 | |||
681 | 53 | |||
682 | 54 | def parse_timestamp_from_date(timestampstr): | ||
683 | 55 | out, _ = util.subp(['date', '+%s.%3N', '-d', timestampstr]) | ||
684 | 56 | timestamp = out.strip() | ||
685 | 57 | return float(timestamp) | ||
686 | 58 | |||
687 | 59 | |||
688 | 60 | def parse_ci_logline(line): | ||
689 | 61 | # Stage Starts: | ||
690 | 62 | # Cloud-init v. 0.7.7 running 'init-local' at \ | ||
691 | 63 | # Fri, 02 Sep 2016 19:28:07 +0000. Up 1.0 seconds. | ||
692 | 64 | # Cloud-init v. 0.7.7 running 'init' at \ | ||
693 | 65 | # Fri, 02 Sep 2016 19:28:08 +0000. Up 2.0 seconds. | ||
694 | 66 | # Cloud-init v. 0.7.7 finished at | ||
695 | 67 | # Aug 29 22:55:26 test1 [CLOUDINIT] handlers.py[DEBUG]: \ | ||
696 | 68 | # finish: modules-final: SUCCESS: running modules for final | ||
697 | 69 | # 2016-08-30T21:53:25.972325+00:00 y1 [CLOUDINIT] handlers.py[DEBUG]: \ | ||
698 | 70 | # finish: modules-final: SUCCESS: running modules for final | ||
699 | 71 | # | ||
700 | 72 | # Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT] util.py[DEBUG]: \ | ||
701 | 73 | # Cloud-init v. 0.7.8 running 'init-local' at \ | ||
702 | 74 | # Thu, 03 Nov 2016 06:51:06 +0000. Up 1.0 seconds. | ||
703 | 75 | # | ||
704 | 76 | # 2017-05-22 18:02:01,088 - util.py[DEBUG]: Cloud-init v. 0.7.9 running \ | ||
705 | 77 | # 'init-local' at Mon, 22 May 2017 18:02:01 +0000. Up 2.0 seconds. | ||
706 | 78 | |||
707 | 79 | separators = [' - ', ' [CLOUDINIT] '] | ||
708 | 80 | found = False | ||
709 | 81 | for sep in separators: | ||
710 | 82 | if sep in line: | ||
711 | 83 | found = True | ||
712 | 84 | break | ||
713 | 85 | |||
714 | 86 | if not found: | ||
715 | 87 | return None | ||
716 | 88 | |||
717 | 89 | (timehost, eventstr) = line.split(sep) | ||
718 | 90 | |||
719 | 91 | # journalctl -o short-precise | ||
720 | 92 | if timehost.endswith(":"): | ||
721 | 93 | timehost = " ".join(timehost.split()[0:-1]) | ||
722 | 94 | |||
723 | 95 | if "," in timehost: | ||
724 | 96 | timestampstr, extra = timehost.split(",") | ||
725 | 97 | timestampstr += ",%s" % extra.split()[0] | ||
726 | 98 | if ' ' in extra: | ||
727 | 99 | hostname = extra.split()[-1] | ||
728 | 100 | else: | ||
729 | 101 | hostname = timehost.split()[-1] | ||
730 | 102 | timestampstr = timehost.split(hostname)[0].strip() | ||
731 | 103 | if 'Cloud-init v.' in eventstr: | ||
732 | 104 | event_type = 'start' | ||
733 | 105 | if 'running' in eventstr: | ||
734 | 106 | stage_and_timestamp = eventstr.split('running')[1].lstrip() | ||
735 | 107 | event_name, _ = stage_and_timestamp.split(' at ') | ||
736 | 108 | event_name = event_name.replace("'", "").replace(":", "-") | ||
737 | 109 | if event_name == "init": | ||
738 | 110 | event_name = "init-network" | ||
739 | 111 | else: | ||
740 | 112 | # don't generate a start for the 'finished at' banner | ||
741 | 113 | return None | ||
742 | 114 | event_description = stage_to_description[event_name] | ||
743 | 115 | else: | ||
744 | 116 | (pymodloglvl, event_type, event_name) = eventstr.split()[0:3] | ||
745 | 117 | event_description = eventstr.split(event_name)[1].strip() | ||
746 | 118 | |||
747 | 119 | event = { | ||
748 | 120 | 'name': event_name.rstrip(":"), | ||
749 | 121 | 'description': event_description, | ||
750 | 122 | 'timestamp': parse_timestamp(timestampstr), | ||
751 | 123 | 'origin': 'cloudinit', | ||
752 | 124 | 'event_type': event_type.rstrip(":"), | ||
753 | 125 | } | ||
754 | 126 | if event['event_type'] == "finish": | ||
755 | 127 | result = event_description.split(":")[0] | ||
756 | 128 | desc = event_description.split(result)[1].lstrip(':').strip() | ||
757 | 129 | event['result'] = result | ||
758 | 130 | event['description'] = desc.strip() | ||
759 | 131 | |||
760 | 132 | return event | ||
761 | 133 | |||
762 | 134 | |||
763 | 135 | def json_dumps(data): | ||
764 | 136 | return json.dumps(data, indent=1, sort_keys=True, | ||
765 | 137 | separators=(',', ': ')) | ||
766 | 138 | |||
767 | 139 | |||
768 | 140 | def dump_events(cisource=None, rawdata=None): | ||
769 | 141 | events = [] | ||
770 | 142 | event = None | ||
771 | 143 | CI_EVENT_MATCHES = ['start:', 'finish:', 'Cloud-init v.'] | ||
772 | 144 | |||
773 | 145 | if not any([cisource, rawdata]): | ||
774 | 146 | raise ValueError('Either cisource or rawdata parameters are required') | ||
775 | 147 | |||
776 | 148 | if rawdata: | ||
777 | 149 | data = rawdata.splitlines() | ||
778 | 150 | else: | ||
779 | 151 | data = cisource.readlines() | ||
780 | 152 | |||
781 | 153 | for line in data: | ||
782 | 154 | for match in CI_EVENT_MATCHES: | ||
783 | 155 | if match in line: | ||
784 | 156 | try: | ||
785 | 157 | event = parse_ci_logline(line) | ||
786 | 158 | except ValueError: | ||
787 | 159 | sys.stderr.write('Skipping invalid entry\n') | ||
788 | 160 | if event: | ||
789 | 161 | events.append(event) | ||
790 | 162 | |||
791 | 163 | return events, data | ||
792 | 164 | |||
793 | 165 | |||
794 | 166 | def main(): | ||
795 | 167 | if len(sys.argv) > 1: | ||
796 | 168 | cisource = open(sys.argv[1]) | ||
797 | 169 | else: | ||
798 | 170 | cisource = sys.stdin | ||
799 | 171 | |||
800 | 172 | return json_dumps(dump_events(cisource)) | ||
801 | 173 | |||
802 | 174 | |||
803 | 175 | if __name__ == "__main__": | ||
804 | 176 | print(main()) | ||
805 | diff --git a/cloudinit/analyze/show.py b/cloudinit/analyze/show.py | |||
806 | 0 | new file mode 100644 | 177 | new file mode 100644 |
807 | index 0000000..3e778b8 | |||
808 | --- /dev/null | |||
809 | +++ b/cloudinit/analyze/show.py | |||
810 | @@ -0,0 +1,207 @@ | |||
811 | 1 | # Copyright (C) 2016 Canonical Ltd. | ||
812 | 2 | # | ||
813 | 3 | # Author: Ryan Harper <ryan.harper@canonical.com> | ||
814 | 4 | # | ||
815 | 5 | # This file is part of cloud-init. See LICENSE file for license information. | ||
816 | 6 | |||
817 | 7 | import base64 | ||
818 | 8 | import datetime | ||
819 | 9 | import json | ||
820 | 10 | import os | ||
821 | 11 | |||
822 | 12 | from cloudinit import util | ||
823 | 13 | |||
824 | 14 | # An event: | ||
825 | 15 | ''' | ||
826 | 16 | { | ||
827 | 17 | "description": "executing late commands", | ||
828 | 18 | "event_type": "start", | ||
829 | 19 | "level": "INFO", | ||
830 | 20 | "name": "cmd-install/stage-late" | ||
831 | 21 | "origin": "cloudinit", | ||
832 | 22 | "timestamp": 1461164249.1590767, | ||
833 | 23 | }, | ||
834 | 24 | |||
835 | 25 | { | ||
836 | 26 | "description": "executing late commands", | ||
837 | 27 | "event_type": "finish", | ||
838 | 28 | "level": "INFO", | ||
839 | 29 | "name": "cmd-install/stage-late", | ||
840 | 30 | "origin": "cloudinit", | ||
841 | 31 | "result": "SUCCESS", | ||
842 | 32 | "timestamp": 1461164249.1590767 | ||
843 | 33 | } | ||
844 | 34 | |||
845 | 35 | ''' | ||
846 | 36 | format_key = { | ||
847 | 37 | '%d': 'delta', | ||
848 | 38 | '%D': 'description', | ||
849 | 39 | '%E': 'elapsed', | ||
850 | 40 | '%e': 'event_type', | ||
851 | 41 | '%I': 'indent', | ||
852 | 42 | '%l': 'level', | ||
853 | 43 | '%n': 'name', | ||
854 | 44 | '%o': 'origin', | ||
855 | 45 | '%r': 'result', | ||
856 | 46 | '%t': 'timestamp', | ||
857 | 47 | '%T': 'total_time', | ||
858 | 48 | } | ||
859 | 49 | |||
860 | 50 | formatting_help = " ".join(["{0}: {1}".format(k.replace('%', '%%'), v) | ||
861 | 51 | for k, v in format_key.items()]) | ||
862 | 52 | |||
863 | 53 | |||
864 | 54 | def format_record(msg, event): | ||
865 | 55 | for i, j in format_key.items(): | ||
866 | 56 | if i in msg: | ||
867 | 57 | # ensure consistent formatting of time values | ||
868 | 58 | if j in ['delta', 'elapsed', 'timestamp']: | ||
869 | 59 | msg = msg.replace(i, "{%s:08.5f}" % j) | ||
870 | 60 | else: | ||
871 | 61 | msg = msg.replace(i, "{%s}" % j) | ||
872 | 62 | return msg.format(**event) | ||
873 | 63 | |||
874 | 64 | |||
875 | 65 | def dump_event_files(event): | ||
876 | 66 | content = dict((k, v) for k, v in event.items() if k not in ['content']) | ||
877 | 67 | files = content['files'] | ||
878 | 68 | saved = [] | ||
879 | 69 | for f in files: | ||
880 | 70 | fname = f['path'] | ||
881 | 71 | fn_local = os.path.basename(fname) | ||
882 | 72 | fcontent = base64.b64decode(f['content']).decode('ascii') | ||
883 | 73 | util.write_file(fn_local, fcontent) | ||
884 | 74 | saved.append(fn_local) | ||
885 | 75 | |||
886 | 76 | return saved | ||
887 | 77 | |||
888 | 78 | |||
889 | 79 | def event_name(event): | ||
890 | 80 | if event: | ||
891 | 81 | return event.get('name') | ||
892 | 82 | return None | ||
893 | 83 | |||
894 | 84 | |||
895 | 85 | def event_type(event): | ||
896 | 86 | if event: | ||
897 | 87 | return event.get('event_type') | ||
898 | 88 | return None | ||
899 | 89 | |||
900 | 90 | |||
def event_parent(event):
    """Return the top-level stage name (the text before the first '/')."""
    if not event:
        return None
    # Inline of event_name(): the leading path component is the stage.
    return event.get('name').split("/")[0]
905 | 95 | |||
906 | 96 | |||
def event_timestamp(event):
    """Return the event's 'timestamp' field coerced to float seconds."""
    stamp = event.get('timestamp')
    return float(stamp)
909 | 99 | |||
910 | 100 | |||
def event_datetime(event):
    """Return the event's timestamp as a naive UTC datetime."""
    # Inline of event_timestamp(): coerce the raw stamp to float seconds.
    seconds = float(event.get('timestamp'))
    return datetime.datetime.utcfromtimestamp(seconds)
913 | 103 | |||
914 | 104 | |||
def delta_seconds(t1, t2):
    """Return the signed number of seconds elapsed from *t1* to *t2*."""
    elapsed = t2 - t1
    return elapsed.total_seconds()
917 | 107 | |||
918 | 108 | |||
def event_duration(start, finish):
    """Seconds elapsed between a start event and its matching finish."""
    begin = event_datetime(start)
    end = event_datetime(finish)
    return delta_seconds(begin, end)
921 | 111 | |||
922 | 112 | |||
def event_record(start_time, start, finish):
    """Build a printable record for a completed start/finish event pair.

    Copies the finish event and annotates it with the pair's duration,
    the time elapsed since *start_time*, and an ASCII tree indent
    derived from the event name's nesting depth.
    """
    record = finish.copy()
    depth = event_name(start).count('/') - 1
    record['delta'] = event_duration(start, finish)
    record['elapsed'] = delta_seconds(start_time, event_datetime(start))
    record['indent'] = '|' + ' ' * depth + '`->'
    return record
932 | 122 | |||
933 | 123 | |||
def total_time_record(total_time):
    """Render the per-boot total-time summary line."""
    summary = 'Total Time: %3.5f seconds\n'
    return summary % total_time
936 | 126 | |||
937 | 127 | |||
def generate_records(events, blame_sort=False,
                     print_format="(%n) %d seconds in %I%D",
                     dump_files=False, log_datafiles=False):
    """Render boot events into per-boot lists of formatted record strings.

    @param events: List of event dicts (start/finish pairs) to render.
    @param blame_sort: Unused; retained for interface compatibility.
    @param print_format: %-spec format applied to each paired record.
    @param dump_files: Unused; retained for interface compatibility.
    @param log_datafiles: Unused; retained for interface compatibility.
    @returns: List of boots, each a list of formatted record strings.
    """
    sorted_events = sorted(events, key=lambda x: x['timestamp'])
    records = []
    start_time = None
    total_time = 0.0
    stage_start_time = {}
    stages_seen = []
    boot_records = []

    unprocessed = []
    # BUG FIX: iterate the time-sorted list. The original indexed the
    # unsorted 'events' argument here and for next_evt, silently
    # discarding the sort computed above.
    for e, event in enumerate(sorted_events):
        try:
            next_evt = sorted_events[e + 1]
        except IndexError:
            next_evt = None

        if event_type(event) == 'start':
            if event.get('name') in stages_seen:
                # A stage name repeated means a new boot began; close out
                # the previous boot's records.
                records.append(total_time_record(total_time))
                boot_records.append(records)
                records = []
                start_time = None
                total_time = 0.0

            if start_time is None:
                stages_seen = []
                start_time = event_datetime(event)
                stage_start_time[event_parent(event)] = start_time

            # see if we have a start/finish pair
            if event_name(event) == event_name(next_evt):
                if event_type(next_evt) == 'finish':
                    records.append(format_record(print_format,
                                                 event_record(start_time,
                                                              event,
                                                              next_evt)))
            else:
                # This is a parent (stage) event awaiting its finish event.
                records.append("Starting stage: %s" % event.get('name'))
                unprocessed.append(event)
                stages_seen.append(event.get('name'))
                continue
        else:
            prev_evt = unprocessed.pop()
            if event_name(event) == event_name(prev_evt):
                record = event_record(start_time, prev_evt, event)
                records.append(format_record("Finished stage: "
                                             "(%n) %d seconds ",
                                             record) + "\n")
                total_time += record.get('delta')
            else:
                # not a match, put it back
                unprocessed.append(prev_evt)

    records.append(total_time_record(total_time))
    boot_records.append(records)
    return boot_records
999 | 189 | |||
1000 | 190 | |||
def show_events(events, print_format):
    """Convenience wrapper: render *events* using *print_format*."""
    return generate_records(events, print_format=print_format)
1003 | 193 | |||
1004 | 194 | |||
def load_events(infile, rawdata=None):
    """Read event text from *rawdata* (preferred) or *infile*.

    @returns: Tuple (parsed, data) where parsed is the decoded JSON
        object, or None when the text is not valid JSON, and data is the
        raw text that was read.
    """
    source = rawdata if rawdata else infile
    data = source.read()

    try:
        parsed = json.loads(data)
    except ValueError:
        parsed = None

    return parsed, data
1018 | diff --git a/cloudinit/analyze/tests/test_dump.py b/cloudinit/analyze/tests/test_dump.py | |||
1019 | 0 | new file mode 100644 | 208 | new file mode 100644 |
1020 | index 0000000..f4c4284 | |||
1021 | --- /dev/null | |||
1022 | +++ b/cloudinit/analyze/tests/test_dump.py | |||
1023 | @@ -0,0 +1,210 @@ | |||
1024 | 1 | # This file is part of cloud-init. See LICENSE file for license information. | ||
1025 | 2 | |||
1026 | 3 | from datetime import datetime | ||
1027 | 4 | from textwrap import dedent | ||
1028 | 5 | |||
1029 | 6 | from cloudinit.analyze.dump import ( | ||
1030 | 7 | dump_events, parse_ci_logline, parse_timestamp) | ||
1031 | 8 | from cloudinit.util import subp, write_file | ||
1032 | 9 | from cloudinit.tests.helpers import CiTestCase | ||
1033 | 10 | |||
1034 | 11 | |||
class TestParseTimestamp(CiTestCase):
    """Tests for parse_timestamp across the log timestamp formats it supports.

    Each test cross-checks parse_timestamp against both a strptime-based
    conversion and the date(1) command line tool.
    """

    def test_parse_timestamp_handles_cloud_init_default_format(self):
        """Logs with cloud-init detailed formats will be properly parsed."""
        trusty_fmt = '%Y-%m-%d %H:%M:%S,%f'
        trusty_stamp = '2016-09-12 14:39:20,839'

        parsed = parse_timestamp(trusty_stamp)

        # convert ourselves
        dt = datetime.strptime(trusty_stamp, trusty_fmt)
        expected = float(dt.strftime('%s.%f'))

        # use date(1)
        out, _err = subp(['date', '+%s.%3N', '-d', trusty_stamp])
        timestamp = out.strip()
        date_ts = float(timestamp)

        self.assertEqual(expected, parsed)
        self.assertEqual(expected, date_ts)
        self.assertEqual(date_ts, parsed)

    def test_parse_timestamp_handles_syslog_adding_year(self):
        """Syslog timestamps lack a year. Add year and properly parse."""
        syslog_fmt = '%b %d %H:%M:%S %Y'
        syslog_stamp = 'Aug 08 15:12:51'

        # convert stamp ourselves by adding the missing year value
        year = datetime.now().year
        dt = datetime.strptime(syslog_stamp + " " + str(year), syslog_fmt)
        expected = float(dt.strftime('%s.%f'))
        parsed = parse_timestamp(syslog_stamp)

        # use date(1)
        out, _ = subp(['date', '+%s.%3N', '-d', syslog_stamp])
        timestamp = out.strip()
        date_ts = float(timestamp)

        self.assertEqual(expected, parsed)
        self.assertEqual(expected, date_ts)
        self.assertEqual(date_ts, parsed)

    def test_parse_timestamp_handles_journalctl_format_adding_year(self):
        """Journalctl precise timestamps lack a year. Add year and parse."""
        journal_fmt = '%b %d %H:%M:%S.%f %Y'
        journal_stamp = 'Aug 08 17:15:50.606811'

        # convert stamp ourselves by adding the missing year value
        year = datetime.now().year
        dt = datetime.strptime(journal_stamp + " " + str(year), journal_fmt)
        expected = float(dt.strftime('%s.%f'))
        parsed = parse_timestamp(journal_stamp)

        # use date(1)
        out, _ = subp(['date', '+%s.%6N', '-d', journal_stamp])
        timestamp = out.strip()
        date_ts = float(timestamp)

        self.assertEqual(expected, parsed)
        self.assertEqual(expected, date_ts)
        self.assertEqual(date_ts, parsed)

    def test_parse_unexpected_timestamp_format_with_date_command(self):
        """Dump sends unexpected timestamp formats to data for processing."""
        new_fmt = '%H:%M %m/%d %Y'
        new_stamp = '17:15 08/08'

        # convert stamp ourselves by adding the missing year value
        year = datetime.now().year
        dt = datetime.strptime(new_stamp + " " + str(year), new_fmt)
        expected = float(dt.strftime('%s.%f'))
        parsed = parse_timestamp(new_stamp)

        # use date(1)
        out, _ = subp(['date', '+%s.%6N', '-d', new_stamp])
        timestamp = out.strip()
        date_ts = float(timestamp)

        self.assertEqual(expected, parsed)
        self.assertEqual(expected, date_ts)
        self.assertEqual(date_ts, parsed)
1116 | 93 | |||
1117 | 94 | |||
class TestParseCILogLine(CiTestCase):
    """Tests for parse_ci_logline across supported log line formats."""

    def test_parse_logline_returns_none_without_separators(self):
        """When no separators are found, parse_ci_logline returns None."""
        expected_parse_ignores = [
            '', '-', 'adsf-asdf', '2017-05-22 18:02:01,088', 'CLOUDINIT']
        for parse_ignores in expected_parse_ignores:
            self.assertIsNone(parse_ci_logline(parse_ignores))

    def test_parse_logline_returns_event_for_cloud_init_logs(self):
        """parse_ci_logline returns an event parse from cloud-init format."""
        line = (
            "2017-08-08 20:05:07,147 - util.py[DEBUG]: Cloud-init v. 0.7.9"
            " running 'init-local' at Tue, 08 Aug 2017 20:05:07 +0000. Up"
            " 6.26 seconds.")
        dt = datetime.strptime(
            '2017-08-08 20:05:07,147', '%Y-%m-%d %H:%M:%S,%f')
        timestamp = float(dt.strftime('%s.%f'))
        expected = {
            'description': 'starting search for local datasources',
            'event_type': 'start',
            'name': 'init-local',
            'origin': 'cloudinit',
            'timestamp': timestamp}
        self.assertEqual(expected, parse_ci_logline(line))

    def test_parse_logline_returns_event_for_journalctl_logs(self):
        """parse_ci_logline returns an event parse from journalctl format."""
        line = ("Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT]"
                " util.py[DEBUG]: Cloud-init v. 0.7.8 running 'init-local' at"
                " Thu, 03 Nov 2016 06:51:06 +0000. Up 1.0 seconds.")
        # Journalctl stamps carry no year; the parser is expected to assume
        # the current year, so the test computes the expectation the same way.
        year = datetime.now().year
        dt = datetime.strptime(
            'Nov 03 06:51:06.074410 %d' % year, '%b %d %H:%M:%S.%f %Y')
        timestamp = float(dt.strftime('%s.%f'))
        expected = {
            'description': 'starting search for local datasources',
            'event_type': 'start',
            'name': 'init-local',
            'origin': 'cloudinit',
            'timestamp': timestamp}
        self.assertEqual(expected, parse_ci_logline(line))

    def test_parse_logline_returns_event_for_finish_events(self):
        """parse_ci_logline returns a finish event for a parsed log line."""
        line = ('2016-08-30 21:53:25.972325+00:00 y1 [CLOUDINIT]'
                ' handlers.py[DEBUG]: finish: modules-final: SUCCESS: running'
                ' modules for final')
        expected = {
            'description': 'running modules for final',
            'event_type': 'finish',
            'name': 'modules-final',
            'origin': 'cloudinit',
            'result': 'SUCCESS',
            'timestamp': 1472594005.972}
        self.assertEqual(expected, parse_ci_logline(line))
1174 | 151 | |||
1175 | 152 | |||
# Two sample log lines: a journalctl-style 'start' event and a cloud-init
# default-format 'finish' event. The backslash continuations keep each
# logical log line unbroken in the resulting string.
SAMPLE_LOGS = dedent("""\
Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT] util.py[DEBUG]:\
 Cloud-init v. 0.7.8 running 'init-local' at Thu, 03 Nov 2016\
 06:51:06 +0000. Up 1.0 seconds.
2016-08-30 21:53:25.972325+00:00 y1 [CLOUDINIT] handlers.py[DEBUG]: finish:\
 modules-final: SUCCESS: running modules for final
""")
1183 | 160 | |||
1184 | 161 | |||
class TestDumpEvents(CiTestCase):
    """Tests for dump_events over raw strings and file-like sources."""
    maxDiff = None

    def test_dump_events_with_rawdata(self):
        """Rawdata is split and parsed into a tuple of events and data"""
        events, data = dump_events(rawdata=SAMPLE_LOGS)
        expected_data = SAMPLE_LOGS.splitlines()
        # Journalctl stamps lack a year; expectation assumes current year.
        year = datetime.now().year
        dt1 = datetime.strptime(
            'Nov 03 06:51:06.074410 %d' % year, '%b %d %H:%M:%S.%f %Y')
        timestamp1 = float(dt1.strftime('%s.%f'))
        expected_events = [{
            'description': 'starting search for local datasources',
            'event_type': 'start',
            'name': 'init-local',
            'origin': 'cloudinit',
            'timestamp': timestamp1}, {
            'description': 'running modules for final',
            'event_type': 'finish',
            'name': 'modules-final',
            'origin': 'cloudinit',
            'result': 'SUCCESS',
            'timestamp': 1472594005.972}]
        self.assertEqual(expected_events, events)
        self.assertEqual(expected_data, data)

    def test_dump_events_with_cisource(self):
        """Cisource file is read and parsed into a tuple of events and data."""
        tmpfile = self.tmp_path('logfile')
        write_file(tmpfile, SAMPLE_LOGS)
        # NOTE(review): the open() handle is never explicitly closed;
        # consider a with-block if this test is ever reworked.
        events, data = dump_events(cisource=open(tmpfile))
        year = datetime.now().year
        dt1 = datetime.strptime(
            'Nov 03 06:51:06.074410 %d' % year, '%b %d %H:%M:%S.%f %Y')
        timestamp1 = float(dt1.strftime('%s.%f'))
        expected_events = [{
            'description': 'starting search for local datasources',
            'event_type': 'start',
            'name': 'init-local',
            'origin': 'cloudinit',
            'timestamp': timestamp1}, {
            'description': 'running modules for final',
            'event_type': 'finish',
            'name': 'modules-final',
            'origin': 'cloudinit',
            'result': 'SUCCESS',
            'timestamp': 1472594005.972}]
        self.assertEqual(expected_events, events)
        self.assertEqual(SAMPLE_LOGS.splitlines(), [d.strip() for d in data])
1234 | diff --git a/cloudinit/apport.py b/cloudinit/apport.py | |||
1235 | 0 | new file mode 100644 | 211 | new file mode 100644 |
1236 | index 0000000..221f341 | |||
1237 | --- /dev/null | |||
1238 | +++ b/cloudinit/apport.py | |||
1239 | @@ -0,0 +1,105 @@ | |||
1240 | 1 | # Copyright (C) 2017 Canonical Ltd. | ||
1241 | 2 | # | ||
1242 | 3 | # This file is part of cloud-init. See LICENSE file for license information. | ||
1243 | 4 | |||
1244 | 5 | '''Cloud-init apport interface''' | ||
1245 | 6 | |||
1246 | 7 | try: | ||
1247 | 8 | from apport.hookutils import ( | ||
1248 | 9 | attach_file, attach_root_command_outputs, root_command_output) | ||
1249 | 10 | has_apport = True | ||
1250 | 11 | except ImportError: | ||
1251 | 12 | has_apport = False | ||
1252 | 13 | |||
1253 | 14 | |||
# Choices presented to the user when asking which cloud this bug occurred on;
# attach_cloud_info indexes this list by the UI's choice result.
KNOWN_CLOUD_NAMES = [
    'Amazon - Ec2', 'AliYun', 'AltCloud', 'Azure', 'Bigstep', 'CloudSigma',
    'CloudStack', 'DigitalOcean', 'GCE - Google Compute Engine', 'MAAS',
    'NoCloud', 'OpenNebula', 'OpenStack', 'OVF', 'Scaleway', 'SmartOS',
    'VMware', 'Other']

# Potentially clear text collected logs
CLOUDINIT_LOG = '/var/log/cloud-init.log'
CLOUDINIT_OUTPUT_LOG = '/var/log/cloud-init-output.log'
USER_DATA_FILE = '/var/lib/cloud/instance/user-data.txt'  # Optional
1264 | 25 | |||
1265 | 26 | |||
def attach_cloud_init_logs(report, ui=None):
    '''Attach cloud-init logs and tarfile from 'cloud-init collect-logs'.'''
    commands = {
        'cloud-init-log-warnings':
            'egrep -i "warn|error" /var/log/cloud-init.log',
        'cloud-init-output.log.txt': 'cat /var/log/cloud-init-output.log'}
    attach_root_command_outputs(report, commands)
    # Build the full log tarball, then attach it to the report.
    root_command_output(
        ['cloud-init', 'collect-logs', '-t', '/tmp/cloud-init-logs.tgz'])
    attach_file(report, '/tmp/cloud-init-logs.tgz', 'logs.tgz')
1275 | 36 | |||
1276 | 37 | |||
def attach_hwinfo(report, ui=None):
    '''Optionally attach hardware info from lshw.'''
    if not ui:
        return
    prompt = (
        'Your device details (lshw) may be useful to developers when'
        ' addressing this bug, but gathering it requires admin privileges.'
        ' Would you like to include this info?')
    if ui.yesno(prompt):
        attach_root_command_outputs(report, {'lshw.txt': 'lshw'})
1285 | 46 | |||
1286 | 47 | |||
def attach_cloud_info(report, ui=None):
    '''Prompt for cloud details if available.'''
    if not ui:
        return
    response = ui.yesno('Is this machine running in a cloud environment?')
    if response is None:
        raise StopIteration  # User cancelled
    if not response:
        report['CloudName'] = 'None'
        return
    prompt = ('Please select the cloud vendor or environment in which'
              ' this instance is running')
    choice = ui.choice(prompt, KNOWN_CLOUD_NAMES)
    if choice:
        report['CloudName'] = KNOWN_CLOUD_NAMES[choice[0]]
1302 | 63 | |||
1303 | 64 | |||
def attach_user_data(report, ui=None):
    '''Optionally provide user-data if desired.'''
    if not ui:
        return
    prompt = (
        'Your user-data or cloud-config file can optionally be provided'
        ' from {0} and could be useful to developers when addressing this'
        ' bug. Do you wish to attach user-data to this bug?'.format(
            USER_DATA_FILE))
    response = ui.yesno(prompt)
    if response is None:
        raise StopIteration  # User cancelled
    if response:
        attach_file(report, USER_DATA_FILE, 'user_data.txt')
1317 | 78 | |||
1318 | 79 | |||
def add_bug_tags(report):
    '''Add any appropriate tags to the bug.'''
    journal_errors = report.get('JournalErrors', '')
    if 'Breaking ordering cycle' in journal_errors:
        report['Tags'] = 'systemd-ordering'
1325 | 86 | |||
1326 | 87 | |||
def add_info(report, ui):
    '''This is an entry point to run cloud-init's apport functionality.

    Distros which want apport support will have a cloud-init package-hook at
    /usr/share/apport/package-hooks/cloud-init.py which defines an add_info
    function and returns the result of cloudinit.apport.add_info(report, ui).
    '''
    if not has_apport:
        raise RuntimeError(
            'No apport imports discovered. Apport functionality disabled')
    # Gather each piece in turn; a cancelled prompt raises out of here.
    for attach in (attach_cloud_init_logs, attach_hwinfo,
                   attach_cloud_info, attach_user_data):
        attach(report, ui)
    add_bug_tags(report)
    return True
1343 | 104 | |||
1344 | 105 | # vi: ts=4 expandtab | ||
1345 | diff --git a/cloudinit/cmd/devel/__init__.py b/cloudinit/cmd/devel/__init__.py | |||
1346 | 0 | new file mode 100644 | 106 | new file mode 100644 |
1347 | index 0000000..e69de29 | |||
1348 | --- /dev/null | |||
1349 | +++ b/cloudinit/cmd/devel/__init__.py | |||
1350 | diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py | |||
1351 | 1 | new file mode 100644 | 107 | new file mode 100644 |
1352 | index 0000000..35ca478 | |||
1353 | --- /dev/null | |||
1354 | +++ b/cloudinit/cmd/devel/logs.py | |||
1355 | @@ -0,0 +1,101 @@ | |||
1356 | 1 | # Copyright (C) 2017 Canonical Ltd. | ||
1357 | 2 | # | ||
1358 | 3 | # This file is part of cloud-init. See LICENSE file for license information. | ||
1359 | 4 | |||
1360 | 5 | """Define 'collect-logs' utility and handler to include in cloud-init cmd.""" | ||
1361 | 6 | |||
1362 | 7 | import argparse | ||
1363 | 8 | from cloudinit.util import ( | ||
1364 | 9 | ProcessExecutionError, chdir, copy, ensure_dir, subp, write_file) | ||
1365 | 10 | from cloudinit.temp_utils import tempdir | ||
1366 | 11 | from datetime import datetime | ||
1367 | 12 | import os | ||
1368 | 13 | import shutil | ||
1369 | 14 | |||
1370 | 15 | |||
# Log files copied verbatim into the collected tarball.
CLOUDINIT_LOGS = ['/var/log/cloud-init.log', '/var/log/cloud-init-output.log']
# Runtime state directory copied wholesale into the tarball's run/ subtree.
CLOUDINIT_RUN_DIR = '/run/cloud-init'
USER_DATA_FILE = '/var/lib/cloud/instance/user-data.txt'  # Optional
1374 | 19 | |||
1375 | 20 | |||
def get_parser(parser=None):
    """Build or extend and arg parser for collect-logs utility.

    @param parser: Optional existing ArgumentParser instance representing the
        collect-logs subcommand which will be extended to support the args of
        this utility.

    @returns: ArgumentParser with proper argument configuration.
    """
    if not parser:
        parser = argparse.ArgumentParser(
            prog='collect-logs',
            description='Collect and tar all cloud-init debug info')
    tarfile_help = ('The tarfile to create containing all collected logs.'
                    ' Default: cloud-init.tar.gz')
    parser.add_argument(
        "--tarfile", '-t', default='cloud-init.tar.gz', help=tarfile_help)
    userdata_help = (
        'Optionally include user-data from {0} which could contain'
        ' sensitive information.'.format(USER_DATA_FILE))
    parser.add_argument(
        "--include-userdata", '-u', default=False, action='store_true',
        dest='userdata', help=userdata_help)
    return parser
1399 | 44 | |||
1400 | 45 | |||
def _write_command_output_to_file(cmd, filename):
    """Helper which runs a command and writes output or error to filename."""
    try:
        out, _ = subp(cmd)
    except ProcessExecutionError as e:
        # Preserve the failure details in place of the missing output.
        content = str(e)
    else:
        content = out
    write_file(filename, content)
1409 | 54 | |||
1410 | 55 | |||
def collect_logs(tarfile, include_userdata):
    """Collect all cloud-init logs and tar them up into the provided tarfile.

    @param tarfile: The path of the tar-gzipped file to create.
    @param include_userdata: Boolean, true means include user-data.
    """
    # Absolute path needed because the tar command below runs from tmp_dir.
    tarfile = os.path.abspath(tarfile)
    date = datetime.utcnow().date().strftime('%Y-%m-%d')
    log_dir = 'cloud-init-logs-{0}'.format(date)
    with tempdir(dir='/tmp') as tmp_dir:
        log_dir = os.path.join(tmp_dir, log_dir)
        # Capture package version, dmesg and journal output into files.
        _write_command_output_to_file(
            ['dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'],
            os.path.join(log_dir, 'version'))
        _write_command_output_to_file(
            ['dmesg'], os.path.join(log_dir, 'dmesg.txt'))
        _write_command_output_to_file(
            ['journalctl', '-o', 'short-precise'],
            os.path.join(log_dir, 'journal.txt'))
        for log in CLOUDINIT_LOGS:
            copy(log, log_dir)
        if include_userdata:
            copy(USER_DATA_FILE, log_dir)
        # Snapshot the runtime state directory under run/cloud-init.
        run_dir = os.path.join(log_dir, 'run')
        ensure_dir(run_dir)
        shutil.copytree(CLOUDINIT_RUN_DIR, os.path.join(run_dir, 'cloud-init'))
        # Tar with a relative path so the archive root is the dated dir name.
        with chdir(tmp_dir):
            subp(['tar', 'czvf', tarfile, log_dir.replace(tmp_dir + '/', '')])
1439 | 84 | |||
1440 | 85 | |||
def handle_collect_logs_args(name, args):
    """Handle calls to 'cloud-init collect-logs' as a subcommand.

    @param name: The invoked subcommand name (unused).
    @param args: Namespace from get_parser() carrying tarfile and userdata.
    """
    collect_logs(args.tarfile, args.userdata)
1444 | 89 | |||
1445 | 90 | |||
def main():
    """Tool to collect and tar all cloud-init related logs."""
    parser = get_parser()
    handle_collect_logs_args('collect-logs', parser.parse_args())
    return 0


# Entry point when executed as a standalone script.
if __name__ == '__main__':
    main()
1455 | 100 | |||
1456 | 101 | # vi: ts=4 expandtab | ||
1457 | diff --git a/cloudinit/cmd/devel/parser.py b/cloudinit/cmd/devel/parser.py | |||
1458 | 0 | new file mode 100644 | 102 | new file mode 100644 |
1459 | index 0000000..acacc4e | |||
1460 | --- /dev/null | |||
1461 | +++ b/cloudinit/cmd/devel/parser.py | |||
1462 | @@ -0,0 +1,26 @@ | |||
1463 | 1 | # Copyright (C) 2017 Canonical Ltd. | ||
1464 | 2 | # | ||
1465 | 3 | # This file is part of cloud-init. See LICENSE file for license information. | ||
1466 | 4 | |||
1467 | 5 | """Define 'devel' subcommand argument parsers to include in cloud-init cmd.""" | ||
1468 | 6 | |||
1469 | 7 | import argparse | ||
1470 | 8 | from cloudinit.config.schema import ( | ||
1471 | 9 | get_parser as schema_parser, handle_schema_args) | ||
1472 | 10 | |||
1473 | 11 | |||
def get_parser(parser=None):
    """Build or extend an argument parser for 'devel' subcommands.

    @param parser: Optional existing ArgumentParser to extend.
    @returns: ArgumentParser with the 'schema' subcommand configured.
    """
    if not parser:
        parser = argparse.ArgumentParser(
            prog='cloudinit-devel',
            description='Run development cloud-init tools')
    subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand')
    subparsers.required = True

    # Construct schema subcommand parser
    parser_schema = subparsers.add_parser(
        'schema', help='Validate cloud-config files or document schema')
    schema_parser(parser_schema)
    parser_schema.set_defaults(action=('schema', handle_schema_args))

    return parser
1489 | diff --git a/cloudinit/cmd/devel/tests/__init__.py b/cloudinit/cmd/devel/tests/__init__.py | |||
1490 | 0 | new file mode 100644 | 27 | new file mode 100644 |
1491 | index 0000000..e69de29 | |||
1492 | --- /dev/null | |||
1493 | +++ b/cloudinit/cmd/devel/tests/__init__.py | |||
1494 | diff --git a/cloudinit/cmd/devel/tests/test_logs.py b/cloudinit/cmd/devel/tests/test_logs.py | |||
1495 | 1 | new file mode 100644 | 28 | new file mode 100644 |
1496 | index 0000000..dc4947c | |||
1497 | --- /dev/null | |||
1498 | +++ b/cloudinit/cmd/devel/tests/test_logs.py | |||
1499 | @@ -0,0 +1,120 @@ | |||
1500 | 1 | # This file is part of cloud-init. See LICENSE file for license information. | ||
1501 | 2 | |||
1502 | 3 | from cloudinit.cmd.devel import logs | ||
1503 | 4 | from cloudinit.util import ensure_dir, load_file, subp, write_file | ||
1504 | 5 | from cloudinit.tests.helpers import FilesystemMockingTestCase, wrap_and_call | ||
1505 | 6 | from datetime import datetime | ||
1506 | 7 | import os | ||
1507 | 8 | |||
1508 | 9 | |||
1509 | 10 | class TestCollectLogs(FilesystemMockingTestCase): | ||
1510 | 11 | |||
1511 | 12 | def setUp(self): | ||
1512 | 13 | super(TestCollectLogs, self).setUp() | ||
1513 | 14 | self.new_root = self.tmp_dir() | ||
1514 | 15 | self.run_dir = self.tmp_path('run', self.new_root) | ||
1515 | 16 | |||
1516 | 17 | def test_collect_logs_creates_tarfile(self): | ||
1517 | 18 | """collect-logs creates a tarfile with all related cloud-init info.""" | ||
1518 | 19 | log1 = self.tmp_path('cloud-init.log', self.new_root) | ||
1519 | 20 | write_file(log1, 'cloud-init-log') | ||
1520 | 21 | log2 = self.tmp_path('cloud-init-output.log', self.new_root) | ||
1521 | 22 | write_file(log2, 'cloud-init-output-log') | ||
1522 | 23 | ensure_dir(self.run_dir) | ||
1523 | 24 | write_file(self.tmp_path('results.json', self.run_dir), 'results') | ||
1524 | 25 | output_tarfile = self.tmp_path('logs.tgz') | ||
1525 | 26 | |||
1526 | 27 | date = datetime.utcnow().date().strftime('%Y-%m-%d') | ||
1527 | 28 | date_logdir = 'cloud-init-logs-{0}'.format(date) | ||
1528 | 29 | |||
1529 | 30 | expected_subp = { | ||
1530 | 31 | ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'): | ||
1531 | 32 | '0.7fake\n', | ||
1532 | 33 | ('dmesg',): 'dmesg-out\n', | ||
1533 | 34 | ('journalctl', '-o', 'short-precise'): 'journal-out\n', | ||
1534 | 35 | ('tar', 'czvf', output_tarfile, date_logdir): '' | ||
1535 | 36 | } | ||
1536 | 37 | |||
1537 | 38 | def fake_subp(cmd): | ||
1538 | 39 | cmd_tuple = tuple(cmd) | ||
1539 | 40 | if cmd_tuple not in expected_subp: | ||
1540 | 41 | raise AssertionError( | ||
1541 | 42 | 'Unexpected command provided to subp: {0}'.format(cmd)) | ||
1542 | 43 | if cmd == ['tar', 'czvf', output_tarfile, date_logdir]: | ||
1543 | 44 | subp(cmd) # Pass through tar cmd so we can check output | ||
1544 | 45 | return expected_subp[cmd_tuple], '' | ||
1545 | 46 | |||
1546 | 47 | wrap_and_call( | ||
1547 | 48 | 'cloudinit.cmd.devel.logs', | ||
1548 | 49 | {'subp': {'side_effect': fake_subp}, | ||
1549 | 50 | 'CLOUDINIT_LOGS': {'new': [log1, log2]}, | ||
1550 | 51 | 'CLOUDINIT_RUN_DIR': {'new': self.run_dir}}, | ||
1551 | 52 | logs.collect_logs, output_tarfile, include_userdata=False) | ||
1552 | 53 | # unpack the tarfile and check file contents | ||
1553 | 54 | subp(['tar', 'zxvf', output_tarfile, '-C', self.new_root]) | ||
1554 | 55 | out_logdir = self.tmp_path(date_logdir, self.new_root) | ||
1555 | 56 | self.assertEqual( | ||
1556 | 57 | '0.7fake\n', | ||
1557 | 58 | load_file(os.path.join(out_logdir, 'version'))) | ||
1558 | 59 | self.assertEqual( | ||
1559 | 60 | 'cloud-init-log', | ||
1560 | 61 | load_file(os.path.join(out_logdir, 'cloud-init.log'))) | ||
1561 | 62 | self.assertEqual( | ||
1562 | 63 | 'cloud-init-output-log', | ||
1563 | 64 | load_file(os.path.join(out_logdir, 'cloud-init-output.log'))) | ||
1564 | 65 | self.assertEqual( | ||
1565 | 66 | 'dmesg-out\n', | ||
1566 | 67 | load_file(os.path.join(out_logdir, 'dmesg.txt'))) | ||
1567 | 68 | self.assertEqual( | ||
1568 | 69 | 'journal-out\n', | ||
1569 | 70 | load_file(os.path.join(out_logdir, 'journal.txt'))) | ||
1570 | 71 | self.assertEqual( | ||
1571 | 72 | 'results', | ||
1572 | 73 | load_file( | ||
1573 | 74 | os.path.join(out_logdir, 'run', 'cloud-init', 'results.json'))) | ||
1574 | 75 | |||
1575 | 76 | def test_collect_logs_includes_optional_userdata(self): | ||
1576 | 77 | """collect-logs include userdata when --include-userdata is set.""" | ||
1577 | 78 | log1 = self.tmp_path('cloud-init.log', self.new_root) | ||
1578 | 79 | write_file(log1, 'cloud-init-log') | ||
1579 | 80 | log2 = self.tmp_path('cloud-init-output.log', self.new_root) | ||
1580 | 81 | write_file(log2, 'cloud-init-output-log') | ||
1581 | 82 | userdata = self.tmp_path('user-data.txt', self.new_root) | ||
1582 | 83 | write_file(userdata, 'user-data') | ||
1583 | 84 | ensure_dir(self.run_dir) | ||
1584 | 85 | write_file(self.tmp_path('results.json', self.run_dir), 'results') | ||
1585 | 86 | output_tarfile = self.tmp_path('logs.tgz') | ||
1586 | 87 | |||
1587 | 88 | date = datetime.utcnow().date().strftime('%Y-%m-%d') | ||
1588 | 89 | date_logdir = 'cloud-init-logs-{0}'.format(date) | ||
1589 | 90 | |||
1590 | 91 | expected_subp = { | ||
1591 | 92 | ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'): | ||
1592 | 93 | '0.7fake', | ||
1593 | 94 | ('dmesg',): 'dmesg-out\n', | ||
1594 | 95 | ('journalctl', '-o', 'short-precise'): 'journal-out\n', | ||
1595 | 96 | ('tar', 'czvf', output_tarfile, date_logdir): '' | ||
1596 | 97 | } | ||
1597 | 98 | |||
1598 | 99 | def fake_subp(cmd): | ||
1599 | 100 | cmd_tuple = tuple(cmd) | ||
1600 | 101 | if cmd_tuple not in expected_subp: | ||
1601 | 102 | raise AssertionError( | ||
1602 | 103 | 'Unexpected command provided to subp: {0}'.format(cmd)) | ||
1603 | 104 | if cmd == ['tar', 'czvf', output_tarfile, date_logdir]: | ||
1604 | 105 | subp(cmd) # Pass through tar cmd so we can check output | ||
1605 | 106 | return expected_subp[cmd_tuple], '' | ||
1606 | 107 | |||
1607 | 108 | wrap_and_call( | ||
1608 | 109 | 'cloudinit.cmd.devel.logs', | ||
1609 | 110 | {'subp': {'side_effect': fake_subp}, | ||
1610 | 111 | 'CLOUDINIT_LOGS': {'new': [log1, log2]}, | ||
1611 | 112 | 'CLOUDINIT_RUN_DIR': {'new': self.run_dir}, | ||
1612 | 113 | 'USER_DATA_FILE': {'new': userdata}}, | ||
1613 | 114 | logs.collect_logs, output_tarfile, include_userdata=True) | ||
1614 | 115 | # unpack the tarfile and check file contents | ||
1615 | 116 | subp(['tar', 'zxvf', output_tarfile, '-C', self.new_root]) | ||
1616 | 117 | out_logdir = self.tmp_path(date_logdir, self.new_root) | ||
1617 | 118 | self.assertEqual( | ||
1618 | 119 | 'user-data', | ||
1619 | 120 | load_file(os.path.join(out_logdir, 'user-data.txt'))) | ||
1620 | diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py | |||
1621 | index 139e03b..6fb9d9e 100644 | |||
1622 | --- a/cloudinit/cmd/main.py | |||
1623 | +++ b/cloudinit/cmd/main.py | |||
1624 | @@ -50,13 +50,6 @@ WELCOME_MSG_TPL = ("Cloud-init v. {version} running '{action}' at " | |||
1625 | 50 | # Module section template | 50 | # Module section template |
1626 | 51 | MOD_SECTION_TPL = "cloud_%s_modules" | 51 | MOD_SECTION_TPL = "cloud_%s_modules" |
1627 | 52 | 52 | ||
1628 | 53 | # Things u can query on | ||
1629 | 54 | QUERY_DATA_TYPES = [ | ||
1630 | 55 | 'data', | ||
1631 | 56 | 'data_raw', | ||
1632 | 57 | 'instance_id', | ||
1633 | 58 | ] | ||
1634 | 59 | |||
1635 | 60 | # Frequency shortname to full name | 53 | # Frequency shortname to full name |
1636 | 61 | # (so users don't have to remember the full name...) | 54 | # (so users don't have to remember the full name...) |
1637 | 62 | FREQ_SHORT_NAMES = { | 55 | FREQ_SHORT_NAMES = { |
1638 | @@ -510,11 +503,6 @@ def main_modules(action_name, args): | |||
1639 | 510 | return run_module_section(mods, name, name) | 503 | return run_module_section(mods, name, name) |
1640 | 511 | 504 | ||
1641 | 512 | 505 | ||
1642 | 513 | def main_query(name, _args): | ||
1643 | 514 | raise NotImplementedError(("Action '%s' is not" | ||
1644 | 515 | " currently implemented") % (name)) | ||
1645 | 516 | |||
1646 | 517 | |||
1647 | 518 | def main_single(name, args): | 506 | def main_single(name, args): |
1648 | 519 | # Cloud-init single stage is broken up into the following sub-stages | 507 | # Cloud-init single stage is broken up into the following sub-stages |
1649 | 520 | # 1. Ensure that the init object fetches its config without errors | 508 | # 1. Ensure that the init object fetches its config without errors |
1650 | @@ -688,11 +676,10 @@ def main_features(name, args): | |||
1651 | 688 | 676 | ||
1652 | 689 | 677 | ||
1653 | 690 | def main(sysv_args=None): | 678 | def main(sysv_args=None): |
1659 | 691 | if sysv_args is not None: | 679 | if not sysv_args: |
1660 | 692 | parser = argparse.ArgumentParser(prog=sysv_args[0]) | 680 | sysv_args = sys.argv |
1661 | 693 | sysv_args = sysv_args[1:] | 681 | parser = argparse.ArgumentParser(prog=sysv_args[0]) |
1662 | 694 | else: | 682 | sysv_args = sysv_args[1:] |
1658 | 695 | parser = argparse.ArgumentParser() | ||
1663 | 696 | 683 | ||
1664 | 697 | # Top level args | 684 | # Top level args |
1665 | 698 | parser.add_argument('--version', '-v', action='version', | 685 | parser.add_argument('--version', '-v', action='version', |
1666 | @@ -713,7 +700,8 @@ def main(sysv_args=None): | |||
1667 | 713 | default=False) | 700 | default=False) |
1668 | 714 | 701 | ||
1669 | 715 | parser.set_defaults(reporter=None) | 702 | parser.set_defaults(reporter=None) |
1671 | 716 | subparsers = parser.add_subparsers() | 703 | subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand') |
1672 | 704 | subparsers.required = True | ||
1673 | 717 | 705 | ||
1674 | 718 | # Each action and its sub-options (if any) | 706 | # Each action and its sub-options (if any) |
1675 | 719 | parser_init = subparsers.add_parser('init', | 707 | parser_init = subparsers.add_parser('init', |
1676 | @@ -737,17 +725,6 @@ def main(sysv_args=None): | |||
1677 | 737 | choices=('init', 'config', 'final')) | 725 | choices=('init', 'config', 'final')) |
1678 | 738 | parser_mod.set_defaults(action=('modules', main_modules)) | 726 | parser_mod.set_defaults(action=('modules', main_modules)) |
1679 | 739 | 727 | ||
1680 | 740 | # These settings are used when you want to query information | ||
1681 | 741 | # stored in the cloud-init data objects/directories/files | ||
1682 | 742 | parser_query = subparsers.add_parser('query', | ||
1683 | 743 | help=('query information stored ' | ||
1684 | 744 | 'in cloud-init')) | ||
1685 | 745 | parser_query.add_argument("--name", '-n', action="store", | ||
1686 | 746 | help="item name to query on", | ||
1687 | 747 | required=True, | ||
1688 | 748 | choices=QUERY_DATA_TYPES) | ||
1689 | 749 | parser_query.set_defaults(action=('query', main_query)) | ||
1690 | 750 | |||
1691 | 751 | # This subcommand allows you to run a single module | 728 | # This subcommand allows you to run a single module |
1692 | 752 | parser_single = subparsers.add_parser('single', | 729 | parser_single = subparsers.add_parser('single', |
1693 | 753 | help=('run a single module ')) | 730 | help=('run a single module ')) |
1694 | @@ -781,15 +758,39 @@ def main(sysv_args=None): | |||
1695 | 781 | help=('list defined features')) | 758 | help=('list defined features')) |
1696 | 782 | parser_features.set_defaults(action=('features', main_features)) | 759 | parser_features.set_defaults(action=('features', main_features)) |
1697 | 783 | 760 | ||
1698 | 761 | parser_analyze = subparsers.add_parser( | ||
1699 | 762 | 'analyze', help='Devel tool: Analyze cloud-init logs and data') | ||
1700 | 763 | |||
1701 | 764 | parser_devel = subparsers.add_parser( | ||
1702 | 765 | 'devel', help='Run development tools') | ||
1703 | 766 | |||
1704 | 767 | parser_collect_logs = subparsers.add_parser( | ||
1705 | 768 | 'collect-logs', help='Collect and tar all cloud-init debug info') | ||
1706 | 769 | |||
1707 | 770 | if sysv_args: | ||
1708 | 771 | # Only load subparsers if subcommand is specified to avoid load cost | ||
1709 | 772 | if sysv_args[0] == 'analyze': | ||
1710 | 773 | from cloudinit.analyze.__main__ import get_parser as analyze_parser | ||
1711 | 774 | # Construct analyze subcommand parser | ||
1712 | 775 | analyze_parser(parser_analyze) | ||
1713 | 776 | elif sysv_args[0] == 'devel': | ||
1714 | 777 | from cloudinit.cmd.devel.parser import get_parser as devel_parser | ||
1715 | 778 | # Construct devel subcommand parser | ||
1716 | 779 | devel_parser(parser_devel) | ||
1717 | 780 | elif sysv_args[0] == 'collect-logs': | ||
1718 | 781 | from cloudinit.cmd.devel.logs import ( | ||
1719 | 782 | get_parser as logs_parser, handle_collect_logs_args) | ||
1720 | 783 | logs_parser(parser_collect_logs) | ||
1721 | 784 | parser_collect_logs.set_defaults( | ||
1722 | 785 | action=('collect-logs', handle_collect_logs_args)) | ||
1723 | 786 | |||
1724 | 784 | args = parser.parse_args(args=sysv_args) | 787 | args = parser.parse_args(args=sysv_args) |
1725 | 785 | 788 | ||
1730 | 786 | try: | 789 | # Subparsers.required = True and each subparser sets action=(name, functor) |
1731 | 787 | (name, functor) = args.action | 790 | (name, functor) = args.action |
1728 | 788 | except AttributeError: | ||
1729 | 789 | parser.error('too few arguments') | ||
1732 | 790 | 791 | ||
1733 | 791 | # Setup basic logging to start (until reinitialized) | 792 | # Setup basic logging to start (until reinitialized) |
1735 | 792 | # iff in debug mode... | 793 | # iff in debug mode. |
1736 | 793 | if args.debug: | 794 | if args.debug: |
1737 | 794 | logging.setupBasicLogging() | 795 | logging.setupBasicLogging() |
1738 | 795 | 796 | ||
1739 | diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py | |||
1740 | index 604f93b..233da1e 100644 | |||
1741 | --- a/cloudinit/config/cc_bootcmd.py | |||
1742 | +++ b/cloudinit/config/cc_bootcmd.py | |||
1743 | @@ -3,44 +3,73 @@ | |||
1744 | 3 | # | 3 | # |
1745 | 4 | # Author: Scott Moser <scott.moser@canonical.com> | 4 | # Author: Scott Moser <scott.moser@canonical.com> |
1746 | 5 | # Author: Juerg Haefliger <juerg.haefliger@hp.com> | 5 | # Author: Juerg Haefliger <juerg.haefliger@hp.com> |
1747 | 6 | # Author: Chad Smith <chad.smith@canonical.com> | ||
1748 | 6 | # | 7 | # |
1749 | 7 | # This file is part of cloud-init. See LICENSE file for license information. | 8 | # This file is part of cloud-init. See LICENSE file for license information. |
1750 | 8 | 9 | ||
1778 | 9 | """ | 10 | """Bootcmd: run arbitrary commands early in the boot process.""" |
1752 | 10 | Bootcmd | ||
1753 | 11 | ------- | ||
1754 | 12 | **Summary:** run commands early in boot process | ||
1755 | 13 | |||
1756 | 14 | This module runs arbitrary commands very early in the boot process, | ||
1757 | 15 | only slightly after a boothook would run. This is very similar to a | ||
1758 | 16 | boothook, but more user friendly. The environment variable ``INSTANCE_ID`` | ||
1759 | 17 | will be set to the current instance id for all run commands. Commands can be | ||
1760 | 18 | specified either as lists or strings. For invocation details, see ``runcmd``. | ||
1761 | 19 | |||
1762 | 20 | .. note:: | ||
1763 | 21 | bootcmd should only be used for things that could not be done later in the | ||
1764 | 22 | boot process. | ||
1765 | 23 | |||
1766 | 24 | **Internal name:** ``cc_bootcmd`` | ||
1767 | 25 | |||
1768 | 26 | **Module frequency:** per always | ||
1769 | 27 | |||
1770 | 28 | **Supported distros:** all | ||
1771 | 29 | |||
1772 | 30 | **Config keys**:: | ||
1773 | 31 | |||
1774 | 32 | bootcmd: | ||
1775 | 33 | - echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts | ||
1776 | 34 | - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ] | ||
1777 | 35 | """ | ||
1779 | 36 | 11 | ||
1780 | 37 | import os | 12 | import os |
1781 | 13 | from textwrap import dedent | ||
1782 | 38 | 14 | ||
1783 | 15 | from cloudinit.config.schema import ( | ||
1784 | 16 | get_schema_doc, validate_cloudconfig_schema) | ||
1785 | 39 | from cloudinit.settings import PER_ALWAYS | 17 | from cloudinit.settings import PER_ALWAYS |
1786 | 18 | from cloudinit import temp_utils | ||
1787 | 40 | from cloudinit import util | 19 | from cloudinit import util |
1788 | 41 | 20 | ||
1789 | 42 | frequency = PER_ALWAYS | 21 | frequency = PER_ALWAYS |
1790 | 43 | 22 | ||
1791 | 23 | # The schema definition for each cloud-config module is a strict contract for | ||
1792 | 24 | # describing supported configuration parameters for each cloud-config section. | ||
1793 | 25 | # It allows cloud-config to validate and alert users to invalid or ignored | ||
1794 | 26 | # configuration options before actually attempting to deploy with said | ||
1795 | 27 | # configuration. | ||
1796 | 28 | |||
1797 | 29 | distros = ['all'] | ||
1798 | 30 | |||
1799 | 31 | schema = { | ||
1800 | 32 | 'id': 'cc_bootcmd', | ||
1801 | 33 | 'name': 'Bootcmd', | ||
1802 | 34 | 'title': 'Run arbitrary commands early in the boot process', | ||
1803 | 35 | 'description': dedent("""\ | ||
1804 | 36 | This module runs arbitrary commands very early in the boot process, | ||
1805 | 37 | only slightly after a boothook would run. This is very similar to a | ||
1806 | 38 | boothook, but more user friendly. The environment variable | ||
1807 | 39 | ``INSTANCE_ID`` will be set to the current instance id for all run | ||
1808 | 40 | commands. Commands can be specified either as lists or strings. For | ||
1809 | 41 | invocation details, see ``runcmd``. | ||
1810 | 42 | |||
1811 | 43 | .. note:: | ||
1812 | 44 | bootcmd should only be used for things that could not be done later | ||
1813 | 45 | in the boot process."""), | ||
1814 | 46 | 'distros': distros, | ||
1815 | 47 | 'examples': [dedent("""\ | ||
1816 | 48 | bootcmd: | ||
1817 | 49 | - echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts | ||
1818 | 50 | - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ] | ||
1819 | 51 | """)], | ||
1820 | 52 | 'frequency': PER_ALWAYS, | ||
1821 | 53 | 'type': 'object', | ||
1822 | 54 | 'properties': { | ||
1823 | 55 | 'bootcmd': { | ||
1824 | 56 | 'type': 'array', | ||
1825 | 57 | 'items': { | ||
1826 | 58 | 'oneOf': [ | ||
1827 | 59 | {'type': 'array', 'items': {'type': 'string'}}, | ||
1828 | 60 | {'type': 'string'}] | ||
1829 | 61 | }, | ||
1830 | 62 | 'additionalItems': False, # Reject items of non-string non-list | ||
1831 | 63 | 'additionalProperties': False, | ||
1832 | 64 | 'minItems': 1, | ||
1833 | 65 | 'required': [], | ||
1834 | 66 | 'uniqueItems': True | ||
1835 | 67 | } | ||
1836 | 68 | } | ||
1837 | 69 | } | ||
1838 | 70 | |||
1839 | 71 | __doc__ = get_schema_doc(schema) # Supplement python help() | ||
1840 | 72 | |||
1841 | 44 | 73 | ||
1842 | 45 | def handle(name, cfg, cloud, log, _args): | 74 | def handle(name, cfg, cloud, log, _args): |
1843 | 46 | 75 | ||
1844 | @@ -49,13 +78,14 @@ def handle(name, cfg, cloud, log, _args): | |||
1845 | 49 | " no 'bootcmd' key in configuration"), name) | 78 | " no 'bootcmd' key in configuration"), name) |
1846 | 50 | return | 79 | return |
1847 | 51 | 80 | ||
1849 | 52 | with util.ExtendedTemporaryFile(suffix=".sh") as tmpf: | 81 | validate_cloudconfig_schema(cfg, schema) |
1850 | 82 | with temp_utils.ExtendedTemporaryFile(suffix=".sh") as tmpf: | ||
1851 | 53 | try: | 83 | try: |
1852 | 54 | content = util.shellify(cfg["bootcmd"]) | 84 | content = util.shellify(cfg["bootcmd"]) |
1853 | 55 | tmpf.write(util.encode_text(content)) | 85 | tmpf.write(util.encode_text(content)) |
1854 | 56 | tmpf.flush() | 86 | tmpf.flush() |
1857 | 57 | except Exception: | 87 | except Exception as e: |
1858 | 58 | util.logexc(log, "Failed to shellify bootcmd") | 88 | util.logexc(log, "Failed to shellify bootcmd: %s", str(e)) |
1859 | 59 | raise | 89 | raise |
1860 | 60 | 90 | ||
1861 | 61 | try: | 91 | try: |
1862 | diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py | |||
1863 | index 02c70b1..46abedd 100644 | |||
1864 | --- a/cloudinit/config/cc_chef.py | |||
1865 | +++ b/cloudinit/config/cc_chef.py | |||
1866 | @@ -58,6 +58,9 @@ file). | |||
1867 | 58 | log_level: | 58 | log_level: |
1868 | 59 | log_location: | 59 | log_location: |
1869 | 60 | node_name: | 60 | node_name: |
1870 | 61 | omnibus_url: | ||
1871 | 62 | omnibus_url_retries: | ||
1872 | 63 | omnibus_version: | ||
1873 | 61 | pid_file: | 64 | pid_file: |
1874 | 62 | server_url: | 65 | server_url: |
1875 | 63 | show_time: | 66 | show_time: |
1876 | @@ -279,6 +282,31 @@ def run_chef(chef_cfg, log): | |||
1877 | 279 | util.subp(cmd, capture=False) | 282 | util.subp(cmd, capture=False) |
1878 | 280 | 283 | ||
1879 | 281 | 284 | ||
1880 | 285 | def install_chef_from_omnibus(url=None, retries=None, omnibus_version=None): | ||
1881 | 286 | """Install an omnibus unified package from url. | ||
1882 | 287 | |||
1883 | 288 | @param url: URL where blob of chef content may be downloaded. Defaults to | ||
1884 | 289 | OMNIBUS_URL. | ||
1885 | 290 | @param retries: Number of retries to perform when attempting to read url. | ||
1886 | 291 | Defaults to OMNIBUS_URL_RETRIES | ||
1887 | 292 | @param omnibus_version: Optional version string to require for omnibus | ||
1888 | 293 | install. | ||
1889 | 294 | """ | ||
1890 | 295 | if url is None: | ||
1891 | 296 | url = OMNIBUS_URL | ||
1892 | 297 | if retries is None: | ||
1893 | 298 | retries = OMNIBUS_URL_RETRIES | ||
1894 | 299 | |||
1895 | 300 | if omnibus_version is None: | ||
1896 | 301 | args = [] | ||
1897 | 302 | else: | ||
1898 | 303 | args = ['-v', omnibus_version] | ||
1899 | 304 | content = url_helper.readurl(url=url, retries=retries).contents | ||
1900 | 305 | return util.subp_blob_in_tempfile( | ||
1901 | 306 | blob=content, args=args, | ||
1902 | 307 | basename='chef-omnibus-install', capture=False) | ||
1903 | 308 | |||
1904 | 309 | |||
1905 | 282 | def install_chef(cloud, chef_cfg, log): | 310 | def install_chef(cloud, chef_cfg, log): |
1906 | 283 | # If chef is not installed, we install chef based on 'install_type' | 311 | # If chef is not installed, we install chef based on 'install_type' |
1907 | 284 | install_type = util.get_cfg_option_str(chef_cfg, 'install_type', | 312 | install_type = util.get_cfg_option_str(chef_cfg, 'install_type', |
1908 | @@ -297,17 +325,11 @@ def install_chef(cloud, chef_cfg, log): | |||
1909 | 297 | # This will install and run the chef-client from packages | 325 | # This will install and run the chef-client from packages |
1910 | 298 | cloud.distro.install_packages(('chef',)) | 326 | cloud.distro.install_packages(('chef',)) |
1911 | 299 | elif install_type == 'omnibus': | 327 | elif install_type == 'omnibus': |
1923 | 300 | # This will install as a omnibus unified package | 328 | omnibus_version = util.get_cfg_option_str(chef_cfg, "omnibus_version") |
1924 | 301 | url = util.get_cfg_option_str(chef_cfg, "omnibus_url", OMNIBUS_URL) | 329 | install_chef_from_omnibus( |
1925 | 302 | retries = max(0, util.get_cfg_option_int(chef_cfg, | 330 | url=util.get_cfg_option_str(chef_cfg, "omnibus_url"), |
1926 | 303 | "omnibus_url_retries", | 331 | retries=util.get_cfg_option_int(chef_cfg, "omnibus_url_retries"), |
1927 | 304 | default=OMNIBUS_URL_RETRIES)) | 332 | omnibus_version=omnibus_version) |
1917 | 305 | content = url_helper.readurl(url=url, retries=retries).contents | ||
1918 | 306 | with util.tempdir() as tmpd: | ||
1919 | 307 | # Use tmpdir over tmpfile to avoid 'text file busy' on execute | ||
1920 | 308 | tmpf = "%s/chef-omnibus-install" % tmpd | ||
1921 | 309 | util.write_file(tmpf, content, mode=0o700) | ||
1922 | 310 | util.subp([tmpf], capture=False) | ||
1928 | 311 | else: | 333 | else: |
1929 | 312 | log.warn("Unknown chef install type '%s'", install_type) | 334 | log.warn("Unknown chef install type '%s'", install_type) |
1930 | 313 | run = False | 335 | run = False |
1931 | diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py | |||
1932 | index 86b7138..8f9f1ab 100644 | |||
1933 | --- a/cloudinit/config/cc_landscape.py | |||
1934 | +++ b/cloudinit/config/cc_landscape.py | |||
1935 | @@ -57,7 +57,7 @@ The following default client config is provided, but can be overridden:: | |||
1936 | 57 | 57 | ||
1937 | 58 | import os | 58 | import os |
1938 | 59 | 59 | ||
1940 | 60 | from six import StringIO | 60 | from six import BytesIO |
1941 | 61 | 61 | ||
1942 | 62 | from configobj import ConfigObj | 62 | from configobj import ConfigObj |
1943 | 63 | 63 | ||
1944 | @@ -109,7 +109,7 @@ def handle(_name, cfg, cloud, log, _args): | |||
1945 | 109 | ls_cloudcfg, | 109 | ls_cloudcfg, |
1946 | 110 | ] | 110 | ] |
1947 | 111 | merged = merge_together(merge_data) | 111 | merged = merge_together(merge_data) |
1949 | 112 | contents = StringIO() | 112 | contents = BytesIO() |
1950 | 113 | merged.write(contents) | 113 | merged.write(contents) |
1951 | 114 | 114 | ||
1952 | 115 | util.ensure_dir(os.path.dirname(LSC_CLIENT_CFG_FILE)) | 115 | util.ensure_dir(os.path.dirname(LSC_CLIENT_CFG_FILE)) |
1953 | diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py | |||
1954 | index 31ed64e..15ae1ec 100644 | |||
1955 | --- a/cloudinit/config/cc_ntp.py | |||
1956 | +++ b/cloudinit/config/cc_ntp.py | |||
1957 | @@ -4,39 +4,10 @@ | |||
1958 | 4 | # | 4 | # |
1959 | 5 | # This file is part of cloud-init. See LICENSE file for license information. | 5 | # This file is part of cloud-init. See LICENSE file for license information. |
1960 | 6 | 6 | ||
1992 | 7 | """ | 7 | """NTP: enable and configure ntp""" |
1962 | 8 | NTP | ||
1963 | 9 | --- | ||
1964 | 10 | **Summary:** enable and configure ntp | ||
1965 | 11 | |||
1966 | 12 | Handle ntp configuration. If ntp is not installed on the system and ntp | ||
1967 | 13 | configuration is specified, ntp will be installed. If there is a default ntp | ||
1968 | 14 | config file in the image or one is present in the distro's ntp package, it will | ||
1969 | 15 | be copied to ``/etc/ntp.conf.dist`` before any changes are made. A list of ntp | ||
1970 | 16 | pools and ntp servers can be provided under the ``ntp`` config key. If no ntp | ||
1971 | 17 | servers or pools are provided, 4 pools will be used in the format | ||
1972 | 18 | ``{0-3}.{distro}.pool.ntp.org``. | ||
1973 | 19 | |||
1974 | 20 | **Internal name:** ``cc_ntp`` | ||
1975 | 21 | |||
1976 | 22 | **Module frequency:** per instance | ||
1977 | 23 | |||
1978 | 24 | **Supported distros:** centos, debian, fedora, opensuse, ubuntu | ||
1979 | 25 | |||
1980 | 26 | **Config keys**:: | ||
1981 | 27 | |||
1982 | 28 | ntp: | ||
1983 | 29 | pools: | ||
1984 | 30 | - 0.company.pool.ntp.org | ||
1985 | 31 | - 1.company.pool.ntp.org | ||
1986 | 32 | - ntp.myorg.org | ||
1987 | 33 | servers: | ||
1988 | 34 | - my.ntp.server.local | ||
1989 | 35 | - ntp.ubuntu.com | ||
1990 | 36 | - 192.168.23.2 | ||
1991 | 37 | """ | ||
1993 | 38 | 8 | ||
1995 | 39 | from cloudinit.config.schema import validate_cloudconfig_schema | 9 | from cloudinit.config.schema import ( |
1996 | 10 | get_schema_doc, validate_cloudconfig_schema) | ||
1997 | 40 | from cloudinit import log as logging | 11 | from cloudinit import log as logging |
1998 | 41 | from cloudinit.settings import PER_INSTANCE | 12 | from cloudinit.settings import PER_INSTANCE |
1999 | 42 | from cloudinit import templater | 13 | from cloudinit import templater |
2000 | @@ -50,6 +21,7 @@ LOG = logging.getLogger(__name__) | |||
2001 | 50 | 21 | ||
2002 | 51 | frequency = PER_INSTANCE | 22 | frequency = PER_INSTANCE |
2003 | 52 | NTP_CONF = '/etc/ntp.conf' | 23 | NTP_CONF = '/etc/ntp.conf' |
2004 | 24 | TIMESYNCD_CONF = '/etc/systemd/timesyncd.conf.d/cloud-init.conf' | ||
2005 | 53 | NR_POOL_SERVERS = 4 | 25 | NR_POOL_SERVERS = 4 |
2006 | 54 | distros = ['centos', 'debian', 'fedora', 'opensuse', 'ubuntu'] | 26 | distros = ['centos', 'debian', 'fedora', 'opensuse', 'ubuntu'] |
2007 | 55 | 27 | ||
2008 | @@ -75,10 +47,13 @@ schema = { | |||
2009 | 75 | ``{0-3}.{distro}.pool.ntp.org``."""), | 47 | ``{0-3}.{distro}.pool.ntp.org``."""), |
2010 | 76 | 'distros': distros, | 48 | 'distros': distros, |
2011 | 77 | 'examples': [ | 49 | 'examples': [ |
2016 | 78 | {'ntp': {'pools': ['0.company.pool.ntp.org', '1.company.pool.ntp.org', | 50 | dedent("""\ |
2017 | 79 | 'ntp.myorg.org'], | 51 | ntp: |
2018 | 80 | 'servers': ['my.ntp.server.local', 'ntp.ubuntu.com', | 52 | pools: [0.int.pool.ntp.org, 1.int.pool.ntp.org, ntp.myorg.org] |
2019 | 81 | '192.168.23.2']}}], | 53 | servers: |
2020 | 54 | - ntp.server.local | ||
2021 | 55 | - ntp.ubuntu.com | ||
2022 | 56 | - 192.168.23.2""")], | ||
2023 | 82 | 'frequency': PER_INSTANCE, | 57 | 'frequency': PER_INSTANCE, |
2024 | 83 | 'type': 'object', | 58 | 'type': 'object', |
2025 | 84 | 'properties': { | 59 | 'properties': { |
2026 | @@ -116,6 +91,8 @@ schema = { | |||
2027 | 116 | } | 91 | } |
2028 | 117 | } | 92 | } |
2029 | 118 | 93 | ||
2030 | 94 | __doc__ = get_schema_doc(schema) # Supplement python help() | ||
2031 | 95 | |||
2032 | 119 | 96 | ||
2033 | 120 | def handle(name, cfg, cloud, log, _args): | 97 | def handle(name, cfg, cloud, log, _args): |
2034 | 121 | """Enable and configure ntp.""" | 98 | """Enable and configure ntp.""" |
2035 | @@ -132,20 +109,50 @@ def handle(name, cfg, cloud, log, _args): | |||
2036 | 132 | " is a %s %instead"), type_utils.obj_name(ntp_cfg)) | 109 | " is a %s %instead"), type_utils.obj_name(ntp_cfg)) |
2037 | 133 | 110 | ||
2038 | 134 | validate_cloudconfig_schema(cfg, schema) | 111 | validate_cloudconfig_schema(cfg, schema) |
2039 | 112 | if ntp_installable(): | ||
2040 | 113 | service_name = 'ntp' | ||
2041 | 114 | confpath = NTP_CONF | ||
2042 | 115 | template_name = None | ||
2043 | 116 | packages = ['ntp'] | ||
2044 | 117 | check_exe = 'ntpd' | ||
2045 | 118 | else: | ||
2046 | 119 | service_name = 'systemd-timesyncd' | ||
2047 | 120 | confpath = TIMESYNCD_CONF | ||
2048 | 121 | template_name = 'timesyncd.conf' | ||
2049 | 122 | packages = [] | ||
2050 | 123 | check_exe = '/lib/systemd/systemd-timesyncd' | ||
2051 | 124 | |||
2052 | 135 | rename_ntp_conf() | 125 | rename_ntp_conf() |
2053 | 136 | # ensure when ntp is installed it has a configuration file | 126 | # ensure when ntp is installed it has a configuration file |
2054 | 137 | # to use instead of starting up with packaged defaults | 127 | # to use instead of starting up with packaged defaults |
2059 | 138 | write_ntp_config_template(ntp_cfg, cloud) | 128 | write_ntp_config_template(ntp_cfg, cloud, confpath, template=template_name) |
2060 | 139 | install_ntp(cloud.distro.install_packages, packages=['ntp'], | 129 | install_ntp(cloud.distro.install_packages, packages=packages, |
2061 | 140 | check_exe="ntpd") | 130 | check_exe=check_exe) |
2062 | 141 | # if ntp was already installed, it may not have started | 131 | |
2063 | 142 | try: | 132 | try: |
2065 | 143 | reload_ntp(systemd=cloud.distro.uses_systemd()) | 133 | reload_ntp(service_name, systemd=cloud.distro.uses_systemd()) |
2066 | 144 | except util.ProcessExecutionError as e: | 134 | except util.ProcessExecutionError as e: |
2067 | 145 | LOG.exception("Failed to reload/start ntp service: %s", e) | 135 | LOG.exception("Failed to reload/start ntp service: %s", e) |
2068 | 146 | raise | 136 | raise |
2069 | 147 | 137 | ||
2070 | 148 | 138 | ||
2071 | 139 | def ntp_installable(): | ||
2072 | 140 | """Check if we can install ntp package | ||
2073 | 141 | |||
2074 | 142 | Ubuntu-Core systems do not have an ntp package available, so | ||
2075 | 143 | we always return False. Other systems require package managers to install | ||
2076 | 144 | the ntp package If we fail to find one of the package managers, then we | ||
2077 | 145 | cannot install ntp. | ||
2078 | 146 | """ | ||
2079 | 147 | if util.system_is_snappy(): | ||
2080 | 148 | return False | ||
2081 | 149 | |||
2082 | 150 | if any(map(util.which, ['apt-get', 'dnf', 'yum', 'zypper'])): | ||
2083 | 151 | return True | ||
2084 | 152 | |||
2085 | 153 | return False | ||
2086 | 154 | |||
2087 | 155 | |||
2088 | 149 | def install_ntp(install_func, packages=None, check_exe="ntpd"): | 156 | def install_ntp(install_func, packages=None, check_exe="ntpd"): |
2089 | 150 | if util.which(check_exe): | 157 | if util.which(check_exe): |
2090 | 151 | return | 158 | return |
2091 | @@ -156,7 +163,7 @@ def install_ntp(install_func, packages=None, check_exe="ntpd"): | |||
2092 | 156 | 163 | ||
2093 | 157 | 164 | ||
2094 | 158 | def rename_ntp_conf(config=None): | 165 | def rename_ntp_conf(config=None): |
2096 | 159 | """Rename any existing ntp.conf file and render from template""" | 166 | """Rename any existing ntp.conf file""" |
2097 | 160 | if config is None: # For testing | 167 | if config is None: # For testing |
2098 | 161 | config = NTP_CONF | 168 | config = NTP_CONF |
2099 | 162 | if os.path.exists(config): | 169 | if os.path.exists(config): |
2100 | @@ -171,7 +178,7 @@ def generate_server_names(distro): | |||
2101 | 171 | return names | 178 | return names |
2102 | 172 | 179 | ||
2103 | 173 | 180 | ||
2105 | 174 | def write_ntp_config_template(cfg, cloud): | 181 | def write_ntp_config_template(cfg, cloud, path, template=None): |
2106 | 175 | servers = cfg.get('servers', []) | 182 | servers = cfg.get('servers', []) |
2107 | 176 | pools = cfg.get('pools', []) | 183 | pools = cfg.get('pools', []) |
2108 | 177 | 184 | ||
2109 | @@ -185,19 +192,20 @@ def write_ntp_config_template(cfg, cloud): | |||
2110 | 185 | 'pools': pools, | 192 | 'pools': pools, |
2111 | 186 | } | 193 | } |
2112 | 187 | 194 | ||
2115 | 188 | template_fn = cloud.get_template_filename('ntp.conf.%s' % | 195 | if template is None: |
2116 | 189 | (cloud.distro.name)) | 196 | template = 'ntp.conf.%s' % cloud.distro.name |
2117 | 197 | |||
2118 | 198 | template_fn = cloud.get_template_filename(template) | ||
2119 | 190 | if not template_fn: | 199 | if not template_fn: |
2120 | 191 | template_fn = cloud.get_template_filename('ntp.conf') | 200 | template_fn = cloud.get_template_filename('ntp.conf') |
2121 | 192 | if not template_fn: | 201 | if not template_fn: |
2122 | 193 | raise RuntimeError(("No template found, " | 202 | raise RuntimeError(("No template found, " |
2124 | 194 | "not rendering %s"), NTP_CONF) | 203 | "not rendering %s"), path) |
2125 | 195 | 204 | ||
2127 | 196 | templater.render_to_file(template_fn, NTP_CONF, params) | 205 | templater.render_to_file(template_fn, path, params) |
2128 | 197 | 206 | ||
2129 | 198 | 207 | ||
2132 | 199 | def reload_ntp(systemd=False): | 208 | def reload_ntp(service, systemd=False): |
2131 | 200 | service = 'ntp' | ||
2133 | 201 | if systemd: | 209 | if systemd: |
2134 | 202 | cmd = ['systemctl', 'reload-or-restart', service] | 210 | cmd = ['systemctl', 'reload-or-restart', service] |
2135 | 203 | else: | 211 | else: |
2136 | diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py | |||
2137 | index dc11561..28b1d56 100644 | |||
2138 | --- a/cloudinit/config/cc_puppet.py | |||
2139 | +++ b/cloudinit/config/cc_puppet.py | |||
2140 | @@ -15,21 +15,23 @@ This module handles puppet installation and configuration. If the ``puppet`` | |||
2141 | 15 | key does not exist in global configuration, no action will be taken. If a | 15 | key does not exist in global configuration, no action will be taken. If a |
2142 | 16 | config entry for ``puppet`` is present, then by default the latest version of | 16 | config entry for ``puppet`` is present, then by default the latest version of |
2143 | 17 | puppet will be installed. If ``install`` is set to ``false``, puppet will not | 17 | puppet will be installed. If ``install`` is set to ``false``, puppet will not |
2145 | 18 | be installed. However, this may result in an error if puppet is not already | 18 | be installed. However, this will result in an error if puppet is not already |
2146 | 19 | present on the system. The version of puppet to be installed can be specified | 19 | present on the system. The version of puppet to be installed can be specified |
2147 | 20 | under ``version``, and defaults to ``none``, which selects the latest version | 20 | under ``version``, and defaults to ``none``, which selects the latest version |
2148 | 21 | in the repos. If the ``puppet`` config key exists in the config archive, this | 21 | in the repos. If the ``puppet`` config key exists in the config archive, this |
2149 | 22 | module will attempt to start puppet even if no installation was performed. | 22 | module will attempt to start puppet even if no installation was performed. |
2150 | 23 | 23 | ||
2154 | 24 | Puppet configuration can be specified under the ``conf`` key. The configuration | 24 | Puppet configuration can be specified under the ``conf`` key. The |
2155 | 25 | is specified as a dictionary which is converted into ``<key>=<value>`` format | 25 | configuration is specified as a dictionary containing high-level ``<section>`` |
2156 | 26 | and appended to ``puppet.conf`` under the ``[puppetd]`` section. The | 26 | keys and lists of ``<key>=<value>`` pairs within each section. Each section |
2157 | 27 | name and ``<key>=<value>`` pair is written directly to ``puppet.conf``. As | ||
2158 | 28 | such, section names should be one of: ``main``, ``master``, ``agent`` or | ||
2159 | 29 | ``user`` and keys should be valid puppet configuration options. The | ||
2160 | 27 | ``certname`` key supports string substitutions for ``%i`` and ``%f``, | 30 | ``certname`` key supports string substitutions for ``%i`` and ``%f``, |
2161 | 28 | corresponding to the instance id and fqdn of the machine respectively. | 31 | corresponding to the instance id and fqdn of the machine respectively. |
2166 | 29 | If ``ca_cert`` is present under ``conf``, it will not be written to | 32 | If ``ca_cert`` is present, it will not be written to ``puppet.conf``, but |
2167 | 30 | ``puppet.conf``, but instead will be used as the puppetmaster certificate. | 33 | instead will be used as the puppetmaster certificate. It should be specified |
2168 | 31 | It should be specified in pem format as a multi-line string (using the ``|`` | 34 | in pem format as a multi-line string (using the ``|`` yaml notation). |
2165 | 32 | yaml notation). | ||
2169 | 33 | 35 | ||
2170 | 34 | **Internal name:** ``cc_puppet`` | 36 | **Internal name:** ``cc_puppet`` |
2171 | 35 | 37 | ||
2172 | @@ -43,12 +45,13 @@ yaml notation). | |||
2173 | 43 | install: <true/false> | 45 | install: <true/false> |
2174 | 44 | version: <version> | 46 | version: <version> |
2175 | 45 | conf: | 47 | conf: |
2182 | 46 | server: "puppetmaster.example.org" | 48 | agent: |
2183 | 47 | certname: "%i.%f" | 49 | server: "puppetmaster.example.org" |
2184 | 48 | ca_cert: | | 50 | certname: "%i.%f" |
2185 | 49 | -------BEGIN CERTIFICATE------- | 51 | ca_cert: | |
2186 | 50 | <cert data> | 52 | -------BEGIN CERTIFICATE------- |
2187 | 51 | -------END CERTIFICATE------- | 53 | <cert data> |
2188 | 54 | -------END CERTIFICATE------- | ||
2189 | 52 | """ | 55 | """ |
2190 | 53 | 56 | ||
2191 | 54 | from six import StringIO | 57 | from six import StringIO |
2192 | @@ -127,7 +130,7 @@ def handle(name, cfg, cloud, log, _args): | |||
2193 | 127 | util.write_file(PUPPET_SSL_CERT_PATH, cfg) | 130 | util.write_file(PUPPET_SSL_CERT_PATH, cfg) |
2194 | 128 | util.chownbyname(PUPPET_SSL_CERT_PATH, 'puppet', 'root') | 131 | util.chownbyname(PUPPET_SSL_CERT_PATH, 'puppet', 'root') |
2195 | 129 | else: | 132 | else: |
2197 | 130 | # Iterate throug the config items, we'll use ConfigParser.set | 133 | # Iterate through the config items, we'll use ConfigParser.set |
2198 | 131 | # to overwrite or create new items as needed | 134 | # to overwrite or create new items as needed |
2199 | 132 | for (o, v) in cfg.items(): | 135 | for (o, v) in cfg.items(): |
2200 | 133 | if o == 'certname': | 136 | if o == 'certname': |
2201 | diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py | |||
2202 | index ceee952..f774baa 100644 | |||
2203 | --- a/cloudinit/config/cc_resizefs.py | |||
2204 | +++ b/cloudinit/config/cc_resizefs.py | |||
2205 | @@ -6,31 +6,8 @@ | |||
2206 | 6 | # | 6 | # |
2207 | 7 | # This file is part of cloud-init. See LICENSE file for license information. | 7 | # This file is part of cloud-init. See LICENSE file for license information. |
2208 | 8 | 8 | ||
2213 | 9 | """ | 9 | """Resizefs: cloud-config module which resizes the filesystem""" |
2210 | 10 | Resizefs | ||
2211 | 11 | -------- | ||
2212 | 12 | **Summary:** resize filesystem | ||
2214 | 13 | 10 | ||
2215 | 14 | Resize a filesystem to use all avaliable space on partition. This module is | ||
2216 | 15 | useful along with ``cc_growpart`` and will ensure that if the root partition | ||
2217 | 16 | has been resized the root filesystem will be resized along with it. By default, | ||
2218 | 17 | ``cc_resizefs`` will resize the root partition and will block the boot process | ||
2219 | 18 | while the resize command is running. Optionally, the resize operation can be | ||
2220 | 19 | performed in the background while cloud-init continues running modules. This | ||
2221 | 20 | can be enabled by setting ``resize_rootfs`` to ``true``. This module can be | ||
2222 | 21 | disabled altogether by setting ``resize_rootfs`` to ``false``. | ||
2223 | 22 | |||
2224 | 23 | **Internal name:** ``cc_resizefs`` | ||
2225 | 24 | |||
2226 | 25 | **Module frequency:** per always | ||
2227 | 26 | |||
2228 | 27 | **Supported distros:** all | ||
2229 | 28 | |||
2230 | 29 | **Config keys**:: | ||
2231 | 30 | |||
2232 | 31 | resize_rootfs: <true/false/"noblock"> | ||
2233 | 32 | resize_rootfs_tmp: <directory> | ||
2234 | 33 | """ | ||
2235 | 34 | 11 | ||
2236 | 35 | import errno | 12 | import errno |
2237 | 36 | import getopt | 13 | import getopt |
2238 | @@ -38,11 +15,47 @@ import os | |||
2239 | 38 | import re | 15 | import re |
2240 | 39 | import shlex | 16 | import shlex |
2241 | 40 | import stat | 17 | import stat |
2242 | 18 | from textwrap import dedent | ||
2243 | 41 | 19 | ||
2244 | 20 | from cloudinit.config.schema import ( | ||
2245 | 21 | get_schema_doc, validate_cloudconfig_schema) | ||
2246 | 42 | from cloudinit.settings import PER_ALWAYS | 22 | from cloudinit.settings import PER_ALWAYS |
2247 | 43 | from cloudinit import util | 23 | from cloudinit import util |
2248 | 44 | 24 | ||
2249 | 25 | NOBLOCK = "noblock" | ||
2250 | 26 | |||
2251 | 45 | frequency = PER_ALWAYS | 27 | frequency = PER_ALWAYS |
2252 | 28 | distros = ['all'] | ||
2253 | 29 | |||
2254 | 30 | schema = { | ||
2255 | 31 | 'id': 'cc_resizefs', | ||
2256 | 32 | 'name': 'Resizefs', | ||
2257 | 33 | 'title': 'Resize filesystem', | ||
2258 | 34 | 'description': dedent("""\ | ||
2259 | 35 | Resize a filesystem to use all available space on partition. This | ||
2260 | 36 | module is useful along with ``cc_growpart`` and will ensure that if the | ||
2261 | 37 | root partition has been resized the root filesystem will be resized | ||
2262 | 38 | along with it. By default, ``cc_resizefs`` will resize the root | ||
2263 | 39 | partition and will block the boot process while the resize command is | ||
2264 | 40 | running. Optionally, the resize operation can be performed in the | ||
2265 | 41 | background while cloud-init continues running modules. This can be | ||
2266 | 42 | enabled by setting ``resize_rootfs`` to ``true``. This module can be | ||
2267 | 43 | disabled altogether by setting ``resize_rootfs`` to ``false``."""), | ||
2268 | 44 | 'distros': distros, | ||
2269 | 45 | 'examples': [ | ||
2270 | 46 | 'resize_rootfs: false # disable root filesystem resize operation'], | ||
2271 | 47 | 'frequency': PER_ALWAYS, | ||
2272 | 48 | 'type': 'object', | ||
2273 | 49 | 'properties': { | ||
2274 | 50 | 'resize_rootfs': { | ||
2275 | 51 | 'enum': [True, False, NOBLOCK], | ||
2276 | 52 | 'description': dedent("""\ | ||
2277 | 53 | Whether to resize the root partition. Default: 'true'""") | ||
2278 | 54 | } | ||
2279 | 55 | } | ||
2280 | 56 | } | ||
2281 | 57 | |||
2282 | 58 | __doc__ = get_schema_doc(schema) # Supplement python help() | ||
2283 | 46 | 59 | ||
2284 | 47 | 60 | ||
2285 | 48 | def _resize_btrfs(mount_point, devpth): | 61 | def _resize_btrfs(mount_point, devpth): |
2286 | @@ -54,7 +67,7 @@ def _resize_ext(mount_point, devpth): | |||
2287 | 54 | 67 | ||
2288 | 55 | 68 | ||
2289 | 56 | def _resize_xfs(mount_point, devpth): | 69 | def _resize_xfs(mount_point, devpth): |
2291 | 57 | return ('xfs_growfs', devpth) | 70 | return ('xfs_growfs', mount_point) |
2292 | 58 | 71 | ||
2293 | 59 | 72 | ||
2294 | 60 | def _resize_ufs(mount_point, devpth): | 73 | def _resize_ufs(mount_point, devpth): |
2295 | @@ -131,8 +144,6 @@ RESIZE_FS_PRECHECK_CMDS = { | |||
2296 | 131 | 'ufs': _can_skip_resize_ufs | 144 | 'ufs': _can_skip_resize_ufs |
2297 | 132 | } | 145 | } |
2298 | 133 | 146 | ||
2299 | 134 | NOBLOCK = "noblock" | ||
2300 | 135 | |||
2301 | 136 | 147 | ||
2302 | 137 | def rootdev_from_cmdline(cmdline): | 148 | def rootdev_from_cmdline(cmdline): |
2303 | 138 | found = None | 149 | found = None |
2304 | @@ -161,71 +172,77 @@ def can_skip_resize(fs_type, resize_what, devpth): | |||
2305 | 161 | return False | 172 | return False |
2306 | 162 | 173 | ||
2307 | 163 | 174 | ||
2313 | 164 | def handle(name, cfg, _cloud, log, args): | 175 | def is_device_path_writable_block(devpath, info, log): |
2314 | 165 | if len(args) != 0: | 176 | """Return True if devpath is a writable block device. |
2310 | 166 | resize_root = args[0] | ||
2311 | 167 | else: | ||
2312 | 168 | resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True) | ||
2315 | 169 | 177 | ||
2335 | 170 | if not util.translate_bool(resize_root, addons=[NOBLOCK]): | 178 | @param devpath: Path to the root device we want to resize. |
2336 | 171 | log.debug("Skipping module named %s, resizing disabled", name) | 179 | @param info: String representing information about the requested device. |
2337 | 172 | return | 180 | @param log: Logger to which logs will be added upon error. |
2319 | 173 | |||
2320 | 174 | # TODO(harlowja) is the directory ok to be used?? | ||
2321 | 175 | resize_root_d = util.get_cfg_option_str(cfg, "resize_rootfs_tmp", "/run") | ||
2322 | 176 | util.ensure_dir(resize_root_d) | ||
2323 | 177 | |||
2324 | 178 | # TODO(harlowja): allow what is to be resized to be configurable?? | ||
2325 | 179 | resize_what = "/" | ||
2326 | 180 | result = util.get_mount_info(resize_what, log) | ||
2327 | 181 | if not result: | ||
2328 | 182 | log.warn("Could not determine filesystem type of %s", resize_what) | ||
2329 | 183 | return | ||
2330 | 184 | |||
2331 | 185 | (devpth, fs_type, mount_point) = result | ||
2332 | 186 | |||
2333 | 187 | info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what) | ||
2334 | 188 | log.debug("resize_info: %s" % info) | ||
2338 | 189 | 181 | ||
2339 | 182 | @returns Boolean True if block device is writable | ||
2340 | 183 | """ | ||
2341 | 190 | container = util.is_container() | 184 | container = util.is_container() |
2342 | 191 | 185 | ||
2343 | 192 | # Ensure the path is a block device. | 186 | # Ensure the path is a block device. |
2345 | 193 | if (devpth == "/dev/root" and not os.path.exists(devpth) and | 187 | if (devpath == "/dev/root" and not os.path.exists(devpath) and |
2346 | 194 | not container): | 188 | not container): |
2349 | 195 | devpth = util.rootdev_from_cmdline(util.get_cmdline()) | 189 | devpath = util.rootdev_from_cmdline(util.get_cmdline()) |
2350 | 196 | if devpth is None: | 190 | if devpath is None: |
2351 | 197 | log.warn("Unable to find device '/dev/root'") | 191 | log.warn("Unable to find device '/dev/root'") |
2354 | 198 | return | 192 | return False |
2355 | 199 | log.debug("Converted /dev/root to '%s' per kernel cmdline", devpth) | 193 | log.debug("Converted /dev/root to '%s' per kernel cmdline", devpath) |
2356 | 194 | |||
2357 | 195 | if devpath == 'overlayroot': | ||
2358 | 196 | log.debug("Not attempting to resize devpath '%s': %s", devpath, info) | ||
2359 | 197 | return False | ||
2360 | 200 | 198 | ||
2361 | 201 | try: | 199 | try: |
2363 | 202 | statret = os.stat(devpth) | 200 | statret = os.stat(devpath) |
2364 | 203 | except OSError as exc: | 201 | except OSError as exc: |
2365 | 204 | if container and exc.errno == errno.ENOENT: | 202 | if container and exc.errno == errno.ENOENT: |
2366 | 205 | log.debug("Device '%s' did not exist in container. " | 203 | log.debug("Device '%s' did not exist in container. " |
2368 | 206 | "cannot resize: %s", devpth, info) | 204 | "cannot resize: %s", devpath, info) |
2369 | 207 | elif exc.errno == errno.ENOENT: | 205 | elif exc.errno == errno.ENOENT: |
2370 | 208 | log.warn("Device '%s' did not exist. cannot resize: %s", | 206 | log.warn("Device '%s' did not exist. cannot resize: %s", |
2372 | 209 | devpth, info) | 207 | devpath, info) |
2373 | 210 | else: | 208 | else: |
2374 | 211 | raise exc | 209 | raise exc |
2384 | 212 | return | 210 | return False |
2376 | 213 | |||
2377 | 214 | if not os.access(devpth, os.W_OK): | ||
2378 | 215 | if container: | ||
2379 | 216 | log.debug("'%s' not writable in container. cannot resize: %s", | ||
2380 | 217 | devpth, info) | ||
2381 | 218 | else: | ||
2382 | 219 | log.warn("'%s' not writable. cannot resize: %s", devpth, info) | ||
2383 | 220 | return | ||
2385 | 221 | 211 | ||
2386 | 222 | if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode): | 212 | if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode): |
2387 | 223 | if container: | 213 | if container: |
2388 | 224 | log.debug("device '%s' not a block device in container." | 214 | log.debug("device '%s' not a block device in container." |
2390 | 225 | " cannot resize: %s" % (devpth, info)) | 215 | " cannot resize: %s" % (devpath, info)) |
2391 | 226 | else: | 216 | else: |
2392 | 227 | log.warn("device '%s' not a block device. cannot resize: %s" % | 217 | log.warn("device '%s' not a block device. cannot resize: %s" % |
2394 | 228 | (devpth, info)) | 218 | (devpath, info)) |
2395 | 219 | return False | ||
2396 | 220 | return True | ||
2397 | 221 | |||
2398 | 222 | |||
2399 | 223 | def handle(name, cfg, _cloud, log, args): | ||
2400 | 224 | if len(args) != 0: | ||
2401 | 225 | resize_root = args[0] | ||
2402 | 226 | else: | ||
2403 | 227 | resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True) | ||
2404 | 228 | validate_cloudconfig_schema(cfg, schema) | ||
2405 | 229 | if not util.translate_bool(resize_root, addons=[NOBLOCK]): | ||
2406 | 230 | log.debug("Skipping module named %s, resizing disabled", name) | ||
2407 | 231 | return | ||
2408 | 232 | |||
2409 | 233 | # TODO(harlowja): allow what is to be resized to be configurable?? | ||
2410 | 234 | resize_what = "/" | ||
2411 | 235 | result = util.get_mount_info(resize_what, log) | ||
2412 | 236 | if not result: | ||
2413 | 237 | log.warn("Could not determine filesystem type of %s", resize_what) | ||
2414 | 238 | return | ||
2415 | 239 | |||
2416 | 240 | (devpth, fs_type, mount_point) = result | ||
2417 | 241 | |||
2418 | 242 | info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what) | ||
2419 | 243 | log.debug("resize_info: %s" % info) | ||
2420 | 244 | |||
2421 | 245 | if not is_device_path_writable_block(devpth, info, log): | ||
2422 | 229 | return | 246 | return |
2423 | 230 | 247 | ||
2424 | 231 | resizer = None | 248 | resizer = None |
2425 | diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py | |||
2426 | index 2548d1f..9812562 100644 | |||
2427 | --- a/cloudinit/config/cc_resolv_conf.py | |||
2428 | +++ b/cloudinit/config/cc_resolv_conf.py | |||
2429 | @@ -55,7 +55,7 @@ LOG = logging.getLogger(__name__) | |||
2430 | 55 | 55 | ||
2431 | 56 | frequency = PER_INSTANCE | 56 | frequency = PER_INSTANCE |
2432 | 57 | 57 | ||
2434 | 58 | distros = ['fedora', 'rhel', 'sles'] | 58 | distros = ['fedora', 'opensuse', 'rhel', 'sles'] |
2435 | 59 | 59 | ||
2436 | 60 | 60 | ||
2437 | 61 | def generate_resolv_conf(template_fn, params, target_fname="/etc/resolv.conf"): | 61 | def generate_resolv_conf(template_fn, params, target_fname="/etc/resolv.conf"): |
2438 | diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py | |||
2439 | index dfa8cb3..449872f 100644 | |||
2440 | --- a/cloudinit/config/cc_runcmd.py | |||
2441 | +++ b/cloudinit/config/cc_runcmd.py | |||
2442 | @@ -6,41 +6,70 @@ | |||
2443 | 6 | # | 6 | # |
2444 | 7 | # This file is part of cloud-init. See LICENSE file for license information. | 7 | # This file is part of cloud-init. See LICENSE file for license information. |
2445 | 8 | 8 | ||
2450 | 9 | """ | 9 | """Runcmd: run arbitrary commands at rc.local with output to the console""" |
2447 | 10 | Runcmd | ||
2448 | 11 | ------ | ||
2449 | 12 | **Summary:** run commands | ||
2451 | 13 | 10 | ||
2463 | 14 | Run arbitrary commands at a rc.local like level with output to the console. | 11 | from cloudinit.config.schema import ( |
2464 | 15 | Each item can be either a list or a string. If the item is a list, it will be | 12 | get_schema_doc, validate_cloudconfig_schema) |
2465 | 16 | properly executed as if passed to ``execve()`` (with the first arg as the | 13 | from cloudinit.distros import ALL_DISTROS |
2466 | 17 | command). If the item is a string, it will be written to a file and interpreted | 14 | from cloudinit.settings import PER_INSTANCE |
2467 | 18 | using ``sh``. | 15 | from cloudinit import util |
2457 | 19 | |||
2458 | 20 | .. note:: | ||
2459 | 21 | all commands must be proper yaml, so you have to quote any characters yaml | ||
2460 | 22 | would eat (':' can be problematic) | ||
2461 | 23 | |||
2462 | 24 | **Internal name:** ``cc_runcmd`` | ||
2468 | 25 | 16 | ||
2470 | 26 | **Module frequency:** per instance | 17 | import os |
2471 | 18 | from textwrap import dedent | ||
2472 | 27 | 19 | ||
2473 | 28 | **Supported distros:** all | ||
2474 | 29 | 20 | ||
2476 | 30 | **Config keys**:: | 21 | # The schema definition for each cloud-config module is a strict contract for |
2477 | 22 | # describing supported configuration parameters for each cloud-config section. | ||
2478 | 23 | # It allows cloud-config to validate and alert users to invalid or ignored | ||
2479 | 24 | # configuration options before actually attempting to deploy with said | ||
2480 | 25 | # configuration. | ||
2481 | 31 | 26 | ||
2489 | 32 | runcmd: | 27 | distros = [ALL_DISTROS] |
2483 | 33 | - [ ls, -l, / ] | ||
2484 | 34 | - [ sh, -xc, "echo $(date) ': hello world!'" ] | ||
2485 | 35 | - [ sh, -c, echo "=========hello world'=========" ] | ||
2486 | 36 | - ls -l /root | ||
2487 | 37 | - [ wget, "http://example.org", -O, /tmp/index.html ] | ||
2488 | 38 | """ | ||
2490 | 39 | 28 | ||
2491 | 29 | schema = { | ||
2492 | 30 | 'id': 'cc_runcmd', | ||
2493 | 31 | 'name': 'Runcmd', | ||
2494 | 32 | 'title': 'Run arbitrary commands', | ||
2495 | 33 | 'description': dedent("""\ | ||
2496 | 34 | Run arbitrary commands at a rc.local like level with output to the | ||
2497 | 35 | console. Each item can be either a list or a string. If the item is a | ||
2498 | 36 | list, it will be properly executed as if passed to ``execve()`` (with | ||
2499 | 37 | the first arg as the command). If the item is a string, it will be | ||
2500 | 38 | written to a file and interpreted | ||
2501 | 39 | using ``sh``. | ||
2502 | 40 | 40 | ||
2504 | 41 | import os | 41 | .. note:: |
2505 | 42 | all commands must be proper yaml, so you have to quote any characters | ||
2506 | 43 | yaml would eat (':' can be problematic)"""), | ||
2507 | 44 | 'distros': distros, | ||
2508 | 45 | 'examples': [dedent("""\ | ||
2509 | 46 | runcmd: | ||
2510 | 47 | - [ ls, -l, / ] | ||
2511 | 48 | - [ sh, -xc, "echo $(date) ': hello world!'" ] | ||
2512 | 49 | - [ sh, -c, echo "=========hello world'=========" ] | ||
2513 | 50 | - ls -l /root | ||
2514 | 51 | - [ wget, "http://example.org", -O, /tmp/index.html ] | ||
2515 | 52 | """)], | ||
2516 | 53 | 'frequency': PER_INSTANCE, | ||
2517 | 54 | 'type': 'object', | ||
2518 | 55 | 'properties': { | ||
2519 | 56 | 'runcmd': { | ||
2520 | 57 | 'type': 'array', | ||
2521 | 58 | 'items': { | ||
2522 | 59 | 'oneOf': [ | ||
2523 | 60 | {'type': 'array', 'items': {'type': 'string'}}, | ||
2524 | 61 | {'type': 'string'}] | ||
2525 | 62 | }, | ||
2526 | 63 | 'additionalItems': False, # Reject items of non-string non-list | ||
2527 | 64 | 'additionalProperties': False, | ||
2528 | 65 | 'minItems': 1, | ||
2529 | 66 | 'required': [], | ||
2530 | 67 | 'uniqueItems': True | ||
2531 | 68 | } | ||
2532 | 69 | } | ||
2533 | 70 | } | ||
2534 | 42 | 71 | ||
2536 | 43 | from cloudinit import util | 72 | __doc__ = get_schema_doc(schema) # Supplement python help() |
2537 | 44 | 73 | ||
2538 | 45 | 74 | ||
2539 | 46 | def handle(name, cfg, cloud, log, _args): | 75 | def handle(name, cfg, cloud, log, _args): |
2540 | @@ -49,6 +78,7 @@ def handle(name, cfg, cloud, log, _args): | |||
2541 | 49 | " no 'runcmd' key in configuration"), name) | 78 | " no 'runcmd' key in configuration"), name) |
2542 | 50 | return | 79 | return |
2543 | 51 | 80 | ||
2544 | 81 | validate_cloudconfig_schema(cfg, schema) | ||
2545 | 52 | out_fn = os.path.join(cloud.get_ipath('scripts'), "runcmd") | 82 | out_fn = os.path.join(cloud.get_ipath('scripts'), "runcmd") |
2546 | 53 | cmd = cfg["runcmd"] | 83 | cmd = cfg["runcmd"] |
2547 | 54 | try: | 84 | try: |
2548 | diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py | |||
2549 | index a9682f1..eecb817 100644 | |||
2550 | --- a/cloudinit/config/cc_snappy.py | |||
2551 | +++ b/cloudinit/config/cc_snappy.py | |||
2552 | @@ -63,11 +63,11 @@ is ``auto``. Options are: | |||
2553 | 63 | 63 | ||
2554 | 64 | from cloudinit import log as logging | 64 | from cloudinit import log as logging |
2555 | 65 | from cloudinit.settings import PER_INSTANCE | 65 | from cloudinit.settings import PER_INSTANCE |
2556 | 66 | from cloudinit import temp_utils | ||
2557 | 66 | from cloudinit import util | 67 | from cloudinit import util |
2558 | 67 | 68 | ||
2559 | 68 | import glob | 69 | import glob |
2560 | 69 | import os | 70 | import os |
2561 | 70 | import tempfile | ||
2562 | 71 | 71 | ||
2563 | 72 | LOG = logging.getLogger(__name__) | 72 | LOG = logging.getLogger(__name__) |
2564 | 73 | 73 | ||
2565 | @@ -183,7 +183,7 @@ def render_snap_op(op, name, path=None, cfgfile=None, config=None): | |||
2566 | 183 | # config | 183 | # config |
2567 | 184 | # Note, however, we do not touch config files on disk. | 184 | # Note, however, we do not touch config files on disk. |
2568 | 185 | nested_cfg = {'config': {shortname: config}} | 185 | nested_cfg = {'config': {shortname: config}} |
2570 | 186 | (fd, cfg_tmpf) = tempfile.mkstemp() | 186 | (fd, cfg_tmpf) = temp_utils.mkstemp() |
2571 | 187 | os.write(fd, util.yaml_dumps(nested_cfg).encode()) | 187 | os.write(fd, util.yaml_dumps(nested_cfg).encode()) |
2572 | 188 | os.close(fd) | 188 | os.close(fd) |
2573 | 189 | cfgfile = cfg_tmpf | 189 | cfgfile = cfg_tmpf |
2574 | diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py | |||
2575 | index 0066e97..35d8c57 100755 | |||
2576 | --- a/cloudinit/config/cc_ssh_authkey_fingerprints.py | |||
2577 | +++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py | |||
2578 | @@ -28,7 +28,7 @@ the keys can be specified, but defaults to ``md5``. | |||
2579 | 28 | import base64 | 28 | import base64 |
2580 | 29 | import hashlib | 29 | import hashlib |
2581 | 30 | 30 | ||
2583 | 31 | from prettytable import PrettyTable | 31 | from cloudinit.simpletable import SimpleTable |
2584 | 32 | 32 | ||
2585 | 33 | from cloudinit.distros import ug_util | 33 | from cloudinit.distros import ug_util |
2586 | 34 | from cloudinit import ssh_util | 34 | from cloudinit import ssh_util |
2587 | @@ -74,7 +74,7 @@ def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5', | |||
2588 | 74 | return | 74 | return |
2589 | 75 | tbl_fields = ['Keytype', 'Fingerprint (%s)' % (hash_meth), 'Options', | 75 | tbl_fields = ['Keytype', 'Fingerprint (%s)' % (hash_meth), 'Options', |
2590 | 76 | 'Comment'] | 76 | 'Comment'] |
2592 | 77 | tbl = PrettyTable(tbl_fields) | 77 | tbl = SimpleTable(tbl_fields) |
2593 | 78 | for entry in key_entries: | 78 | for entry in key_entries: |
2594 | 79 | if _is_printable_key(entry): | 79 | if _is_printable_key(entry): |
2595 | 80 | row = [] | 80 | row = [] |
2596 | diff --git a/cloudinit/config/cc_ubuntu_init_switch.py b/cloudinit/config/cc_ubuntu_init_switch.py | |||
2597 | 81 | deleted file mode 100644 | 81 | deleted file mode 100644 |
2598 | index 5dd2690..0000000 | |||
2599 | --- a/cloudinit/config/cc_ubuntu_init_switch.py | |||
2600 | +++ /dev/null | |||
2601 | @@ -1,160 +0,0 @@ | |||
2602 | 1 | # Copyright (C) 2014 Canonical Ltd. | ||
2603 | 2 | # | ||
2604 | 3 | # Author: Scott Moser <scott.moser@canonical.com> | ||
2605 | 4 | # | ||
2606 | 5 | # This file is part of cloud-init. See LICENSE file for license information. | ||
2607 | 6 | |||
2608 | 7 | """ | ||
2609 | 8 | Ubuntu Init Switch | ||
2610 | 9 | ------------------ | ||
2611 | 10 | **Summary:** reboot system into another init. | ||
2612 | 11 | |||
2613 | 12 | This module provides a way for the user to boot with systemd even if the image | ||
2614 | 13 | is set to boot with upstart. It should be run as one of the first | ||
2615 | 14 | ``cloud_init_modules``, and will switch the init system and then issue a | ||
2616 | 15 | reboot. The next boot will come up in the target init system and no action | ||
2617 | 16 | will be taken. This should be inert on non-ubuntu systems, and also | ||
2618 | 17 | exit quickly. | ||
2619 | 18 | |||
2620 | 19 | .. note:: | ||
2621 | 20 | best effort is made, but it's possible this system will break, and probably | ||
2622 | 21 | won't interact well with any other mechanism you've used to switch the init | ||
2623 | 22 | system. | ||
2624 | 23 | |||
2625 | 24 | **Internal name:** ``cc_ubuntu_init_switch`` | ||
2626 | 25 | |||
2627 | 26 | **Module frequency:** once per instance | ||
2628 | 27 | |||
2629 | 28 | **Supported distros:** ubuntu | ||
2630 | 29 | |||
2631 | 30 | **Config keys**:: | ||
2632 | 31 | |||
2633 | 32 | init_switch: | ||
2634 | 33 | target: systemd (can be 'systemd' or 'upstart') | ||
2635 | 34 | reboot: true (reboot if a change was made, or false to not reboot) | ||
2636 | 35 | """ | ||
2637 | 36 | |||
2638 | 37 | from cloudinit.distros import ubuntu | ||
2639 | 38 | from cloudinit import log as logging | ||
2640 | 39 | from cloudinit.settings import PER_INSTANCE | ||
2641 | 40 | from cloudinit import util | ||
2642 | 41 | |||
2643 | 42 | import os | ||
2644 | 43 | import time | ||
2645 | 44 | |||
2646 | 45 | frequency = PER_INSTANCE | ||
2647 | 46 | REBOOT_CMD = ["/sbin/reboot", "--force"] | ||
2648 | 47 | |||
2649 | 48 | DEFAULT_CONFIG = { | ||
2650 | 49 | 'init_switch': {'target': None, 'reboot': True} | ||
2651 | 50 | } | ||
2652 | 51 | |||
2653 | 52 | SWITCH_INIT = """ | ||
2654 | 53 | #!/bin/sh | ||
2655 | 54 | # switch_init: [upstart | systemd] | ||
2656 | 55 | |||
2657 | 56 | is_systemd() { | ||
2658 | 57 | [ "$(dpkg-divert --listpackage /sbin/init)" = "systemd-sysv" ] | ||
2659 | 58 | } | ||
2660 | 59 | debug() { echo "$@" 1>&2; } | ||
2661 | 60 | fail() { echo "$@" 1>&2; exit 1; } | ||
2662 | 61 | |||
2663 | 62 | if [ "$1" = "systemd" ]; then | ||
2664 | 63 | if is_systemd; then | ||
2665 | 64 | debug "already systemd, nothing to do" | ||
2666 | 65 | else | ||
2667 | 66 | [ -f /lib/systemd/systemd ] || fail "no systemd available"; | ||
2668 | 67 | dpkg-divert --package systemd-sysv --divert /sbin/init.diverted \\ | ||
2669 | 68 | --rename /sbin/init | ||
2670 | 69 | fi | ||
2671 | 70 | [ -f /sbin/init ] || ln /lib/systemd/systemd /sbin/init | ||
2672 | 71 | elif [ "$1" = "upstart" ]; then | ||
2673 | 72 | if is_systemd; then | ||
2674 | 73 | rm -f /sbin/init | ||
2675 | 74 | dpkg-divert --package systemd-sysv --rename --remove /sbin/init | ||
2676 | 75 | else | ||
2677 | 76 | debug "already upstart, nothing to do." | ||
2678 | 77 | fi | ||
2679 | 78 | else | ||
2680 | 79 | fail "Error. expect 'upstart' or 'systemd'" | ||
2681 | 80 | fi | ||
2682 | 81 | """ | ||
2683 | 82 | |||
2684 | 83 | distros = ['ubuntu'] | ||
2685 | 84 | |||
2686 | 85 | |||
2687 | 86 | def handle(name, cfg, cloud, log, args): | ||
2688 | 87 | """Handler method activated by cloud-init.""" | ||
2689 | 88 | |||
2690 | 89 | if not isinstance(cloud.distro, ubuntu.Distro): | ||
2691 | 90 | log.debug("%s: distro is '%s', not ubuntu. returning", | ||
2692 | 91 | name, cloud.distro.__class__) | ||
2693 | 92 | return | ||
2694 | 93 | |||
2695 | 94 | cfg = util.mergemanydict([cfg, DEFAULT_CONFIG]) | ||
2696 | 95 | target = cfg['init_switch']['target'] | ||
2697 | 96 | reboot = cfg['init_switch']['reboot'] | ||
2698 | 97 | |||
2699 | 98 | if len(args) != 0: | ||
2700 | 99 | target = args[0] | ||
2701 | 100 | if len(args) > 1: | ||
2702 | 101 | reboot = util.is_true(args[1]) | ||
2703 | 102 | |||
2704 | 103 | if not target: | ||
2705 | 104 | log.debug("%s: target=%s. nothing to do", name, target) | ||
2706 | 105 | return | ||
2707 | 106 | |||
2708 | 107 | if not util.which('dpkg'): | ||
2709 | 108 | log.warn("%s: 'dpkg' not available. Assuming not ubuntu", name) | ||
2710 | 109 | return | ||
2711 | 110 | |||
2712 | 111 | supported = ('upstart', 'systemd') | ||
2713 | 112 | if target not in supported: | ||
2714 | 113 | log.warn("%s: target set to %s, expected one of: %s", | ||
2715 | 114 | name, target, str(supported)) | ||
2716 | 115 | |||
2717 | 116 | if os.path.exists("/run/systemd/system"): | ||
2718 | 117 | current = "systemd" | ||
2719 | 118 | else: | ||
2720 | 119 | current = "upstart" | ||
2721 | 120 | |||
2722 | 121 | if current == target: | ||
2723 | 122 | log.debug("%s: current = target = %s. nothing to do", name, target) | ||
2724 | 123 | return | ||
2725 | 124 | |||
2726 | 125 | try: | ||
2727 | 126 | util.subp(['sh', '-s', target], data=SWITCH_INIT) | ||
2728 | 127 | except util.ProcessExecutionError as e: | ||
2729 | 128 | log.warn("%s: Failed to switch to init '%s'. %s", name, target, e) | ||
2730 | 129 | return | ||
2731 | 130 | |||
2732 | 131 | if util.is_false(reboot): | ||
2733 | 132 | log.info("%s: switched '%s' to '%s'. reboot=false, not rebooting.", | ||
2734 | 133 | name, current, target) | ||
2735 | 134 | return | ||
2736 | 135 | |||
2737 | 136 | try: | ||
2738 | 137 | log.warn("%s: switched '%s' to '%s'. rebooting.", | ||
2739 | 138 | name, current, target) | ||
2740 | 139 | logging.flushLoggers(log) | ||
2741 | 140 | _fire_reboot(log, wait_attempts=4, initial_sleep=4) | ||
2742 | 141 | except Exception as e: | ||
2743 | 142 | util.logexc(log, "Requested reboot did not happen!") | ||
2744 | 143 | raise | ||
2745 | 144 | |||
2746 | 145 | |||
2747 | 146 | def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2): | ||
2748 | 147 | util.subp(REBOOT_CMD) | ||
2749 | 148 | start = time.time() | ||
2750 | 149 | wait_time = initial_sleep | ||
2751 | 150 | for _i in range(0, wait_attempts): | ||
2752 | 151 | time.sleep(wait_time) | ||
2753 | 152 | wait_time *= backoff | ||
2754 | 153 | elapsed = time.time() - start | ||
2755 | 154 | log.debug("Rebooted, but still running after %s seconds", int(elapsed)) | ||
2756 | 155 | # If we got here, not good | ||
2757 | 156 | elapsed = time.time() - start | ||
2758 | 157 | raise RuntimeError(("Reboot did not happen" | ||
2759 | 158 | " after %s seconds!") % (int(elapsed))) | ||
2760 | 159 | |||
2761 | 160 | # vi: ts=4 expandtab | ||
2762 | diff --git a/cloudinit/config/cc_zypper_add_repo.py b/cloudinit/config/cc_zypper_add_repo.py | |||
2763 | 161 | new file mode 100644 | 0 | new file mode 100644 |
2764 | index 0000000..aba2695 | |||
2765 | --- /dev/null | |||
2766 | +++ b/cloudinit/config/cc_zypper_add_repo.py | |||
2767 | @@ -0,0 +1,218 @@ | |||
2768 | 1 | # | ||
2769 | 2 | # Copyright (C) 2017 SUSE LLC. | ||
2770 | 3 | # | ||
2771 | 4 | # This file is part of cloud-init. See LICENSE file for license information. | ||
2772 | 5 | |||
2773 | 6 | """zypper_add_repo: Add zypper repositories to the system""" | ||
2774 | 7 | |||
2775 | 8 | import configobj | ||
2776 | 9 | import os | ||
2777 | 10 | from six import string_types | ||
2778 | 11 | from textwrap import dedent | ||
2779 | 12 | |||
2780 | 13 | from cloudinit.config.schema import get_schema_doc | ||
2781 | 14 | from cloudinit import log as logging | ||
2782 | 15 | from cloudinit.settings import PER_ALWAYS | ||
2783 | 16 | from cloudinit import util | ||
2784 | 17 | |||
2785 | 18 | distros = ['opensuse', 'sles'] | ||
2786 | 19 | |||
2787 | 20 | schema = { | ||
2788 | 21 | 'id': 'cc_zypper_add_repo', | ||
2789 | 22 | 'name': 'ZypperAddRepo', | ||
2790 | 23 | 'title': 'Configure zypper behavior and add zypper repositories', | ||
2791 | 24 | 'description': dedent("""\ | ||
2792 | 25 | Configure zypper behavior by modifying /etc/zypp/zypp.conf. The | ||
2793 | 26 | configuration writer is "dumb" and will simply append the provided | ||
2794 | 27 | configuration options to the configuration file. Option settings | ||
2795 | 28 | that may be duplicate will be resolved by the way the zypp.conf file | ||
2796 | 29 | is parsed. The file is in INI format. | ||
2797 | 30 | Add repositories to the system. No validation is performed on the | ||
2798 | 31 | repository file entries, it is assumed the user is familiar with | ||
2799 | 32 | the zypper repository file format."""), | ||
2800 | 33 | 'distros': distros, | ||
2801 | 34 | 'examples': [dedent("""\ | ||
2802 | 35 | zypper: | ||
2803 | 36 | repos: | ||
2804 | 37 | - id: opensuse-oss | ||
2805 | 38 | name: os-oss | ||
2806 | 39 | baseurl: http://dl.opensuse.org/dist/leap/v/repo/oss/ | ||
2807 | 40 | enabled: 1 | ||
2808 | 41 | autorefresh: 1 | ||
2809 | 42 | - id: opensuse-oss-update | ||
2810 | 43 | name: os-oss-up | ||
2811 | 44 | baseurl: http://dl.opensuse.org/dist/leap/v/update | ||
2812 | 45 | # any setting per | ||
2813 | 46 | # https://en.opensuse.org/openSUSE:Standards_RepoInfo | ||
2814 | 47 | # enable and autorefresh are on by default | ||
2815 | 48 | config: | ||
2816 | 49 | reposdir: /etc/zypp/repos.dir | ||
2817 | 50 | servicesdir: /etc/zypp/services.d | ||
2818 | 51 | download.use_deltarpm: true | ||
2819 | 52 | # any setting in /etc/zypp/zypp.conf | ||
2820 | 53 | """)], | ||
2821 | 54 | 'frequency': PER_ALWAYS, | ||
2822 | 55 | 'type': 'object', | ||
2823 | 56 | 'properties': { | ||
2824 | 57 | 'zypper': { | ||
2825 | 58 | 'type': 'object', | ||
2826 | 59 | 'properties': { | ||
2827 | 60 | 'repos': { | ||
2828 | 61 | 'type': 'array', | ||
2829 | 62 | 'items': { | ||
2830 | 63 | 'type': 'object', | ||
2831 | 64 | 'properties': { | ||
2832 | 65 | 'id': { | ||
2833 | 66 | 'type': 'string', | ||
2834 | 67 | 'description': dedent("""\ | ||
2835 | 68 | The unique id of the repo, used when | ||
2836 | 69 | writing | ||
2837 | 70 | /etc/zypp/repos.d/<id>.repo.""") | ||
2838 | 71 | }, | ||
2839 | 72 | 'baseurl': { | ||
2840 | 73 | 'type': 'string', | ||
2841 | 74 | 'format': 'uri', # built-in format type | ||
2842 | 75 | 'description': 'The base repositoy URL' | ||
2843 | 76 | } | ||
2844 | 77 | }, | ||
2845 | 78 | 'required': ['id', 'baseurl'], | ||
2846 | 79 | 'additionalProperties': True | ||
2847 | 80 | }, | ||
2848 | 81 | 'minItems': 1 | ||
2849 | 82 | }, | ||
2850 | 83 | 'config': { | ||
2851 | 84 | 'type': 'object', | ||
2852 | 85 | 'description': dedent("""\ | ||
2853 | 86 | Any supported zypo.conf key is written to | ||
2854 | 87 | /etc/zypp/zypp.conf'""") | ||
2855 | 88 | } | ||
2856 | 89 | }, | ||
2857 | 90 | 'required': [], | ||
2858 | 91 | 'minProperties': 1, # Either config or repo must be provided | ||
2859 | 92 | 'additionalProperties': False, # only repos and config allowed | ||
2860 | 93 | } | ||
2861 | 94 | } | ||
2862 | 95 | } | ||
2863 | 96 | |||
2864 | 97 | __doc__ = get_schema_doc(schema) # Supplement python help() | ||
2865 | 98 | |||
2866 | 99 | LOG = logging.getLogger(__name__) | ||
2867 | 100 | |||
2868 | 101 | |||
2869 | 102 | def _canonicalize_id(repo_id): | ||
2870 | 103 | repo_id = repo_id.replace(" ", "_") | ||
2871 | 104 | return repo_id | ||
2872 | 105 | |||
2873 | 106 | |||
2874 | 107 | def _format_repo_value(val): | ||
2875 | 108 | if isinstance(val, bool): | ||
2876 | 109 | # zypp prefers 1/0 | ||
2877 | 110 | return 1 if val else 0 | ||
2878 | 111 | if isinstance(val, (list, tuple)): | ||
2879 | 112 | return "\n ".join([_format_repo_value(v) for v in val]) | ||
2880 | 113 | if not isinstance(val, string_types): | ||
2881 | 114 | return str(val) | ||
2882 | 115 | return val | ||
2883 | 116 | |||
2884 | 117 | |||
def _format_repository_config(repo_id, repo_config):
    """Render one repo definition as INI text via configobj.

    @param repo_id: Section name ([repo_id]) for the rendered block.
    @param repo_config: Dict of repo settings to render beneath it.
    @return: The rendered section as a single newline-joined string.
    """
    rendered = configobj.ConfigObj()
    rendered[repo_id] = {}
    for key, value in repo_config.items():
        # For now assume that people using this know the format
        # of zypper repos and don't verify keys/values further
        rendered[repo_id][key] = _format_repo_value(value)
    return "\n".join(rendered.write())
2895 | 128 | |||
2896 | 129 | |||
def _write_repos(repos, repo_base_path):
    """Write the user-provided repo definition files

    @param repos: A list of repo dictionary objects provided by the user's
        cloud config.
    @param repo_base_path: The directory path to which repo definitions are
        written.
    """
    if not repos:
        return
    # Maps repo id -> (destination path, normalized config) for entries that
    # survived validation; written out in a second pass.
    pending = {}
    for index, user_repo_config in enumerate(repos):
        # Skip on absent required keys
        missing_keys = set(['id', 'baseurl']).difference(set(user_repo_config))
        if missing_keys:
            LOG.warning(
                "Repo config at index %d is missing required config keys: %s",
                index, ",".join(missing_keys))
            continue
        repo_id = user_repo_config.get('id')
        target_path = os.path.join(
            repo_base_path, "%s.repo" % (_canonicalize_id(repo_id)))
        if os.path.exists(target_path):
            LOG.info("Skipping repo %s, file %s already exists!",
                     repo_id, target_path)
            continue
        elif repo_id in pending:
            LOG.info("Skipping repo %s, file %s already pending!",
                     repo_id, target_path)
            continue

        # Do some basic key formatting (lowercase, dashes -> underscores),
        # dropping the 'id' key which names the file rather than a setting.
        normalized = {}
        for key, value in user_repo_config.items():
            if not key or key == 'id':
                continue
            normalized[key.lower().strip().replace("-", "_")] = value

        # Set defaults if not present
        for default_field in ('enabled', 'autorefresh'):
            normalized.setdefault(default_field, '1')

        pending[repo_id] = (target_path, normalized)

    for repo_id, (target_path, repo_config) in pending.items():
        util.write_file(
            target_path, _format_repository_config(repo_id, repo_config))
2944 | 177 | |||
2945 | 178 | |||
def _write_zypp_config(zypper_config):
    """Write to the default zypp configuration file /etc/zypp/zypp.conf

    Appends each truthy key=value from zypper_config beneath a
    '# Added via cloud.cfg' marker. The 'configdir' key is unsupported
    and skipped with a warning.

    @param zypper_config: dict from the user's zypper 'config' section.
    """
    if not zypper_config:
        return
    zypp_config = '/etc/zypp/zypp.conf'
    zypp_conf_content = util.load_file(zypp_config)
    new_settings = ['# Added via cloud.cfg']
    for setting, value in zypper_config.items():
        if setting == 'configdir':
            msg = 'Changing the location of the zypper configuration is '
            msg += 'not supported, skipping "configdir" setting'
            LOG.warning(msg)
            continue
        if value:
            new_settings.append('%s=%s' % (setting, value))
    if len(new_settings) > 1:
        # FIX: previously the new settings were concatenated directly onto
        # the existing content; when the file lacked a trailing newline the
        # marker comment merged into the last existing line. Ensure the
        # appended block starts on its own line.
        if zypp_conf_content and not zypp_conf_content.endswith('\n'):
            zypp_conf_content += '\n'
        new_config = zypp_conf_content + '\n'.join(new_settings)
    else:
        new_config = zypp_conf_content
    util.write_file(zypp_config, new_config)
2966 | 199 | |||
2967 | 200 | |||
def handle(name, cfg, _cloud, log, _args):
    """Apply the user's 'zypper' cloud-config section.

    Writes zypp.conf options from the 'config' key and repository files
    from the 'repos' key. The schema permits either key on its own
    (minProperties: 1), so each is honored independently.

    @param name: Module name used in skip log messages.
    @param cfg: The parsed cloud-config dict.
    """
    zypper_section = cfg.get('zypper')
    if not zypper_section:
        LOG.debug(("Skipping module named %s,"
                   " no 'zypper' relevant configuration found"), name)
        return
    repos = zypper_section.get('repos')
    zypper_config = zypper_section.get('config', {})
    # FIX: previously a missing 'repos' key returned early and silently
    # dropped any 'config' settings, although the schema explicitly allows
    # a config-only zypper section ("Either config or repo must be
    # provided"). Only skip when neither key has content.
    if not repos and not zypper_config:
        LOG.debug(("Skipping module named %s,"
                   " no 'repos' or 'config' configuration found"), name)
        return
    repo_base_path = zypper_config.get('reposdir', '/etc/zypp/repos.d/')

    _write_zypp_config(zypper_config)
    _write_repos(repos, repo_base_path)
2984 | 217 | |||
2985 | 218 | # vi: ts=4 expandtab | ||
2986 | diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py | |||
2987 | index 6400f00..bb291ff 100644 | |||
2988 | --- a/cloudinit/config/schema.py | |||
2989 | +++ b/cloudinit/config/schema.py | |||
2990 | @@ -3,19 +3,24 @@ | |||
2991 | 3 | 3 | ||
2992 | 4 | from __future__ import print_function | 4 | from __future__ import print_function |
2993 | 5 | 5 | ||
2995 | 6 | from cloudinit.util import read_file_or_url | 6 | from cloudinit import importer |
2996 | 7 | from cloudinit.util import find_modules, read_file_or_url | ||
2997 | 7 | 8 | ||
2998 | 8 | import argparse | 9 | import argparse |
2999 | 10 | from collections import defaultdict | ||
3000 | 11 | from copy import deepcopy | ||
3001 | 9 | import logging | 12 | import logging |
3002 | 10 | import os | 13 | import os |
3003 | 14 | import re | ||
3004 | 11 | import sys | 15 | import sys |
3005 | 12 | import yaml | 16 | import yaml |
3006 | 13 | 17 | ||
3007 | 18 | _YAML_MAP = {True: 'true', False: 'false', None: 'null'} | ||
3008 | 14 | SCHEMA_UNDEFINED = b'UNDEFINED' | 19 | SCHEMA_UNDEFINED = b'UNDEFINED' |
3009 | 15 | CLOUD_CONFIG_HEADER = b'#cloud-config' | 20 | CLOUD_CONFIG_HEADER = b'#cloud-config' |
3010 | 16 | SCHEMA_DOC_TMPL = """ | 21 | SCHEMA_DOC_TMPL = """ |
3011 | 17 | {name} | 22 | {name} |
3013 | 18 | --- | 23 | {title_underbar} |
3014 | 19 | **Summary:** {title} | 24 | **Summary:** {title} |
3015 | 20 | 25 | ||
3016 | 21 | {description} | 26 | {description} |
3017 | @@ -31,6 +36,8 @@ SCHEMA_DOC_TMPL = """ | |||
3018 | 31 | {examples} | 36 | {examples} |
3019 | 32 | """ | 37 | """ |
3020 | 33 | SCHEMA_PROPERTY_TMPL = '{prefix}**{prop_name}:** ({type}) {description}' | 38 | SCHEMA_PROPERTY_TMPL = '{prefix}**{prop_name}:** ({type}) {description}' |
3021 | 39 | SCHEMA_EXAMPLES_HEADER = '\n**Examples**::\n\n' | ||
3022 | 40 | SCHEMA_EXAMPLES_SPACER_TEMPLATE = '\n # --- Example{0} ---' | ||
3023 | 34 | 41 | ||
3024 | 35 | 42 | ||
3025 | 36 | class SchemaValidationError(ValueError): | 43 | class SchemaValidationError(ValueError): |
3026 | @@ -83,11 +90,49 @@ def validate_cloudconfig_schema(config, schema, strict=False): | |||
3027 | 83 | logging.warning('Invalid config:\n%s', '\n'.join(messages)) | 90 | logging.warning('Invalid config:\n%s', '\n'.join(messages)) |
3028 | 84 | 91 | ||
3029 | 85 | 92 | ||
def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
    """Return contents of the cloud-config file annotated with schema errors.

    @param cloudconfig: YAML-loaded object from the original_content.
    @param original_content: The contents of a cloud-config file
    @param schema_errors: List of tuples from a JSONSchemaValidationError. The
        tuples consist of (schemapath, error_message).
    """
    if not schema_errors:
        return original_content
    line_for_path = _schemapath_for_cloudconfig(cloudconfig, original_content)
    messages_at_line = defaultdict(list)
    footer = []
    # First pass: bucket messages by the line they refer to and build the
    # numbered footer entries.
    for counter, (path, msg) in enumerate(schema_errors, 1):
        messages_at_line[line_for_path[path]].append(msg)
        footer.append('# E{0}: {1}'.format(counter, msg))
    # Second pass: tag each offending line with its E<n> label(s).
    annotated = []
    label_counter = 1
    for idx, text in enumerate(original_content.decode().split('\n')):
        line_msgs = messages_at_line[idx + 1]
        if line_msgs:
            labels = ['E{0}'.format(label_counter + offset)
                      for offset in range(len(line_msgs))]
            label_counter += len(line_msgs)
            annotated.append(text + '\t\t# ' + ','.join(labels))
        else:
            annotated.append(text)
    annotated.append(
        '# Errors: -------------\n{0}\n\n'.format('\n'.join(footer)))
    return '\n'.join(annotated)
3065 | 127 | |||
3066 | 128 | |||
3067 | 129 | def validate_cloudconfig_file(config_path, schema, annotate=False): | ||
3068 | 87 | """Validate cloudconfig file adheres to a specific jsonschema. | 130 | """Validate cloudconfig file adheres to a specific jsonschema. |
3069 | 88 | 131 | ||
3070 | 89 | @param config_path: Path to the yaml cloud-config file to parse. | 132 | @param config_path: Path to the yaml cloud-config file to parse. |
3071 | 90 | @param schema: Dict describing a valid jsonschema to validate against. | 133 | @param schema: Dict describing a valid jsonschema to validate against. |
3072 | 134 | @param annotate: Boolean set True to print original config file with error | ||
3073 | 135 | annotations on the offending lines. | ||
3074 | 91 | 136 | ||
3075 | 92 | @raises SchemaValidationError containing any of schema_errors encountered. | 137 | @raises SchemaValidationError containing any of schema_errors encountered. |
3076 | 93 | @raises RuntimeError when config_path does not exist. | 138 | @raises RuntimeError when config_path does not exist. |
3077 | @@ -108,18 +153,83 @@ def validate_cloudconfig_file(config_path, schema): | |||
3078 | 108 | ('format', 'File {0} is not valid yaml. {1}'.format( | 153 | ('format', 'File {0} is not valid yaml. {1}'.format( |
3079 | 109 | config_path, str(e))),) | 154 | config_path, str(e))),) |
3080 | 110 | raise SchemaValidationError(errors) | 155 | raise SchemaValidationError(errors) |
3083 | 111 | validate_cloudconfig_schema( | 156 | |
3084 | 112 | cloudconfig, schema, strict=True) | 157 | try: |
3085 | 158 | validate_cloudconfig_schema( | ||
3086 | 159 | cloudconfig, schema, strict=True) | ||
3087 | 160 | except SchemaValidationError as e: | ||
3088 | 161 | if annotate: | ||
3089 | 162 | print(annotated_cloudconfig_file( | ||
3090 | 163 | cloudconfig, content, e.schema_errors)) | ||
3091 | 164 | raise | ||
3092 | 165 | |||
3093 | 166 | |||
3094 | 167 | def _schemapath_for_cloudconfig(config, original_content): | ||
3095 | 168 | """Return a dictionary mapping schemapath to original_content line number. | ||
3096 | 169 | |||
3097 | 170 | @param config: The yaml.loaded config dictionary of a cloud-config file. | ||
3098 | 171 | @param original_content: The simple file content of the cloud-config file | ||
3099 | 172 | """ | ||
3100 | 173 | # FIXME Doesn't handle multi-line lists or multi-line strings | ||
3101 | 174 | content_lines = original_content.decode().split('\n') | ||
3102 | 175 | schema_line_numbers = {} | ||
3103 | 176 | list_index = 0 | ||
3104 | 177 | RE_YAML_INDENT = r'^(\s*)' | ||
3105 | 178 | scopes = [] | ||
3106 | 179 | for line_number, line in enumerate(content_lines): | ||
3107 | 180 | indent_depth = len(re.match(RE_YAML_INDENT, line).groups()[0]) | ||
3108 | 181 | line = line.strip() | ||
3109 | 182 | if not line or line.startswith('#'): | ||
3110 | 183 | continue | ||
3111 | 184 | if scopes: | ||
3112 | 185 | previous_depth, path_prefix = scopes[-1] | ||
3113 | 186 | else: | ||
3114 | 187 | previous_depth = -1 | ||
3115 | 188 | path_prefix = '' | ||
3116 | 189 | if line.startswith('- '): | ||
3117 | 190 | key = str(list_index) | ||
3118 | 191 | value = line[1:] | ||
3119 | 192 | list_index += 1 | ||
3120 | 193 | else: | ||
3121 | 194 | list_index = 0 | ||
3122 | 195 | key, value = line.split(':', 1) | ||
3123 | 196 | while indent_depth <= previous_depth: | ||
3124 | 197 | if scopes: | ||
3125 | 198 | previous_depth, path_prefix = scopes.pop() | ||
3126 | 199 | else: | ||
3127 | 200 | previous_depth = -1 | ||
3128 | 201 | path_prefix = '' | ||
3129 | 202 | if path_prefix: | ||
3130 | 203 | key = path_prefix + '.' + key | ||
3131 | 204 | scopes.append((indent_depth, key)) | ||
3132 | 205 | if value: | ||
3133 | 206 | value = value.strip() | ||
3134 | 207 | if value.startswith('['): | ||
3135 | 208 | scopes.append((indent_depth + 2, key + '.0')) | ||
3136 | 209 | for inner_list_index in range(0, len(yaml.safe_load(value))): | ||
3137 | 210 | list_key = key + '.' + str(inner_list_index) | ||
3138 | 211 | schema_line_numbers[list_key] = line_number + 1 | ||
3139 | 212 | schema_line_numbers[key] = line_number + 1 | ||
3140 | 213 | return schema_line_numbers | ||
3141 | 113 | 214 | ||
3142 | 114 | 215 | ||
def _get_property_type(property_dict):
    """Return a string representing a property type from a given jsonschema."""
    prop_type = property_dict.get('type', SCHEMA_UNDEFINED)
    # With no declared type, fall back to listing the enum values (mapped
    # through _YAML_MAP so True/False/None render as YAML literals).
    if prop_type == SCHEMA_UNDEFINED and property_dict.get('enum'):
        prop_type = [str(_YAML_MAP.get(item, item))
                     for item in property_dict['enum']]
    if isinstance(prop_type, list):
        prop_type = '/'.join(prop_type)
    items = property_dict.get('items', {})
    item_type = items.get('type', '')
    # Fold in every alternative item type declared in a oneOf clause.
    for alternative in items.get('oneOf', {}):
        if item_type:
            item_type += '/'
        item_type += '(' + _get_property_type(alternative) + ')'
    if item_type:
        return '{0} of {1}'.format(prop_type, item_type)
    return prop_type
3164 | 124 | 234 | ||
3165 | 125 | 235 | ||
3166 | @@ -146,12 +256,14 @@ def _get_schema_examples(schema, prefix=''): | |||
3167 | 146 | examples = schema.get('examples') | 256 | examples = schema.get('examples') |
3168 | 147 | if not examples: | 257 | if not examples: |
3169 | 148 | return '' | 258 | return '' |
3173 | 149 | rst_content = '\n**Examples**::\n\n' | 259 | rst_content = SCHEMA_EXAMPLES_HEADER |
3174 | 150 | for example in examples: | 260 | for count, example in enumerate(examples): |
3172 | 151 | example_yaml = yaml.dump(example, default_flow_style=False) | ||
3175 | 152 | # Python2.6 is missing textwrapper.indent | 261 | # Python2.6 is missing textwrapper.indent |
3177 | 153 | lines = example_yaml.split('\n') | 262 | lines = example.split('\n') |
3178 | 154 | indented_lines = [' {0}'.format(line) for line in lines] | 263 | indented_lines = [' {0}'.format(line) for line in lines] |
3179 | 264 | if rst_content != SCHEMA_EXAMPLES_HEADER: | ||
3180 | 265 | indented_lines.insert( | ||
3181 | 266 | 0, SCHEMA_EXAMPLES_SPACER_TEMPLATE.format(count + 1)) | ||
3182 | 155 | rst_content += '\n'.join(indented_lines) | 267 | rst_content += '\n'.join(indented_lines) |
3183 | 156 | return rst_content | 268 | return rst_content |
3184 | 157 | 269 | ||
3185 | @@ -162,61 +274,87 @@ def get_schema_doc(schema): | |||
3186 | 162 | @param schema: Dict of jsonschema to render. | 274 | @param schema: Dict of jsonschema to render. |
3187 | 163 | @raise KeyError: If schema lacks an expected key. | 275 | @raise KeyError: If schema lacks an expected key. |
3188 | 164 | """ | 276 | """ |
3203 | 165 | schema['property_doc'] = _get_property_doc(schema) | 277 | schema_copy = deepcopy(schema) |
3204 | 166 | schema['examples'] = _get_schema_examples(schema) | 278 | schema_copy['property_doc'] = _get_property_doc(schema) |
3205 | 167 | schema['distros'] = ', '.join(schema['distros']) | 279 | schema_copy['examples'] = _get_schema_examples(schema) |
3206 | 168 | return SCHEMA_DOC_TMPL.format(**schema) | 280 | schema_copy['distros'] = ', '.join(schema['distros']) |
3207 | 169 | 281 | # Need an underbar of the same length as the name | |
3208 | 170 | 282 | schema_copy['title_underbar'] = re.sub(r'.', '-', schema['name']) | |
3209 | 171 | def get_schema(section_key=None): | 283 | return SCHEMA_DOC_TMPL.format(**schema_copy) |
3210 | 172 | """Return a dict of jsonschema defined in any cc_* module. | 284 | |
3211 | 173 | 285 | ||
# Process-lifetime cache for the coalesced schema; built once by get_schema.
FULL_SCHEMA = None


def get_schema():
    """Return jsonschema coalesced from all cc_* cloud-config module."""
    global FULL_SCHEMA
    if FULL_SCHEMA:
        return FULL_SCHEMA
    combined = {
        '$schema': 'http://json-schema.org/draft-04/schema#',
        'id': 'cloud-config-schema',
        'allOf': []}
    configs_dir = os.path.dirname(os.path.abspath(__file__))
    for _fname, mod_name in find_modules(configs_dir).items():
        # Only config modules exposing a top-level 'schema' attribute are
        # folded into the combined document.
        mod_locs, _looked_locs = importer.find_module(
            mod_name, ['cloudinit.config'], ['schema'])
        if mod_locs:
            combined['allOf'].append(
                importer.import_module(mod_locs[0]).schema)
    FULL_SCHEMA = combined
    return combined
3234 | 179 | 308 | ||
3235 | 180 | 309 | ||
def error(message):
    """Write message to stderr and terminate the process with status 1."""
    sys.stderr.write('{0}\n'.format(message))
    sys.exit(1)
3240 | 184 | 313 | ||
3241 | 185 | 314 | ||
def get_parser(parser=None):
    """Return a parser for supported cmdline arguments.

    @param parser: Optional existing argparse parser (e.g. a subcommand
        parser) to extend; a new 'cloudconfig-schema' parser is created
        when omitted.
    """
    if not parser:
        parser = argparse.ArgumentParser(
            prog='cloudconfig-schema',
            description='Validate cloud-config files or document schema')
    # Declare all supported options in one table, then register them.
    option_table = (
        (('-c', '--config-file'),
         {'help': 'Path of the cloud-config yaml file to validate'}),
        (('-d', '--doc'),
         {'action': 'store_true', 'default': False,
          'help': 'Print schema documentation'}),
        (('--annotate',),
         {'action': 'store_true', 'default': False,
          'help': 'Annotate existing cloud-config file with errors'}),
    )
    for flags, kwargs in option_table:
        parser.add_argument(*flags, **kwargs)
    return parser
3259 | 196 | 328 | ||
3260 | 197 | 329 | ||
def handle_schema_args(name, args):
    """Handle provided schema args and perform the appropriate actions."""
    # --config-file and --doc are mutually exclusive and one is required.
    exclusive = (args.config_file, args.doc)
    if not any(exclusive) or all(exclusive):
        error('Expected either --config-file argument or --doc')
    full_schema = get_schema()
    if args.config_file:
        try:
            validate_cloudconfig_file(
                args.config_file, full_schema, args.annotate)
        except (SchemaValidationError, RuntimeError) as e:
            # With --annotate the annotated file was already printed;
            # without it, surface the failure and exit non-zero.
            if not args.annotate:
                error(str(e))
        else:
            print("Valid cloud-config file {0}".format(args.config_file))
    if args.doc:
        for subschema in full_schema['allOf']:
            print(get_schema_doc(subschema))
3290 | 348 | |||
3291 | 349 | |||
def main():
    """Tool to validate schema of a cloud-config file or print schema docs."""
    cli_args = get_parser().parse_args()
    handle_schema_args('cloudconfig-schema', cli_args)
    return 0


if __name__ == '__main__':
    sys.exit(main())
3301 | 220 | 359 | ||
3302 | 221 | |||
3303 | 222 | # vi: ts=4 expandtab | 360 | # vi: ts=4 expandtab |
3304 | diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py | |||
3305 | index 1fd48a7..d5becd1 100755 | |||
3306 | --- a/cloudinit/distros/__init__.py | |||
3307 | +++ b/cloudinit/distros/__init__.py | |||
3308 | @@ -30,12 +30,16 @@ from cloudinit import util | |||
3309 | 30 | from cloudinit.distros.parsers import hosts | 30 | from cloudinit.distros.parsers import hosts |
3310 | 31 | 31 | ||
3311 | 32 | 32 | ||
3312 | 33 | # Used when a cloud-config module can be run on all cloud-init distibutions. | ||
3313 | 34 | # The value 'all' is surfaced in module documentation for distro support. | ||
3314 | 35 | ALL_DISTROS = 'all' | ||
3315 | 36 | |||
3316 | 33 | OSFAMILIES = { | 37 | OSFAMILIES = { |
3317 | 34 | 'debian': ['debian', 'ubuntu'], | 38 | 'debian': ['debian', 'ubuntu'], |
3318 | 35 | 'redhat': ['centos', 'fedora', 'rhel'], | 39 | 'redhat': ['centos', 'fedora', 'rhel'], |
3319 | 36 | 'gentoo': ['gentoo'], | 40 | 'gentoo': ['gentoo'], |
3320 | 37 | 'freebsd': ['freebsd'], | 41 | 'freebsd': ['freebsd'], |
3322 | 38 | 'suse': ['sles'], | 42 | 'suse': ['opensuse', 'sles'], |
3323 | 39 | 'arch': ['arch'], | 43 | 'arch': ['arch'], |
3324 | 40 | } | 44 | } |
3325 | 41 | 45 | ||
3326 | @@ -188,6 +192,9 @@ class Distro(object): | |||
3327 | 188 | def _get_localhost_ip(self): | 192 | def _get_localhost_ip(self): |
3328 | 189 | return "127.0.0.1" | 193 | return "127.0.0.1" |
3329 | 190 | 194 | ||
    def get_locale(self):
        """Return the default locale for this distro.

        The base class deliberately leaves this unimplemented; concrete
        Distro subclasses are expected to override it.
        """
        raise NotImplementedError()
3333 | 191 | @abc.abstractmethod | 198 | @abc.abstractmethod |
3334 | 192 | def _read_hostname(self, filename, default=None): | 199 | def _read_hostname(self, filename, default=None): |
3335 | 193 | raise NotImplementedError() | 200 | raise NotImplementedError() |
3336 | diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py | |||
3337 | index b4c0ba7..f87a343 100644 | |||
3338 | --- a/cloudinit/distros/arch.py | |||
3339 | +++ b/cloudinit/distros/arch.py | |||
3340 | @@ -14,6 +14,8 @@ from cloudinit.distros.parsers.hostname import HostnameConf | |||
3341 | 14 | 14 | ||
3342 | 15 | from cloudinit.settings import PER_INSTANCE | 15 | from cloudinit.settings import PER_INSTANCE |
3343 | 16 | 16 | ||
3344 | 17 | import os | ||
3345 | 18 | |||
3346 | 17 | LOG = logging.getLogger(__name__) | 19 | LOG = logging.getLogger(__name__) |
3347 | 18 | 20 | ||
3348 | 19 | 21 | ||
3349 | @@ -52,31 +54,10 @@ class Distro(distros.Distro): | |||
3350 | 52 | entries = net_util.translate_network(settings) | 54 | entries = net_util.translate_network(settings) |
3351 | 53 | LOG.debug("Translated ubuntu style network settings %s into %s", | 55 | LOG.debug("Translated ubuntu style network settings %s into %s", |
3352 | 54 | settings, entries) | 56 | settings, entries) |
3378 | 55 | dev_names = entries.keys() | 57 | return _render_network( |
3379 | 56 | # Format for netctl | 58 | entries, resolv_conf=self.resolve_conf_fn, |
3380 | 57 | for (dev, info) in entries.items(): | 59 | conf_dir=self.network_conf_dir, |
3381 | 58 | nameservers = [] | 60 | enable_func=self._enable_interface) |
3357 | 59 | net_fn = self.network_conf_dir + dev | ||
3358 | 60 | net_cfg = { | ||
3359 | 61 | 'Connection': 'ethernet', | ||
3360 | 62 | 'Interface': dev, | ||
3361 | 63 | 'IP': info.get('bootproto'), | ||
3362 | 64 | 'Address': "('%s/%s')" % (info.get('address'), | ||
3363 | 65 | info.get('netmask')), | ||
3364 | 66 | 'Gateway': info.get('gateway'), | ||
3365 | 67 | 'DNS': str(tuple(info.get('dns-nameservers'))).replace(',', '') | ||
3366 | 68 | } | ||
3367 | 69 | util.write_file(net_fn, convert_netctl(net_cfg)) | ||
3368 | 70 | if info.get('auto'): | ||
3369 | 71 | self._enable_interface(dev) | ||
3370 | 72 | if 'dns-nameservers' in info: | ||
3371 | 73 | nameservers.extend(info['dns-nameservers']) | ||
3372 | 74 | |||
3373 | 75 | if nameservers: | ||
3374 | 76 | util.write_file(self.resolve_conf_fn, | ||
3375 | 77 | convert_resolv_conf(nameservers)) | ||
3376 | 78 | |||
3377 | 79 | return dev_names | ||
3382 | 80 | 61 | ||
3383 | 81 | def _enable_interface(self, device_name): | 62 | def _enable_interface(self, device_name): |
3384 | 82 | cmd = ['netctl', 'reenable', device_name] | 63 | cmd = ['netctl', 'reenable', device_name] |
3385 | @@ -173,13 +154,60 @@ class Distro(distros.Distro): | |||
3386 | 173 | ["-y"], freq=PER_INSTANCE) | 154 | ["-y"], freq=PER_INSTANCE) |
3387 | 174 | 155 | ||
3388 | 175 | 156 | ||
def _render_network(entries, target="/", conf_dir="etc/netctl",
                    resolv_conf="etc/resolv.conf", enable_func=None):
    """Render the translate_network format into netctl files in target.
    Paths will be rendered under target.

    @param entries: dict of device name -> interface info dicts.
    @param target: filesystem root under which conf_dir/resolv_conf live.
    @param enable_func: optional callable invoked with each device name
        whose config is marked 'auto'.
    @return: list of device names for which a profile was written.
    """
    written_devs = []
    dns_servers = []
    resolv_conf = util.target_path(target, resolv_conf)
    conf_dir = util.target_path(target, conf_dir)

    for dev, info in entries.items():
        # no configuration should be rendered for 'lo'
        if dev == 'lo':
            continue
        written_devs.append(dev)
        profile = {
            'Connection': 'ethernet',
            'Interface': dev,
            'IP': info.get('bootproto'),
            'Address': "%s/%s" % (info.get('address'),
                                  info.get('netmask')),
            'Gateway': info.get('gateway'),
            'DNS': info.get('dns-nameservers', []),
        }
        util.write_file(os.path.join(conf_dir, dev), convert_netctl(profile))
        if enable_func and info.get('auto'):
            enable_func(dev)
        if 'dns-nameservers' in info:
            dns_servers.extend(info['dns-nameservers'])

    if dns_servers:
        util.write_file(resolv_conf,
                        convert_resolv_conf(dns_servers))
    return written_devs
3425 | 193 | |||
3426 | 194 | |||
3427 | 176 | def convert_netctl(settings): | 195 | def convert_netctl(settings): |
3434 | 177 | """Returns a settings string formatted for netctl.""" | 196 | """Given a dictionary, returns a string in netctl profile format. |
3435 | 178 | result = '' | 197 | |
3436 | 179 | if isinstance(settings, dict): | 198 | netctl profile is described at: |
3437 | 180 | for k, v in settings.items(): | 199 | https://git.archlinux.org/netctl.git/tree/docs/netctl.profile.5.txt |
3438 | 181 | result = result + '%s=%s\n' % (k, v) | 200 | |
3439 | 182 | return result | 201 | Note that the 'Special Quoting Rules' are not handled here.""" |
3440 | 202 | result = [] | ||
3441 | 203 | for key in sorted(settings): | ||
3442 | 204 | val = settings[key] | ||
3443 | 205 | if val is None: | ||
3444 | 206 | val = "" | ||
3445 | 207 | elif isinstance(val, (tuple, list)): | ||
3446 | 208 | val = "(" + ' '.join("'%s'" % v for v in val) + ")" | ||
3447 | 209 | result.append("%s=%s\n" % (key, val)) | ||
3448 | 210 | return ''.join(result) | ||
3449 | 183 | 211 | ||
3450 | 184 | 212 | ||
3451 | 185 | def convert_resolv_conf(settings): | 213 | def convert_resolv_conf(settings): |
3452 | diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py | |||
3453 | index abfb81f..33cc0bf 100644 | |||
3454 | --- a/cloudinit/distros/debian.py | |||
3455 | +++ b/cloudinit/distros/debian.py | |||
3456 | @@ -61,11 +61,49 @@ class Distro(distros.Distro): | |||
3457 | 61 | # should only happen say once per instance...) | 61 | # should only happen say once per instance...) |
3458 | 62 | self._runner = helpers.Runners(paths) | 62 | self._runner = helpers.Runners(paths) |
3459 | 63 | self.osfamily = 'debian' | 63 | self.osfamily = 'debian' |
3460 | 64 | self.default_locale = 'en_US.UTF-8' | ||
3461 | 65 | self.system_locale = None | ||
3462 | 64 | 66 | ||
3464 | 65 | def apply_locale(self, locale, out_fn=None): | 67 | def get_locale(self): |
3465 | 68 | """Return the default locale if set, else use default locale""" | ||
3466 | 69 | |||
3467 | 70 | # read system locale value | ||
3468 | 71 | if not self.system_locale: | ||
3469 | 72 | self.system_locale = read_system_locale() | ||
3470 | 73 | |||
3471 | 74 | # Return system_locale setting if valid, else use default locale | ||
3472 | 75 | return (self.system_locale if self.system_locale else | ||
3473 | 76 | self.default_locale) | ||
3474 | 77 | |||
3475 | 78 | def apply_locale(self, locale, out_fn=None, keyname='LANG'): | ||
3476 | 79 | """Apply specified locale to system, regenerate if specified locale | ||
3477 | 80 | differs from system default.""" | ||
3478 | 66 | if not out_fn: | 81 | if not out_fn: |
3479 | 67 | out_fn = LOCALE_CONF_FN | 82 | out_fn = LOCALE_CONF_FN |
3481 | 68 | apply_locale(locale, out_fn) | 83 | |
3482 | 84 | if not locale: | ||
3483 | 85 | raise ValueError('Failed to provide locale value.') | ||
3484 | 86 | |||
3485 | 87 | # Only call locale regeneration if needed | ||
3486 | 88 | # Update system locale config with specified locale if needed | ||
3487 | 89 | distro_locale = self.get_locale() | ||
3488 | 90 | conf_fn_exists = os.path.exists(out_fn) | ||
3489 | 91 | sys_locale_unset = False if self.system_locale else True | ||
3490 | 92 | need_regen = (locale.lower() != distro_locale.lower() or | ||
3491 | 93 | not conf_fn_exists or sys_locale_unset) | ||
3492 | 94 | need_conf = not conf_fn_exists or need_regen or sys_locale_unset | ||
3493 | 95 | |||
3494 | 96 | if need_regen: | ||
3495 | 97 | regenerate_locale(locale, out_fn, keyname=keyname) | ||
3496 | 98 | else: | ||
3497 | 99 | LOG.debug( | ||
3498 | 100 | "System has '%s=%s' requested '%s', skipping regeneration.", | ||
3499 | 101 | keyname, self.system_locale, locale) | ||
3500 | 102 | |||
3501 | 103 | if need_conf: | ||
3502 | 104 | update_locale_conf(locale, out_fn, keyname=keyname) | ||
3503 | 105 | # once we've updated the system config, invalidate cache | ||
3504 | 106 | self.system_locale = None | ||
3505 | 69 | 107 | ||
3506 | 70 | def install_packages(self, pkglist): | 108 | def install_packages(self, pkglist): |
3507 | 71 | self.update_package_sources() | 109 | self.update_package_sources() |
3508 | @@ -218,37 +256,47 @@ def _maybe_remove_legacy_eth0(path="/etc/network/interfaces.d/eth0.cfg"): | |||
3509 | 218 | LOG.warning(msg) | 256 | LOG.warning(msg) |
3510 | 219 | 257 | ||
3511 | 220 | 258 | ||
3526 | 221 | def apply_locale(locale, sys_path=LOCALE_CONF_FN, keyname='LANG'): | 259 | def read_system_locale(sys_path=LOCALE_CONF_FN, keyname='LANG'): |
3527 | 222 | """Apply the locale. | 260 | """Read system default locale setting, if present""" |
3528 | 223 | 261 | sys_val = "" | |
3515 | 224 | Run locale-gen for the provided locale and set the default | ||
3516 | 225 | system variable `keyname` appropriately in the provided `sys_path`. | ||
3517 | 226 | |||
3518 | 227 | If sys_path indicates that `keyname` is already set to `locale` | ||
3519 | 228 | then no changes will be made and locale-gen not called. | ||
3520 | 229 | This allows images built with a locale already generated to not re-run | ||
3521 | 230 | locale-gen which can be very heavy. | ||
3522 | 231 | """ | ||
3523 | 232 | if not locale: | ||
3524 | 233 | raise ValueError('Failed to provide locale value.') | ||
3525 | 234 | |||
3529 | 235 | if not sys_path: | 262 | if not sys_path: |
3530 | 236 | raise ValueError('Invalid path: %s' % sys_path) | 263 | raise ValueError('Invalid path: %s' % sys_path) |
3531 | 237 | 264 | ||
3532 | 238 | if os.path.exists(sys_path): | 265 | if os.path.exists(sys_path): |
3533 | 239 | locale_content = util.load_file(sys_path) | 266 | locale_content = util.load_file(sys_path) |
3534 | 240 | # if LANG isn't present, regen | ||
3535 | 241 | sys_defaults = util.load_shell_content(locale_content) | 267 | sys_defaults = util.load_shell_content(locale_content) |
3536 | 242 | sys_val = sys_defaults.get(keyname, "") | 268 | sys_val = sys_defaults.get(keyname, "") |
3537 | 243 | if sys_val.lower() == locale.lower(): | ||
3538 | 244 | LOG.debug( | ||
3539 | 245 | "System has '%s=%s' requested '%s', skipping regeneration.", | ||
3540 | 246 | keyname, sys_val, locale) | ||
3541 | 247 | return | ||
3542 | 248 | 269 | ||
3544 | 249 | util.subp(['locale-gen', locale], capture=False) | 270 | return sys_val |
3545 | 271 | |||
3546 | 272 | |||
3547 | 273 | def update_locale_conf(locale, sys_path, keyname='LANG'): | ||
3548 | 274 | """Update system locale config""" | ||
3549 | 275 | LOG.debug('Updating %s with locale setting %s=%s', | ||
3550 | 276 | sys_path, keyname, locale) | ||
3551 | 250 | util.subp( | 277 | util.subp( |
3552 | 251 | ['update-locale', '--locale-file=' + sys_path, | 278 | ['update-locale', '--locale-file=' + sys_path, |
3553 | 252 | '%s=%s' % (keyname, locale)], capture=False) | 279 | '%s=%s' % (keyname, locale)], capture=False) |
3554 | 253 | 280 | ||
3555 | 281 | |||
3556 | 282 | def regenerate_locale(locale, sys_path, keyname='LANG'): | ||
3557 | 283 | """ | ||
3558 | 284 | Run locale-gen for the provided locale and set the default | ||
3559 | 285 | system variable `keyname` appropriately in the provided `sys_path`. | ||
3560 | 286 | |||
3561 | 287 | """ | ||
3562 | 288 | # special case for locales which do not require regen | ||
3563 | 289 | # % locale -a | ||
3564 | 290 | # C | ||
3565 | 291 | # C.UTF-8 | ||
3566 | 292 | # POSIX | ||
3567 | 293 | if locale.lower() in ['c', 'c.utf-8', 'posix']: | ||
3568 | 294 | LOG.debug('%s=%s does not require rengeneration', keyname, locale) | ||
3569 | 295 | return | ||
3570 | 296 | |||
3571 | 297 | # finally, trigger regeneration | ||
3572 | 298 | LOG.debug('Generating locales for %s', locale) | ||
3573 | 299 | util.subp(['locale-gen', locale], capture=False) | ||
3574 | 300 | |||
3575 | 301 | |||
3576 | 254 | # vi: ts=4 expandtab | 302 | # vi: ts=4 expandtab |
3577 | diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py | |||
3578 | 255 | new file mode 100644 | 303 | new file mode 100644 |
3579 | index 0000000..a219e9f | |||
3580 | --- /dev/null | |||
3581 | +++ b/cloudinit/distros/opensuse.py | |||
3582 | @@ -0,0 +1,212 @@ | |||
3583 | 1 | # Copyright (C) 2017 SUSE LLC | ||
3584 | 2 | # Copyright (C) 2013 Hewlett-Packard Development Company, L.P. | ||
3585 | 3 | # | ||
3586 | 4 | # Author: Robert Schweikert <rjschwei@suse.com> | ||
3587 | 5 | # Author: Juerg Haefliger <juerg.haefliger@hp.com> | ||
3588 | 6 | # | ||
3589 | 7 | # Leaning very heavily on the RHEL and Debian implementation | ||
3590 | 8 | # | ||
3591 | 9 | # This file is part of cloud-init. See LICENSE file for license information. | ||
3592 | 10 | |||
3593 | 11 | from cloudinit import distros | ||
3594 | 12 | |||
3595 | 13 | from cloudinit.distros.parsers.hostname import HostnameConf | ||
3596 | 14 | |||
3597 | 15 | from cloudinit import helpers | ||
3598 | 16 | from cloudinit import log as logging | ||
3599 | 17 | from cloudinit import util | ||
3600 | 18 | |||
3601 | 19 | from cloudinit.distros import net_util | ||
3602 | 20 | from cloudinit.distros import rhel_util as rhutil | ||
3603 | 21 | from cloudinit.settings import PER_INSTANCE | ||
3604 | 22 | |||
3605 | 23 | LOG = logging.getLogger(__name__) | ||
3606 | 24 | |||
3607 | 25 | |||
3608 | 26 | class Distro(distros.Distro): | ||
3609 | 27 | clock_conf_fn = '/etc/sysconfig/clock' | ||
3610 | 28 | hostname_conf_fn = '/etc/HOSTNAME' | ||
3611 | 29 | init_cmd = ['service'] | ||
3612 | 30 | locale_conf_fn = '/etc/sysconfig/language' | ||
3613 | 31 | network_conf_fn = '/etc/sysconfig/network' | ||
3614 | 32 | network_script_tpl = '/etc/sysconfig/network/ifcfg-%s' | ||
3615 | 33 | resolve_conf_fn = '/etc/resolv.conf' | ||
3616 | 34 | route_conf_tpl = '/etc/sysconfig/network/ifroute-%s' | ||
3617 | 35 | systemd_hostname_conf_fn = '/etc/hostname' | ||
3618 | 36 | systemd_locale_conf_fn = '/etc/locale.conf' | ||
3619 | 37 | tz_local_fn = '/etc/localtime' | ||
3620 | 38 | |||
3621 | 39 | def __init__(self, name, cfg, paths): | ||
3622 | 40 | distros.Distro.__init__(self, name, cfg, paths) | ||
3623 | 41 | self._runner = helpers.Runners(paths) | ||
3624 | 42 | self.osfamily = 'suse' | ||
3625 | 43 | cfg['ssh_svcname'] = 'sshd' | ||
3626 | 44 | if self.uses_systemd(): | ||
3627 | 45 | self.init_cmd = ['systemctl'] | ||
3628 | 46 | cfg['ssh_svcname'] = 'sshd.service' | ||
3629 | 47 | |||
3630 | 48 | def apply_locale(self, locale, out_fn=None): | ||
3631 | 49 | if self.uses_systemd(): | ||
3632 | 50 | if not out_fn: | ||
3633 | 51 | out_fn = self.systemd_locale_conf_fn | ||
3634 | 52 | locale_cfg = {'LANG': locale} | ||
3635 | 53 | else: | ||
3636 | 54 | if not out_fn: | ||
3637 | 55 | out_fn = self.locale_conf_fn | ||
3638 | 56 | locale_cfg = {'RC_LANG': locale} | ||
3639 | 57 | rhutil.update_sysconfig_file(out_fn, locale_cfg) | ||
3640 | 58 | |||
3641 | 59 | def install_packages(self, pkglist): | ||
3642 | 60 | self.package_command( | ||
3643 | 61 | 'install', | ||
3644 | 62 | args='--auto-agree-with-licenses', | ||
3645 | 63 | pkgs=pkglist | ||
3646 | 64 | ) | ||
3647 | 65 | |||
3648 | 66 | def package_command(self, command, args=None, pkgs=None): | ||
3649 | 67 | if pkgs is None: | ||
3650 | 68 | pkgs = [] | ||
3651 | 69 | |||
3652 | 70 | cmd = ['zypper'] | ||
3653 | 71 | # No user interaction possible, enable non-interactive mode | ||
3654 | 72 | cmd.append('--non-interactive') | ||
3655 | 73 | |||
3656 | 74 | # Command is the operation, such as install | ||
3657 | 75 | if command == 'upgrade': | ||
3658 | 76 | command = 'update' | ||
3659 | 77 | cmd.append(command) | ||
3660 | 78 | |||
3661 | 79 | # args are the arguments to the command, not global options | ||
3662 | 80 | if args and isinstance(args, str): | ||
3663 | 81 | cmd.append(args) | ||
3664 | 82 | elif args and isinstance(args, list): | ||
3665 | 83 | cmd.extend(args) | ||
3666 | 84 | |||
3667 | 85 | pkglist = util.expand_package_list('%s-%s', pkgs) | ||
3668 | 86 | cmd.extend(pkglist) | ||
3669 | 87 | |||
3670 | 88 | # Allow the output of this to flow outwards (ie not be captured) | ||
3671 | 89 | util.subp(cmd, capture=False) | ||
3672 | 90 | |||
3673 | 91 | def set_timezone(self, tz): | ||
3674 | 92 | tz_file = self._find_tz_file(tz) | ||
3675 | 93 | if self.uses_systemd(): | ||
3676 | 94 | # Currently, timedatectl complains if invoked during startup | ||
3677 | 95 | # so for compatibility, create the link manually. | ||
3678 | 96 | util.del_file(self.tz_local_fn) | ||
3679 | 97 | util.sym_link(tz_file, self.tz_local_fn) | ||
3680 | 98 | else: | ||
3681 | 99 | # Adjust the sysconfig clock zone setting | ||
3682 | 100 | clock_cfg = { | ||
3683 | 101 | 'TIMEZONE': str(tz), | ||
3684 | 102 | } | ||
3685 | 103 | rhutil.update_sysconfig_file(self.clock_conf_fn, clock_cfg) | ||
3686 | 104 | # This ensures that the correct tz will be used for the system | ||
3687 | 105 | util.copy(tz_file, self.tz_local_fn) | ||
3688 | 106 | |||
3689 | 107 | def update_package_sources(self): | ||
3690 | 108 | self._runner.run("update-sources", self.package_command, | ||
3691 | 109 | ['refresh'], freq=PER_INSTANCE) | ||
3692 | 110 | |||
3693 | 111 | def _bring_up_interfaces(self, device_names): | ||
3694 | 112 | if device_names and 'all' in device_names: | ||
3695 | 113 | raise RuntimeError(('Distro %s can not translate ' | ||
3696 | 114 | 'the device name "all"') % (self.name)) | ||
3697 | 115 | return distros.Distro._bring_up_interfaces(self, device_names) | ||
3698 | 116 | |||
3699 | 117 | def _read_hostname(self, filename, default=None): | ||
3700 | 118 | if self.uses_systemd() and filename.endswith('/previous-hostname'): | ||
3701 | 119 | return util.load_file(filename).strip() | ||
3702 | 120 | elif self.uses_systemd(): | ||
3703 | 121 | (out, _err) = util.subp(['hostname']) | ||
3704 | 122 | if len(out): | ||
3705 | 123 | return out | ||
3706 | 124 | else: | ||
3707 | 125 | return default | ||
3708 | 126 | else: | ||
3709 | 127 | try: | ||
3710 | 128 | conf = self._read_hostname_conf(filename) | ||
3711 | 129 | hostname = conf.hostname | ||
3712 | 130 | except IOError: | ||
3713 | 131 | pass | ||
3714 | 132 | if not hostname: | ||
3715 | 133 | return default | ||
3716 | 134 | return hostname | ||
3717 | 135 | |||
3718 | 136 | def _read_hostname_conf(self, filename): | ||
3719 | 137 | conf = HostnameConf(util.load_file(filename)) | ||
3720 | 138 | conf.parse() | ||
3721 | 139 | return conf | ||
3722 | 140 | |||
3723 | 141 | def _read_system_hostname(self): | ||
3724 | 142 | if self.uses_systemd(): | ||
3725 | 143 | host_fn = self.systemd_hostname_conf_fn | ||
3726 | 144 | else: | ||
3727 | 145 | host_fn = self.hostname_conf_fn | ||
3728 | 146 | return (host_fn, self._read_hostname(host_fn)) | ||
3729 | 147 | |||
3730 | 148 | def _write_hostname(self, hostname, out_fn): | ||
3731 | 149 | if self.uses_systemd() and out_fn.endswith('/previous-hostname'): | ||
3732 | 150 | util.write_file(out_fn, hostname) | ||
3733 | 151 | elif self.uses_systemd(): | ||
3734 | 152 | util.subp(['hostnamectl', 'set-hostname', str(hostname)]) | ||
3735 | 153 | else: | ||
3736 | 154 | conf = None | ||
3737 | 155 | try: | ||
3738 | 156 | # Try to update the previous one | ||
3739 | 157 | # so lets see if we can read it first. | ||
3740 | 158 | conf = self._read_hostname_conf(out_fn) | ||
3741 | 159 | except IOError: | ||
3742 | 160 | pass | ||
3743 | 161 | if not conf: | ||
3744 | 162 | conf = HostnameConf('') | ||
3745 | 163 | conf.set_hostname(hostname) | ||
3746 | 164 | util.write_file(out_fn, str(conf), 0o644) | ||
3747 | 165 | |||
3748 | 166 | def _write_network(self, settings): | ||
3749 | 167 | # Convert debian settings to ifcfg format | ||
3750 | 168 | entries = net_util.translate_network(settings) | ||
3751 | 169 | LOG.debug("Translated ubuntu style network settings %s into %s", | ||
3752 | 170 | settings, entries) | ||
3753 | 171 | # Make the intermediate format as the suse format... | ||
3754 | 172 | nameservers = [] | ||
3755 | 173 | searchservers = [] | ||
3756 | 174 | dev_names = entries.keys() | ||
3757 | 175 | for (dev, info) in entries.items(): | ||
3758 | 176 | net_fn = self.network_script_tpl % (dev) | ||
3759 | 177 | route_fn = self.route_conf_tpl % (dev) | ||
3760 | 178 | mode = None | ||
3761 | 179 | if info.get('auto', None): | ||
3762 | 180 | mode = 'auto' | ||
3763 | 181 | else: | ||
3764 | 182 | mode = 'manual' | ||
3765 | 183 | bootproto = info.get('bootproto', None) | ||
3766 | 184 | gateway = info.get('gateway', None) | ||
3767 | 185 | net_cfg = { | ||
3768 | 186 | 'BOOTPROTO': bootproto, | ||
3769 | 187 | 'BROADCAST': info.get('broadcast'), | ||
3770 | 188 | 'GATEWAY': gateway, | ||
3771 | 189 | 'IPADDR': info.get('address'), | ||
3772 | 190 | 'LLADDR': info.get('hwaddress'), | ||
3773 | 191 | 'NETMASK': info.get('netmask'), | ||
3774 | 192 | 'STARTMODE': mode, | ||
3775 | 193 | 'USERCONTROL': 'no' | ||
3776 | 194 | } | ||
3777 | 195 | if dev != 'lo': | ||
3778 | 196 | net_cfg['ETHTOOL_OPTIONS'] = '' | ||
3779 | 197 | else: | ||
3780 | 198 | net_cfg['FIREWALL'] = 'no' | ||
3781 | 199 | rhutil.update_sysconfig_file(net_fn, net_cfg, True) | ||
3782 | 200 | if gateway and bootproto == 'static': | ||
3783 | 201 | default_route = 'default %s' % gateway | ||
3784 | 202 | util.write_file(route_fn, default_route, 0o644) | ||
3785 | 203 | if 'dns-nameservers' in info: | ||
3786 | 204 | nameservers.extend(info['dns-nameservers']) | ||
3787 | 205 | if 'dns-search' in info: | ||
3788 | 206 | searchservers.extend(info['dns-search']) | ||
3789 | 207 | if nameservers or searchservers: | ||
3790 | 208 | rhutil.update_resolve_conf_file(self.resolve_conf_fn, | ||
3791 | 209 | nameservers, searchservers) | ||
3792 | 210 | return dev_names | ||
3793 | 211 | |||
3794 | 212 | # vi: ts=4 expandtab | ||
3795 | diff --git a/cloudinit/distros/sles.py b/cloudinit/distros/sles.py | |||
3796 | index dbec2ed..6e336cb 100644 | |||
3797 | --- a/cloudinit/distros/sles.py | |||
3798 | +++ b/cloudinit/distros/sles.py | |||
3799 | @@ -1,167 +1,17 @@ | |||
3801 | 1 | # Copyright (C) 2013 Hewlett-Packard Development Company, L.P. | 1 | # Copyright (C) 2017 SUSE LLC |
3802 | 2 | # | 2 | # |
3804 | 3 | # Author: Juerg Haefliger <juerg.haefliger@hp.com> | 3 | # Author: Robert Schweikert <rjschwei@suse.com> |
3805 | 4 | # | 4 | # |
3806 | 5 | # This file is part of cloud-init. See LICENSE file for license information. | 5 | # This file is part of cloud-init. See LICENSE file for license information. |
3807 | 6 | 6 | ||
3809 | 7 | from cloudinit import distros | 7 | from cloudinit.distros import opensuse |
3810 | 8 | 8 | ||
3811 | 9 | from cloudinit.distros.parsers.hostname import HostnameConf | ||
3812 | 10 | |||
3813 | 11 | from cloudinit import helpers | ||
3814 | 12 | from cloudinit import log as logging | 9 | from cloudinit import log as logging |
3815 | 13 | from cloudinit import util | ||
3816 | 14 | |||
3817 | 15 | from cloudinit.distros import net_util | ||
3818 | 16 | from cloudinit.distros import rhel_util | ||
3819 | 17 | from cloudinit.settings import PER_INSTANCE | ||
3820 | 18 | 10 | ||
3821 | 19 | LOG = logging.getLogger(__name__) | 11 | LOG = logging.getLogger(__name__) |
3822 | 20 | 12 | ||
3823 | 21 | 13 | ||
3968 | 22 | class Distro(distros.Distro): | 14 | class Distro(opensuse.Distro): |
3969 | 23 | clock_conf_fn = '/etc/sysconfig/clock' | 15 | pass |
3826 | 24 | locale_conf_fn = '/etc/sysconfig/language' | ||
3827 | 25 | network_conf_fn = '/etc/sysconfig/network' | ||
3828 | 26 | hostname_conf_fn = '/etc/HOSTNAME' | ||
3829 | 27 | network_script_tpl = '/etc/sysconfig/network/ifcfg-%s' | ||
3830 | 28 | resolve_conf_fn = '/etc/resolv.conf' | ||
3831 | 29 | tz_local_fn = '/etc/localtime' | ||
3832 | 30 | |||
3833 | 31 | def __init__(self, name, cfg, paths): | ||
3834 | 32 | distros.Distro.__init__(self, name, cfg, paths) | ||
3835 | 33 | # This will be used to restrict certain | ||
3836 | 34 | # calls from repeatedly happening (when they | ||
3837 | 35 | # should only happen say once per instance...) | ||
3838 | 36 | self._runner = helpers.Runners(paths) | ||
3839 | 37 | self.osfamily = 'suse' | ||
3840 | 38 | |||
3841 | 39 | def install_packages(self, pkglist): | ||
3842 | 40 | self.package_command('install', args='-l', pkgs=pkglist) | ||
3843 | 41 | |||
3844 | 42 | def _write_network(self, settings): | ||
3845 | 43 | # Convert debian settings to ifcfg format | ||
3846 | 44 | entries = net_util.translate_network(settings) | ||
3847 | 45 | LOG.debug("Translated ubuntu style network settings %s into %s", | ||
3848 | 46 | settings, entries) | ||
3849 | 47 | # Make the intermediate format as the suse format... | ||
3850 | 48 | nameservers = [] | ||
3851 | 49 | searchservers = [] | ||
3852 | 50 | dev_names = entries.keys() | ||
3853 | 51 | for (dev, info) in entries.items(): | ||
3854 | 52 | net_fn = self.network_script_tpl % (dev) | ||
3855 | 53 | mode = info.get('auto') | ||
3856 | 54 | if mode and mode.lower() == 'true': | ||
3857 | 55 | mode = 'auto' | ||
3858 | 56 | else: | ||
3859 | 57 | mode = 'manual' | ||
3860 | 58 | net_cfg = { | ||
3861 | 59 | 'BOOTPROTO': info.get('bootproto'), | ||
3862 | 60 | 'BROADCAST': info.get('broadcast'), | ||
3863 | 61 | 'GATEWAY': info.get('gateway'), | ||
3864 | 62 | 'IPADDR': info.get('address'), | ||
3865 | 63 | 'LLADDR': info.get('hwaddress'), | ||
3866 | 64 | 'NETMASK': info.get('netmask'), | ||
3867 | 65 | 'STARTMODE': mode, | ||
3868 | 66 | 'USERCONTROL': 'no' | ||
3869 | 67 | } | ||
3870 | 68 | if dev != 'lo': | ||
3871 | 69 | net_cfg['ETHERDEVICE'] = dev | ||
3872 | 70 | net_cfg['ETHTOOL_OPTIONS'] = '' | ||
3873 | 71 | else: | ||
3874 | 72 | net_cfg['FIREWALL'] = 'no' | ||
3875 | 73 | rhel_util.update_sysconfig_file(net_fn, net_cfg, True) | ||
3876 | 74 | if 'dns-nameservers' in info: | ||
3877 | 75 | nameservers.extend(info['dns-nameservers']) | ||
3878 | 76 | if 'dns-search' in info: | ||
3879 | 77 | searchservers.extend(info['dns-search']) | ||
3880 | 78 | if nameservers or searchservers: | ||
3881 | 79 | rhel_util.update_resolve_conf_file(self.resolve_conf_fn, | ||
3882 | 80 | nameservers, searchservers) | ||
3883 | 81 | return dev_names | ||
3884 | 82 | |||
3885 | 83 | def apply_locale(self, locale, out_fn=None): | ||
3886 | 84 | if not out_fn: | ||
3887 | 85 | out_fn = self.locale_conf_fn | ||
3888 | 86 | locale_cfg = { | ||
3889 | 87 | 'RC_LANG': locale, | ||
3890 | 88 | } | ||
3891 | 89 | rhel_util.update_sysconfig_file(out_fn, locale_cfg) | ||
3892 | 90 | |||
3893 | 91 | def _write_hostname(self, hostname, out_fn): | ||
3894 | 92 | conf = None | ||
3895 | 93 | try: | ||
3896 | 94 | # Try to update the previous one | ||
3897 | 95 | # so lets see if we can read it first. | ||
3898 | 96 | conf = self._read_hostname_conf(out_fn) | ||
3899 | 97 | except IOError: | ||
3900 | 98 | pass | ||
3901 | 99 | if not conf: | ||
3902 | 100 | conf = HostnameConf('') | ||
3903 | 101 | conf.set_hostname(hostname) | ||
3904 | 102 | util.write_file(out_fn, str(conf), 0o644) | ||
3905 | 103 | |||
3906 | 104 | def _read_system_hostname(self): | ||
3907 | 105 | host_fn = self.hostname_conf_fn | ||
3908 | 106 | return (host_fn, self._read_hostname(host_fn)) | ||
3909 | 107 | |||
3910 | 108 | def _read_hostname_conf(self, filename): | ||
3911 | 109 | conf = HostnameConf(util.load_file(filename)) | ||
3912 | 110 | conf.parse() | ||
3913 | 111 | return conf | ||
3914 | 112 | |||
3915 | 113 | def _read_hostname(self, filename, default=None): | ||
3916 | 114 | hostname = None | ||
3917 | 115 | try: | ||
3918 | 116 | conf = self._read_hostname_conf(filename) | ||
3919 | 117 | hostname = conf.hostname | ||
3920 | 118 | except IOError: | ||
3921 | 119 | pass | ||
3922 | 120 | if not hostname: | ||
3923 | 121 | return default | ||
3924 | 122 | return hostname | ||
3925 | 123 | |||
3926 | 124 | def _bring_up_interfaces(self, device_names): | ||
3927 | 125 | if device_names and 'all' in device_names: | ||
3928 | 126 | raise RuntimeError(('Distro %s can not translate ' | ||
3929 | 127 | 'the device name "all"') % (self.name)) | ||
3930 | 128 | return distros.Distro._bring_up_interfaces(self, device_names) | ||
3931 | 129 | |||
3932 | 130 | def set_timezone(self, tz): | ||
3933 | 131 | tz_file = self._find_tz_file(tz) | ||
3934 | 132 | # Adjust the sysconfig clock zone setting | ||
3935 | 133 | clock_cfg = { | ||
3936 | 134 | 'TIMEZONE': str(tz), | ||
3937 | 135 | } | ||
3938 | 136 | rhel_util.update_sysconfig_file(self.clock_conf_fn, clock_cfg) | ||
3939 | 137 | # This ensures that the correct tz will be used for the system | ||
3940 | 138 | util.copy(tz_file, self.tz_local_fn) | ||
3941 | 139 | |||
3942 | 140 | def package_command(self, command, args=None, pkgs=None): | ||
3943 | 141 | if pkgs is None: | ||
3944 | 142 | pkgs = [] | ||
3945 | 143 | |||
3946 | 144 | cmd = ['zypper'] | ||
3947 | 145 | # No user interaction possible, enable non-interactive mode | ||
3948 | 146 | cmd.append('--non-interactive') | ||
3949 | 147 | |||
3950 | 148 | # Command is the operation, such as install | ||
3951 | 149 | cmd.append(command) | ||
3952 | 150 | |||
3953 | 151 | # args are the arguments to the command, not global options | ||
3954 | 152 | if args and isinstance(args, str): | ||
3955 | 153 | cmd.append(args) | ||
3956 | 154 | elif args and isinstance(args, list): | ||
3957 | 155 | cmd.extend(args) | ||
3958 | 156 | |||
3959 | 157 | pkglist = util.expand_package_list('%s-%s', pkgs) | ||
3960 | 158 | cmd.extend(pkglist) | ||
3961 | 159 | |||
3962 | 160 | # Allow the output of this to flow outwards (ie not be captured) | ||
3963 | 161 | util.subp(cmd, capture=False) | ||
3964 | 162 | |||
3965 | 163 | def update_package_sources(self): | ||
3966 | 164 | self._runner.run("update-sources", self.package_command, | ||
3967 | 165 | ['refresh'], freq=PER_INSTANCE) | ||
3970 | 166 | 16 | ||
3971 | 167 | # vi: ts=4 expandtab | 17 | # vi: ts=4 expandtab |
3972 | diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py | |||
3973 | index f01021a..1979cd9 100644 | |||
3974 | --- a/cloudinit/helpers.py | |||
3975 | +++ b/cloudinit/helpers.py | |||
3976 | @@ -13,7 +13,7 @@ from time import time | |||
3977 | 13 | import contextlib | 13 | import contextlib |
3978 | 14 | import os | 14 | import os |
3979 | 15 | 15 | ||
3981 | 16 | import six | 16 | from six import StringIO |
3982 | 17 | from six.moves.configparser import ( | 17 | from six.moves.configparser import ( |
3983 | 18 | NoSectionError, NoOptionError, RawConfigParser) | 18 | NoSectionError, NoOptionError, RawConfigParser) |
3984 | 19 | 19 | ||
3985 | @@ -441,12 +441,12 @@ class DefaultingConfigParser(RawConfigParser): | |||
3986 | 441 | 441 | ||
3987 | 442 | def stringify(self, header=None): | 442 | def stringify(self, header=None): |
3988 | 443 | contents = '' | 443 | contents = '' |
3995 | 444 | with six.StringIO() as outputstream: | 444 | outputstream = StringIO() |
3996 | 445 | self.write(outputstream) | 445 | self.write(outputstream) |
3997 | 446 | outputstream.flush() | 446 | outputstream.flush() |
3998 | 447 | contents = outputstream.getvalue() | 447 | contents = outputstream.getvalue() |
3999 | 448 | if header: | 448 | if header: |
4000 | 449 | contents = "\n".join([header, contents]) | 449 | contents = '\n'.join([header, contents, '']) |
4001 | 450 | return contents | 450 | return contents |
4002 | 451 | 451 | ||
4003 | 452 | # vi: ts=4 expandtab | 452 | # vi: ts=4 expandtab |
4004 | diff --git a/cloudinit/log.py b/cloudinit/log.py | |||
4005 | index 3861709..1d75c9f 100644 | |||
4006 | --- a/cloudinit/log.py | |||
4007 | +++ b/cloudinit/log.py | |||
4008 | @@ -19,6 +19,8 @@ import sys | |||
4009 | 19 | import six | 19 | import six |
4010 | 20 | from six import StringIO | 20 | from six import StringIO |
4011 | 21 | 21 | ||
4012 | 22 | import time | ||
4013 | 23 | |||
4014 | 22 | # Logging levels for easy access | 24 | # Logging levels for easy access |
4015 | 23 | CRITICAL = logging.CRITICAL | 25 | CRITICAL = logging.CRITICAL |
4016 | 24 | FATAL = logging.FATAL | 26 | FATAL = logging.FATAL |
4017 | @@ -32,6 +34,9 @@ NOTSET = logging.NOTSET | |||
4018 | 32 | # Default basic format | 34 | # Default basic format |
4019 | 33 | DEF_CON_FORMAT = '%(asctime)s - %(filename)s[%(levelname)s]: %(message)s' | 35 | DEF_CON_FORMAT = '%(asctime)s - %(filename)s[%(levelname)s]: %(message)s' |
4020 | 34 | 36 | ||
4021 | 37 | # Always format logging timestamps as UTC time | ||
4022 | 38 | logging.Formatter.converter = time.gmtime | ||
4023 | 39 | |||
4024 | 35 | 40 | ||
4025 | 36 | def setupBasicLogging(level=DEBUG): | 41 | def setupBasicLogging(level=DEBUG): |
4026 | 37 | root = logging.getLogger() | 42 | root = logging.getLogger() |
4027 | diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py | |||
4028 | index 46cb9c8..a1b0db1 100644 | |||
4029 | --- a/cloudinit/net/__init__.py | |||
4030 | +++ b/cloudinit/net/__init__.py | |||
4031 | @@ -175,13 +175,8 @@ def is_disabled_cfg(cfg): | |||
4032 | 175 | return cfg.get('config') == "disabled" | 175 | return cfg.get('config') == "disabled" |
4033 | 176 | 176 | ||
4034 | 177 | 177 | ||
4042 | 178 | def generate_fallback_config(blacklist_drivers=None, config_driver=None): | 178 | def find_fallback_nic(blacklist_drivers=None): |
4043 | 179 | """Determine which attached net dev is most likely to have a connection and | 179 | """Return the name of the 'fallback' network device.""" |
4037 | 180 | generate network state to run dhcp on that interface""" | ||
4038 | 181 | |||
4039 | 182 | if not config_driver: | ||
4040 | 183 | config_driver = False | ||
4041 | 184 | |||
4044 | 185 | if not blacklist_drivers: | 180 | if not blacklist_drivers: |
4045 | 186 | blacklist_drivers = [] | 181 | blacklist_drivers = [] |
4046 | 187 | 182 | ||
4047 | @@ -233,15 +228,24 @@ def generate_fallback_config(blacklist_drivers=None, config_driver=None): | |||
4048 | 233 | if DEFAULT_PRIMARY_INTERFACE in names: | 228 | if DEFAULT_PRIMARY_INTERFACE in names: |
4049 | 234 | names.remove(DEFAULT_PRIMARY_INTERFACE) | 229 | names.remove(DEFAULT_PRIMARY_INTERFACE) |
4050 | 235 | names.insert(0, DEFAULT_PRIMARY_INTERFACE) | 230 | names.insert(0, DEFAULT_PRIMARY_INTERFACE) |
4053 | 236 | target_name = None | 231 | |
4054 | 237 | target_mac = None | 232 | # pick the first that has a mac-address |
4055 | 238 | for name in names: | 233 | for name in names: |
4062 | 239 | mac = read_sys_net_safe(name, 'address') | 234 | if read_sys_net_safe(name, 'address'): |
4063 | 240 | if mac: | 235 | return name |
4064 | 241 | target_name = name | 236 | return None |
4065 | 242 | target_mac = mac | 237 | |
4066 | 243 | break | 238 | |
4067 | 244 | if target_mac and target_name: | 239 | def generate_fallback_config(blacklist_drivers=None, config_driver=None): |
4068 | 240 | """Determine which attached net dev is most likely to have a connection and | ||
4069 | 241 | generate network state to run dhcp on that interface""" | ||
4070 | 242 | |||
4071 | 243 | if not config_driver: | ||
4072 | 244 | config_driver = False | ||
4073 | 245 | |||
4074 | 246 | target_name = find_fallback_nic(blacklist_drivers=blacklist_drivers) | ||
4075 | 247 | if target_name: | ||
4076 | 248 | target_mac = read_sys_net_safe(target_name, 'address') | ||
4077 | 245 | nconf = {'config': [], 'version': 1} | 249 | nconf = {'config': [], 'version': 1} |
4078 | 246 | cfg = {'type': 'physical', 'name': target_name, | 250 | cfg = {'type': 'physical', 'name': target_name, |
4079 | 247 | 'mac_address': target_mac, 'subnets': [{'type': 'dhcp'}]} | 251 | 'mac_address': target_mac, 'subnets': [{'type': 'dhcp'}]} |
4080 | @@ -511,21 +515,7 @@ def get_interfaces_by_mac(): | |||
4081 | 511 | 515 | ||
4082 | 512 | Bridges and any devices that have a 'stolen' mac are excluded.""" | 516 | Bridges and any devices that have a 'stolen' mac are excluded.""" |
4083 | 513 | ret = {} | 517 | ret = {} |
4099 | 514 | devs = get_devicelist() | 518 | for name, mac, _driver, _devid in get_interfaces(): |
4085 | 515 | empty_mac = '00:00:00:00:00:00' | ||
4086 | 516 | for name in devs: | ||
4087 | 517 | if not interface_has_own_mac(name): | ||
4088 | 518 | continue | ||
4089 | 519 | if is_bridge(name): | ||
4090 | 520 | continue | ||
4091 | 521 | if is_vlan(name): | ||
4092 | 522 | continue | ||
4093 | 523 | mac = get_interface_mac(name) | ||
4094 | 524 | # some devices may not have a mac (tun0) | ||
4095 | 525 | if not mac: | ||
4096 | 526 | continue | ||
4097 | 527 | if mac == empty_mac and name != 'lo': | ||
4098 | 528 | continue | ||
4100 | 529 | if mac in ret: | 519 | if mac in ret: |
4101 | 530 | raise RuntimeError( | 520 | raise RuntimeError( |
4102 | 531 | "duplicate mac found! both '%s' and '%s' have mac '%s'" % | 521 | "duplicate mac found! both '%s' and '%s' have mac '%s'" % |
4103 | @@ -599,6 +589,7 @@ class EphemeralIPv4Network(object): | |||
4104 | 599 | self._bringup_router() | 589 | self._bringup_router() |
4105 | 600 | 590 | ||
4106 | 601 | def __exit__(self, excp_type, excp_value, excp_traceback): | 591 | def __exit__(self, excp_type, excp_value, excp_traceback): |
4107 | 592 | """Teardown anything we set up.""" | ||
4108 | 602 | for cmd in self.cleanup_cmds: | 593 | for cmd in self.cleanup_cmds: |
4109 | 603 | util.subp(cmd, capture=True) | 594 | util.subp(cmd, capture=True) |
4110 | 604 | 595 | ||
4111 | diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py | |||
4112 | 605 | new file mode 100644 | 596 | new file mode 100644 |
4113 | index 0000000..0cba703 | |||
4114 | --- /dev/null | |||
4115 | +++ b/cloudinit/net/dhcp.py | |||
4116 | @@ -0,0 +1,163 @@ | |||
4117 | 1 | # Copyright (C) 2017 Canonical Ltd. | ||
4118 | 2 | # | ||
4119 | 3 | # Author: Chad Smith <chad.smith@canonical.com> | ||
4120 | 4 | # | ||
4121 | 5 | # This file is part of cloud-init. See LICENSE file for license information. | ||
4122 | 6 | |||
4123 | 7 | import configobj | ||
4124 | 8 | import logging | ||
4125 | 9 | import os | ||
4126 | 10 | import re | ||
4127 | 11 | |||
4128 | 12 | from cloudinit.net import find_fallback_nic, get_devicelist | ||
4129 | 13 | from cloudinit import temp_utils | ||
4130 | 14 | from cloudinit import util | ||
4131 | 15 | from six import StringIO | ||
4132 | 16 | |||
4133 | 17 | LOG = logging.getLogger(__name__) | ||
4134 | 18 | |||
4135 | 19 | NETWORKD_LEASES_DIR = '/run/systemd/netif/leases' | ||
4136 | 20 | |||
4137 | 21 | |||
4138 | 22 | class InvalidDHCPLeaseFileError(Exception): | ||
4139 | 23 | """Raised when parsing an empty or invalid dhcp.leases file. | ||
4140 | 24 | |||
4141 | 25 | Current uses are DataSourceAzure and DataSourceEc2 during ephemeral | ||
4142 | 26 | boot to scrape metadata. | ||
4143 | 27 | """ | ||
4144 | 28 | pass | ||
4145 | 29 | |||
4146 | 30 | |||
4147 | 31 | def maybe_perform_dhcp_discovery(nic=None): | ||
4148 | 32 | """Perform dhcp discovery if nic valid and dhclient command exists. | ||
4149 | 33 | |||
4150 | 34 | If the nic is invalid or undiscoverable or dhclient command is not found, | ||
4151 | 35 | skip dhcp_discovery and return an empty dict. | ||
4152 | 36 | |||
4153 | 37 | @param nic: Name of the network interface we want to run dhclient on. | ||
4154 | 38 | @return: A dict of dhcp options from the dhclient discovery if run, | ||
4155 | 39 | otherwise an empty dict is returned. | ||
4156 | 40 | """ | ||
4157 | 41 | if nic is None: | ||
4158 | 42 | nic = find_fallback_nic() | ||
4159 | 43 | if nic is None: | ||
4160 | 44 | LOG.debug( | ||
4161 | 45 | 'Skip dhcp_discovery: Unable to find fallback nic.') | ||
4162 | 46 | return {} | ||
4163 | 47 | elif nic not in get_devicelist(): | ||
4164 | 48 | LOG.debug( | ||
4165 | 49 | 'Skip dhcp_discovery: nic %s not found in get_devicelist.', nic) | ||
4166 | 50 | return {} | ||
4167 | 51 | dhclient_path = util.which('dhclient') | ||
4168 | 52 | if not dhclient_path: | ||
4169 | 53 | LOG.debug('Skip dhclient configuration: No dhclient command found.') | ||
4170 | 54 | return {} | ||
4171 | 55 | with temp_utils.tempdir(prefix='cloud-init-dhcp-', needs_exe=True) as tdir: | ||
4172 | 56 | # Use /var/tmp because /run/cloud-init/tmp is mounted noexec | ||
4173 | 57 | return dhcp_discovery(dhclient_path, nic, tdir) | ||
4174 | 58 | |||
4175 | 59 | |||
4176 | 60 | def parse_dhcp_lease_file(lease_file): | ||
4177 | 61 | """Parse the given dhcp lease file for the most recent lease. | ||
4178 | 62 | |||
4179 | 63 | Return a dict of dhcp options as key value pairs for the most recent lease | ||
4180 | 64 | block. | ||
4181 | 65 | |||
4182 | 66 | @raises: InvalidDHCPLeaseFileError on empty or unparseable leasefile | ||
4183 | 67 | content. | ||
4184 | 68 | """ | ||
4185 | 69 | lease_regex = re.compile(r"lease {(?P<lease>[^}]*)}\n") | ||
4186 | 70 | dhcp_leases = [] | ||
4187 | 71 | lease_content = util.load_file(lease_file) | ||
4188 | 72 | if len(lease_content) == 0: | ||
4189 | 73 | raise InvalidDHCPLeaseFileError( | ||
4190 | 74 | 'Cannot parse empty dhcp lease file {0}'.format(lease_file)) | ||
4191 | 75 | for lease in lease_regex.findall(lease_content): | ||
4192 | 76 | lease_options = [] | ||
4193 | 77 | for line in lease.split(';'): | ||
4194 | 78 | # Strip newlines, double-quotes and option prefix | ||
4195 | 79 | line = line.strip().replace('"', '').replace('option ', '') | ||
4196 | 80 | if not line: | ||
4197 | 81 | continue | ||
4198 | 82 | lease_options.append(line.split(' ', 1)) | ||
4199 | 83 | dhcp_leases.append(dict(lease_options)) | ||
4200 | 84 | if not dhcp_leases: | ||
4201 | 85 | raise InvalidDHCPLeaseFileError( | ||
4202 | 86 | 'Cannot parse dhcp lease file {0}. No leases found'.format( | ||
4203 | 87 | lease_file)) | ||
4204 | 88 | return dhcp_leases | ||
4205 | 89 | |||
4206 | 90 | |||
4207 | 91 | def dhcp_discovery(dhclient_cmd_path, interface, cleandir): | ||
4208 | 92 | """Run dhclient on the interface without scripts or filesystem artifacts. | ||
4209 | 93 | |||
4210 | 94 | @param dhclient_cmd_path: Full path to the dhclient used. | ||
4211 | 95 | @param interface: Name of the network interface on which to dhclient. | ||
4212 | 96 | @param cleandir: The directory from which to run dhclient as well as store | ||
4213 | 97 | dhcp leases. | ||
4214 | 98 | |||
4215 | 99 | @return: A dict of dhcp options parsed from the dhcp.leases file or empty | ||
4216 | 100 | dict. | ||
4217 | 101 | """ | ||
4218 | 102 | LOG.debug('Performing a dhcp discovery on %s', interface) | ||
4219 | 103 | |||
4220 | 104 | # XXX We copy dhclient out of /sbin/dhclient to avoid dealing with strict | ||
4221 | 105 | # app armor profiles which disallow running dhclient -sf <our-script-file>. | ||
4222 | 106 | # We want to avoid running /sbin/dhclient-script because of side-effects in | ||
4223 | 107 | # /etc/resolv.conf and any other vendor specific scripts in | ||
4224 | 108 | # /etc/dhcp/dhclient*hooks.d. | ||
4225 | 109 | sandbox_dhclient_cmd = os.path.join(cleandir, 'dhclient') | ||
4226 | 110 | util.copy(dhclient_cmd_path, sandbox_dhclient_cmd) | ||
4227 | 111 | pid_file = os.path.join(cleandir, 'dhclient.pid') | ||
4228 | 112 | lease_file = os.path.join(cleandir, 'dhcp.leases') | ||
4229 | 113 | |||
4230 | 114 | # ISC dhclient needs the interface up to send initial discovery packets. | ||
4231 | 115 | # Generally dhclient relies on dhclient-script PREINIT action to bring the | ||
4232 | 116 | # link up before attempting discovery. Since we are using -sf /bin/true, | ||
4233 | 117 | # we need to do that "link up" ourselves first. | ||
4234 | 118 | util.subp(['ip', 'link', 'set', 'dev', interface, 'up'], capture=True) | ||
4235 | 119 | cmd = [sandbox_dhclient_cmd, '-1', '-v', '-lf', lease_file, | ||
4236 | 120 | '-pf', pid_file, interface, '-sf', '/bin/true'] | ||
4237 | 121 | util.subp(cmd, capture=True) | ||
4238 | 122 | return parse_dhcp_lease_file(lease_file) | ||
4239 | 123 | |||
4240 | 124 | |||
4241 | 125 | def networkd_parse_lease(content): | ||
4242 | 126 | """Parse a systemd lease file content as in /run/systemd/netif/leases/ | ||
4243 | 127 | |||
4244 | 128 | Parse this (almost) ini style file even though it says: | ||
4245 | 129 | # This is private data. Do not parse. | ||
4246 | 130 | |||
4247 | 131 | Simply return a dictionary of key/values.""" | ||
4248 | 132 | |||
4249 | 133 | return dict(configobj.ConfigObj(StringIO(content), list_values=False)) | ||
4250 | 134 | |||
4251 | 135 | |||
4252 | 136 | def networkd_load_leases(leases_d=None): | ||
4253 | 137 | """Return a dictionary of dictionaries representing each lease | ||
4254 | 138 | found in leases_d. | ||
4255 | 139 | |||
4256 | 140 | The top level key will be the filename, which is typically the ifindex.""" | ||
4257 | 141 | |||
4258 | 142 | if leases_d is None: | ||
4259 | 143 | leases_d = NETWORKD_LEASES_DIR | ||
4260 | 144 | |||
4261 | 145 | ret = {} | ||
4262 | 146 | if not os.path.isdir(leases_d): | ||
4263 | 147 | return ret | ||
4264 | 148 | for lfile in os.listdir(leases_d): | ||
4265 | 149 | ret[lfile] = networkd_parse_lease( | ||
4266 | 150 | util.load_file(os.path.join(leases_d, lfile))) | ||
4267 | 151 | return ret | ||
4268 | 152 | |||
4269 | 153 | |||
4270 | 154 | def networkd_get_option_from_leases(keyname, leases_d=None): | ||
4271 | 155 | if leases_d is None: | ||
4272 | 156 | leases_d = NETWORKD_LEASES_DIR | ||
4273 | 157 | leases = networkd_load_leases(leases_d=leases_d) | ||
4274 | 158 | for ifindex, data in sorted(leases.items()): | ||
4275 | 159 | if data.get(keyname): | ||
4276 | 160 | return data[keyname] | ||
4277 | 161 | return None | ||
4278 | 162 | |||
4279 | 163 | # vi: ts=4 expandtab | ||
4280 | diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py | |||
4281 | index bb80ec0..c6a71d1 100644 | |||
4282 | --- a/cloudinit/net/eni.py | |||
4283 | +++ b/cloudinit/net/eni.py | |||
4284 | @@ -95,6 +95,9 @@ def _iface_add_attrs(iface, index): | |||
4285 | 95 | ignore_map.append('mac_address') | 95 | ignore_map.append('mac_address') |
4286 | 96 | 96 | ||
4287 | 97 | for key, value in iface.items(): | 97 | for key, value in iface.items(): |
4288 | 98 | # convert bool to string for eni | ||
4289 | 99 | if type(value) == bool: | ||
4290 | 100 | value = 'on' if iface[key] else 'off' | ||
4291 | 98 | if not value or key in ignore_map: | 101 | if not value or key in ignore_map: |
4292 | 99 | continue | 102 | continue |
4293 | 100 | if key in multiline_keys: | 103 | if key in multiline_keys: |
4294 | diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py | |||
4295 | index 9f35b72..d3788af 100644 | |||
4296 | --- a/cloudinit/net/netplan.py | |||
4297 | +++ b/cloudinit/net/netplan.py | |||
4298 | @@ -4,7 +4,7 @@ import copy | |||
4299 | 4 | import os | 4 | import os |
4300 | 5 | 5 | ||
4301 | 6 | from . import renderer | 6 | from . import renderer |
4303 | 7 | from .network_state import subnet_is_ipv6 | 7 | from .network_state import subnet_is_ipv6, NET_CONFIG_TO_V2 |
4304 | 8 | 8 | ||
4305 | 9 | from cloudinit import log as logging | 9 | from cloudinit import log as logging |
4306 | 10 | from cloudinit import util | 10 | from cloudinit import util |
4307 | @@ -27,31 +27,6 @@ network: | |||
4308 | 27 | """ | 27 | """ |
4309 | 28 | 28 | ||
4310 | 29 | LOG = logging.getLogger(__name__) | 29 | LOG = logging.getLogger(__name__) |
4311 | 30 | NET_CONFIG_TO_V2 = { | ||
4312 | 31 | 'bond': {'bond-ad-select': 'ad-select', | ||
4313 | 32 | 'bond-arp-interval': 'arp-interval', | ||
4314 | 33 | 'bond-arp-ip-target': 'arp-ip-target', | ||
4315 | 34 | 'bond-arp-validate': 'arp-validate', | ||
4316 | 35 | 'bond-downdelay': 'down-delay', | ||
4317 | 36 | 'bond-fail-over-mac': 'fail-over-mac-policy', | ||
4318 | 37 | 'bond-lacp-rate': 'lacp-rate', | ||
4319 | 38 | 'bond-miimon': 'mii-monitor-interval', | ||
4320 | 39 | 'bond-min-links': 'min-links', | ||
4321 | 40 | 'bond-mode': 'mode', | ||
4322 | 41 | 'bond-num-grat-arp': 'gratuitious-arp', | ||
4323 | 42 | 'bond-primary-reselect': 'primary-reselect-policy', | ||
4324 | 43 | 'bond-updelay': 'up-delay', | ||
4325 | 44 | 'bond-xmit-hash-policy': 'transmit-hash-policy'}, | ||
4326 | 45 | 'bridge': {'bridge_ageing': 'ageing-time', | ||
4327 | 46 | 'bridge_bridgeprio': 'priority', | ||
4328 | 47 | 'bridge_fd': 'forward-delay', | ||
4329 | 48 | 'bridge_gcint': None, | ||
4330 | 49 | 'bridge_hello': 'hello-time', | ||
4331 | 50 | 'bridge_maxage': 'max-age', | ||
4332 | 51 | 'bridge_maxwait': None, | ||
4333 | 52 | 'bridge_pathcost': 'path-cost', | ||
4334 | 53 | 'bridge_portprio': None, | ||
4335 | 54 | 'bridge_waitport': None}} | ||
4336 | 55 | 30 | ||
4337 | 56 | 31 | ||
4338 | 57 | def _get_params_dict_by_match(config, match): | 32 | def _get_params_dict_by_match(config, match): |
4339 | @@ -247,6 +222,14 @@ class Renderer(renderer.Renderer): | |||
4340 | 247 | util.subp(cmd, capture=True) | 222 | util.subp(cmd, capture=True) |
4341 | 248 | 223 | ||
4342 | 249 | def _render_content(self, network_state): | 224 | def _render_content(self, network_state): |
4343 | 225 | |||
4344 | 226 | # if content already in netplan format, pass it back | ||
4345 | 227 | if network_state.version == 2: | ||
4346 | 228 | LOG.debug('V2 to V2 passthrough') | ||
4347 | 229 | return util.yaml_dumps({'network': network_state.config}, | ||
4348 | 230 | explicit_start=False, | ||
4349 | 231 | explicit_end=False) | ||
4350 | 232 | |||
4351 | 250 | ethernets = {} | 233 | ethernets = {} |
4352 | 251 | wifis = {} | 234 | wifis = {} |
4353 | 252 | bridges = {} | 235 | bridges = {} |
4354 | @@ -261,9 +244,9 @@ class Renderer(renderer.Renderer): | |||
4355 | 261 | 244 | ||
4356 | 262 | for config in network_state.iter_interfaces(): | 245 | for config in network_state.iter_interfaces(): |
4357 | 263 | ifname = config.get('name') | 246 | ifname = config.get('name') |
4359 | 264 | # filter None entries up front so we can do simple if key in dict | 247 | # filter None (but not False) entries up front |
4360 | 265 | ifcfg = dict((key, value) for (key, value) in config.items() | 248 | ifcfg = dict((key, value) for (key, value) in config.items() |
4362 | 266 | if value) | 249 | if value is not None) |
4363 | 267 | 250 | ||
4364 | 268 | if_type = ifcfg.get('type') | 251 | if_type = ifcfg.get('type') |
4365 | 269 | if if_type == 'physical': | 252 | if if_type == 'physical': |
4366 | @@ -335,6 +318,7 @@ class Renderer(renderer.Renderer): | |||
4367 | 335 | (port, cost) = costval.split() | 318 | (port, cost) = costval.split() |
4368 | 336 | newvalue[port] = int(cost) | 319 | newvalue[port] = int(cost) |
4369 | 337 | br_config.update({newname: newvalue}) | 320 | br_config.update({newname: newvalue}) |
4370 | 321 | |||
4371 | 338 | if len(br_config) > 0: | 322 | if len(br_config) > 0: |
4372 | 339 | bridge.update({'parameters': br_config}) | 323 | bridge.update({'parameters': br_config}) |
4373 | 340 | _extract_addresses(ifcfg, bridge) | 324 | _extract_addresses(ifcfg, bridge) |
4374 | diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py | |||
4375 | index 87a7222..0e830ee 100644 | |||
4376 | --- a/cloudinit/net/network_state.py | |||
4377 | +++ b/cloudinit/net/network_state.py | |||
4378 | @@ -23,6 +23,34 @@ NETWORK_V2_KEY_FILTER = [ | |||
4379 | 23 | 'match', 'mtu', 'nameservers', 'renderer', 'set-name', 'wakeonlan' | 23 | 'match', 'mtu', 'nameservers', 'renderer', 'set-name', 'wakeonlan' |
4380 | 24 | ] | 24 | ] |
4381 | 25 | 25 | ||
4382 | 26 | NET_CONFIG_TO_V2 = { | ||
4383 | 27 | 'bond': {'bond-ad-select': 'ad-select', | ||
4384 | 28 | 'bond-arp-interval': 'arp-interval', | ||
4385 | 29 | 'bond-arp-ip-target': 'arp-ip-target', | ||
4386 | 30 | 'bond-arp-validate': 'arp-validate', | ||
4387 | 31 | 'bond-downdelay': 'down-delay', | ||
4388 | 32 | 'bond-fail-over-mac': 'fail-over-mac-policy', | ||
4389 | 33 | 'bond-lacp-rate': 'lacp-rate', | ||
4390 | 34 | 'bond-miimon': 'mii-monitor-interval', | ||
4391 | 35 | 'bond-min-links': 'min-links', | ||
4392 | 36 | 'bond-mode': 'mode', | ||
4393 | 37 | 'bond-num-grat-arp': 'gratuitious-arp', | ||
4394 | 38 | 'bond-primary': 'primary', | ||
4395 | 39 | 'bond-primary-reselect': 'primary-reselect-policy', | ||
4396 | 40 | 'bond-updelay': 'up-delay', | ||
4397 | 41 | 'bond-xmit-hash-policy': 'transmit-hash-policy'}, | ||
4398 | 42 | 'bridge': {'bridge_ageing': 'ageing-time', | ||
4399 | 43 | 'bridge_bridgeprio': 'priority', | ||
4400 | 44 | 'bridge_fd': 'forward-delay', | ||
4401 | 45 | 'bridge_gcint': None, | ||
4402 | 46 | 'bridge_hello': 'hello-time', | ||
4403 | 47 | 'bridge_maxage': 'max-age', | ||
4404 | 48 | 'bridge_maxwait': None, | ||
4405 | 49 | 'bridge_pathcost': 'path-cost', | ||
4406 | 50 | 'bridge_portprio': None, | ||
4407 | 51 | 'bridge_stp': 'stp', | ||
4408 | 52 | 'bridge_waitport': None}} | ||
4409 | 53 | |||
4410 | 26 | 54 | ||
4411 | 27 | def parse_net_config_data(net_config, skip_broken=True): | 55 | def parse_net_config_data(net_config, skip_broken=True): |
4412 | 28 | """Parses the config, returns NetworkState object | 56 | """Parses the config, returns NetworkState object |
4413 | @@ -120,6 +148,10 @@ class NetworkState(object): | |||
4414 | 120 | self.use_ipv6 = network_state.get('use_ipv6', False) | 148 | self.use_ipv6 = network_state.get('use_ipv6', False) |
4415 | 121 | 149 | ||
4416 | 122 | @property | 150 | @property |
4417 | 151 | def config(self): | ||
4418 | 152 | return self._network_state['config'] | ||
4419 | 153 | |||
4420 | 154 | @property | ||
4421 | 123 | def version(self): | 155 | def version(self): |
4422 | 124 | return self._version | 156 | return self._version |
4423 | 125 | 157 | ||
4424 | @@ -166,12 +198,14 @@ class NetworkStateInterpreter(object): | |||
4425 | 166 | 'search': [], | 198 | 'search': [], |
4426 | 167 | }, | 199 | }, |
4427 | 168 | 'use_ipv6': False, | 200 | 'use_ipv6': False, |
4428 | 201 | 'config': None, | ||
4429 | 169 | } | 202 | } |
4430 | 170 | 203 | ||
4431 | 171 | def __init__(self, version=NETWORK_STATE_VERSION, config=None): | 204 | def __init__(self, version=NETWORK_STATE_VERSION, config=None): |
4432 | 172 | self._version = version | 205 | self._version = version |
4433 | 173 | self._config = config | 206 | self._config = config |
4434 | 174 | self._network_state = copy.deepcopy(self.initial_network_state) | 207 | self._network_state = copy.deepcopy(self.initial_network_state) |
4435 | 208 | self._network_state['config'] = config | ||
4436 | 175 | self._parsed = False | 209 | self._parsed = False |
4437 | 176 | 210 | ||
4438 | 177 | @property | 211 | @property |
4439 | @@ -432,6 +466,18 @@ class NetworkStateInterpreter(object): | |||
4440 | 432 | for param, val in command.get('params', {}).items(): | 466 | for param, val in command.get('params', {}).items(): |
4441 | 433 | iface.update({param: val}) | 467 | iface.update({param: val}) |
4442 | 434 | 468 | ||
4443 | 469 | # convert value to boolean | ||
4444 | 470 | bridge_stp = iface.get('bridge_stp') | ||
4445 | 471 | if bridge_stp is not None and type(bridge_stp) != bool: | ||
4446 | 472 | if bridge_stp in ['on', '1', 1]: | ||
4447 | 473 | bridge_stp = True | ||
4448 | 474 | elif bridge_stp in ['off', '0', 0]: | ||
4449 | 475 | bridge_stp = False | ||
4450 | 476 | else: | ||
4451 | 477 | raise ValueError("Cannot convert bridge_stp value" | ||
4452 | 478 | "(%s) to boolean", bridge_stp) | ||
4453 | 479 | iface.update({'bridge_stp': bridge_stp}) | ||
4454 | 480 | |||
4455 | 435 | interfaces.update({iface['name']: iface}) | 481 | interfaces.update({iface['name']: iface}) |
4456 | 436 | 482 | ||
4457 | 437 | @ensure_command_keys(['address']) | 483 | @ensure_command_keys(['address']) |
4458 | @@ -460,12 +506,15 @@ class NetworkStateInterpreter(object): | |||
4459 | 460 | v2_command = { | 506 | v2_command = { |
4460 | 461 | bond0: { | 507 | bond0: { |
4461 | 462 | 'interfaces': ['interface0', 'interface1'], | 508 | 'interfaces': ['interface0', 'interface1'], |
4465 | 463 | 'miimon': 100, | 509 | 'parameters': { |
4466 | 464 | 'mode': '802.3ad', | 510 | 'mii-monitor-interval': 100, |
4467 | 465 | 'xmit_hash_policy': 'layer3+4'}, | 511 | 'mode': '802.3ad', |
4468 | 512 | 'xmit_hash_policy': 'layer3+4'}}, | ||
4469 | 466 | bond1: { | 513 | bond1: { |
4470 | 467 | 'bond-slaves': ['interface2', 'interface7'], | 514 | 'bond-slaves': ['interface2', 'interface7'], |
4472 | 468 | 'mode': 1 | 515 | 'parameters': { |
4473 | 516 | 'mode': 1, | ||
4474 | 517 | } | ||
4475 | 469 | } | 518 | } |
4476 | 470 | } | 519 | } |
4477 | 471 | 520 | ||
4478 | @@ -489,8 +538,8 @@ class NetworkStateInterpreter(object): | |||
4479 | 489 | v2_command = { | 538 | v2_command = { |
4480 | 490 | br0: { | 539 | br0: { |
4481 | 491 | 'interfaces': ['interface0', 'interface1'], | 540 | 'interfaces': ['interface0', 'interface1'], |
4484 | 492 | 'fd': 0, | 541 | 'forward-delay': 0, |
4485 | 493 | 'stp': 'off', | 542 | 'stp': False, |
4486 | 494 | 'maxwait': 0, | 543 | 'maxwait': 0, |
4487 | 495 | } | 544 | } |
4488 | 496 | } | 545 | } |
4489 | @@ -554,6 +603,7 @@ class NetworkStateInterpreter(object): | |||
4490 | 554 | if not mac_address: | 603 | if not mac_address: |
4491 | 555 | LOG.debug('NetworkState Version2: missing "macaddress" info ' | 604 | LOG.debug('NetworkState Version2: missing "macaddress" info ' |
4492 | 556 | 'in config entry: %s: %s', eth, str(cfg)) | 605 | 'in config entry: %s: %s', eth, str(cfg)) |
4493 | 606 | phy_cmd.update({'mac_address': mac_address}) | ||
4494 | 557 | 607 | ||
4495 | 558 | for key in ['mtu', 'match', 'wakeonlan']: | 608 | for key in ['mtu', 'match', 'wakeonlan']: |
4496 | 559 | if key in cfg: | 609 | if key in cfg: |
4497 | @@ -598,8 +648,8 @@ class NetworkStateInterpreter(object): | |||
4498 | 598 | self.handle_vlan(vlan_cmd) | 648 | self.handle_vlan(vlan_cmd) |
4499 | 599 | 649 | ||
4500 | 600 | def handle_wifis(self, command): | 650 | def handle_wifis(self, command): |
4503 | 601 | raise NotImplementedError("NetworkState V2: " | 651 | LOG.warning('Wifi configuration is only available to distros with' |
4504 | 602 | "Skipping wifi configuration") | 652 | 'netplan rendering support.') |
4505 | 603 | 653 | ||
4506 | 604 | def _v2_common(self, cfg): | 654 | def _v2_common(self, cfg): |
4507 | 605 | LOG.debug('v2_common: handling config:\n%s', cfg) | 655 | LOG.debug('v2_common: handling config:\n%s', cfg) |
4508 | @@ -616,6 +666,11 @@ class NetworkStateInterpreter(object): | |||
4509 | 616 | 666 | ||
4510 | 617 | def _handle_bond_bridge(self, command, cmd_type=None): | 667 | def _handle_bond_bridge(self, command, cmd_type=None): |
4511 | 618 | """Common handler for bond and bridge types""" | 668 | """Common handler for bond and bridge types""" |
4512 | 669 | |||
4513 | 670 | # inverse mapping for v2 keynames to v1 keynames | ||
4514 | 671 | v2key_to_v1 = dict((v, k) for k, v in | ||
4515 | 672 | NET_CONFIG_TO_V2.get(cmd_type).items()) | ||
4516 | 673 | |||
4517 | 619 | for item_name, item_cfg in command.items(): | 674 | for item_name, item_cfg in command.items(): |
4518 | 620 | item_params = dict((key, value) for (key, value) in | 675 | item_params = dict((key, value) for (key, value) in |
4519 | 621 | item_cfg.items() if key not in | 676 | item_cfg.items() if key not in |
4520 | @@ -624,14 +679,20 @@ class NetworkStateInterpreter(object): | |||
4521 | 624 | 'type': cmd_type, | 679 | 'type': cmd_type, |
4522 | 625 | 'name': item_name, | 680 | 'name': item_name, |
4523 | 626 | cmd_type + '_interfaces': item_cfg.get('interfaces'), | 681 | cmd_type + '_interfaces': item_cfg.get('interfaces'), |
4525 | 627 | 'params': item_params, | 682 | 'params': dict((v2key_to_v1[k], v) for k, v in |
4526 | 683 | item_params.get('parameters', {}).items()) | ||
4527 | 628 | } | 684 | } |
4528 | 629 | subnets = self._v2_to_v1_ipcfg(item_cfg) | 685 | subnets = self._v2_to_v1_ipcfg(item_cfg) |
4529 | 630 | if len(subnets) > 0: | 686 | if len(subnets) > 0: |
4530 | 631 | v1_cmd.update({'subnets': subnets}) | 687 | v1_cmd.update({'subnets': subnets}) |
4531 | 632 | 688 | ||
4534 | 633 | LOG.debug('v2(%ss) -> v1(%s):\n%s', cmd_type, cmd_type, v1_cmd) | 689 | LOG.debug('v2(%s) -> v1(%s):\n%s', cmd_type, cmd_type, v1_cmd) |
4535 | 634 | self.handle_bridge(v1_cmd) | 690 | if cmd_type == "bridge": |
4536 | 691 | self.handle_bridge(v1_cmd) | ||
4537 | 692 | elif cmd_type == "bond": | ||
4538 | 693 | self.handle_bond(v1_cmd) | ||
4539 | 694 | else: | ||
4540 | 695 | raise ValueError('Unknown command type: %s', cmd_type) | ||
4541 | 635 | 696 | ||
4542 | 636 | def _v2_to_v1_ipcfg(self, cfg): | 697 | def _v2_to_v1_ipcfg(self, cfg): |
4543 | 637 | """Common ipconfig extraction from v2 to v1 subnets array.""" | 698 | """Common ipconfig extraction from v2 to v1 subnets array.""" |
4544 | @@ -651,12 +712,6 @@ class NetworkStateInterpreter(object): | |||
4545 | 651 | 'address': address, | 712 | 'address': address, |
4546 | 652 | } | 713 | } |
4547 | 653 | 714 | ||
4548 | 654 | routes = [] | ||
4549 | 655 | for route in cfg.get('routes', []): | ||
4550 | 656 | routes.append(_normalize_route( | ||
4551 | 657 | {'address': route.get('to'), 'gateway': route.get('via')})) | ||
4552 | 658 | subnet['routes'] = routes | ||
4553 | 659 | |||
4554 | 660 | if ":" in address: | 715 | if ":" in address: |
4555 | 661 | if 'gateway6' in cfg and gateway6 is None: | 716 | if 'gateway6' in cfg and gateway6 is None: |
4556 | 662 | gateway6 = cfg.get('gateway6') | 717 | gateway6 = cfg.get('gateway6') |
4557 | @@ -667,6 +722,17 @@ class NetworkStateInterpreter(object): | |||
4558 | 667 | subnet.update({'gateway': gateway4}) | 722 | subnet.update({'gateway': gateway4}) |
4559 | 668 | 723 | ||
4560 | 669 | subnets.append(subnet) | 724 | subnets.append(subnet) |
4561 | 725 | |||
4562 | 726 | routes = [] | ||
4563 | 727 | for route in cfg.get('routes', []): | ||
4564 | 728 | routes.append(_normalize_route( | ||
4565 | 729 | {'destination': route.get('to'), 'gateway': route.get('via')})) | ||
4566 | 730 | |||
4567 | 731 | # v2 routes are bound to the interface, in v1 we add them under | ||
4568 | 732 | # the first subnet since there isn't an equivalent interface level. | ||
4569 | 733 | if len(subnets) and len(routes): | ||
4570 | 734 | subnets[0]['routes'] = routes | ||
4571 | 735 | |||
4572 | 670 | return subnets | 736 | return subnets |
4573 | 671 | 737 | ||
4574 | 672 | 738 | ||
4575 | @@ -721,7 +787,7 @@ def _normalize_net_keys(network, address_keys=()): | |||
4576 | 721 | elif netmask: | 787 | elif netmask: |
4577 | 722 | prefix = mask_to_net_prefix(netmask) | 788 | prefix = mask_to_net_prefix(netmask) |
4578 | 723 | elif 'prefix' in net: | 789 | elif 'prefix' in net: |
4580 | 724 | prefix = int(prefix) | 790 | prefix = int(net['prefix']) |
4581 | 725 | else: | 791 | else: |
4582 | 726 | prefix = 64 if ipv6 else 24 | 792 | prefix = 64 if ipv6 else 24 |
4583 | 727 | 793 | ||
4584 | diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py | |||
4585 | index a550f97..f572796 100644 | |||
4586 | --- a/cloudinit/net/sysconfig.py | |||
4587 | +++ b/cloudinit/net/sysconfig.py | |||
4588 | @@ -484,7 +484,11 @@ class Renderer(renderer.Renderer): | |||
4589 | 484 | content.add_nameserver(nameserver) | 484 | content.add_nameserver(nameserver) |
4590 | 485 | for searchdomain in network_state.dns_searchdomains: | 485 | for searchdomain in network_state.dns_searchdomains: |
4591 | 486 | content.add_search_domain(searchdomain) | 486 | content.add_search_domain(searchdomain) |
4593 | 487 | return "\n".join([_make_header(';'), str(content)]) | 487 | header = _make_header(';') |
4594 | 488 | content_str = str(content) | ||
4595 | 489 | if not content_str.startswith(header): | ||
4596 | 490 | content_str = header + '\n' + content_str | ||
4597 | 491 | return content_str | ||
4598 | 488 | 492 | ||
4599 | 489 | @staticmethod | 493 | @staticmethod |
4600 | 490 | def _render_networkmanager_conf(network_state): | 494 | def _render_networkmanager_conf(network_state): |
4601 | diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py | |||
4602 | 491 | new file mode 100644 | 495 | new file mode 100644 |
4603 | index 0000000..1c1f504 | |||
4604 | --- /dev/null | |||
4605 | +++ b/cloudinit/net/tests/test_dhcp.py | |||
4606 | @@ -0,0 +1,260 @@ | |||
4607 | 1 | # This file is part of cloud-init. See LICENSE file for license information. | ||
4608 | 2 | |||
4609 | 3 | import mock | ||
4610 | 4 | import os | ||
4611 | 5 | from textwrap import dedent | ||
4612 | 6 | |||
4613 | 7 | from cloudinit.net.dhcp import ( | ||
4614 | 8 | InvalidDHCPLeaseFileError, maybe_perform_dhcp_discovery, | ||
4615 | 9 | parse_dhcp_lease_file, dhcp_discovery, networkd_load_leases) | ||
4616 | 10 | from cloudinit.util import ensure_file, write_file | ||
4617 | 11 | from cloudinit.tests.helpers import CiTestCase, wrap_and_call, populate_dir | ||
4618 | 12 | |||
4619 | 13 | |||
4620 | 14 | class TestParseDHCPLeasesFile(CiTestCase): | ||
4621 | 15 | |||
4622 | 16 | def test_parse_empty_lease_file_errors(self): | ||
4623 | 17 | """parse_dhcp_lease_file errors when file content is empty.""" | ||
4624 | 18 | empty_file = self.tmp_path('leases') | ||
4625 | 19 | ensure_file(empty_file) | ||
4626 | 20 | with self.assertRaises(InvalidDHCPLeaseFileError) as context_manager: | ||
4627 | 21 | parse_dhcp_lease_file(empty_file) | ||
4628 | 22 | error = context_manager.exception | ||
4629 | 23 | self.assertIn('Cannot parse empty dhcp lease file', str(error)) | ||
4630 | 24 | |||
4631 | 25 | def test_parse_malformed_lease_file_content_errors(self): | ||
4632 | 26 | """parse_dhcp_lease_file errors when file content isn't dhcp leases.""" | ||
4633 | 27 | non_lease_file = self.tmp_path('leases') | ||
4634 | 28 | write_file(non_lease_file, 'hi mom.') | ||
4635 | 29 | with self.assertRaises(InvalidDHCPLeaseFileError) as context_manager: | ||
4636 | 30 | parse_dhcp_lease_file(non_lease_file) | ||
4637 | 31 | error = context_manager.exception | ||
4638 | 32 | self.assertIn('Cannot parse dhcp lease file', str(error)) | ||
4639 | 33 | |||
4640 | 34 | def test_parse_multiple_leases(self): | ||
4641 | 35 | """parse_dhcp_lease_file returns a list of all leases within.""" | ||
4642 | 36 | lease_file = self.tmp_path('leases') | ||
4643 | 37 | content = dedent(""" | ||
4644 | 38 | lease { | ||
4645 | 39 | interface "wlp3s0"; | ||
4646 | 40 | fixed-address 192.168.2.74; | ||
4647 | 41 | option subnet-mask 255.255.255.0; | ||
4648 | 42 | option routers 192.168.2.1; | ||
4649 | 43 | renew 4 2017/07/27 18:02:30; | ||
4650 | 44 | expire 5 2017/07/28 07:08:15; | ||
4651 | 45 | } | ||
4652 | 46 | lease { | ||
4653 | 47 | interface "wlp3s0"; | ||
4654 | 48 | fixed-address 192.168.2.74; | ||
4655 | 49 | option subnet-mask 255.255.255.0; | ||
4656 | 50 | option routers 192.168.2.1; | ||
4657 | 51 | } | ||
4658 | 52 | """) | ||
4659 | 53 | expected = [ | ||
4660 | 54 | {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74', | ||
4661 | 55 | 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1', | ||
4662 | 56 | 'renew': '4 2017/07/27 18:02:30', | ||
4663 | 57 | 'expire': '5 2017/07/28 07:08:15'}, | ||
4664 | 58 | {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74', | ||
4665 | 59 | 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}] | ||
4666 | 60 | write_file(lease_file, content) | ||
4667 | 61 | self.assertItemsEqual(expected, parse_dhcp_lease_file(lease_file)) | ||
4668 | 62 | |||
4669 | 63 | |||
class TestDHCPDiscoveryClean(CiTestCase):
    """Tests for maybe_perform_dhcp_discovery and dhcp_discovery.

    Each skip-path test asserts both the empty return value and the
    log message explaining why discovery was skipped.
    """

    with_logs = True  # have CiTestCase capture log output in self.logs

    @mock.patch('cloudinit.net.dhcp.find_fallback_nic')
    def test_no_fallback_nic_found(self, m_fallback_nic):
        """Log and do nothing when nic is absent and no fallback is found."""
        m_fallback_nic.return_value = None  # No fallback nic found
        self.assertEqual({}, maybe_perform_dhcp_discovery())
        self.assertIn(
            'Skip dhcp_discovery: Unable to find fallback nic.',
            self.logs.getvalue())

    def test_provided_nic_does_not_exist(self):
        """When the provided nic doesn't exist, log a message and no-op."""
        self.assertEqual({}, maybe_perform_dhcp_discovery('idontexist'))
        self.assertIn(
            'Skip dhcp_discovery: nic idontexist not found in get_devicelist.',
            self.logs.getvalue())

    @mock.patch('cloudinit.net.dhcp.util.which')
    @mock.patch('cloudinit.net.dhcp.find_fallback_nic')
    def test_absent_dhclient_command(self, m_fallback, m_which):
        """When dhclient doesn't exist in the OS, log the issue and no-op."""
        m_fallback.return_value = 'eth9'
        m_which.return_value = None  # dhclient isn't found
        self.assertEqual({}, maybe_perform_dhcp_discovery())
        self.assertIn(
            'Skip dhclient configuration: No dhclient command found.',
            self.logs.getvalue())

    # NOTE: stacked @mock.patch decorators are applied bottom-up, so the
    # mock arguments arrive in reverse order of the decorator list.
    @mock.patch('cloudinit.temp_utils.os.getuid')
    @mock.patch('cloudinit.net.dhcp.dhcp_discovery')
    @mock.patch('cloudinit.net.dhcp.util.which')
    @mock.patch('cloudinit.net.dhcp.find_fallback_nic')
    def test_dhclient_run_with_tmpdir(self, m_fback, m_which, m_dhcp, m_uid):
        """maybe_perform_dhcp_discovery passes tmpdir to dhcp_discovery."""
        m_uid.return_value = 0  # Fake root user for tmpdir
        m_fback.return_value = 'eth9'
        m_which.return_value = '/sbin/dhclient'
        m_dhcp.return_value = {'address': '192.168.2.2'}
        retval = wrap_and_call(
            'cloudinit.temp_utils',
            {'_TMPDIR': {'new': None},
             'os.getuid': 0},
            maybe_perform_dhcp_discovery)
        self.assertEqual({'address': '192.168.2.2'}, retval)
        self.assertEqual(
            1, m_dhcp.call_count, 'dhcp_discovery not called once')
        # Verify the positional args: dhclient path, nic, then the tmpdir
        # created under /var/tmp/cloud-init when running as root.
        call = m_dhcp.call_args_list[0]
        self.assertEqual('/sbin/dhclient', call[0][0])
        self.assertEqual('eth9', call[0][1])
        self.assertIn('/var/tmp/cloud-init/cloud-init-dhcp-', call[0][2])

    @mock.patch('cloudinit.net.dhcp.util.subp')
    def test_dhcp_discovery_run_in_sandbox(self, m_subp):
        """dhcp_discovery brings up the interface and runs dhclient.

        It also returns the parsed dhcp.leases file generated in the sandbox.
        """
        tmpdir = self.tmp_dir()
        dhclient_script = os.path.join(tmpdir, 'dhclient.orig')
        script_content = '#!/bin/bash\necho fake-dhclient'
        write_file(dhclient_script, script_content, mode=0o755)
        lease_content = dedent("""
            lease {
              interface "eth9";
              fixed-address 192.168.2.74;
              option subnet-mask 255.255.255.0;
              option routers 192.168.2.1;
            }
        """)
        lease_file = os.path.join(tmpdir, 'dhcp.leases')
        write_file(lease_file, lease_content)
        self.assertItemsEqual(
            [{'interface': 'eth9', 'fixed-address': '192.168.2.74',
              'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}],
            dhcp_discovery(dhclient_script, 'eth9', tmpdir))
        # dhclient script got copied
        with open(os.path.join(tmpdir, 'dhclient')) as stream:
            self.assertEqual(script_content, stream.read())
        # Interface was brought up before dhclient called from sandbox
        m_subp.assert_has_calls([
            mock.call(
                ['ip', 'link', 'set', 'dev', 'eth9', 'up'], capture=True),
            mock.call(
                [os.path.join(tmpdir, 'dhclient'), '-1', '-v', '-lf',
                 lease_file, '-pf', os.path.join(tmpdir, 'dhclient.pid'),
                 'eth9', '-sf', '/bin/true'], capture=True)])
4758 | 152 | |||
4759 | 153 | |||
class TestSystemdParseLeases(CiTestCase):
    """Tests for networkd_load_leases parsing of systemd lease files."""

    # Sample systemd-networkd lease content from an LXD container
    # (DOMAINNAME=lxd); has no OPTION_* entries.
    lxd_lease = dedent("""\
    # This is private data. Do not parse.
    ADDRESS=10.75.205.242
    NETMASK=255.255.255.0
    ROUTER=10.75.205.1
    SERVER_ADDRESS=10.75.205.1
    NEXT_SERVER=10.75.205.1
    BROADCAST=10.75.205.255
    T1=1580
    T2=2930
    LIFETIME=3600
    DNS=10.75.205.1
    DOMAINNAME=lxd
    HOSTNAME=a1
    CLIENTID=ffe617693400020000ab110c65a6a0866931c2
    """)

    # Expected parse of lxd_lease: every KEY=value line as a str:str pair;
    # the leading comment line is dropped.
    lxd_parsed = {
        'ADDRESS': '10.75.205.242',
        'NETMASK': '255.255.255.0',
        'ROUTER': '10.75.205.1',
        'SERVER_ADDRESS': '10.75.205.1',
        'NEXT_SERVER': '10.75.205.1',
        'BROADCAST': '10.75.205.255',
        'T1': '1580',
        'T2': '2930',
        'LIFETIME': '3600',
        'DNS': '10.75.205.1',
        'DOMAINNAME': 'lxd',
        'HOSTNAME': 'a1',
        'CLIENTID': 'ffe617693400020000ab110c65a6a0866931c2',
    }

    # Sample lease content from an Azure instance; notably includes
    # OPTION_245, the Azure-specific DHCP option.
    azure_lease = dedent("""\
    # This is private data. Do not parse.
    ADDRESS=10.132.0.5
    NETMASK=255.255.255.255
    ROUTER=10.132.0.1
    SERVER_ADDRESS=169.254.169.254
    NEXT_SERVER=10.132.0.1
    MTU=1460
    T1=43200
    T2=75600
    LIFETIME=86400
    DNS=169.254.169.254
    NTP=169.254.169.254
    DOMAINNAME=c.ubuntu-foundations.internal
    DOMAIN_SEARCH_LIST=c.ubuntu-foundations.internal google.internal
    HOSTNAME=tribaal-test-171002-1349.c.ubuntu-foundations.internal
    ROUTES=10.132.0.1/32,0.0.0.0 0.0.0.0/0,10.132.0.1
    CLIENTID=ff405663a200020000ab11332859494d7a8b4c
    OPTION_245=624c3620
    """)

    # Expected parse of azure_lease.
    azure_parsed = {
        'ADDRESS': '10.132.0.5',
        'NETMASK': '255.255.255.255',
        'ROUTER': '10.132.0.1',
        'SERVER_ADDRESS': '169.254.169.254',
        'NEXT_SERVER': '10.132.0.1',
        'MTU': '1460',
        'T1': '43200',
        'T2': '75600',
        'LIFETIME': '86400',
        'DNS': '169.254.169.254',
        'NTP': '169.254.169.254',
        'DOMAINNAME': 'c.ubuntu-foundations.internal',
        'DOMAIN_SEARCH_LIST': 'c.ubuntu-foundations.internal google.internal',
        'HOSTNAME': 'tribaal-test-171002-1349.c.ubuntu-foundations.internal',
        'ROUTES': '10.132.0.1/32,0.0.0.0 0.0.0.0/0,10.132.0.1',
        'CLIENTID': 'ff405663a200020000ab11332859494d7a8b4c',
        'OPTION_245': '624c3620'}

    def setUp(self):
        """Create a fresh temporary leases directory for each test."""
        super(TestSystemdParseLeases, self).setUp()
        self.lease_d = self.tmp_dir()

    def test_no_leases_returns_empty_dict(self):
        """A leases dir with no lease files should return empty dictionary."""
        self.assertEqual({}, networkd_load_leases(self.lease_d))

    def test_no_leases_dir_returns_empty_dict(self):
        """A non-existing leases dir should return empty dict."""
        enodir = os.path.join(self.lease_d, 'does-not-exist')
        self.assertEqual({}, networkd_load_leases(enodir))

    def test_single_leases_file(self):
        """A leases dir with one leases file."""
        populate_dir(self.lease_d, {'2': self.lxd_lease})
        self.assertEqual(
            {'2': self.lxd_parsed}, networkd_load_leases(self.lease_d))

    def test_single_azure_leases_file(self):
        """On Azure, option 245 should be present, verify it specifically."""
        populate_dir(self.lease_d, {'1': self.azure_lease})
        self.assertEqual(
            {'1': self.azure_parsed}, networkd_load_leases(self.lease_d))

    def test_multiple_files(self):
        """Multiple leases files on azure with one found return that value."""
        self.maxDiff = None
        populate_dir(self.lease_d, {'1': self.azure_lease,
                                    '9': self.lxd_lease})
        self.assertEqual({'1': self.azure_parsed, '9': self.lxd_parsed},
                         networkd_load_leases(self.lease_d))
4867 | diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py | |||
4868 | index 272a6eb..8cb4114 100644 | |||
4869 | --- a/cloudinit/net/tests/test_init.py | |||
4870 | +++ b/cloudinit/net/tests/test_init.py | |||
4871 | @@ -7,7 +7,7 @@ import os | |||
4872 | 7 | 7 | ||
4873 | 8 | import cloudinit.net as net | 8 | import cloudinit.net as net |
4874 | 9 | from cloudinit.util import ensure_file, write_file, ProcessExecutionError | 9 | from cloudinit.util import ensure_file, write_file, ProcessExecutionError |
4876 | 10 | from tests.unittests.helpers import CiTestCase | 10 | from cloudinit.tests.helpers import CiTestCase |
4877 | 11 | 11 | ||
4878 | 12 | 12 | ||
4879 | 13 | class TestSysDevPath(CiTestCase): | 13 | class TestSysDevPath(CiTestCase): |
4880 | @@ -414,7 +414,7 @@ class TestEphemeralIPV4Network(CiTestCase): | |||
4881 | 414 | self.assertIn('Cannot init network on', str(error)) | 414 | self.assertIn('Cannot init network on', str(error)) |
4882 | 415 | self.assertEqual(0, m_subp.call_count) | 415 | self.assertEqual(0, m_subp.call_count) |
4883 | 416 | 416 | ||
4885 | 417 | def test_ephemeral_ipv4_network_errors_invalid_mask(self, m_subp): | 417 | def test_ephemeral_ipv4_network_errors_invalid_mask_prefix(self, m_subp): |
4886 | 418 | """Raise an error when prefix_or_mask is not a netmask or prefix.""" | 418 | """Raise an error when prefix_or_mask is not a netmask or prefix.""" |
4887 | 419 | params = { | 419 | params = { |
4888 | 420 | 'interface': 'eth0', 'ip': '192.168.2.2', | 420 | 'interface': 'eth0', 'ip': '192.168.2.2', |
4889 | diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py | |||
4890 | index 39c79de..8f99d99 100644 | |||
4891 | --- a/cloudinit/netinfo.py | |||
4892 | +++ b/cloudinit/netinfo.py | |||
4893 | @@ -13,7 +13,7 @@ import re | |||
4894 | 13 | from cloudinit import log as logging | 13 | from cloudinit import log as logging |
4895 | 14 | from cloudinit import util | 14 | from cloudinit import util |
4896 | 15 | 15 | ||
4898 | 16 | from prettytable import PrettyTable | 16 | from cloudinit.simpletable import SimpleTable |
4899 | 17 | 17 | ||
4900 | 18 | LOG = logging.getLogger() | 18 | LOG = logging.getLogger() |
4901 | 19 | 19 | ||
4902 | @@ -170,7 +170,7 @@ def netdev_pformat(): | |||
4903 | 170 | lines.append(util.center("Net device info failed", '!', 80)) | 170 | lines.append(util.center("Net device info failed", '!', 80)) |
4904 | 171 | else: | 171 | else: |
4905 | 172 | fields = ['Device', 'Up', 'Address', 'Mask', 'Scope', 'Hw-Address'] | 172 | fields = ['Device', 'Up', 'Address', 'Mask', 'Scope', 'Hw-Address'] |
4907 | 173 | tbl = PrettyTable(fields) | 173 | tbl = SimpleTable(fields) |
4908 | 174 | for (dev, d) in netdev.items(): | 174 | for (dev, d) in netdev.items(): |
4909 | 175 | tbl.add_row([dev, d["up"], d["addr"], d["mask"], ".", d["hwaddr"]]) | 175 | tbl.add_row([dev, d["up"], d["addr"], d["mask"], ".", d["hwaddr"]]) |
4910 | 176 | if d.get('addr6'): | 176 | if d.get('addr6'): |
4911 | @@ -194,7 +194,7 @@ def route_pformat(): | |||
4912 | 194 | if routes.get('ipv4'): | 194 | if routes.get('ipv4'): |
4913 | 195 | fields_v4 = ['Route', 'Destination', 'Gateway', | 195 | fields_v4 = ['Route', 'Destination', 'Gateway', |
4914 | 196 | 'Genmask', 'Interface', 'Flags'] | 196 | 'Genmask', 'Interface', 'Flags'] |
4916 | 197 | tbl_v4 = PrettyTable(fields_v4) | 197 | tbl_v4 = SimpleTable(fields_v4) |
4917 | 198 | for (n, r) in enumerate(routes.get('ipv4')): | 198 | for (n, r) in enumerate(routes.get('ipv4')): |
4918 | 199 | route_id = str(n) | 199 | route_id = str(n) |
4919 | 200 | tbl_v4.add_row([route_id, r['destination'], | 200 | tbl_v4.add_row([route_id, r['destination'], |
4920 | @@ -207,7 +207,7 @@ def route_pformat(): | |||
4921 | 207 | if routes.get('ipv6'): | 207 | if routes.get('ipv6'): |
4922 | 208 | fields_v6 = ['Route', 'Proto', 'Recv-Q', 'Send-Q', | 208 | fields_v6 = ['Route', 'Proto', 'Recv-Q', 'Send-Q', |
4923 | 209 | 'Local Address', 'Foreign Address', 'State'] | 209 | 'Local Address', 'Foreign Address', 'State'] |
4925 | 210 | tbl_v6 = PrettyTable(fields_v6) | 210 | tbl_v6 = SimpleTable(fields_v6) |
4926 | 211 | for (n, r) in enumerate(routes.get('ipv6')): | 211 | for (n, r) in enumerate(routes.get('ipv6')): |
4927 | 212 | route_id = str(n) | 212 | route_id = str(n) |
4928 | 213 | tbl_v6.add_row([route_id, r['proto'], | 213 | tbl_v6.add_row([route_id, r['proto'], |
4929 | diff --git a/cloudinit/simpletable.py b/cloudinit/simpletable.py | |||
4930 | 214 | new file mode 100644 | 214 | new file mode 100644 |
4931 | index 0000000..9060322 | |||
4932 | --- /dev/null | |||
4933 | +++ b/cloudinit/simpletable.py | |||
4934 | @@ -0,0 +1,62 @@ | |||
4935 | 1 | # Copyright (C) 2017 Amazon.com, Inc. or its affiliates | ||
4936 | 2 | # | ||
4937 | 3 | # Author: Ethan Faust <efaust@amazon.com> | ||
4938 | 4 | # Author: Andrew Jorgensen <ajorgens@amazon.com> | ||
4939 | 5 | # | ||
4940 | 6 | # This file is part of cloud-init. See LICENSE file for license information. | ||
4941 | 7 | |||
4942 | 8 | |||
4943 | 9 | class SimpleTable(object): | ||
4944 | 10 | """A minimal implementation of PrettyTable | ||
4945 | 11 | for distribution with cloud-init. | ||
4946 | 12 | """ | ||
4947 | 13 | |||
4948 | 14 | def __init__(self, fields): | ||
4949 | 15 | self.fields = fields | ||
4950 | 16 | self.rows = [] | ||
4951 | 17 | |||
4952 | 18 | # initialize list of 0s the same length | ||
4953 | 19 | # as the number of fields | ||
4954 | 20 | self.column_widths = [0] * len(self.fields) | ||
4955 | 21 | self.update_column_widths(fields) | ||
4956 | 22 | |||
4957 | 23 | def update_column_widths(self, values): | ||
4958 | 24 | for i, value in enumerate(values): | ||
4959 | 25 | self.column_widths[i] = max( | ||
4960 | 26 | len(value), | ||
4961 | 27 | self.column_widths[i]) | ||
4962 | 28 | |||
4963 | 29 | def add_row(self, values): | ||
4964 | 30 | if len(values) > len(self.fields): | ||
4965 | 31 | raise TypeError('too many values') | ||
4966 | 32 | values = [str(value) for value in values] | ||
4967 | 33 | self.rows.append(values) | ||
4968 | 34 | self.update_column_widths(values) | ||
4969 | 35 | |||
4970 | 36 | def _hdiv(self): | ||
4971 | 37 | """Returns a horizontal divider for the table.""" | ||
4972 | 38 | return '+' + '+'.join( | ||
4973 | 39 | ['-' * (w + 2) for w in self.column_widths]) + '+' | ||
4974 | 40 | |||
4975 | 41 | def _row(self, row): | ||
4976 | 42 | """Returns a formatted row.""" | ||
4977 | 43 | return '|' + '|'.join( | ||
4978 | 44 | [col.center(self.column_widths[i] + 2) | ||
4979 | 45 | for i, col in enumerate(row)]) + '|' | ||
4980 | 46 | |||
4981 | 47 | def __str__(self): | ||
4982 | 48 | """Returns a string representation of the table with lines around. | ||
4983 | 49 | |||
4984 | 50 | +-----+-----+ | ||
4985 | 51 | | one | two | | ||
4986 | 52 | +-----+-----+ | ||
4987 | 53 | | 1 | 2 | | ||
4988 | 54 | | 01 | 10 | | ||
4989 | 55 | +-----+-----+ | ||
4990 | 56 | """ | ||
4991 | 57 | lines = [self._hdiv(), self._row(self.fields), self._hdiv()] | ||
4992 | 58 | lines += [self._row(r) for r in self.rows] + [self._hdiv()] | ||
4993 | 59 | return '\n'.join(lines) | ||
4994 | 60 | |||
4995 | 61 | def get_string(self): | ||
4996 | 62 | return repr(self) | ||
4997 | diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py | |||
4998 | index 380e27c..43a7e42 100644 | |||
4999 | --- a/cloudinit/sources/DataSourceAliYun.py | |||
5000 | +++ b/cloudinit/sources/DataSourceAliYun.py |
The diff has been truncated for viewing.
PASSED: Continuous integration, rev:9612b7bdf7b 57244c5612c3c12 136467bbd217e1 /jenkins. ubuntu. com/server/ job/cloud- init-ci/ 393/
https:/
Executed test runs:
SUCCESS: Checkout
SUCCESS: Unit & Style Tests
SUCCESS: Ubuntu LTS: Build
SUCCESS: Ubuntu LTS: Integration
SUCCESS: MAAS Compatibility Testing
IN_PROGRESS: Declarative: Post Actions
Click here to trigger a rebuild: /jenkins. ubuntu. com/server/ job/cloud- init-ci/ 393/rebuild
https:/