Merge ~chad.smith/cloud-init:ubuntu/zesty into cloud-init:ubuntu/zesty
- Git
- lp:~chad.smith/cloud-init
- ubuntu/zesty
- Merge into ubuntu/zesty
Proposed by
Chad Smith
on 2017-10-06
| Status: | Merged | ||||||||
|---|---|---|---|---|---|---|---|---|---|
| Merged at revision: | ca2730e2ac86b05f7e6bbb607862884448c9f584 | ||||||||
| Proposed branch: | ~chad.smith/cloud-init:ubuntu/zesty | ||||||||
| Merge into: | cloud-init:ubuntu/zesty | ||||||||
| Diff against target: |
15737 lines (+9583/-1296) 273 files modified
ChangeLog (+422/-0) Makefile (+3/-3) cloudinit/analyze/__init__.py (+0/-0) cloudinit/analyze/__main__.py (+155/-0) cloudinit/analyze/dump.py (+176/-0) cloudinit/analyze/show.py (+207/-0) cloudinit/analyze/tests/test_dump.py (+210/-0) cloudinit/apport.py (+105/-0) cloudinit/cmd/devel/__init__.py (+0/-0) cloudinit/cmd/devel/logs.py (+101/-0) cloudinit/cmd/devel/parser.py (+26/-0) cloudinit/cmd/devel/tests/__init__.py (+0/-0) cloudinit/cmd/devel/tests/test_logs.py (+120/-0) cloudinit/cmd/main.py (+35/-34) cloudinit/config/cc_bootcmd.py (+60/-30) cloudinit/config/cc_chef.py (+33/-11) cloudinit/config/cc_landscape.py (+2/-2) cloudinit/config/cc_ntp.py (+57/-49) cloudinit/config/cc_puppet.py (+18/-15) cloudinit/config/cc_resizefs.py (+87/-70) cloudinit/config/cc_resolv_conf.py (+1/-1) cloudinit/config/cc_runcmd.py (+57/-27) cloudinit/config/cc_snappy.py (+2/-2) cloudinit/config/cc_ssh_authkey_fingerprints.py (+2/-2) cloudinit/config/cc_zypper_add_repo.py (+218/-0) cloudinit/config/schema.py (+181/-43) cloudinit/distros/__init__.py (+8/-1) cloudinit/distros/arch.py (+59/-31) cloudinit/distros/debian.py (+71/-23) cloudinit/distros/opensuse.py (+212/-0) cloudinit/distros/sles.py (+5/-155) cloudinit/helpers.py (+7/-7) cloudinit/log.py (+5/-0) cloudinit/net/__init__.py (+21/-30) cloudinit/net/dhcp.py (+163/-0) cloudinit/net/eni.py (+3/-0) cloudinit/net/netplan.py (+12/-28) cloudinit/net/network_state.py (+84/-18) cloudinit/net/sysconfig.py (+5/-1) cloudinit/net/tests/test_dhcp.py (+260/-0) cloudinit/net/tests/test_init.py (+2/-2) cloudinit/netinfo.py (+4/-4) cloudinit/simpletable.py (+62/-0) cloudinit/sources/DataSourceAliYun.py (+6/-3) cloudinit/sources/DataSourceAltCloud.py (+2/-2) cloudinit/sources/DataSourceAzure.py (+7/-3) cloudinit/sources/DataSourceCloudStack.py (+37/-14) cloudinit/sources/DataSourceEc2.py (+164/-22) cloudinit/sources/DataSourceGCE.py (+126/-72) cloudinit/sources/DataSourceOVF.py (+169/-51) cloudinit/sources/__init__.py (+8/-1) 
cloudinit/sources/helpers/azure.py (+16/-8) cloudinit/sources/helpers/vmware/imc/config.py (+21/-3) cloudinit/sources/helpers/vmware/imc/config_nic.py (+130/-71) cloudinit/sources/helpers/vmware/imc/config_passwd.py (+67/-0) cloudinit/sources/helpers/vmware/imc/guestcust_util.py (+7/-5) cloudinit/stages.py (+20/-13) cloudinit/temp_utils.py (+101/-0) cloudinit/tests/__init__.py (+0/-0) cloudinit/tests/helpers.py (+16/-2) cloudinit/tests/test_simpletable.py (+100/-0) cloudinit/tests/test_temp_utils.py (+101/-0) cloudinit/tests/test_url_helper.py (+40/-0) cloudinit/url_helper.py (+5/-1) cloudinit/util.py (+41/-42) cloudinit/version.py (+1/-1) config/cloud.cfg.tmpl (+9/-5) debian/apport-launcher.py (+6/-0) debian/changelog (+117/-2) debian/rules (+2/-1) dev/null (+0/-2) doc/examples/cloud-config-chef.txt (+4/-0) doc/rtd/index.rst (+1/-0) doc/rtd/topics/capabilities.rst (+40/-10) doc/rtd/topics/datasources.rst (+1/-0) doc/rtd/topics/datasources/gce.rst (+20/-0) doc/rtd/topics/debugging.rst (+146/-0) doc/rtd/topics/format.rst (+1/-0) doc/rtd/topics/modules.rst (+0/-1) packages/bddeb (+4/-4) packages/debian/copyright (+10/-15) packages/debian/dirs (+0/-1) packages/debian/rules.in (+2/-1) packages/pkg-deps.json (+0/-3) packages/redhat/cloud-init.spec.in (+0/-6) requirements.txt (+0/-3) setup.py (+4/-4) systemd/cloud-final.service.tmpl (+3/-1) systemd/cloud-init-local.service.tmpl (+6/-0) systemd/cloud-init.service.tmpl (+10/-0) sysvinit/suse/cloud-config (+113/-0) sysvinit/suse/cloud-final (+113/-0) sysvinit/suse/cloud-init (+114/-0) sysvinit/suse/cloud-init-local (+113/-0) templates/hosts.opensuse.tmpl (+26/-0) templates/hosts.suse.tmpl (+0/-3) templates/sources.list.debian.tmpl (+8/-8) templates/timesyncd.conf.tmpl (+8/-0) tests/cloud_tests/__init__.py (+1/-1) tests/cloud_tests/__main__.py (+4/-1) tests/cloud_tests/args.py (+2/-2) tests/cloud_tests/bddeb.py (+10/-9) tests/cloud_tests/collect.py (+3/-0) tests/cloud_tests/config.py (+1/-0) 
tests/cloud_tests/images/nocloudkvm.py (+88/-0) tests/cloud_tests/instances/base.py (+7/-5) tests/cloud_tests/instances/lxd.py (+9/-1) tests/cloud_tests/instances/nocloudkvm.py (+217/-0) tests/cloud_tests/platforms.yaml (+4/-0) tests/cloud_tests/platforms/__init__.py (+2/-0) tests/cloud_tests/platforms/nocloudkvm.py (+90/-0) tests/cloud_tests/releases.yaml (+18/-1) tests/cloud_tests/setup_image.py (+24/-8) tests/cloud_tests/snapshots/nocloudkvm.py (+74/-0) tests/cloud_tests/testcases/bugs/README.md (+0/-0) tests/cloud_tests/testcases/bugs/lp1511485.yaml (+0/-0) tests/cloud_tests/testcases/bugs/lp1611074.yaml (+0/-0) tests/cloud_tests/testcases/bugs/lp1628337.yaml (+0/-0) tests/cloud_tests/testcases/examples/README.md (+0/-0) tests/cloud_tests/testcases/examples/TODO.md (+0/-0) tests/cloud_tests/testcases/examples/add_apt_repositories.yaml (+0/-0) tests/cloud_tests/testcases/examples/alter_completion_message.yaml (+0/-0) tests/cloud_tests/testcases/examples/configure_instance_trusted_ca_certificates.yaml (+0/-0) tests/cloud_tests/testcases/examples/configure_instances_ssh_keys.yaml (+0/-0) tests/cloud_tests/testcases/examples/including_user_groups.yaml (+0/-0) tests/cloud_tests/testcases/examples/install_arbitrary_packages.yaml (+0/-0) tests/cloud_tests/testcases/examples/install_run_chef_recipes.yaml (+0/-0) tests/cloud_tests/testcases/examples/run_apt_upgrade.yaml (+0/-0) tests/cloud_tests/testcases/examples/run_commands.yaml (+0/-0) tests/cloud_tests/testcases/examples/run_commands_first_boot.yaml (+0/-0) tests/cloud_tests/testcases/examples/setup_run_puppet.yaml (+0/-0) tests/cloud_tests/testcases/examples/writing_out_arbitrary_files.yaml (+0/-0) tests/cloud_tests/testcases/main/README.md (+0/-0) tests/cloud_tests/testcases/main/command_output_simple.yaml (+0/-0) tests/cloud_tests/testcases/modules/README.md (+0/-0) tests/cloud_tests/testcases/modules/TODO.md (+0/-2) tests/cloud_tests/testcases/modules/apt_configure_conf.yaml (+0/-0) 
tests/cloud_tests/testcases/modules/apt_configure_disable_suites.yaml (+0/-0) tests/cloud_tests/testcases/modules/apt_configure_primary.yaml (+0/-0) tests/cloud_tests/testcases/modules/apt_configure_proxy.yaml (+0/-0) tests/cloud_tests/testcases/modules/apt_configure_security.yaml (+0/-0) tests/cloud_tests/testcases/modules/apt_configure_sources_key.yaml (+0/-0) tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.yaml (+0/-0) tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml (+0/-0) tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.yaml (+0/-0) tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml (+0/-0) tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml (+0/-0) tests/cloud_tests/testcases/modules/bootcmd.yaml (+0/-0) tests/cloud_tests/testcases/modules/byobu.yaml (+0/-0) tests/cloud_tests/testcases/modules/ca_certs.yaml (+0/-0) tests/cloud_tests/testcases/modules/debug_disable.yaml (+0/-0) tests/cloud_tests/testcases/modules/debug_enable.yaml (+0/-0) tests/cloud_tests/testcases/modules/final_message.yaml (+0/-0) tests/cloud_tests/testcases/modules/keys_to_console.yaml (+0/-0) tests/cloud_tests/testcases/modules/landscape.yaml (+0/-0) tests/cloud_tests/testcases/modules/locale.yaml (+0/-0) tests/cloud_tests/testcases/modules/lxd_bridge.yaml (+0/-0) tests/cloud_tests/testcases/modules/lxd_dir.yaml (+0/-0) tests/cloud_tests/testcases/modules/ntp.yaml (+0/-0) tests/cloud_tests/testcases/modules/ntp_pools.yaml (+0/-0) tests/cloud_tests/testcases/modules/ntp_servers.yaml (+0/-0) tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml (+0/-0) tests/cloud_tests/testcases/modules/runcmd.yaml (+0/-0) tests/cloud_tests/testcases/modules/salt_minion.yaml (+0/-0) tests/cloud_tests/testcases/modules/seed_random_command.yaml (+0/-0) tests/cloud_tests/testcases/modules/seed_random_data.yaml (+0/-0) tests/cloud_tests/testcases/modules/set_hostname.yaml (+0/-0) 
tests/cloud_tests/testcases/modules/set_hostname_fqdn.yaml (+0/-0) tests/cloud_tests/testcases/modules/set_password.yaml (+0/-0) tests/cloud_tests/testcases/modules/set_password_expire.yaml (+0/-0) tests/cloud_tests/testcases/modules/set_password_list.yaml (+0/-0) tests/cloud_tests/testcases/modules/set_password_list_string.yaml (+0/-0) tests/cloud_tests/testcases/modules/snappy.yaml (+0/-0) tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.yaml (+0/-0) tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.yaml (+0/-0) tests/cloud_tests/testcases/modules/ssh_import_id.yaml (+0/-0) tests/cloud_tests/testcases/modules/ssh_keys_generate.yaml (+0/-0) tests/cloud_tests/testcases/modules/ssh_keys_provided.yaml (+0/-0) tests/cloud_tests/testcases/modules/timezone.yaml (+0/-0) tests/cloud_tests/testcases/modules/user_groups.yaml (+0/-0) tests/cloud_tests/testcases/modules/write_files.yaml (+0/-0) tests/cloud_tests/util.py (+43/-0) tests/unittests/test__init__.py (+1/-1) tests/unittests/test_atomic_helper.py (+1/-1) tests/unittests/test_builtin_handlers.py (+1/-1) tests/unittests/test_cli.py (+146/-4) tests/unittests/test_cs_util.py (+1/-1) tests/unittests/test_data.py (+1/-1) tests/unittests/test_datasource/test_aliyun.py (+7/-6) tests/unittests/test_datasource/test_altcloud.py (+3/-3) tests/unittests/test_datasource/test_azure.py (+4/-2) tests/unittests/test_datasource/test_azure_helper.py (+97/-50) tests/unittests/test_datasource/test_cloudsigma.py (+1/-1) tests/unittests/test_datasource/test_cloudstack.py (+83/-7) tests/unittests/test_datasource/test_common.py (+2/-1) tests/unittests/test_datasource/test_configdrive.py (+1/-1) tests/unittests/test_datasource/test_digitalocean.py (+1/-1) tests/unittests/test_datasource/test_ec2.py (+255/-35) tests/unittests/test_datasource/test_gce.py (+3/-2) tests/unittests/test_datasource/test_maas.py (+1/-1) tests/unittests/test_datasource/test_nocloud.py (+1/-1) 
tests/unittests/test_datasource/test_opennebula.py (+1/-1) tests/unittests/test_datasource/test_openstack.py (+4/-1) tests/unittests/test_datasource/test_ovf.py (+165/-1) tests/unittests/test_datasource/test_scaleway.py (+1/-1) tests/unittests/test_datasource/test_smartos.py (+1/-1) tests/unittests/test_distros/__init__.py (+21/-0) tests/unittests/test_distros/test_arch.py (+45/-0) tests/unittests/test_distros/test_create_users.py (+1/-1) tests/unittests/test_distros/test_debian.py (+42/-24) tests/unittests/test_distros/test_generic.py (+17/-1) tests/unittests/test_distros/test_netconfig.py (+3/-3) tests/unittests/test_distros/test_opensuse.py (+12/-0) tests/unittests/test_distros/test_resolv.py (+1/-1) tests/unittests/test_distros/test_sles.py (+12/-0) tests/unittests/test_distros/test_sysconfig.py (+1/-1) tests/unittests/test_distros/test_user_data_normalize.py (+1/-1) tests/unittests/test_ds_identify.py (+47/-4) tests/unittests/test_ec2_util.py (+1/-1) tests/unittests/test_filters/test_launch_index.py (+1/-1) tests/unittests/test_handler/test_handler_apt_conf_v1.py (+1/-1) tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py (+1/-1) tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py (+1/-1) tests/unittests/test_handler/test_handler_apt_source_v1.py (+1/-1) tests/unittests/test_handler/test_handler_apt_source_v3.py (+1/-1) tests/unittests/test_handler/test_handler_bootcmd.py (+146/-0) tests/unittests/test_handler/test_handler_ca_certs.py (+1/-1) tests/unittests/test_handler/test_handler_chef.py (+76/-12) tests/unittests/test_handler/test_handler_debug.py (+7/-4) tests/unittests/test_handler/test_handler_disk_setup.py (+1/-1) tests/unittests/test_handler/test_handler_growpart.py (+1/-1) tests/unittests/test_handler/test_handler_landscape.py (+130/-0) tests/unittests/test_handler/test_handler_locale.py (+57/-3) tests/unittests/test_handler/test_handler_lxd.py (+1/-1) 
tests/unittests/test_handler/test_handler_mcollective.py (+1/-1) tests/unittests/test_handler/test_handler_mounts.py (+1/-1) tests/unittests/test_handler/test_handler_ntp.py (+102/-5) tests/unittests/test_handler/test_handler_power_state.py (+2/-2) tests/unittests/test_handler/test_handler_puppet.py (+142/-0) tests/unittests/test_handler/test_handler_resizefs.py (+222/-7) tests/unittests/test_handler/test_handler_rsyslog.py (+1/-1) tests/unittests/test_handler/test_handler_runcmd.py (+108/-0) tests/unittests/test_handler/test_handler_seed_random.py (+1/-1) tests/unittests/test_handler/test_handler_set_hostname.py (+4/-3) tests/unittests/test_handler/test_handler_snappy.py (+2/-2) tests/unittests/test_handler/test_handler_spacewalk.py (+1/-1) tests/unittests/test_handler/test_handler_timezone.py (+1/-1) tests/unittests/test_handler/test_handler_write_files.py (+1/-1) tests/unittests/test_handler/test_handler_yum_add_repo.py (+1/-1) tests/unittests/test_handler/test_handler_zypper_add_repo.py (+237/-0) tests/unittests/test_handler/test_schema.py (+151/-16) tests/unittests/test_helpers.py (+1/-1) tests/unittests/test_log.py (+58/-0) tests/unittests/test_merging.py (+1/-1) tests/unittests/test_net.py (+128/-8) tests/unittests/test_pathprefix2dict.py (+1/-1) tests/unittests/test_registry.py (+1/-1) tests/unittests/test_reporting.py (+1/-1) tests/unittests/test_rh_subscription.py (+1/-1) tests/unittests/test_runs/test_merge_run.py (+1/-1) tests/unittests/test_runs/test_simple_run.py (+106/-21) tests/unittests/test_sshutil.py (+2/-1) tests/unittests/test_templating.py (+1/-1) tests/unittests/test_util.py (+13/-2) tests/unittests/test_version.py (+1/-1) tests/unittests/test_vmware_config_file.py (+247/-2) tools/build-on-freebsd (+0/-1) tools/ds-identify (+6/-0) tools/make-tarball (+1/-1) tools/read-version (+1/-1) tools/render-cloudcfg (+3/-2) tools/xkvm (+664/-0) tox.ini (+31/-11) |
||||||||
| Related bugs: |
|
| Reviewer | Review Type | Date Requested | Status |
|---|---|---|---|
| Server Team CI bot | continuous-integration | Approve on 2017-10-06 | |
| Scott Moser | 2017-10-06 | Pending | |
|
Review via email:
|
|||
Commit Message
Description of the Change
Upstream snapshot pulled into Zesty for SRU
To post a comment you must log in.
PASSED: Continuous integration, rev:ca2730e2ac8
https:/
Executed test runs:
SUCCESS: Checkout
SUCCESS: Unit & Style Tests
SUCCESS: Ubuntu LTS: Build
SUCCESS: Ubuntu LTS: Integration
SUCCESS: MAAS Compatibility Testing
IN_PROGRESS: Declarative: Post Actions
Click here to trigger a rebuild:
https:/
review:
Approve
(continuous-integration)
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
| 1 | diff --git a/ChangeLog b/ChangeLog |
| 2 | index 80405bc..0260c57 100644 |
| 3 | --- a/ChangeLog |
| 4 | +++ b/ChangeLog |
| 5 | @@ -1,3 +1,425 @@ |
| 6 | +17.1: |
| 7 | + - doc: document GCE datasource. [Arnd Hannemann] |
| 8 | + - suse: updates to templates to support openSUSE and SLES. |
| 9 | + [Robert Schweikert] (LP: #1718640) |
| 10 | + - suse: Copy sysvinit files from redhat with slight changes. |
| 11 | + [Robert Schweikert] (LP: #1718649) |
| 12 | + - docs: fix sphinx module schema documentation [Chad Smith] |
| 13 | + - tests: Add cloudinit package to all test targets [Chad Smith] |
| 14 | + - Makefile: No longer look for yaml files in obsolete ./bin/. |
| 15 | + - tests: fix ds-identify unit tests to set EC2_STRICT_ID_DEFAULT. |
| 16 | + - ec2: Fix maybe_perform_dhcp_discovery to use /var/tmp as a tmpdir |
| 17 | + [Chad Smith] (LP: #1717627) |
| 18 | + - Azure: wait longer for SSH pub keys to arrive. |
| 19 | + [Paul Meyer] (LP: #1717611) |
| 20 | + - GCE: Fix usage of user-data. (LP: #1717598) |
| 21 | + - cmdline: add collect-logs subcommand. [Chad Smith] (LP: #1607345) |
| 22 | + - CloudStack: consider dhclient lease files named with a hyphen. |
| 23 | + (LP: #1717147) |
| 24 | + - resizefs: Drop check for read-only device file, do not warn on |
| 25 | + overlayroot. [Chad Smith] |
| 26 | + - Do not provide systemd-fsck drop-in which could cause ordering cycles. |
| 27 | + [Balint Reczey] (LP: #1717477) |
| 28 | + - tests: Enable the NoCloud KVM platform [Joshua Powers] |
| 29 | + - resizefs: pass mount point to xfs_growfs [Dusty Mabe] |
| 30 | + - vmware: Enable nics before sending the SUCCESS event. [Sankar Tanguturi] |
| 31 | + - cloud-config modules: honor distros definitions in each module |
| 32 | + [Chad Smith] (LP: #1715738, #1715690) |
| 33 | + - chef: Add option to pin chef omnibus install version |
| 34 | + [Ethan Apodaca] (LP: #1462693) |
| 35 | + - tests: execute: support command as string [Joshua Powers] |
| 36 | + - schema and docs: Add jsonschema to resizefs and bootcmd modules |
| 37 | + [Chad Smith] |
| 38 | + - tools: Add xkvm script, wrapper around qemu-system [Joshua Powers] |
| 39 | + - vmware customization: return network config format |
| 40 | + [Sankar Tanguturi] (LP: #1675063) |
| 41 | + - Ec2: only attempt to operate at local mode on known platforms. |
| 42 | + (LP: #1715128) |
| 43 | + - Use /run/cloud-init for tempfile operations. (LP: #1707222) |
| 44 | + - ds-identify: Make OpenStack return maybe on arch other than intel. |
| 45 | + (LP: #1715241) |
| 46 | + - tests: mock missed openstack metadata uri network_data.json |
| 47 | + [Chad Smith] (LP: #1714376) |
| 48 | + - relocate tests/unittests/helpers.py to cloudinit/tests |
| 49 | + [Lars Kellogg-Stedman] |
| 50 | + - tox: add nose timer output [Joshua Powers] |
| 51 | + - upstart: do not package upstart jobs, drop ubuntu-init-switch module. |
| 52 | + - tests: Stop leaking calls through unmocked metadata addresses |
| 53 | + [Chad Smith] (LP: #1714117) |
| 54 | + - distro: allow distro to specify a default locale [Ryan Harper] |
| 55 | + - tests: fix two recently added tests for sles distro. |
| 56 | + - url_helper: dynamically import oauthlib import from inside oauth_headers |
| 57 | + [Chad Smith] |
| 58 | + - tox: make xenial environment run with python3.6 |
| 59 | + - suse: Add support for openSUSE and return SLES to a working state. |
| 60 | + [Robert Schweikert] |
| 61 | + - GCE: Add a main to the GCE Datasource. |
| 62 | + - ec2: Add IPv6 dhcp support to Ec2DataSource. [Chad Smith] (LP: #1639030) |
| 63 | + - url_helper: fail gracefully if oauthlib is not available |
| 64 | + [Lars Kellogg-Stedman] (LP: #1713760) |
| 65 | + - cloud-init analyze: fix issues running under python 2. [Andrew Jorgensen] |
| 66 | + - Configure logging module to always use UTC time. |
| 67 | + [Ryan Harper] (LP: #1713158) |
| 68 | + - Log a helpful message if a user script does not include shebang. |
| 69 | + [Andrew Jorgensen] |
| 70 | + - cli: Fix command line parsing of conditionally loaded subcommands. |
| 71 | + [Chad Smith] (LP: #1712676) |
| 72 | + - doc: Explain error behavior in user data include file format. |
| 73 | + [Jason Butz] |
| 74 | + - cc_landscape & cc_puppet: Fix six.StringIO use in writing configs |
| 75 | + [Chad Smith] (LP: #1699282, #1710932) |
| 76 | + - schema cli: Add schema subcommand to cloud-init cli and cc_runcmd schema |
| 77 | + [Chad Smith] |
| 78 | + - Debian: Remove non-free repositories from apt sources template. |
| 79 | + [Joonas Kylmälä] (LP: #1700091) |
| 80 | + - tools: Add tooling for basic cloud-init performance analysis. |
| 81 | + [Chad Smith] (LP: #1709761) |
| 82 | + - network: add v2 passthrough and fix parsing v2 config with bonds/bridge |
| 83 | + params [Ryan Harper] (LP: #1709180) |
| 84 | + - doc: update capabilities with features available, link doc reference, |
| 85 | + cli example [Ryan Harper] |
| 86 | + - vcloud directory: Guest Customization support for passwords |
| 87 | + [Maitreyee Saikia] |
| 88 | + - ec2: Allow Ec2 to run in init-local using dhclient in a sandbox. |
| 89 | + [Chad Smith] (LP: #1709772) |
| 90 | + - cc_ntp: fallback on timesyncd configuration if ntp is not installable |
| 91 | + [Ryan Harper] (LP: #1686485) |
| 92 | + - net: Reduce duplicate code. Have get_interfaces_by_mac use |
| 93 | + get_interfaces. |
| 94 | + - tests: Fix build tree integration tests [Joshua Powers] |
| 95 | + - sysconfig: Dont repeat header when rendering resolv.conf |
| 96 | + [Ryan Harper] (LP: #1701420) |
| 97 | + - archlinux: Fix bug with empty dns, do not render 'lo' devices. |
| 98 | + (LP: #1663045, #1706593) |
| 99 | + - cloudinit.net: add initialize_network_device function and tests |
| 100 | + [Chad Smith] |
| 101 | + - makefile: fix ci-deps-ubuntu target [Chad Smith] |
| 102 | + - tests: adjust locale integration test to parse default locale. |
| 103 | + - tests: remove 'yakkety' from releases as it is EOL. |
| 104 | + - tests: Add initial tests for EC2 and improve a docstring. |
| 105 | + - locale: Do not re-run locale-gen if provided locale is system default. |
| 106 | + - archlinux: fix set hostname usage of write_file. |
| 107 | + [Joshua Powers] (LP: #1705306) |
| 108 | + - sysconfig: support subnet type of 'manual'. |
| 109 | + - tools/run-centos: make running with no argument show help. |
| 110 | + - Drop rand_str() usage in DNS redirection detection |
| 111 | + [Bob Aman] (LP: #1088611) |
| 112 | + - sysconfig: use MACADDR on bonds/bridges to configure mac_address |
| 113 | + [Ryan Harper] (LP: #1701417) |
| 114 | + - net: eni route rendering missed ipv6 default route config |
| 115 | + [Ryan Harper] (LP: #1701097) |
| 116 | + - sysconfig: enable mtu set per subnet, including ipv6 mtu |
| 117 | + [Ryan Harper] (LP: #1702513) |
| 118 | + - sysconfig: handle manual type subnets [Ryan Harper] (LP: #1687725) |
| 119 | + - sysconfig: fix ipv6 gateway routes [Ryan Harper] (LP: #1694801) |
| 120 | + - sysconfig: fix rendering of bond, bridge and vlan types. |
| 121 | + [Ryan Harper] (LP: #1695092) |
| 122 | + - Templatize systemd unit files for cross distro deltas. [Ryan Harper] |
| 123 | + - sysconfig: ipv6 and default gateway fixes. [Ryan Harper] (LP: #1704872) |
| 124 | + - net: fix renaming of nics to support mac addresses written in upper |
| 125 | + case. (LP: #1705147) |
| 126 | + - tests: fixes for issues uncovered when moving to python 3.6. |
| 127 | + (LP: #1703697) |
| 128 | + - sysconfig: include GATEWAY value if set in subnet |
| 129 | + [Ryan Harper] (LP: #1686856) |
| 130 | + - Scaleway: add datasource with user and vendor data for Scaleway. |
| 131 | + [Julien Castets] |
| 132 | + - Support comments in content read by load_shell_content. |
| 133 | + - cloudinitlocal fail to run during boot [Hongjiang Zhang] |
| 134 | + - doc: fix disk setup example table_type options |
| 135 | + [Sandor Zeestraten] (LP: #1703789) |
| 136 | + - tools: Fix exception handling. [Joonas Kylmälä] (LP: #1701527) |
| 137 | + - tests: fix usage of mock in GCE test. |
| 138 | + - test_gce: Fix invalid mock of platform_reports_gce to return False |
| 139 | + [Chad Smith] |
| 140 | + - test: fix incorrect keyid for apt repository. |
| 141 | + [Joshua Powers] (LP: #1702717) |
| 142 | + - tests: Update version of pylxd [Joshua Powers] |
| 143 | + - write_files: Remove log from helper function signatures. |
| 144 | + [Andrew Jorgensen] |
| 145 | + - doc: document the cmdline options to NoCloud [Brian Candler] |
| 146 | + - read_dmi_data: always return None when inside a container. (LP: #1701325) |
| 147 | + - requirements.txt: remove trailing white space. |
| 148 | + - Azure: Add network-config, Refactor net layer to handle duplicate macs. |
| 149 | + [Ryan Harper] |
| 150 | + - Tests: Simplify the check on ssh-import-id [Joshua Powers] |
| 151 | + - tests: update ntp tests after sntp added [Joshua Powers] |
| 152 | + - FreeBSD: Make freebsd a variant, fix unittests and |
| 153 | + tools/build-on-freebsd. |
| 154 | + - FreeBSD: fix test failure |
| 155 | + - FreeBSD: replace ifdown/ifup with "ifconfig down" and "ifconfig up". |
| 156 | + [Hongjiang Zhang] (LP: #1697815) |
| 157 | + - FreeBSD: fix cdrom mounting failure if /mnt/cdrom/secure did not exist. |
| 158 | + [Hongjiang Zhang] (LP: #1696295) |
| 159 | + - main: Don't use templater to format the welcome message |
| 160 | + [Andrew Jorgensen] |
| 161 | + - docs: Automatically generate module docs from schema if present. |
| 162 | + [Chad Smith] |
| 163 | + - debian: fix path comment in /etc/hosts template. |
| 164 | + [Jens Sandmann] (LP: #1606406) |
| 165 | + - suse: add hostname and fully qualified domain to template. |
| 166 | + [Jens Sandmann] |
| 167 | + - write_file(s): Print permissions as octal, not decimal [Andrew Jorgensen] |
| 168 | + - ci deps: Add --test-distro to read-dependencies to install all deps |
| 169 | + [Chad Smith] |
| 170 | + - tools/run-centos: cleanups and move to using read-dependencies |
| 171 | + - pkg build ci: Add make ci-deps-<distro> target to install pkgs |
| 172 | + [Chad Smith] |
| 173 | + - systemd: make cloud-final.service run before apt daily services. |
| 174 | + (LP: #1693361) |
| 175 | + - selinux: Allow restorecon to be non-fatal. [Ryan Harper] (LP: #1686751) |
| 176 | + - net: Allow netinfo subprocesses to return 0 or 1. |
| 177 | + [Ryan Harper] (LP: #1686751) |
| 178 | + - net: Allow for NetworkManager configuration [Ryan McCabe] (LP: #1693251) |
| 179 | + - Use distro release version to determine if we use systemd in redhat spec |
| 180 | + [Ryan Harper] |
| 181 | + - net: normalize data in network_state object |
| 182 | + - Integration Testing: tox env, pyxld 2.2.3, and revamp framework |
| 183 | + [Wesley Wiedenmeier] |
| 184 | + - Chef: Update omnibus url to chef.io, minor doc changes. [JJ Asghar] |
| 185 | + - tools: add centos scripts to build and test [Joshua Powers] |
| 186 | + - Drop cheetah python module as it is not needed by trunk [Ryan Harper] |
| 187 | + - rhel/centos spec cleanups. |
| 188 | + - cloud.cfg: move to a template. setup.py changes along the way. |
| 189 | + - Makefile: add deb-src and srpm targets. use PYVER more places. |
| 190 | + - makefile: fix python 2/3 detection in the Makefile [Chad Smith] |
| 191 | + - snap: Removing snapcraft plug line [Joshua Powers] (LP: #1695333) |
| 192 | + - RHEL/CentOS: Fix default routes for IPv4/IPv6 configuration. |
| 193 | + [Andreas Karis] (LP: #1696176) |
| 194 | + - test: Fix pyflakes complaint of unused import. |
| 195 | + [Joshua Powers] (LP: #1695918) |
| 196 | + - NoCloud: support seed of nocloud from smbios information |
| 197 | + [Vladimir Pouzanov] (LP: #1691772) |
| 198 | + - net: when selecting a network device, use natural sort order |
| 199 | + [Marc-Aurèle Brothier] |
| 200 | + - fix typos and remove whitespace in various docs [Stephan Telling] |
| 201 | + - systemd: Fix typo in comment in cloud-init.target. [Chen-Han Hsiao] |
| 202 | + - Tests: Skip jsonschema related unit tests when dependency is absent. |
| 203 | + [Chad Smith] (LP: #1695318) |
| 204 | + - azure: remove accidental duplicate line in merge. |
| 205 | + - azure: identify platform by well known value in chassis asset tag. |
| 206 | + [Chad Smith] (LP: #1693939) |
| 207 | + - tools/net-convert.py: support old cloudinit versions by using kwargs. |
| 208 | + - ntp: Add schema definition and passive schema validation. |
| 209 | + [Chad Smith] (LP: #1692916) |
| 210 | + - Fix eni rendering for bridge params that require repeated key for |
| 211 | + values. [Ryan Harper] |
| 212 | + - net: remove systemd link file writing from eni renderer [Ryan Harper] |
| 213 | + - AliYun: Enable platform identification and enable by default. |
| 214 | + [Junjie Wang] (LP: #1638931) |
| 215 | + - net: fix reading and rendering addresses in cidr format. |
| 216 | + [Dimitri John Ledkov] (LP: #1689346, #1684349) |
| 217 | + - disk_setup: udev settle before attempting partitioning or fs creation. |
| 218 | + (LP: #1692093) |
| 219 | + - GCE: Update the attribute used to find instance SSH keys. |
| 220 | + [Daniel Watkins] (LP: #1693582) |
| 221 | + - nplan: For bonds, allow dashed or underscore names of keys. |
| 222 | + [Dimitri John Ledkov] (LP: #1690480) |
| 223 | + - python2.6: fix unit tests usage of assertNone and format. |
| 224 | + - test: update docstring on test_configured_list_with_none |
| 225 | + - fix tools/ds-identify to not write None twice. |
| 226 | + - tox/build: do not package depend on style requirements. |
| 227 | + - cc_ntp: Restructure cc_ntp unit tests. [Chad Smith] (LP: #1692794) |
| 228 | + - flake8: move the pinned version of flake8 up to 3.3.0 |
| 229 | + - tests: Apply workaround for snapd bug in test case. [Joshua Powers] |
| 230 | + - RHEL/CentOS: Fix dual stack IPv4/IPv6 configuration. |
| 231 | + [Andreas Karis] (LP: #1679817, #1685534, #1685532) |
| 232 | + - disk_setup: fix several issues with gpt disk partitions. (LP: #1692087) |
| 233 | + - function spelling & docstring update [Joshua Powers] |
| 234 | + - Fixing wrong file name regression. [Joshua Powers] |
| 235 | + - tox: move pylint target to 1.7.1 |
| 236 | + - Fix get_interfaces_by_mac for empty macs (LP: #1692028) |
| 237 | + - DigitalOcean: remove routes except for the public interface. |
| 238 | + [Ben Howard] (LP: #1681531) |
| 239 | + - netplan: pass macaddress, when specified, for vlans |
| 240 | + [Dimitri John Ledkov] (LP: #1690388) |
| 241 | + - doc: various improvements for the docs on cc_users_groups. |
| 242 | + [Felix Dreissig] |
| 243 | + - cc_ntp: write template before installing and add service restart |
| 244 | + [Ryan Harper] (LP: #1645644) |
| 245 | + - cloudstack: fix tests to avoid accessing /var/lib/NetworkManager |
| 246 | + [Lars Kellogg-Stedman] |
| 247 | + - tests: fix hardcoded path to mkfs.ext4 [Joshua Powers] (LP: #1691517) |
| 248 | + - Actually skip warnings when .skip file is present. |
| 249 | + [Chris Brinker] (LP: #1691551) |
| 250 | + - netplan: fix netplan render_network_state signature. |
| 251 | + [Dimitri John Ledkov] (LP: #1685944) |
| 252 | + - Azure: fix reformatting of ephemeral disks on resize to large types. |
| 253 | + (LP: #1686514) |
| 254 | + - Revert "tools/net-convert: fix argument order for render_network_state" |
| 255 | + - make deb: Add devscripts dependency for make deb. Cleanup |
| 256 | + packages/bddeb. [Chad Smith] (LP: #1685935) |
| 257 | + - tools/net-convert: fix argument order for render_network_state |
| 258 | + [Ryan Harper] (LP: #1685944) |
| 259 | + - openstack: fix log message copy/paste typo in _get_url_settings |
| 260 | + [Lars Kellogg-Stedman] |
| 261 | + - unittests: fix unittests run on centos [Joshua Powers] |
| 262 | + - Improve detection of snappy to include os-release and kernel cmdline. |
| 263 | + (LP: #1689944) |
| 264 | + - Add address to config entry generated by _klibc_to_config_entry. |
| 265 | + [Julien Castets] (LP: #1691135) |
| 266 | + - sysconfig: Raise ValueError when multiple default gateways are present. |
| 267 | + [Chad Smith] (LP: #1687485) |
| 268 | + - FreeBSD: improvements and fixes for use on Azure |
| 269 | + [Hongjiang Zhang] (LP: #1636345) |
| 270 | + - Add unit tests for ds-identify, fix Ec2 bug found. |
| 271 | + - fs_setup: if cmd is specified, use shell interpretation. |
| 272 | + [Paul Meyer] (LP: #1687712) |
| 273 | + - doc: document network configuration defaults policy and formats. |
| 274 | + [Ryan Harper] |
| 275 | + - Fix name of "uri" key in docs for "cc_apt_configure" module |
| 276 | + [Felix Dreissig] |
| 277 | + - tests: Enable artful [Joshua Powers] |
| 278 | + - nova-lxd: read product_name from environment, not platform. |
| 279 | + (LP: #1685810) |
| 280 | + - Fix yum repo config where keys contain array values |
| 281 | + [Dylan Perry] (LP: #1592150) |
| 282 | + - template: Update debian backports template [Joshua Powers] (LP: #1627293) |
| 283 | + - rsyslog: replace ~ with stop [Joshua Powers] (LP: #1367899) |
| 284 | + - Doc: add additional RTD examples [Joshua Powers] (LP: #1459604) |
| 285 | + - Fix growpart for some cases when booted with root=PARTUUID. |
| 286 | + (LP: #1684869) |
| 287 | + - pylint: update output style to parseable [Joshua Powers] |
| 288 | + - pylint: fix all logging warnings [Joshua Powers] |
| 289 | + - CloudStack: Add NetworkManager to list of supported DHCP lease dirs. |
| 290 | + [Syed] |
| 291 | + - net: kernel lies about vlans not stealing mac addresses, when they do |
| 292 | + [Dimitri John Ledkov] (LP: #1682871) |
| 293 | + - ds-identify: Check correct path for "latest" config drive |
| 294 | + [Daniel Watkins] (LP: #1673637) |
| 295 | + - doc: Fix example for resolve.conf configuration. |
| 296 | + [Jon Grimm] (LP: #1531582) |
| 297 | + - Fix examples that reference upstream chef repository. |
| 298 | + [Jon Grimm] (LP: #1678145) |
| 299 | + - doc: correct grammar and improve clarity in merging documentation. |
| 300 | + [David Tagatac] |
| 301 | + - doc: Add missing doc link to snap-config module. [Ryan Harper] |
| 302 | + - snap: allows for creating cloud-init snap [Joshua Powers] |
| 303 | + - DigitalOcean: assign IPv4ll address to lowest indexed interface. |
| 304 | + [Ben Howard] |
| 305 | + - DigitalOcean: configure all NICs presented in meta-data. [Ben Howard] |
| 306 | + - Remove (and/or fix) URL shortener references [Jon Grimm] (LP: #1669727) |
| 307 | + - HACKING.rst: more info on filling out contributors agreement. |
| 308 | + - util: teach write_file about copy_mode option |
| 309 | + [Lars Kellogg-Stedman] (LP: #1644064) |
| 310 | + - DigitalOcean: bind resolvers to loopback interface. [Ben Howard] |
| 311 | + - tests: fix AltCloud tests to not rely on blkid (LP: #1636531) |
| 312 | + - OpenStack: add 'dvs' to the list of physical link types. (LP: #1674946) |
| 313 | + - Fix bug that resulted in an attempt to rename bonds or vlans. |
| 314 | + (LP: #1669860) |
| 315 | + - tests: update OpenNebula and Digital Ocean to not rely on host |
| 316 | + interfaces. |
| 317 | + - net: in netplan renderer delete known image-builtin content. |
| 318 | + (LP: #1675576) |
| 319 | + - doc: correct grammar in capabilities.rst [David Tagatac] |
| 320 | + - ds-identify: fix detecting of maas datasource. (LP: #1677710) |
| 321 | + - netplan: remove debugging prints, add debug logging [Ryan Harper] |
| 322 | + - ds-identify: do not write None twice to datasource_list. |
| 323 | + - support resizing partition and rootfs on system booted without |
| 324 | + initramfs. [Steve Langasek] (LP: #1677376) |
| 325 | + - apt_configure: run only when needed. (LP: #1675185) |
| 326 | + - OpenStack: identify OpenStack by product 'OpenStack Compute'. |
| 327 | + (LP: #1675349) |
| 328 | + - GCE: Search GCE in ds-identify, consider serial number in check. |
| 329 | + (LP: #1674861) |
| 330 | + - Add support for setting hashed passwords [Tore S. Lonoy] (LP: #1570325) |
| 331 | + - Fix filesystem creation when using "partition: auto" |
| 332 | + [Jonathan Ballet] (LP: #1634678) |
| 333 | + - ConfigDrive: support reading config drive data from /config-drive. |
| 334 | + (LP: #1673411) |
| 335 | + - ds-identify: fix detection of Bigstep datasource. (LP: #1674766) |
| 336 | + - test: add running of pylint [Joshua Powers] |
| 337 | + - ds-identify: fix bug where filename expansion was left on. |
| 338 | + - advertise network config v2 support (NETWORK_CONFIG_V2) in features. |
| 339 | + - Bigstep: fix bug when executing in python3. [root] |
| 340 | + - Fix unit test when running in a system deployed with cloud-init. |
| 341 | + - Bounce network interface for Azure when using the built-in path. |
| 342 | + [Brent Baude] (LP: #1674685) |
| 343 | + - cloudinit.net: add network config v2 parsing and rendering [Ryan Harper] |
| 344 | + - net: Fix incorrect call to isfile [Joshua Powers] (LP: #1674317) |
| 345 | + - net: add renderers for automatically selecting the renderer. |
| 346 | + - doc: fix config drive doc with regard to unpartitioned disks. |
| 347 | + (LP: #1673818) |
| 348 | + - test: Adding integration test for password as list [Joshua Powers] |
| 349 | + - render_network_state: switch arguments around, do not require target |
| 350 | + - support 'loopback' as a device type. |
| 351 | + - Integration Testing: improve testcase subclassing [Wesley Wiedenmeier] |
| 352 | + - gitignore: adding doc/rtd_html [Joshua Powers] |
| 353 | + - doc: add instructions for running integration tests via tox. |
| 354 | + [Joshua Powers] |
| 355 | + - test: avoid differences in 'date' output due to daylight savings. |
| 356 | + - Fix chef config module in omnibus install. [Jeremy Melvin] (LP: #1583837) |
| 357 | + - Add feature flags to cloudinit.version. [Wesley Wiedenmeier] |
| 358 | + - tox: add a citest environment |
| 359 | + - Further fix regression to support 'password' for default user. |
| 360 | + - fix regression when no chpasswd/list was provided. |
| 361 | + - Support chpasswd/list being a list in addition to a string. |
| 362 | + [Sergio Lystopad] (LP: #1665694) |
| 363 | + - doc: Fix configuration example for cc_set_passwords module. |
| 364 | + [Sergio Lystopad] (LP: #1665773) |
| 365 | + - net: support both ipv4 and ipv6 gateways in sysconfig. |
| 366 | + [Lars Kellogg-Stedman] (LP: #1669504) |
| 367 | + - net: do not raise exception for > 3 nameservers |
| 368 | + [Lars Kellogg-Stedman] (LP: #1670052) |
| 369 | + - ds-identify: report cleanups for config and exit value. (LP: #1669949) |
| 370 | + - ds-identify: move default setting for Ec2/strict_id to a global. |
| 371 | + - ds-identify: record not found in cloud.cfg and always add None. |
| 372 | + - Support warning if the used datasource is not in ds-identify's list. |
| 373 | + - tools/ds-identify: make report mode write namespaced results. |
| 374 | + - Move warning functionality to cloudinit/warnings.py |
| 375 | + - Add profile.d script for showing warnings on login. |
| 376 | + - Z99-cloud-locale-test.sh: install and make consistent. |
| 377 | + - tools/ds-identify: look at cloud.cfg when looking for ec2 strict_id. |
| 378 | + - tools/ds-identify: disable vmware_guest_customization by default. |
| 379 | + - tools/ds-identify: ovf identify vmware guest customization. |
| 380 | + - Identify Brightbox as an Ec2 datasource user. (LP: #1661693) |
| 381 | + - DatasourceEc2: add warning message when not on AWS. |
| 382 | + - ds-identify: add reading of datasource/Ec2/strict_id |
| 383 | + - tools/ds-identify: add support for found or maybe contributing config. |
| 384 | + - tools/ds-identify: read the seed directory on Ec2 |
| 385 | + - tools/ds-identify: use quotes in local declarations. |
| 386 | + - tools/ds-identify: fix documentation of policy setting in a comment. |
| 387 | + - ds-identify: only run once per boot unless --force is given. |
| 388 | + - flake8: fix flake8 complaints in previous commit. |
| 389 | + - net: correct errors in cloudinit/net/sysconfig.py |
| 390 | + [Lars Kellogg-Stedman] (LP: #1665441) |
| 391 | + - ec2_utils: fix MetadataLeafDecoder that returned bytes on empty |
| 392 | + - apply the runtime configuration written by ds-identify. |
| 393 | + - ds-identify: fix checking for filesystem label (LP: #1663735) |
| 394 | + - ds-identify: read ds=nocloud properly (LP: #1663723) |
| 395 | + - support nova-lxd by reading platform from environment of pid 1. |
| 396 | + (LP: #1661797) |
| 397 | + - ds-identify: change aarch64 to use the default for non-dmi systems. |
| 398 | + - Remove style checking during build and add latest style checks to tox |
| 399 | + [Joshua Powers] (LP: #1652329) |
| 400 | + - code-style: make master pass pycodestyle (2.3.1) cleanly, currently: |
| 401 | + [Joshua Powers] |
| 402 | + - manual_cache_clean: When manually cleaning touch a file in instance dir. |
| 403 | + - Add tools/ds-identify to identify datasources available. |
| 404 | + - Fix small typo and change iso-filename for consistency [Robin Naundorf] |
| 405 | + - Fix eni rendering of multiple IPs per interface |
| 406 | + [Ryan Harper] (LP: #1657940) |
| 407 | + - tools/mock-meta: support python2 or python3 and ipv6 in both. |
| 408 | + - tests: remove executable bit on test_net, so it runs, and fix it. |
| 409 | + - tests: No longer monkey patch httpretty for python 3.4.2 |
| 410 | + - Add 3 ecdsa-sha2-nistp* ssh key types now that they are standardized |
| 411 | + [Lars Kellogg-Stedman] (LP: #1658174) |
| 412 | + - reset httpretty for each test [Lars Kellogg-Stedman] (LP: #1658200) |
| 413 | + - build: fix running Make on a branch with tags other than master |
| 414 | + - EC2: Do not cache security credentials on disk |
| 415 | + [Andrew Jorgensen] (LP: #1638312) |
| 416 | + - doc: Fix typos and clarify some aspects of the part-handler |
| 417 | + [Erik M. Bray] |
| 418 | + - doc: add some documentation on OpenStack datasource. |
| 419 | + - OpenStack: Use timeout and retries from config in get_data. |
| 420 | + [Lars Kellogg-Stedman] (LP: #1657130) |
| 421 | + - Fixed Misc issues related to VMware customization. [Sankar Tanguturi] |
| 422 | + - Fix minor docs typo: perserve > preserve [Jeremy Bicha] |
| 423 | + - Use dnf instead of yum when available |
| 424 | + [Lars Kellogg-Stedman] (LP: #1647118) |
| 425 | + - validate-yaml: use python rather than explicitly python3 |
| 426 | + - Get early logging logged, including failures of cmdline url. |
| 427 | + |
| 428 | 0.7.9: |
| 429 | - doc: adjust headers in tests documentation for consistency. |
| 430 | - pep8: fix issue found in zesty build with pycodestyle. |
| 431 | diff --git a/Makefile b/Makefile |
| 432 | index f280911..4ace227 100644 |
| 433 | --- a/Makefile |
| 434 | +++ b/Makefile |
| 435 | @@ -4,7 +4,7 @@ PYVER ?= $(shell for p in python3 python2; do \ |
| 436 | |
| 437 | noseopts ?= -v |
| 438 | |
| 439 | -YAML_FILES=$(shell find cloudinit bin tests tools -name "*.yaml" -type f ) |
| 440 | +YAML_FILES=$(shell find cloudinit tests tools -name "*.yaml" -type f ) |
| 441 | YAML_FILES+=$(shell find doc/examples -name "cloud-config*.txt" -type f ) |
| 442 | |
| 443 | PIP_INSTALL := pip install |
| 444 | @@ -48,10 +48,10 @@ pyflakes3: |
| 445 | @$(CWD)/tools/run-pyflakes3 |
| 446 | |
| 447 | unittest: clean_pyc |
| 448 | - nosetests $(noseopts) tests/unittests |
| 449 | + nosetests $(noseopts) tests/unittests cloudinit |
| 450 | |
| 451 | unittest3: clean_pyc |
| 452 | - nosetests3 $(noseopts) tests/unittests |
| 453 | + nosetests3 $(noseopts) tests/unittests cloudinit |
| 454 | |
| 455 | ci-deps-ubuntu: |
| 456 | @$(PYVER) $(CWD)/tools/read-dependencies --distro ubuntu --test-distro |
| 457 | diff --git a/cloudinit/analyze/__init__.py b/cloudinit/analyze/__init__.py |
| 458 | new file mode 100644 |
| 459 | index 0000000..e69de29 |
| 460 | --- /dev/null |
| 461 | +++ b/cloudinit/analyze/__init__.py |
| 462 | diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py |
| 463 | new file mode 100644 |
| 464 | index 0000000..69b9e43 |
| 465 | --- /dev/null |
| 466 | +++ b/cloudinit/analyze/__main__.py |
| 467 | @@ -0,0 +1,155 @@ |
| 468 | +# Copyright (C) 2017 Canonical Ltd. |
| 469 | +# |
| 470 | +# This file is part of cloud-init. See LICENSE file for license information. |
| 471 | + |
| 472 | +import argparse |
| 473 | +import re |
| 474 | +import sys |
| 475 | + |
| 476 | +from . import dump |
| 477 | +from . import show |
| 478 | + |
| 479 | + |
def get_parser(parser=None):
    """Build (or extend) the argument parser for cloudinit-analyze.

    @param parser: optional ArgumentParser to attach subcommands onto;
        a fresh one is created when omitted.
    @return: the parser, wired with 'blame', 'show' and 'dump' subcommands.
        Each subcommand stores ('name', handler) under args.action.
    """
    if not parser:
        parser = argparse.ArgumentParser(
            prog='cloudinit-analyze',
            description='Devel tool: Analyze cloud-init logs and data')
    subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand')
    subparsers.required = True

    blame = subparsers.add_parser(
        'blame', help='Print list of executed stages ordered by time to init')
    blame.add_argument(
        '-i', '--infile', action='store', dest='infile',
        default='/var/log/cloud-init.log',
        help='specify where to read input.')
    blame.add_argument(
        '-o', '--outfile', action='store', dest='outfile', default='-',
        help='specify where to write output. ')
    blame.set_defaults(action=('blame', analyze_blame))

    show_cmd = subparsers.add_parser(
        'show', help='Print list of in-order events during execution')
    show_cmd.add_argument(
        '-f', '--format', action='store', dest='print_format',
        default='%I%D @%Es +%ds',
        help='specify formatting of output.')
    show_cmd.add_argument(
        '-i', '--infile', action='store', dest='infile',
        default='/var/log/cloud-init.log',
        help='specify where to read input.')
    show_cmd.add_argument(
        '-o', '--outfile', action='store', dest='outfile', default='-',
        help='specify where to write output.')
    show_cmd.set_defaults(action=('show', analyze_show))

    dump_cmd = subparsers.add_parser(
        'dump', help='Dump cloud-init events in JSON format')
    dump_cmd.add_argument(
        '-i', '--infile', action='store', dest='infile',
        default='/var/log/cloud-init.log',
        help='specify where to read input. ')
    dump_cmd.add_argument(
        '-o', '--outfile', action='store', dest='outfile', default='-',
        help='specify where to write output. ')
    dump_cmd.set_defaults(action=('dump', analyze_dump))
    return parser
| 521 | + |
| 522 | + |
def analyze_blame(name, args):
    """Report a list of records sorted by largest time delta.

    For example:
      30.210s (init-local) searching for datasource
       8.706s (init-network) reading and applying user-data
        166ms (modules-config) ....
        807us (modules-final) ...

    We generate event records parsing cloud-init logs, formatting the output
    and sorting by record data ('delta')
    """
    (infh, outfh) = configure_io(args)
    blame_format = ' %ds (%n)'
    # Raw string: '\s' and '\d' are invalid escape sequences in a plain
    # literal (DeprecationWarning now, SyntaxError in future Pythons).
    r = re.compile(r'(^\s+\d+\.\d+)', re.MULTILINE)
    boot_count = 0
    for boot_count, record in enumerate(
            show.show_events(_get_events(infh), blame_format), 1):
        # deltas are rendered fixed-width zero-padded, so a lexical
        # reverse sort orders records largest-delta first
        srecs = sorted(filter(r.match, record), reverse=True)
        outfh.write('-- Boot Record %02d --\n' % boot_count)
        outfh.write('\n'.join(srecs) + '\n')
        outfh.write('\n')
    outfh.write('%d boot records analyzed\n' % boot_count)
| 545 | + |
| 546 | + |
def analyze_show(name, args):
    """Generate output records using the 'standard' format to printing events.

    Example output follows:
        Starting stage: (init-local)
        ...
        Finished stage: (init-local) 0.105195 seconds

    and likewise for the init-network, modules-config and modules-final
    stages, each with its elapsed time in seconds.
    """
    (infh, outfh) = configure_io(args)
    boot_count = 0
    for boot_count, record in enumerate(
            show.show_events(_get_events(infh), args.print_format), 1):
        outfh.write('-- Boot Record %02d --\n' % boot_count)
        outfh.write('The total time elapsed since completing an event is'
                    ' printed after the "@" character.\n')
        outfh.write('The time the event takes is printed after the "+" '
                    'character.\n\n')
        outfh.write('\n'.join(record) + '\n')
    outfh.write('%d boot records analyzed\n' % boot_count)
| 577 | + |
| 578 | + |
def analyze_dump(name, args):
    """Dump cloud-init events in json format"""
    infh, outfh = configure_io(args)
    events = _get_events(infh)
    outfh.write(dump.json_dumps(events) + '\n')
| 583 | + |
| 584 | + |
def _get_events(infile):
    """Return parsed events from *infile*.

    Try JSON first (the output of 'analyze dump'); when the file is not
    valid JSON, fall back to parsing it as a raw cloud-init log.
    """
    # (The original pre-initialized rawdata = None; load_events always
    # rebinds it, so the dead assignment is dropped.)
    events, rawdata = show.load_events(infile, None)
    if not events:
        events, _ = dump.dump_events(rawdata=rawdata)
    return events
| 591 | + |
| 592 | + |
def configure_io(args):
    """Common parsing and setup of input/output files.

    '-' selects stdin/stdout; any failure to open a path writes an error
    to stderr and exits with status 1.
    @return: tuple of (input handle, output handle).
    """
    def _open_or_exit(path, mode, std_stream):
        # resolve one of args.infile/args.outfile to a file handle
        if path == '-':
            return std_stream
        try:
            return open(path, mode)
        except OSError:
            sys.stderr.write('Cannot open file %s\n' % path)
            sys.exit(1)

    # evaluated left-to-right, so an unreadable infile exits before the
    # outfile is ever opened (matching the original flow)
    return (_open_or_exit(args.infile, 'r', sys.stdin),
            _open_or_exit(args.outfile, 'w', sys.stdout))
| 614 | + |
| 615 | + |
if __name__ == '__main__':
    # CLI entry point: build the parser, then dispatch to the handler the
    # chosen subcommand stored under args.action as ('name', callable).
    parser = get_parser()
    args = parser.parse_args()
    (name, action_functor) = args.action
    action_functor(name, args)

# vi: ts=4 expandtab
| 623 | diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py |
| 624 | new file mode 100644 |
| 625 | index 0000000..ca4da49 |
| 626 | --- /dev/null |
| 627 | +++ b/cloudinit/analyze/dump.py |
| 628 | @@ -0,0 +1,176 @@ |
| 629 | +# This file is part of cloud-init. See LICENSE file for license information. |
| 630 | + |
| 631 | +import calendar |
| 632 | +from datetime import datetime |
| 633 | +import json |
| 634 | +import sys |
| 635 | + |
| 636 | +from cloudinit import util |
| 637 | + |
# Human-readable description for each known cloud-init stage name,
# used when a log line names the stage but carries no description.
stage_to_description = {
    'finished': 'finished running cloud-init',
    'init-local': 'starting search for local datasources',
    'init-network': 'searching for network datasources',
    'init': 'searching for network datasources',
    'modules-config': 'running config modules',
    'modules-final': 'finalizing modules',
    'modules': 'running modules for',
    'single': 'running single module ',
}

# logger's asctime format (e.g. "2016-09-12 14:39:20,839")
CLOUD_INIT_ASCTIME_FMT = "%Y-%m-%d %H:%M:%S,%f"

# journalctl -o short-precise (e.g. "Nov 03 06:51:06.074410"; year appended)
CLOUD_INIT_JOURNALCTL_FMT = "%b %d %H:%M:%S.%f %Y"

# other syslog-style stamps (e.g. "Aug 29 22:55:26"; year appended)
DEFAULT_FMT = "%b %d %H:%M:%S %Y"
| 657 | + |
| 658 | + |
def parse_timestamp(timestampstr):
    """Convert a log timestamp string to a float epoch value.

    Handles syslog/journalctl stamps (which lack a year; the current
    year is assumed), cloud-init's own asctime format, and falls back
    to date(1) for anything else.
    """
    # default syslog time does not include the current year
    month_names = calendar.month_abbr[1:13]
    if timestampstr.split()[0] in month_names:
        # e.g. "Aug 29 22:55:26"; a '.' means journalctl's precise form
        fmt = CLOUD_INIT_JOURNALCTL_FMT if '.' in timestampstr \
            else DEFAULT_FMT
        parsed = datetime.strptime(
            "%s %s" % (timestampstr, datetime.now().year), fmt)
        stamp = parsed.strftime("%s.%f")
    elif "," in timestampstr:
        # e.g. "2016-09-12 14:39:20,839"
        parsed = datetime.strptime(timestampstr, CLOUD_INIT_ASCTIME_FMT)
        stamp = parsed.strftime("%s.%f")
    else:
        # allow date(1) to handle other formats we don't expect
        stamp = parse_timestamp_from_date(timestampstr)

    return float(stamp)
| 680 | + |
| 681 | + |
def parse_timestamp_from_date(timestampstr):
    """Ask date(1) to interpret *timestampstr*; return float epoch seconds."""
    stdout, _stderr = util.subp(['date', '+%s.%3N', '-d', timestampstr])
    return float(stdout.strip())
| 686 | + |
| 687 | + |
def parse_ci_logline(line):
    """Parse one cloud-init log line into an event dict, or None.

    Recognizes logger-formatted lines (' - ' separator) as well as
    syslog/journalctl '[CLOUDINIT]' lines. Returns None for lines that
    are not events (including the 'finished at' banner); otherwise a
    dict with name, description, timestamp, origin and event_type keys
    ('finish' events additionally carry a result key).
    """
    # Stage Starts:
    # Cloud-init v. 0.7.7 running 'init-local' at \
    # Fri, 02 Sep 2016 19:28:07 +0000. Up 1.0 seconds.
    # Cloud-init v. 0.7.7 running 'init' at \
    # Fri, 02 Sep 2016 19:28:08 +0000. Up 2.0 seconds.
    # Cloud-init v. 0.7.7 finished at
    # Aug 29 22:55:26 test1 [CLOUDINIT] handlers.py[DEBUG]: \
    # finish: modules-final: SUCCESS: running modules for final
    # 2016-08-30T21:53:25.972325+00:00 y1 [CLOUDINIT] handlers.py[DEBUG]: \
    # finish: modules-final: SUCCESS: running modules for final
    #
    # Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT] util.py[DEBUG]: \
    # Cloud-init v. 0.7.8 running 'init-local' at \
    # Thu, 03 Nov 2016 06:51:06 +0000. Up 1.0 seconds.
    #
    # 2017-05-22 18:02:01,088 - util.py[DEBUG]: Cloud-init v. 0.7.9 running \
    # 'init-local' at Mon, 22 May 2017 18:02:01 +0000. Up 2.0 seconds.

    separators = [' - ', ' [CLOUDINIT] ']
    found = False
    for sep in separators:
        if sep in line:
            found = True
            break

    if not found:
        return None

    # NOTE: deliberately relies on 'sep' retaining the matched separator
    # after the loop above.
    (timehost, eventstr) = line.split(sep)

    # journalctl -o short-precise: strip the trailing 'cloud-init[pid]:' token
    if timehost.endswith(":"):
        timehost = " ".join(timehost.split()[0:-1])

    if "," in timehost:
        # logger asctime form: keep only the millisecond field after ','
        timestampstr, extra = timehost.split(",")
        timestampstr += ",%s" % extra.split()[0]
        if ' ' in extra:
            hostname = extra.split()[-1]
    else:
        # syslog/journalctl form: hostname is the last token; the
        # timestamp is everything before it
        hostname = timehost.split()[-1]
        timestampstr = timehost.split(hostname)[0].strip()
    if 'Cloud-init v.' in eventstr:
        event_type = 'start'
        if 'running' in eventstr:
            stage_and_timestamp = eventstr.split('running')[1].lstrip()
            event_name, _ = stage_and_timestamp.split(' at ')
            event_name = event_name.replace("'", "").replace(":", "-")
            if event_name == "init":
                # bare 'init' means the network init stage
                event_name = "init-network"
        else:
            # don't generate a start for the 'finished at' banner
            return None
        event_description = stage_to_description[event_name]
    else:
        # e.g. "handlers.py[DEBUG]: start: init-network/...: <description>"
        (pymodloglvl, event_type, event_name) = eventstr.split()[0:3]
        event_description = eventstr.split(event_name)[1].strip()

    event = {
        'name': event_name.rstrip(":"),
        'description': event_description,
        'timestamp': parse_timestamp(timestampstr),
        'origin': 'cloudinit',
        'event_type': event_type.rstrip(":"),
    }
    if event['event_type'] == "finish":
        # split "SUCCESS: running modules ..." into result + description
        result = event_description.split(":")[0]
        desc = event_description.split(result)[1].lstrip(':').strip()
        event['result'] = result
        event['description'] = desc.strip()

    return event
| 761 | + |
| 762 | + |
def json_dumps(data):
    """Serialize *data* to sorted, 1-space-indented JSON."""
    return json.dumps(
        data, indent=1, sort_keys=True, separators=(',', ': '))
| 766 | + |
| 767 | + |
def dump_events(cisource=None, rawdata=None):
    """Parse cloud-init log content into a list of event dicts.

    @param cisource: file-like object to read log lines from.
    @param rawdata: string of log data; takes precedence over cisource.
    @return: tuple of (list of event dicts, list of raw lines examined).
    @raises ValueError: when neither cisource nor rawdata is provided.
    """
    events = []
    CI_EVENT_MATCHES = ['start:', 'finish:', 'Cloud-init v.']

    if not any([cisource, rawdata]):
        raise ValueError('Either cisource or rawdata parameters are required')

    if rawdata:
        data = rawdata.splitlines()
    else:
        data = cisource.readlines()

    for line in data:
        for match in CI_EVENT_MATCHES:
            if match in line:
                # Reset per matching line. The original kept 'event' bound
                # across iterations, so a ValueError from parse_ci_logline
                # re-appended the previous line's event.
                event = None
                try:
                    event = parse_ci_logline(line)
                except ValueError:
                    sys.stderr.write('Skipping invalid entry\n')
                if event:
                    events.append(event)
                # One event per line: without this break a line containing
                # two markers was appended twice.
                break

    return events, data
| 792 | + |
| 793 | + |
def main():
    """Read a cloud-init log from argv[1] (or stdin); return events as JSON."""
    if len(sys.argv) > 1:
        cisource = open(sys.argv[1])
    else:
        cisource = sys.stdin

    # dump_events returns (events, rawdata); the original serialized the
    # whole tuple, leaking every raw log line into the JSON output.
    events, _rawdata = dump_events(cisource)
    return json_dumps(events)
| 801 | + |
| 802 | + |
if __name__ == "__main__":
    # CLI entry point: print parsed events as JSON to stdout.
    print(main())
| 805 | diff --git a/cloudinit/analyze/show.py b/cloudinit/analyze/show.py |
| 806 | new file mode 100644 |
| 807 | index 0000000..3e778b8 |
| 808 | --- /dev/null |
| 809 | +++ b/cloudinit/analyze/show.py |
| 810 | @@ -0,0 +1,207 @@ |
| 811 | +# Copyright (C) 2016 Canonical Ltd. |
| 812 | +# |
| 813 | +# Author: Ryan Harper <ryan.harper@canonical.com> |
| 814 | +# |
| 815 | +# This file is part of cloud-init. See LICENSE file for license information. |
| 816 | + |
| 817 | +import base64 |
| 818 | +import datetime |
| 819 | +import json |
| 820 | +import os |
| 821 | + |
| 822 | +from cloudinit import util |
| 823 | + |
# An event (shape of the dicts this module consumes), for example:
'''
{
        "description": "executing late commands",
        "event_type": "start",
        "level": "INFO",
        "name": "cmd-install/stage-late"
        "origin": "cloudinit",
        "timestamp": 1461164249.1590767,
},

    {
        "description": "executing late commands",
        "event_type": "finish",
        "level": "INFO",
        "name": "cmd-install/stage-late",
        "origin": "cloudinit",
        "result": "SUCCESS",
        "timestamp": 1461164249.1590767
    }

'''
# Mapping of %-placeholders (accepted in user-supplied print formats)
# to event-record field names; consumed by format_record().
format_key = {
    '%d': 'delta',
    '%D': 'description',
    '%E': 'elapsed',
    '%e': 'event_type',
    '%I': 'indent',
    '%l': 'level',
    '%n': 'name',
    '%o': 'origin',
    '%r': 'result',
    '%t': 'timestamp',
    '%T': 'total_time',
}

# Human-readable help text listing the placeholders ('%' escaped for argparse)
formatting_help = " ".join(["{0}: {1}".format(k.replace('%', '%%'), v)
                           for k, v in format_key.items()])
| 862 | + |
| 863 | + |
def format_record(msg, event):
    """Expand %-style placeholders in *msg* with fields from *event*."""
    for placeholder, field in format_key.items():
        if placeholder not in msg:
            continue
        if field in ['delta', 'elapsed', 'timestamp']:
            # fixed-width, zero-padded so time values align and sort cleanly
            msg = msg.replace(placeholder, "{%s:08.5f}" % field)
        else:
            msg = msg.replace(placeholder, "{%s}" % field)
    return msg.format(**event)
| 873 | + |
| 874 | + |
def dump_event_files(event):
    """Write out base64-encoded files carried by *event*.

    Each entry of event['files'] is decoded and written to the current
    directory under its basename.
    @return: list of local filenames written.
    """
    meta = dict((k, v) for k, v in event.items() if k not in ['content'])
    saved = []
    for entry in meta['files']:
        local_name = os.path.basename(entry['path'])
        decoded = base64.b64decode(entry['content']).decode('ascii')
        util.write_file(local_name, decoded)
        saved.append(local_name)

    return saved
| 887 | + |
| 888 | + |
def event_name(event):
    """Return the event's name, or None for an empty/missing event."""
    return event.get('name') if event else None
| 893 | + |
| 894 | + |
def event_type(event):
    """Return the event's event_type, or None for an empty/missing event."""
    return event.get('event_type') if event else None
| 899 | + |
| 900 | + |
def event_parent(event):
    """Return the component of the event name before the first '/'."""
    if not event:
        return None
    return event_name(event).split("/")[0]
| 905 | + |
| 906 | + |
def event_timestamp(event):
    """Return the event's timestamp coerced to a float (epoch seconds)."""
    return float(event.get('timestamp'))
| 909 | + |
| 910 | + |
def event_datetime(event):
    """Return the event's timestamp as a naive UTC datetime object."""
    stamp = event_timestamp(event)
    return datetime.datetime.utcfromtimestamp(stamp)
| 913 | + |
| 914 | + |
def delta_seconds(t1, t2):
    """Return seconds elapsed from datetime *t1* to datetime *t2*."""
    delta = t2 - t1
    return delta.total_seconds()
| 917 | + |
| 918 | + |
def event_duration(start, finish):
    """Return seconds between a start event and its matching finish event."""
    return delta_seconds(event_datetime(start), event_datetime(finish))
| 921 | + |
| 922 | + |
def event_record(start_time, start, finish):
    """Build a display record for a start/finish event pair.

    The finish event is copied and annotated with 'delta' (duration),
    'elapsed' (time since boot start) and an 'indent' string derived
    from the event name's nesting depth.
    """
    record = dict(finish)
    depth = event_name(start).count('/') - 1
    record['delta'] = event_duration(start, finish)
    record['elapsed'] = delta_seconds(start_time, event_datetime(start))
    record['indent'] = '|' + ' ' * depth + '`->'
    return record
| 932 | + |
| 933 | + |
def total_time_record(total_time):
    """Render the per-boot total-time summary line."""
    return 'Total Time: {0:3.5f} seconds\n'.format(total_time)
| 936 | + |
| 937 | + |
def generate_records(events, blame_sort=False,
                     print_format="(%n) %d seconds in %I%D",
                     dump_files=False, log_datafiles=False):
    """Pair start/finish events into formatted per-boot record lists.

    A new boot is detected when a stage name repeats; each boot yields
    its own list of formatted records terminated by a total-time line.

    @param events: list of event dicts parsed from logs or JSON.
    @param print_format: placeholder format passed to format_record().
    @param blame_sort, dump_files, log_datafiles: accepted for interface
        compatibility; not used by this function.
    @return: list of per-boot lists of formatted record strings.
    """
    sorted_events = sorted(events, key=lambda x: x['timestamp'])
    records = []
    start_time = None
    total_time = 0.0
    stage_start_time = {}
    stages_seen = []
    boot_records = []

    unprocessed = []
    for e in range(0, len(sorted_events)):
        # BUG FIX: the original indexed the *unsorted* 'events' here (and
        # for next_evt), which broke pairing whenever log lines arrived
        # out of timestamp order.
        event = sorted_events[e]
        try:
            next_evt = sorted_events[e + 1]
        except IndexError:
            next_evt = None

        if event_type(event) == 'start':
            if event.get('name') in stages_seen:
                # repeated stage name => a new boot; flush previous boot
                records.append(total_time_record(total_time))
                boot_records.append(records)
                records = []
                start_time = None
                total_time = 0.0

            if start_time is None:
                stages_seen = []
                start_time = event_datetime(event)
                stage_start_time[event_parent(event)] = start_time

            # see if we have a pair
            if event_name(event) == event_name(next_evt):
                if event_type(next_evt) == 'finish':
                    records.append(format_record(print_format,
                                                 event_record(start_time,
                                                              event,
                                                              next_evt)))
            else:
                # This is a parent event
                records.append("Starting stage: %s" % event.get('name'))
                unprocessed.append(event)
                stages_seen.append(event.get('name'))
                continue
        else:
            prev_evt = unprocessed.pop()
            if event_name(event) == event_name(prev_evt):
                record = event_record(start_time, prev_evt, event)
                records.append(format_record("Finished stage: "
                                             "(%n) %d seconds ",
                                             record) + "\n")
                total_time += record.get('delta')
            else:
                # not a match, put it back
                unprocessed.append(prev_evt)

    records.append(total_time_record(total_time))
    boot_records.append(records)
    return boot_records
| 999 | + |
| 1000 | + |
def show_events(events, print_format):
    """Return per-boot formatted record lists for *events*."""
    boot_records = generate_records(events, print_format=print_format)
    return boot_records
| 1003 | + |
| 1004 | + |
def load_events(infile, rawdata=None):
    """Read event data and attempt to interpret it as JSON.

    @param infile: file-like object to read when rawdata is not given.
    @param rawdata: optional file-like object that takes precedence.
    @return: tuple of (parsed JSON value or None, raw text that was read).
    """
    data = rawdata.read() if rawdata else infile.read()

    try:
        parsed = json.loads(data)
    except ValueError:
        # not JSON; caller falls back to raw-log parsing
        parsed = None

    return parsed, data
| 1018 | diff --git a/cloudinit/analyze/tests/test_dump.py b/cloudinit/analyze/tests/test_dump.py |
| 1019 | new file mode 100644 |
| 1020 | index 0000000..f4c4284 |
| 1021 | --- /dev/null |
| 1022 | +++ b/cloudinit/analyze/tests/test_dump.py |
| 1023 | @@ -0,0 +1,210 @@ |
| 1024 | +# This file is part of cloud-init. See LICENSE file for license information. |
| 1025 | + |
| 1026 | +from datetime import datetime |
| 1027 | +from textwrap import dedent |
| 1028 | + |
| 1029 | +from cloudinit.analyze.dump import ( |
| 1030 | + dump_events, parse_ci_logline, parse_timestamp) |
| 1031 | +from cloudinit.util import subp, write_file |
| 1032 | +from cloudinit.tests.helpers import CiTestCase |
| 1033 | + |
| 1034 | + |
| 1035 | +class TestParseTimestamp(CiTestCase): |
| 1036 | + |
| 1037 | + def test_parse_timestamp_handles_cloud_init_default_format(self): |
| 1038 | + """Logs with cloud-init detailed formats will be properly parsed.""" |
| 1039 | + trusty_fmt = '%Y-%m-%d %H:%M:%S,%f' |
| 1040 | + trusty_stamp = '2016-09-12 14:39:20,839' |
| 1041 | + |
| 1042 | + parsed = parse_timestamp(trusty_stamp) |
| 1043 | + |
| 1044 | + # convert ourselves |
| 1045 | + dt = datetime.strptime(trusty_stamp, trusty_fmt) |
| 1046 | + expected = float(dt.strftime('%s.%f')) |
| 1047 | + |
| 1048 | + # use date(1) |
| 1049 | + out, _err = subp(['date', '+%s.%3N', '-d', trusty_stamp]) |
| 1050 | + timestamp = out.strip() |
| 1051 | + date_ts = float(timestamp) |
| 1052 | + |
| 1053 | + self.assertEqual(expected, parsed) |
| 1054 | + self.assertEqual(expected, date_ts) |
| 1055 | + self.assertEqual(date_ts, parsed) |
| 1056 | + |
| 1057 | + def test_parse_timestamp_handles_syslog_adding_year(self): |
| 1058 | + """Syslog timestamps lack a year. Add year and properly parse.""" |
| 1059 | + syslog_fmt = '%b %d %H:%M:%S %Y' |
| 1060 | + syslog_stamp = 'Aug 08 15:12:51' |
| 1061 | + |
| 1062 | + # convert stamp ourselves by adding the missing year value |
| 1063 | + year = datetime.now().year |
| 1064 | + dt = datetime.strptime(syslog_stamp + " " + str(year), syslog_fmt) |
| 1065 | + expected = float(dt.strftime('%s.%f')) |
| 1066 | + parsed = parse_timestamp(syslog_stamp) |
| 1067 | + |
| 1068 | + # use date(1) |
| 1069 | + out, _ = subp(['date', '+%s.%3N', '-d', syslog_stamp]) |
| 1070 | + timestamp = out.strip() |
| 1071 | + date_ts = float(timestamp) |
| 1072 | + |
| 1073 | + self.assertEqual(expected, parsed) |
| 1074 | + self.assertEqual(expected, date_ts) |
| 1075 | + self.assertEqual(date_ts, parsed) |
| 1076 | + |
| 1077 | + def test_parse_timestamp_handles_journalctl_format_adding_year(self): |
| 1078 | + """Journalctl precise timestamps lack a year. Add year and parse.""" |
| 1079 | + journal_fmt = '%b %d %H:%M:%S.%f %Y' |
| 1080 | + journal_stamp = 'Aug 08 17:15:50.606811' |
| 1081 | + |
| 1082 | + # convert stamp ourselves by adding the missing year value |
| 1083 | + year = datetime.now().year |
| 1084 | + dt = datetime.strptime(journal_stamp + " " + str(year), journal_fmt) |
| 1085 | + expected = float(dt.strftime('%s.%f')) |
| 1086 | + parsed = parse_timestamp(journal_stamp) |
| 1087 | + |
| 1088 | + # use date(1) |
| 1089 | + out, _ = subp(['date', '+%s.%6N', '-d', journal_stamp]) |
| 1090 | + timestamp = out.strip() |
| 1091 | + date_ts = float(timestamp) |
| 1092 | + |
| 1093 | + self.assertEqual(expected, parsed) |
| 1094 | + self.assertEqual(expected, date_ts) |
| 1095 | + self.assertEqual(date_ts, parsed) |
| 1096 | + |
| 1097 | + def test_parse_unexpected_timestamp_format_with_date_command(self): |
| 1098 | + """Dump sends unexpected timestamp formats to date for processing.""" |
| 1099 | + new_fmt = '%H:%M %m/%d %Y' |
| 1100 | + new_stamp = '17:15 08/08' |
| 1101 | + |
| 1102 | + # convert stamp ourselves by adding the missing year value |
| 1103 | + year = datetime.now().year |
| 1104 | + dt = datetime.strptime(new_stamp + " " + str(year), new_fmt) |
| 1105 | + expected = float(dt.strftime('%s.%f')) |
| 1106 | + parsed = parse_timestamp(new_stamp) |
| 1107 | + |
| 1108 | + # use date(1) |
| 1109 | + out, _ = subp(['date', '+%s.%6N', '-d', new_stamp]) |
| 1110 | + timestamp = out.strip() |
| 1111 | + date_ts = float(timestamp) |
| 1112 | + |
| 1113 | + self.assertEqual(expected, parsed) |
| 1114 | + self.assertEqual(expected, date_ts) |
| 1115 | + self.assertEqual(date_ts, parsed) |
| 1116 | + |
| 1117 | + |
| 1118 | +class TestParseCILogLine(CiTestCase): |
| 1119 | + |
| 1120 | + def test_parse_logline_returns_none_without_separators(self): |
| 1121 | + """When no separators are found, parse_ci_logline returns None.""" |
| 1122 | + expected_parse_ignores = [ |
| 1123 | + '', '-', 'adsf-asdf', '2017-05-22 18:02:01,088', 'CLOUDINIT'] |
| 1124 | + for parse_ignores in expected_parse_ignores: |
| 1125 | + self.assertIsNone(parse_ci_logline(parse_ignores)) |
| 1126 | + |
| 1127 | + def test_parse_logline_returns_event_for_cloud_init_logs(self): |
| 1128 | + """parse_ci_logline returns an event parsed from cloud-init format.""" |
| 1129 | + line = ( |
| 1130 | + "2017-08-08 20:05:07,147 - util.py[DEBUG]: Cloud-init v. 0.7.9" |
| 1131 | + " running 'init-local' at Tue, 08 Aug 2017 20:05:07 +0000. Up" |
| 1132 | + " 6.26 seconds.") |
| 1133 | + dt = datetime.strptime( |
| 1134 | + '2017-08-08 20:05:07,147', '%Y-%m-%d %H:%M:%S,%f') |
| 1135 | + timestamp = float(dt.strftime('%s.%f')) |
| 1136 | + expected = { |
| 1137 | + 'description': 'starting search for local datasources', |
| 1138 | + 'event_type': 'start', |
| 1139 | + 'name': 'init-local', |
| 1140 | + 'origin': 'cloudinit', |
| 1141 | + 'timestamp': timestamp} |
| 1142 | + self.assertEqual(expected, parse_ci_logline(line)) |
| 1143 | + |
| 1144 | + def test_parse_logline_returns_event_for_journalctl_logs(self): |
| 1145 | + """parse_ci_logline returns an event parsed from journalctl format.""" |
| 1146 | + line = ("Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT]" |
| 1147 | + " util.py[DEBUG]: Cloud-init v. 0.7.8 running 'init-local' at" |
| 1148 | + " Thu, 03 Nov 2016 06:51:06 +0000. Up 1.0 seconds.") |
| 1149 | + year = datetime.now().year |
| 1150 | + dt = datetime.strptime( |
| 1151 | + 'Nov 03 06:51:06.074410 %d' % year, '%b %d %H:%M:%S.%f %Y') |
| 1152 | + timestamp = float(dt.strftime('%s.%f')) |
| 1153 | + expected = { |
| 1154 | + 'description': 'starting search for local datasources', |
| 1155 | + 'event_type': 'start', |
| 1156 | + 'name': 'init-local', |
| 1157 | + 'origin': 'cloudinit', |
| 1158 | + 'timestamp': timestamp} |
| 1159 | + self.assertEqual(expected, parse_ci_logline(line)) |
| 1160 | + |
| 1161 | + def test_parse_logline_returns_event_for_finish_events(self): |
| 1162 | + """parse_ci_logline returns a finish event for a parsed log line.""" |
| 1163 | + line = ('2016-08-30 21:53:25.972325+00:00 y1 [CLOUDINIT]' |
| 1164 | + ' handlers.py[DEBUG]: finish: modules-final: SUCCESS: running' |
| 1165 | + ' modules for final') |
| 1166 | + expected = { |
| 1167 | + 'description': 'running modules for final', |
| 1168 | + 'event_type': 'finish', |
| 1169 | + 'name': 'modules-final', |
| 1170 | + 'origin': 'cloudinit', |
| 1171 | + 'result': 'SUCCESS', |
| 1172 | + 'timestamp': 1472594005.972} |
| 1173 | + self.assertEqual(expected, parse_ci_logline(line)) |
| 1174 | + |
| 1175 | + |
| 1176 | +SAMPLE_LOGS = dedent("""\ |
| 1177 | +Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT] util.py[DEBUG]:\ |
| 1178 | + Cloud-init v. 0.7.8 running 'init-local' at Thu, 03 Nov 2016\ |
| 1179 | + 06:51:06 +0000. Up 1.0 seconds. |
| 1180 | +2016-08-30 21:53:25.972325+00:00 y1 [CLOUDINIT] handlers.py[DEBUG]: finish:\ |
| 1181 | + modules-final: SUCCESS: running modules for final |
| 1182 | +""") |
| 1183 | + |
| 1184 | + |
| 1185 | +class TestDumpEvents(CiTestCase): |
| 1186 | + maxDiff = None |
| 1187 | + |
| 1188 | + def test_dump_events_with_rawdata(self): |
| 1189 | + """Rawdata is split and parsed into a tuple of events and data""" |
| 1190 | + events, data = dump_events(rawdata=SAMPLE_LOGS) |
| 1191 | + expected_data = SAMPLE_LOGS.splitlines() |
| 1192 | + year = datetime.now().year |
| 1193 | + dt1 = datetime.strptime( |
| 1194 | + 'Nov 03 06:51:06.074410 %d' % year, '%b %d %H:%M:%S.%f %Y') |
| 1195 | + timestamp1 = float(dt1.strftime('%s.%f')) |
| 1196 | + expected_events = [{ |
| 1197 | + 'description': 'starting search for local datasources', |
| 1198 | + 'event_type': 'start', |
| 1199 | + 'name': 'init-local', |
| 1200 | + 'origin': 'cloudinit', |
| 1201 | + 'timestamp': timestamp1}, { |
| 1202 | + 'description': 'running modules for final', |
| 1203 | + 'event_type': 'finish', |
| 1204 | + 'name': 'modules-final', |
| 1205 | + 'origin': 'cloudinit', |
| 1206 | + 'result': 'SUCCESS', |
| 1207 | + 'timestamp': 1472594005.972}] |
| 1208 | + self.assertEqual(expected_events, events) |
| 1209 | + self.assertEqual(expected_data, data) |
| 1210 | + |
| 1211 | + def test_dump_events_with_cisource(self): |
| 1212 | + """Cisource file is read and parsed into a tuple of events and data.""" |
| 1213 | + tmpfile = self.tmp_path('logfile') |
| 1214 | + write_file(tmpfile, SAMPLE_LOGS) |
| 1215 | + events, data = dump_events(cisource=open(tmpfile)) |
| 1216 | + year = datetime.now().year |
| 1217 | + dt1 = datetime.strptime( |
| 1218 | + 'Nov 03 06:51:06.074410 %d' % year, '%b %d %H:%M:%S.%f %Y') |
| 1219 | + timestamp1 = float(dt1.strftime('%s.%f')) |
| 1220 | + expected_events = [{ |
| 1221 | + 'description': 'starting search for local datasources', |
| 1222 | + 'event_type': 'start', |
| 1223 | + 'name': 'init-local', |
| 1224 | + 'origin': 'cloudinit', |
| 1225 | + 'timestamp': timestamp1}, { |
| 1226 | + 'description': 'running modules for final', |
| 1227 | + 'event_type': 'finish', |
| 1228 | + 'name': 'modules-final', |
| 1229 | + 'origin': 'cloudinit', |
| 1230 | + 'result': 'SUCCESS', |
| 1231 | + 'timestamp': 1472594005.972}] |
| 1232 | + self.assertEqual(expected_events, events) |
| 1233 | + self.assertEqual(SAMPLE_LOGS.splitlines(), [d.strip() for d in data]) |
| 1234 | diff --git a/cloudinit/apport.py b/cloudinit/apport.py |
| 1235 | new file mode 100644 |
| 1236 | index 0000000..221f341 |
| 1237 | --- /dev/null |
| 1238 | +++ b/cloudinit/apport.py |
| 1239 | @@ -0,0 +1,105 @@ |
| 1240 | +# Copyright (C) 2017 Canonical Ltd. |
| 1241 | +# |
| 1242 | +# This file is part of cloud-init. See LICENSE file for license information. |
| 1243 | + |
| 1244 | +'''Cloud-init apport interface''' |
| 1245 | + |
| 1246 | +try: |
| 1247 | + from apport.hookutils import ( |
| 1248 | + attach_file, attach_root_command_outputs, root_command_output) |
| 1249 | + has_apport = True |
| 1250 | +except ImportError: |
| 1251 | + has_apport = False |
| 1252 | + |
| 1253 | + |
| 1254 | +KNOWN_CLOUD_NAMES = [ |
| 1255 | + 'Amazon - Ec2', 'AliYun', 'AltCloud', 'Azure', 'Bigstep', 'CloudSigma', |
| 1256 | + 'CloudStack', 'DigitalOcean', 'GCE - Google Compute Engine', 'MAAS', |
| 1257 | + 'NoCloud', 'OpenNebula', 'OpenStack', 'OVF', 'Scaleway', 'SmartOS', |
| 1258 | + 'VMware', 'Other'] |
| 1259 | + |
| 1260 | +# Potentially clear text collected logs |
| 1261 | +CLOUDINIT_LOG = '/var/log/cloud-init.log' |
| 1262 | +CLOUDINIT_OUTPUT_LOG = '/var/log/cloud-init-output.log' |
| 1263 | +USER_DATA_FILE = '/var/lib/cloud/instance/user-data.txt' # Optional |
| 1264 | + |
| 1265 | + |
| 1266 | +def attach_cloud_init_logs(report, ui=None): |
| 1267 | + '''Attach cloud-init logs and tarfile from 'cloud-init collect-logs'.''' |
| 1268 | + attach_root_command_outputs(report, { |
| 1269 | + 'cloud-init-log-warnings': |
| 1270 | + 'egrep -i "warn|error" /var/log/cloud-init.log', |
| 1271 | + 'cloud-init-output.log.txt': 'cat /var/log/cloud-init-output.log'}) |
| 1272 | + root_command_output( |
| 1273 | + ['cloud-init', 'collect-logs', '-t', '/tmp/cloud-init-logs.tgz']) |
| 1274 | + attach_file(report, '/tmp/cloud-init-logs.tgz', 'logs.tgz') |
| 1275 | + |
| 1276 | + |
| 1277 | +def attach_hwinfo(report, ui=None): |
| 1278 | + '''Optionally attach hardware info from lshw.''' |
| 1279 | + prompt = ( |
| 1280 | + 'Your device details (lshw) may be useful to developers when' |
| 1281 | + ' addressing this bug, but gathering it requires admin privileges.' |
| 1282 | + ' Would you like to include this info?') |
| 1283 | + if ui and ui.yesno(prompt): |
| 1284 | + attach_root_command_outputs(report, {'lshw.txt': 'lshw'}) |
| 1285 | + |
| 1286 | + |
| 1287 | +def attach_cloud_info(report, ui=None): |
| 1288 | + '''Prompt for cloud details if available.''' |
| 1289 | + if ui: |
| 1290 | + prompt = 'Is this machine running in a cloud environment?' |
| 1291 | + response = ui.yesno(prompt) |
| 1292 | + if response is None: |
| 1293 | + raise StopIteration # User cancelled |
| 1294 | + if response: |
| 1295 | + prompt = ('Please select the cloud vendor or environment in which' |
| 1296 | + ' this instance is running') |
| 1297 | + response = ui.choice(prompt, KNOWN_CLOUD_NAMES) |
| 1298 | + if response: |
| 1299 | + report['CloudName'] = KNOWN_CLOUD_NAMES[response[0]] |
| 1300 | + else: |
| 1301 | + report['CloudName'] = 'None' |
| 1302 | + |
| 1303 | + |
| 1304 | +def attach_user_data(report, ui=None): |
| 1305 | + '''Optionally provide user-data if desired.''' |
| 1306 | + if ui: |
| 1307 | + prompt = ( |
| 1308 | + 'Your user-data or cloud-config file can optionally be provided' |
| 1309 | + ' from {0} and could be useful to developers when addressing this' |
| 1310 | + ' bug. Do you wish to attach user-data to this bug?'.format( |
| 1311 | + USER_DATA_FILE)) |
| 1312 | + response = ui.yesno(prompt) |
| 1313 | + if response is None: |
| 1314 | + raise StopIteration # User cancelled |
| 1315 | + if response: |
| 1316 | + attach_file(report, USER_DATA_FILE, 'user_data.txt') |
| 1317 | + |
| 1318 | + |
| 1319 | +def add_bug_tags(report): |
| 1320 | + '''Add any appropriate tags to the bug.''' |
| 1321 | + if 'JournalErrors' in report.keys(): |
| 1322 | + errors = report['JournalErrors'] |
| 1323 | + if 'Breaking ordering cycle' in errors: |
| 1324 | + report['Tags'] = 'systemd-ordering' |
| 1325 | + |
| 1326 | + |
| 1327 | +def add_info(report, ui): |
| 1328 | + '''This is an entry point to run cloud-init's apport functionality. |
| 1329 | + |
| 1330 | + Distros which want apport support will have a cloud-init package-hook at |
| 1331 | + /usr/share/apport/package-hooks/cloud-init.py which defines an add_info |
| 1332 | + function and returns the result of cloudinit.apport.add_info(report, ui). |
| 1333 | + ''' |
| 1334 | + if not has_apport: |
| 1335 | + raise RuntimeError( |
| 1336 | + 'No apport imports discovered. Apport functionality disabled') |
| 1337 | + attach_cloud_init_logs(report, ui) |
| 1338 | + attach_hwinfo(report, ui) |
| 1339 | + attach_cloud_info(report, ui) |
| 1340 | + attach_user_data(report, ui) |
| 1341 | + add_bug_tags(report) |
| 1342 | + return True |
| 1343 | + |
| 1344 | +# vi: ts=4 expandtab |
| 1345 | diff --git a/cloudinit/cmd/devel/__init__.py b/cloudinit/cmd/devel/__init__.py |
| 1346 | new file mode 100644 |
| 1347 | index 0000000..e69de29 |
| 1348 | --- /dev/null |
| 1349 | +++ b/cloudinit/cmd/devel/__init__.py |
| 1350 | diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py |
| 1351 | new file mode 100644 |
| 1352 | index 0000000..35ca478 |
| 1353 | --- /dev/null |
| 1354 | +++ b/cloudinit/cmd/devel/logs.py |
| 1355 | @@ -0,0 +1,101 @@ |
| 1356 | +# Copyright (C) 2017 Canonical Ltd. |
| 1357 | +# |
| 1358 | +# This file is part of cloud-init. See LICENSE file for license information. |
| 1359 | + |
| 1360 | +"""Define 'collect-logs' utility and handler to include in cloud-init cmd.""" |
| 1361 | + |
| 1362 | +import argparse |
| 1363 | +from cloudinit.util import ( |
| 1364 | + ProcessExecutionError, chdir, copy, ensure_dir, subp, write_file) |
| 1365 | +from cloudinit.temp_utils import tempdir |
| 1366 | +from datetime import datetime |
| 1367 | +import os |
| 1368 | +import shutil |
| 1369 | + |
| 1370 | + |
| 1371 | +CLOUDINIT_LOGS = ['/var/log/cloud-init.log', '/var/log/cloud-init-output.log'] |
| 1372 | +CLOUDINIT_RUN_DIR = '/run/cloud-init' |
| 1373 | +USER_DATA_FILE = '/var/lib/cloud/instance/user-data.txt' # Optional |
| 1374 | + |
| 1375 | + |
| 1376 | +def get_parser(parser=None): |
| 1377 | + """Build or extend an arg parser for collect-logs utility. |
| 1378 | + |
| 1379 | + @param parser: Optional existing ArgumentParser instance representing the |
| 1380 | + collect-logs subcommand which will be extended to support the args of |
| 1381 | + this utility. |
| 1382 | + |
| 1383 | + @returns: ArgumentParser with proper argument configuration. |
| 1384 | + """ |
| 1385 | + if not parser: |
| 1386 | + parser = argparse.ArgumentParser( |
| 1387 | + prog='collect-logs', |
| 1388 | + description='Collect and tar all cloud-init debug info') |
| 1389 | + parser.add_argument( |
| 1390 | + "--tarfile", '-t', default='cloud-init.tar.gz', |
| 1391 | + help=('The tarfile to create containing all collected logs.' |
| 1392 | + ' Default: cloud-init.tar.gz')) |
| 1393 | + parser.add_argument( |
| 1394 | + "--include-userdata", '-u', default=False, action='store_true', |
| 1395 | + dest='userdata', help=( |
| 1396 | + 'Optionally include user-data from {0} which could contain' |
| 1397 | + ' sensitive information.'.format(USER_DATA_FILE))) |
| 1398 | + return parser |
| 1399 | + |
| 1400 | + |
| 1401 | +def _write_command_output_to_file(cmd, filename): |
| 1402 | + """Helper which runs a command and writes output or error to filename.""" |
| 1403 | + try: |
| 1404 | + out, _ = subp(cmd) |
| 1405 | + except ProcessExecutionError as e: |
| 1406 | + write_file(filename, str(e)) |
| 1407 | + else: |
| 1408 | + write_file(filename, out) |
| 1409 | + |
| 1410 | + |
| 1411 | +def collect_logs(tarfile, include_userdata): |
| 1412 | + """Collect all cloud-init logs and tar them up into the provided tarfile. |
| 1413 | + |
| 1414 | + @param tarfile: The path of the tar-gzipped file to create. |
| 1415 | + @param include_userdata: Boolean, true means include user-data. |
| 1416 | + """ |
| 1417 | + tarfile = os.path.abspath(tarfile) |
| 1418 | + date = datetime.utcnow().date().strftime('%Y-%m-%d') |
| 1419 | + log_dir = 'cloud-init-logs-{0}'.format(date) |
| 1420 | + with tempdir(dir='/tmp') as tmp_dir: |
| 1421 | + log_dir = os.path.join(tmp_dir, log_dir) |
| 1422 | + _write_command_output_to_file( |
| 1423 | + ['dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'], |
| 1424 | + os.path.join(log_dir, 'version')) |
| 1425 | + _write_command_output_to_file( |
| 1426 | + ['dmesg'], os.path.join(log_dir, 'dmesg.txt')) |
| 1427 | + _write_command_output_to_file( |
| 1428 | + ['journalctl', '-o', 'short-precise'], |
| 1429 | + os.path.join(log_dir, 'journal.txt')) |
| 1430 | + for log in CLOUDINIT_LOGS: |
| 1431 | + copy(log, log_dir) |
| 1432 | + if include_userdata: |
| 1433 | + copy(USER_DATA_FILE, log_dir) |
| 1434 | + run_dir = os.path.join(log_dir, 'run') |
| 1435 | + ensure_dir(run_dir) |
| 1436 | + shutil.copytree(CLOUDINIT_RUN_DIR, os.path.join(run_dir, 'cloud-init')) |
| 1437 | + with chdir(tmp_dir): |
| 1438 | + subp(['tar', 'czvf', tarfile, log_dir.replace(tmp_dir + '/', '')]) |
| 1439 | + |
| 1440 | + |
| 1441 | +def handle_collect_logs_args(name, args): |
| 1442 | + """Handle calls to 'cloud-init collect-logs' as a subcommand.""" |
| 1443 | + collect_logs(args.tarfile, args.userdata) |
| 1444 | + |
| 1445 | + |
| 1446 | +def main(): |
| 1447 | + """Tool to collect and tar all cloud-init related logs.""" |
| 1448 | + parser = get_parser() |
| 1449 | + handle_collect_logs_args('collect-logs', parser.parse_args()) |
| 1450 | + return 0 |
| 1451 | + |
| 1452 | + |
| 1453 | +if __name__ == '__main__': |
| 1454 | + main() |
| 1455 | + |
| 1456 | +# vi: ts=4 expandtab |
| 1457 | diff --git a/cloudinit/cmd/devel/parser.py b/cloudinit/cmd/devel/parser.py |
| 1458 | new file mode 100644 |
| 1459 | index 0000000..acacc4e |
| 1460 | --- /dev/null |
| 1461 | +++ b/cloudinit/cmd/devel/parser.py |
| 1462 | @@ -0,0 +1,26 @@ |
| 1463 | +# Copyright (C) 2017 Canonical Ltd. |
| 1464 | +# |
| 1465 | +# This file is part of cloud-init. See LICENSE file for license information. |
| 1466 | + |
| 1467 | +"""Define 'devel' subcommand argument parsers to include in cloud-init cmd.""" |
| 1468 | + |
| 1469 | +import argparse |
| 1470 | +from cloudinit.config.schema import ( |
| 1471 | + get_parser as schema_parser, handle_schema_args) |
| 1472 | + |
| 1473 | + |
| 1474 | +def get_parser(parser=None): |
| 1475 | + if not parser: |
| 1476 | + parser = argparse.ArgumentParser( |
| 1477 | + prog='cloudinit-devel', |
| 1478 | + description='Run development cloud-init tools') |
| 1479 | + subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand') |
| 1480 | + subparsers.required = True |
| 1481 | + |
| 1482 | + parser_schema = subparsers.add_parser( |
| 1483 | + 'schema', help='Validate cloud-config files or document schema') |
| 1484 | + # Construct schema subcommand parser |
| 1485 | + schema_parser(parser_schema) |
| 1486 | + parser_schema.set_defaults(action=('schema', handle_schema_args)) |
| 1487 | + |
| 1488 | + return parser |
| 1489 | diff --git a/cloudinit/cmd/devel/tests/__init__.py b/cloudinit/cmd/devel/tests/__init__.py |
| 1490 | new file mode 100644 |
| 1491 | index 0000000..e69de29 |
| 1492 | --- /dev/null |
| 1493 | +++ b/cloudinit/cmd/devel/tests/__init__.py |
| 1494 | diff --git a/cloudinit/cmd/devel/tests/test_logs.py b/cloudinit/cmd/devel/tests/test_logs.py |
| 1495 | new file mode 100644 |
| 1496 | index 0000000..dc4947c |
| 1497 | --- /dev/null |
| 1498 | +++ b/cloudinit/cmd/devel/tests/test_logs.py |
| 1499 | @@ -0,0 +1,120 @@ |
| 1500 | +# This file is part of cloud-init. See LICENSE file for license information. |
| 1501 | + |
| 1502 | +from cloudinit.cmd.devel import logs |
| 1503 | +from cloudinit.util import ensure_dir, load_file, subp, write_file |
| 1504 | +from cloudinit.tests.helpers import FilesystemMockingTestCase, wrap_and_call |
| 1505 | +from datetime import datetime |
| 1506 | +import os |
| 1507 | + |
| 1508 | + |
| 1509 | +class TestCollectLogs(FilesystemMockingTestCase): |
| 1510 | + |
| 1511 | + def setUp(self): |
| 1512 | + super(TestCollectLogs, self).setUp() |
| 1513 | + self.new_root = self.tmp_dir() |
| 1514 | + self.run_dir = self.tmp_path('run', self.new_root) |
| 1515 | + |
| 1516 | + def test_collect_logs_creates_tarfile(self): |
| 1517 | + """collect-logs creates a tarfile with all related cloud-init info.""" |
| 1518 | + log1 = self.tmp_path('cloud-init.log', self.new_root) |
| 1519 | + write_file(log1, 'cloud-init-log') |
| 1520 | + log2 = self.tmp_path('cloud-init-output.log', self.new_root) |
| 1521 | + write_file(log2, 'cloud-init-output-log') |
| 1522 | + ensure_dir(self.run_dir) |
| 1523 | + write_file(self.tmp_path('results.json', self.run_dir), 'results') |
| 1524 | + output_tarfile = self.tmp_path('logs.tgz') |
| 1525 | + |
| 1526 | + date = datetime.utcnow().date().strftime('%Y-%m-%d') |
| 1527 | + date_logdir = 'cloud-init-logs-{0}'.format(date) |
| 1528 | + |
| 1529 | + expected_subp = { |
| 1530 | + ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'): |
| 1531 | + '0.7fake\n', |
| 1532 | + ('dmesg',): 'dmesg-out\n', |
| 1533 | + ('journalctl', '-o', 'short-precise'): 'journal-out\n', |
| 1534 | + ('tar', 'czvf', output_tarfile, date_logdir): '' |
| 1535 | + } |
| 1536 | + |
| 1537 | + def fake_subp(cmd): |
| 1538 | + cmd_tuple = tuple(cmd) |
| 1539 | + if cmd_tuple not in expected_subp: |
| 1540 | + raise AssertionError( |
| 1541 | + 'Unexpected command provided to subp: {0}'.format(cmd)) |
| 1542 | + if cmd == ['tar', 'czvf', output_tarfile, date_logdir]: |
| 1543 | + subp(cmd) # Pass through tar cmd so we can check output |
| 1544 | + return expected_subp[cmd_tuple], '' |
| 1545 | + |
| 1546 | + wrap_and_call( |
| 1547 | + 'cloudinit.cmd.devel.logs', |
| 1548 | + {'subp': {'side_effect': fake_subp}, |
| 1549 | + 'CLOUDINIT_LOGS': {'new': [log1, log2]}, |
| 1550 | + 'CLOUDINIT_RUN_DIR': {'new': self.run_dir}}, |
| 1551 | + logs.collect_logs, output_tarfile, include_userdata=False) |
| 1552 | + # unpack the tarfile and check file contents |
| 1553 | + subp(['tar', 'zxvf', output_tarfile, '-C', self.new_root]) |
| 1554 | + out_logdir = self.tmp_path(date_logdir, self.new_root) |
| 1555 | + self.assertEqual( |
| 1556 | + '0.7fake\n', |
| 1557 | + load_file(os.path.join(out_logdir, 'version'))) |
| 1558 | + self.assertEqual( |
| 1559 | + 'cloud-init-log', |
| 1560 | + load_file(os.path.join(out_logdir, 'cloud-init.log'))) |
| 1561 | + self.assertEqual( |
| 1562 | + 'cloud-init-output-log', |
| 1563 | + load_file(os.path.join(out_logdir, 'cloud-init-output.log'))) |
| 1564 | + self.assertEqual( |
| 1565 | + 'dmesg-out\n', |
| 1566 | + load_file(os.path.join(out_logdir, 'dmesg.txt'))) |
| 1567 | + self.assertEqual( |
| 1568 | + 'journal-out\n', |
| 1569 | + load_file(os.path.join(out_logdir, 'journal.txt'))) |
| 1570 | + self.assertEqual( |
| 1571 | + 'results', |
| 1572 | + load_file( |
| 1573 | + os.path.join(out_logdir, 'run', 'cloud-init', 'results.json'))) |
| 1574 | + |
| 1575 | + def test_collect_logs_includes_optional_userdata(self): |
| 1576 | + """collect-logs includes userdata when --include-userdata is set.""" |
| 1577 | + log1 = self.tmp_path('cloud-init.log', self.new_root) |
| 1578 | + write_file(log1, 'cloud-init-log') |
| 1579 | + log2 = self.tmp_path('cloud-init-output.log', self.new_root) |
| 1580 | + write_file(log2, 'cloud-init-output-log') |
| 1581 | + userdata = self.tmp_path('user-data.txt', self.new_root) |
| 1582 | + write_file(userdata, 'user-data') |
| 1583 | + ensure_dir(self.run_dir) |
| 1584 | + write_file(self.tmp_path('results.json', self.run_dir), 'results') |
| 1585 | + output_tarfile = self.tmp_path('logs.tgz') |
| 1586 | + |
| 1587 | + date = datetime.utcnow().date().strftime('%Y-%m-%d') |
| 1588 | + date_logdir = 'cloud-init-logs-{0}'.format(date) |
| 1589 | + |
| 1590 | + expected_subp = { |
| 1591 | + ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'): |
| 1592 | + '0.7fake', |
| 1593 | + ('dmesg',): 'dmesg-out\n', |
| 1594 | + ('journalctl', '-o', 'short-precise'): 'journal-out\n', |
| 1595 | + ('tar', 'czvf', output_tarfile, date_logdir): '' |
| 1596 | + } |
| 1597 | + |
| 1598 | + def fake_subp(cmd): |
| 1599 | + cmd_tuple = tuple(cmd) |
| 1600 | + if cmd_tuple not in expected_subp: |
| 1601 | + raise AssertionError( |
| 1602 | + 'Unexpected command provided to subp: {0}'.format(cmd)) |
| 1603 | + if cmd == ['tar', 'czvf', output_tarfile, date_logdir]: |
| 1604 | + subp(cmd) # Pass through tar cmd so we can check output |
| 1605 | + return expected_subp[cmd_tuple], '' |
| 1606 | + |
| 1607 | + wrap_and_call( |
| 1608 | + 'cloudinit.cmd.devel.logs', |
| 1609 | + {'subp': {'side_effect': fake_subp}, |
| 1610 | + 'CLOUDINIT_LOGS': {'new': [log1, log2]}, |
| 1611 | + 'CLOUDINIT_RUN_DIR': {'new': self.run_dir}, |
| 1612 | + 'USER_DATA_FILE': {'new': userdata}}, |
| 1613 | + logs.collect_logs, output_tarfile, include_userdata=True) |
| 1614 | + # unpack the tarfile and check file contents |
| 1615 | + subp(['tar', 'zxvf', output_tarfile, '-C', self.new_root]) |
| 1616 | + out_logdir = self.tmp_path(date_logdir, self.new_root) |
| 1617 | + self.assertEqual( |
| 1618 | + 'user-data', |
| 1619 | + load_file(os.path.join(out_logdir, 'user-data.txt'))) |
| 1620 | diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py |
| 1621 | index 139e03b..6fb9d9e 100644 |
| 1622 | --- a/cloudinit/cmd/main.py |
| 1623 | +++ b/cloudinit/cmd/main.py |
| 1624 | @@ -50,13 +50,6 @@ WELCOME_MSG_TPL = ("Cloud-init v. {version} running '{action}' at " |
| 1625 | # Module section template |
| 1626 | MOD_SECTION_TPL = "cloud_%s_modules" |
| 1627 | |
| 1628 | -# Things u can query on |
| 1629 | -QUERY_DATA_TYPES = [ |
| 1630 | - 'data', |
| 1631 | - 'data_raw', |
| 1632 | - 'instance_id', |
| 1633 | -] |
| 1634 | - |
| 1635 | # Frequency shortname to full name |
| 1636 | # (so users don't have to remember the full name...) |
| 1637 | FREQ_SHORT_NAMES = { |
| 1638 | @@ -510,11 +503,6 @@ def main_modules(action_name, args): |
| 1639 | return run_module_section(mods, name, name) |
| 1640 | |
| 1641 | |
| 1642 | -def main_query(name, _args): |
| 1643 | - raise NotImplementedError(("Action '%s' is not" |
| 1644 | - " currently implemented") % (name)) |
| 1645 | - |
| 1646 | - |
| 1647 | def main_single(name, args): |
| 1648 | # Cloud-init single stage is broken up into the following sub-stages |
| 1649 | # 1. Ensure that the init object fetches its config without errors |
| 1650 | @@ -688,11 +676,10 @@ def main_features(name, args): |
| 1651 | |
| 1652 | |
| 1653 | def main(sysv_args=None): |
| 1654 | - if sysv_args is not None: |
| 1655 | - parser = argparse.ArgumentParser(prog=sysv_args[0]) |
| 1656 | - sysv_args = sysv_args[1:] |
| 1657 | - else: |
| 1658 | - parser = argparse.ArgumentParser() |
| 1659 | + if not sysv_args: |
| 1660 | + sysv_args = sys.argv |
| 1661 | + parser = argparse.ArgumentParser(prog=sysv_args[0]) |
| 1662 | + sysv_args = sysv_args[1:] |
| 1663 | |
| 1664 | # Top level args |
| 1665 | parser.add_argument('--version', '-v', action='version', |
| 1666 | @@ -713,7 +700,8 @@ def main(sysv_args=None): |
| 1667 | default=False) |
| 1668 | |
| 1669 | parser.set_defaults(reporter=None) |
| 1670 | - subparsers = parser.add_subparsers() |
| 1671 | + subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand') |
| 1672 | + subparsers.required = True |
| 1673 | |
| 1674 | # Each action and its sub-options (if any) |
| 1675 | parser_init = subparsers.add_parser('init', |
| 1676 | @@ -737,17 +725,6 @@ def main(sysv_args=None): |
| 1677 | choices=('init', 'config', 'final')) |
| 1678 | parser_mod.set_defaults(action=('modules', main_modules)) |
| 1679 | |
| 1680 | - # These settings are used when you want to query information |
| 1681 | - # stored in the cloud-init data objects/directories/files |
| 1682 | - parser_query = subparsers.add_parser('query', |
| 1683 | - help=('query information stored ' |
| 1684 | - 'in cloud-init')) |
| 1685 | - parser_query.add_argument("--name", '-n', action="store", |
| 1686 | - help="item name to query on", |
| 1687 | - required=True, |
| 1688 | - choices=QUERY_DATA_TYPES) |
| 1689 | - parser_query.set_defaults(action=('query', main_query)) |
| 1690 | - |
| 1691 | # This subcommand allows you to run a single module |
| 1692 | parser_single = subparsers.add_parser('single', |
| 1693 | help=('run a single module ')) |
| 1694 | @@ -781,15 +758,39 @@ def main(sysv_args=None): |
| 1695 | help=('list defined features')) |
| 1696 | parser_features.set_defaults(action=('features', main_features)) |
| 1697 | |
| 1698 | + parser_analyze = subparsers.add_parser( |
| 1699 | + 'analyze', help='Devel tool: Analyze cloud-init logs and data') |
| 1700 | + |
| 1701 | + parser_devel = subparsers.add_parser( |
| 1702 | + 'devel', help='Run development tools') |
| 1703 | + |
| 1704 | + parser_collect_logs = subparsers.add_parser( |
| 1705 | + 'collect-logs', help='Collect and tar all cloud-init debug info') |
| 1706 | + |
| 1707 | + if sysv_args: |
| 1708 | + # Only load subparsers if subcommand is specified to avoid load cost |
| 1709 | + if sysv_args[0] == 'analyze': |
| 1710 | + from cloudinit.analyze.__main__ import get_parser as analyze_parser |
| 1711 | + # Construct analyze subcommand parser |
| 1712 | + analyze_parser(parser_analyze) |
| 1713 | + elif sysv_args[0] == 'devel': |
| 1714 | + from cloudinit.cmd.devel.parser import get_parser as devel_parser |
| 1715 | + # Construct devel subcommand parser |
| 1716 | + devel_parser(parser_devel) |
| 1717 | + elif sysv_args[0] == 'collect-logs': |
| 1718 | + from cloudinit.cmd.devel.logs import ( |
| 1719 | + get_parser as logs_parser, handle_collect_logs_args) |
| 1720 | + logs_parser(parser_collect_logs) |
| 1721 | + parser_collect_logs.set_defaults( |
| 1722 | + action=('collect-logs', handle_collect_logs_args)) |
| 1723 | + |
| 1724 | args = parser.parse_args(args=sysv_args) |
| 1725 | |
| 1726 | - try: |
| 1727 | - (name, functor) = args.action |
| 1728 | - except AttributeError: |
| 1729 | - parser.error('too few arguments') |
| 1730 | + # Subparsers.required = True and each subparser sets action=(name, functor) |
| 1731 | + (name, functor) = args.action |
| 1732 | |
| 1733 | # Setup basic logging to start (until reinitialized) |
| 1734 | - # iff in debug mode... |
| 1735 | + # iff in debug mode. |
| 1736 | if args.debug: |
| 1737 | logging.setupBasicLogging() |
| 1738 | |
| 1739 | diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py |
| 1740 | index 604f93b..233da1e 100644 |
| 1741 | --- a/cloudinit/config/cc_bootcmd.py |
| 1742 | +++ b/cloudinit/config/cc_bootcmd.py |
| 1743 | @@ -3,44 +3,73 @@ |
| 1744 | # |
| 1745 | # Author: Scott Moser <scott.moser@canonical.com> |
| 1746 | # Author: Juerg Haefliger <juerg.haefliger@hp.com> |
| 1747 | +# Author: Chad Smith <chad.smith@canonical.com> |
| 1748 | # |
| 1749 | # This file is part of cloud-init. See LICENSE file for license information. |
| 1750 | |
| 1751 | -""" |
| 1752 | -Bootcmd |
| 1753 | -------- |
| 1754 | -**Summary:** run commands early in boot process |
| 1755 | - |
| 1756 | -This module runs arbitrary commands very early in the boot process, |
| 1757 | -only slightly after a boothook would run. This is very similar to a |
| 1758 | -boothook, but more user friendly. The environment variable ``INSTANCE_ID`` |
| 1759 | -will be set to the current instance id for all run commands. Commands can be |
| 1760 | -specified either as lists or strings. For invocation details, see ``runcmd``. |
| 1761 | - |
| 1762 | -.. note:: |
| 1763 | - bootcmd should only be used for things that could not be done later in the |
| 1764 | - boot process. |
| 1765 | - |
| 1766 | -**Internal name:** ``cc_bootcmd`` |
| 1767 | - |
| 1768 | -**Module frequency:** per always |
| 1769 | - |
| 1770 | -**Supported distros:** all |
| 1771 | - |
| 1772 | -**Config keys**:: |
| 1773 | - |
| 1774 | - bootcmd: |
| 1775 | - - echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts |
| 1776 | - - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ] |
| 1777 | -""" |
| 1778 | +"""Bootcmd: run arbitrary commands early in the boot process.""" |
| 1779 | |
| 1780 | import os |
| 1781 | +from textwrap import dedent |
| 1782 | |
| 1783 | +from cloudinit.config.schema import ( |
| 1784 | + get_schema_doc, validate_cloudconfig_schema) |
| 1785 | from cloudinit.settings import PER_ALWAYS |
| 1786 | +from cloudinit import temp_utils |
| 1787 | from cloudinit import util |
| 1788 | |
| 1789 | frequency = PER_ALWAYS |
| 1790 | |
| 1791 | +# The schema definition for each cloud-config module is a strict contract for |
| 1792 | +# describing supported configuration parameters for each cloud-config section. |
| 1793 | +# It allows cloud-config to validate and alert users to invalid or ignored |
| 1794 | +# configuration options before actually attempting to deploy with said |
| 1795 | +# configuration. |
| 1796 | + |
| 1797 | +distros = ['all'] |
| 1798 | + |
| 1799 | +schema = { |
| 1800 | + 'id': 'cc_bootcmd', |
| 1801 | + 'name': 'Bootcmd', |
| 1802 | + 'title': 'Run arbitrary commands early in the boot process', |
| 1803 | + 'description': dedent("""\ |
| 1804 | + This module runs arbitrary commands very early in the boot process, |
| 1805 | + only slightly after a boothook would run. This is very similar to a |
| 1806 | + boothook, but more user friendly. The environment variable |
| 1807 | + ``INSTANCE_ID`` will be set to the current instance id for all run |
| 1808 | + commands. Commands can be specified either as lists or strings. For |
| 1809 | + invocation details, see ``runcmd``. |
| 1810 | + |
| 1811 | + .. note:: |
| 1812 | + bootcmd should only be used for things that could not be done later |
| 1813 | + in the boot process."""), |
| 1814 | + 'distros': distros, |
| 1815 | + 'examples': [dedent("""\ |
| 1816 | + bootcmd: |
| 1817 | + - echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts |
| 1818 | + - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ] |
| 1819 | + """)], |
| 1820 | + 'frequency': PER_ALWAYS, |
| 1821 | + 'type': 'object', |
| 1822 | + 'properties': { |
| 1823 | + 'bootcmd': { |
| 1824 | + 'type': 'array', |
| 1825 | + 'items': { |
| 1826 | + 'oneOf': [ |
| 1827 | + {'type': 'array', 'items': {'type': 'string'}}, |
| 1828 | + {'type': 'string'}] |
| 1829 | + }, |
| 1830 | + 'additionalItems': False, # Reject items of non-string non-list |
| 1831 | + 'additionalProperties': False, |
| 1832 | + 'minItems': 1, |
| 1833 | + 'required': [], |
| 1834 | + 'uniqueItems': True |
| 1835 | + } |
| 1836 | + } |
| 1837 | +} |
| 1838 | + |
| 1839 | +__doc__ = get_schema_doc(schema) # Supplement python help() |
| 1840 | + |
| 1841 | |
| 1842 | def handle(name, cfg, cloud, log, _args): |
| 1843 | |
| 1844 | @@ -49,13 +78,14 @@ def handle(name, cfg, cloud, log, _args): |
| 1845 | " no 'bootcmd' key in configuration"), name) |
| 1846 | return |
| 1847 | |
| 1848 | - with util.ExtendedTemporaryFile(suffix=".sh") as tmpf: |
| 1849 | + validate_cloudconfig_schema(cfg, schema) |
| 1850 | + with temp_utils.ExtendedTemporaryFile(suffix=".sh") as tmpf: |
| 1851 | try: |
| 1852 | content = util.shellify(cfg["bootcmd"]) |
| 1853 | tmpf.write(util.encode_text(content)) |
| 1854 | tmpf.flush() |
| 1855 | - except Exception: |
| 1856 | - util.logexc(log, "Failed to shellify bootcmd") |
| 1857 | + except Exception as e: |
| 1858 | + util.logexc(log, "Failed to shellify bootcmd: %s", str(e)) |
| 1859 | raise |
| 1860 | |
| 1861 | try: |
| 1862 | diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py |
| 1863 | index 02c70b1..46abedd 100644 |
| 1864 | --- a/cloudinit/config/cc_chef.py |
| 1865 | +++ b/cloudinit/config/cc_chef.py |
| 1866 | @@ -58,6 +58,9 @@ file). |
| 1867 | log_level: |
| 1868 | log_location: |
| 1869 | node_name: |
| 1870 | + omnibus_url: |
| 1871 | + omnibus_url_retries: |
| 1872 | + omnibus_version: |
| 1873 | pid_file: |
| 1874 | server_url: |
| 1875 | show_time: |
| 1876 | @@ -279,6 +282,31 @@ def run_chef(chef_cfg, log): |
| 1877 | util.subp(cmd, capture=False) |
| 1878 | |
| 1879 | |
| 1880 | +def install_chef_from_omnibus(url=None, retries=None, omnibus_version=None): |
| 1881 | + """Install an omnibus unified package from url. |
| 1882 | + |
| 1883 | + @param url: URL where blob of chef content may be downloaded. Defaults to |
| 1884 | + OMNIBUS_URL. |
| 1885 | + @param retries: Number of retries to perform when attempting to read url. |
| 1886 | + Defaults to OMNIBUS_URL_RETRIES |
| 1887 | + @param omnibus_version: Optional version string to require for omnibus |
| 1888 | + install. |
| 1889 | + """ |
| 1890 | + if url is None: |
| 1891 | + url = OMNIBUS_URL |
| 1892 | + if retries is None: |
| 1893 | + retries = OMNIBUS_URL_RETRIES |
| 1894 | + |
| 1895 | + if omnibus_version is None: |
| 1896 | + args = [] |
| 1897 | + else: |
| 1898 | + args = ['-v', omnibus_version] |
| 1899 | + content = url_helper.readurl(url=url, retries=retries).contents |
| 1900 | + return util.subp_blob_in_tempfile( |
| 1901 | + blob=content, args=args, |
| 1902 | + basename='chef-omnibus-install', capture=False) |
| 1903 | + |
| 1904 | + |
| 1905 | def install_chef(cloud, chef_cfg, log): |
| 1906 | # If chef is not installed, we install chef based on 'install_type' |
| 1907 | install_type = util.get_cfg_option_str(chef_cfg, 'install_type', |
| 1908 | @@ -297,17 +325,11 @@ def install_chef(cloud, chef_cfg, log): |
| 1909 | # This will install and run the chef-client from packages |
| 1910 | cloud.distro.install_packages(('chef',)) |
| 1911 | elif install_type == 'omnibus': |
| 1912 | - # This will install as a omnibus unified package |
| 1913 | - url = util.get_cfg_option_str(chef_cfg, "omnibus_url", OMNIBUS_URL) |
| 1914 | - retries = max(0, util.get_cfg_option_int(chef_cfg, |
| 1915 | - "omnibus_url_retries", |
| 1916 | - default=OMNIBUS_URL_RETRIES)) |
| 1917 | - content = url_helper.readurl(url=url, retries=retries).contents |
| 1918 | - with util.tempdir() as tmpd: |
| 1919 | - # Use tmpdir over tmpfile to avoid 'text file busy' on execute |
| 1920 | - tmpf = "%s/chef-omnibus-install" % tmpd |
| 1921 | - util.write_file(tmpf, content, mode=0o700) |
| 1922 | - util.subp([tmpf], capture=False) |
| 1923 | + omnibus_version = util.get_cfg_option_str(chef_cfg, "omnibus_version") |
| 1924 | + install_chef_from_omnibus( |
| 1925 | + url=util.get_cfg_option_str(chef_cfg, "omnibus_url"), |
| 1926 | + retries=util.get_cfg_option_int(chef_cfg, "omnibus_url_retries"), |
| 1927 | + omnibus_version=omnibus_version) |
| 1928 | else: |
| 1929 | log.warn("Unknown chef install type '%s'", install_type) |
| 1930 | run = False |
| 1931 | diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py |
| 1932 | index 86b7138..8f9f1ab 100644 |
| 1933 | --- a/cloudinit/config/cc_landscape.py |
| 1934 | +++ b/cloudinit/config/cc_landscape.py |
| 1935 | @@ -57,7 +57,7 @@ The following default client config is provided, but can be overridden:: |
| 1936 | |
| 1937 | import os |
| 1938 | |
| 1939 | -from six import StringIO |
| 1940 | +from six import BytesIO |
| 1941 | |
| 1942 | from configobj import ConfigObj |
| 1943 | |
| 1944 | @@ -109,7 +109,7 @@ def handle(_name, cfg, cloud, log, _args): |
| 1945 | ls_cloudcfg, |
| 1946 | ] |
| 1947 | merged = merge_together(merge_data) |
| 1948 | - contents = StringIO() |
| 1949 | + contents = BytesIO() |
| 1950 | merged.write(contents) |
| 1951 | |
| 1952 | util.ensure_dir(os.path.dirname(LSC_CLIENT_CFG_FILE)) |
| 1953 | diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py |
| 1954 | index 31ed64e..15ae1ec 100644 |
| 1955 | --- a/cloudinit/config/cc_ntp.py |
| 1956 | +++ b/cloudinit/config/cc_ntp.py |
| 1957 | @@ -4,39 +4,10 @@ |
| 1958 | # |
| 1959 | # This file is part of cloud-init. See LICENSE file for license information. |
| 1960 | |
| 1961 | -""" |
| 1962 | -NTP |
| 1963 | ---- |
| 1964 | -**Summary:** enable and configure ntp |
| 1965 | - |
| 1966 | -Handle ntp configuration. If ntp is not installed on the system and ntp |
| 1967 | -configuration is specified, ntp will be installed. If there is a default ntp |
| 1968 | -config file in the image or one is present in the distro's ntp package, it will |
| 1969 | -be copied to ``/etc/ntp.conf.dist`` before any changes are made. A list of ntp |
| 1970 | -pools and ntp servers can be provided under the ``ntp`` config key. If no ntp |
| 1971 | -servers or pools are provided, 4 pools will be used in the format |
| 1972 | -``{0-3}.{distro}.pool.ntp.org``. |
| 1973 | - |
| 1974 | -**Internal name:** ``cc_ntp`` |
| 1975 | - |
| 1976 | -**Module frequency:** per instance |
| 1977 | - |
| 1978 | -**Supported distros:** centos, debian, fedora, opensuse, ubuntu |
| 1979 | - |
| 1980 | -**Config keys**:: |
| 1981 | - |
| 1982 | - ntp: |
| 1983 | - pools: |
| 1984 | - - 0.company.pool.ntp.org |
| 1985 | - - 1.company.pool.ntp.org |
| 1986 | - - ntp.myorg.org |
| 1987 | - servers: |
| 1988 | - - my.ntp.server.local |
| 1989 | - - ntp.ubuntu.com |
| 1990 | - - 192.168.23.2 |
| 1991 | -""" |
| 1992 | +"""NTP: enable and configure ntp""" |
| 1993 | |
| 1994 | -from cloudinit.config.schema import validate_cloudconfig_schema |
| 1995 | +from cloudinit.config.schema import ( |
| 1996 | + get_schema_doc, validate_cloudconfig_schema) |
| 1997 | from cloudinit import log as logging |
| 1998 | from cloudinit.settings import PER_INSTANCE |
| 1999 | from cloudinit import templater |
| 2000 | @@ -50,6 +21,7 @@ LOG = logging.getLogger(__name__) |
| 2001 | |
| 2002 | frequency = PER_INSTANCE |
| 2003 | NTP_CONF = '/etc/ntp.conf' |
| 2004 | +TIMESYNCD_CONF = '/etc/systemd/timesyncd.conf.d/cloud-init.conf' |
| 2005 | NR_POOL_SERVERS = 4 |
| 2006 | distros = ['centos', 'debian', 'fedora', 'opensuse', 'ubuntu'] |
| 2007 | |
| 2008 | @@ -75,10 +47,13 @@ schema = { |
| 2009 | ``{0-3}.{distro}.pool.ntp.org``."""), |
| 2010 | 'distros': distros, |
| 2011 | 'examples': [ |
| 2012 | - {'ntp': {'pools': ['0.company.pool.ntp.org', '1.company.pool.ntp.org', |
| 2013 | - 'ntp.myorg.org'], |
| 2014 | - 'servers': ['my.ntp.server.local', 'ntp.ubuntu.com', |
| 2015 | - '192.168.23.2']}}], |
| 2016 | + dedent("""\ |
| 2017 | + ntp: |
| 2018 | + pools: [0.int.pool.ntp.org, 1.int.pool.ntp.org, ntp.myorg.org] |
| 2019 | + servers: |
| 2020 | + - ntp.server.local |
| 2021 | + - ntp.ubuntu.com |
| 2022 | + - 192.168.23.2""")], |
| 2023 | 'frequency': PER_INSTANCE, |
| 2024 | 'type': 'object', |
| 2025 | 'properties': { |
| 2026 | @@ -116,6 +91,8 @@ schema = { |
| 2027 | } |
| 2028 | } |
| 2029 | |
| 2030 | +__doc__ = get_schema_doc(schema) # Supplement python help() |
| 2031 | + |
| 2032 | |
| 2033 | def handle(name, cfg, cloud, log, _args): |
| 2034 | """Enable and configure ntp.""" |
| 2035 | @@ -132,20 +109,50 @@ def handle(name, cfg, cloud, log, _args): |
| 2036 | " is a %s %instead"), type_utils.obj_name(ntp_cfg)) |
| 2037 | |
| 2038 | validate_cloudconfig_schema(cfg, schema) |
| 2039 | + if ntp_installable(): |
| 2040 | + service_name = 'ntp' |
| 2041 | + confpath = NTP_CONF |
| 2042 | + template_name = None |
| 2043 | + packages = ['ntp'] |
| 2044 | + check_exe = 'ntpd' |
| 2045 | + else: |
| 2046 | + service_name = 'systemd-timesyncd' |
| 2047 | + confpath = TIMESYNCD_CONF |
| 2048 | + template_name = 'timesyncd.conf' |
| 2049 | + packages = [] |
| 2050 | + check_exe = '/lib/systemd/systemd-timesyncd' |
| 2051 | + |
| 2052 | rename_ntp_conf() |
| 2053 | # ensure when ntp is installed it has a configuration file |
| 2054 | # to use instead of starting up with packaged defaults |
| 2055 | - write_ntp_config_template(ntp_cfg, cloud) |
| 2056 | - install_ntp(cloud.distro.install_packages, packages=['ntp'], |
| 2057 | - check_exe="ntpd") |
| 2058 | - # if ntp was already installed, it may not have started |
| 2059 | + write_ntp_config_template(ntp_cfg, cloud, confpath, template=template_name) |
| 2060 | + install_ntp(cloud.distro.install_packages, packages=packages, |
| 2061 | + check_exe=check_exe) |
| 2062 | + |
| 2063 | try: |
| 2064 | - reload_ntp(systemd=cloud.distro.uses_systemd()) |
| 2065 | + reload_ntp(service_name, systemd=cloud.distro.uses_systemd()) |
| 2066 | except util.ProcessExecutionError as e: |
| 2067 | LOG.exception("Failed to reload/start ntp service: %s", e) |
| 2068 | raise |
| 2069 | |
| 2070 | |
| 2071 | +def ntp_installable(): |
| 2072 | + """Check if we can install ntp package |
| 2073 | + |
| 2074 | + Ubuntu-Core systems do not have an ntp package available, so |
| 2075 | + we always return False. Other systems require package managers to install |
| 2076 | + the ntp package. If we fail to find one of the package managers, then we |
| 2077 | + cannot install ntp. |
| 2078 | + """ |
| 2079 | + if util.system_is_snappy(): |
| 2080 | + return False |
| 2081 | + |
| 2082 | + if any(map(util.which, ['apt-get', 'dnf', 'yum', 'zypper'])): |
| 2083 | + return True |
| 2084 | + |
| 2085 | + return False |
| 2086 | + |
| 2087 | + |
| 2088 | def install_ntp(install_func, packages=None, check_exe="ntpd"): |
| 2089 | if util.which(check_exe): |
| 2090 | return |
| 2091 | @@ -156,7 +163,7 @@ def install_ntp(install_func, packages=None, check_exe="ntpd"): |
| 2092 | |
| 2093 | |
| 2094 | def rename_ntp_conf(config=None): |
| 2095 | - """Rename any existing ntp.conf file and render from template""" |
| 2096 | + """Rename any existing ntp.conf file""" |
| 2097 | if config is None: # For testing |
| 2098 | config = NTP_CONF |
| 2099 | if os.path.exists(config): |
| 2100 | @@ -171,7 +178,7 @@ def generate_server_names(distro): |
| 2101 | return names |
| 2102 | |
| 2103 | |
| 2104 | -def write_ntp_config_template(cfg, cloud): |
| 2105 | +def write_ntp_config_template(cfg, cloud, path, template=None): |
| 2106 | servers = cfg.get('servers', []) |
| 2107 | pools = cfg.get('pools', []) |
| 2108 | |
| 2109 | @@ -185,19 +192,20 @@ def write_ntp_config_template(cfg, cloud): |
| 2110 | 'pools': pools, |
| 2111 | } |
| 2112 | |
| 2113 | - template_fn = cloud.get_template_filename('ntp.conf.%s' % |
| 2114 | - (cloud.distro.name)) |
| 2115 | + if template is None: |
| 2116 | + template = 'ntp.conf.%s' % cloud.distro.name |
| 2117 | + |
| 2118 | + template_fn = cloud.get_template_filename(template) |
| 2119 | if not template_fn: |
| 2120 | template_fn = cloud.get_template_filename('ntp.conf') |
| 2121 | if not template_fn: |
| 2122 | raise RuntimeError(("No template found, " |
| 2123 | - "not rendering %s"), NTP_CONF) |
| 2124 | + "not rendering %s"), path) |
| 2125 | |
| 2126 | - templater.render_to_file(template_fn, NTP_CONF, params) |
| 2127 | + templater.render_to_file(template_fn, path, params) |
| 2128 | |
| 2129 | |
| 2130 | -def reload_ntp(systemd=False): |
| 2131 | - service = 'ntp' |
| 2132 | +def reload_ntp(service, systemd=False): |
| 2133 | if systemd: |
| 2134 | cmd = ['systemctl', 'reload-or-restart', service] |
| 2135 | else: |
| 2136 | diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py |
| 2137 | index dc11561..28b1d56 100644 |
| 2138 | --- a/cloudinit/config/cc_puppet.py |
| 2139 | +++ b/cloudinit/config/cc_puppet.py |
| 2140 | @@ -15,21 +15,23 @@ This module handles puppet installation and configuration. If the ``puppet`` |
| 2141 | key does not exist in global configuration, no action will be taken. If a |
| 2142 | config entry for ``puppet`` is present, then by default the latest version of |
| 2143 | puppet will be installed. If ``install`` is set to ``false``, puppet will not |
| 2144 | -be installed. However, this may result in an error if puppet is not already |
| 2145 | +be installed. However, this will result in an error if puppet is not already |
| 2146 | present on the system. The version of puppet to be installed can be specified |
| 2147 | under ``version``, and defaults to ``none``, which selects the latest version |
| 2148 | in the repos. If the ``puppet`` config key exists in the config archive, this |
| 2149 | module will attempt to start puppet even if no installation was performed. |
| 2150 | |
| 2151 | -Puppet configuration can be specified under the ``conf`` key. The configuration |
| 2152 | -is specified as a dictionary which is converted into ``<key>=<value>`` format |
| 2153 | -and appended to ``puppet.conf`` under the ``[puppetd]`` section. The |
| 2154 | +Puppet configuration can be specified under the ``conf`` key. The |
| 2155 | +configuration is specified as a dictionary containing high-level ``<section>`` |
| 2156 | +keys and lists of ``<key>=<value>`` pairs within each section. Each section |
| 2157 | +name and ``<key>=<value>`` pair is written directly to ``puppet.conf``. As |
| 2158 | +such, section names should be one of: ``main``, ``master``, ``agent`` or |
| 2159 | +``user`` and keys should be valid puppet configuration options. The |
| 2160 | ``certname`` key supports string substitutions for ``%i`` and ``%f``, |
| 2161 | corresponding to the instance id and fqdn of the machine respectively. |
| 2162 | -If ``ca_cert`` is present under ``conf``, it will not be written to |
| 2163 | -``puppet.conf``, but instead will be used as the puppermaster certificate. |
| 2164 | -It should be specified in pem format as a multi-line string (using the ``|`` |
| 2165 | -yaml notation). |
| 2166 | +If ``ca_cert`` is present, it will not be written to ``puppet.conf``, but |
| 2167 | +instead will be used as the puppetmaster certificate. It should be specified |
| 2168 | +in pem format as a multi-line string (using the ``|`` yaml notation). |
| 2169 | |
| 2170 | **Internal name:** ``cc_puppet`` |
| 2171 | |
| 2172 | @@ -43,12 +45,13 @@ yaml notation). |
| 2173 | install: <true/false> |
| 2174 | version: <version> |
| 2175 | conf: |
| 2176 | - server: "puppetmaster.example.org" |
| 2177 | - certname: "%i.%f" |
| 2178 | - ca_cert: | |
| 2179 | - -------BEGIN CERTIFICATE------- |
| 2180 | - <cert data> |
| 2181 | - -------END CERTIFICATE------- |
| 2182 | + agent: |
| 2183 | + server: "puppetmaster.example.org" |
| 2184 | + certname: "%i.%f" |
| 2185 | + ca_cert: | |
| 2186 | + -------BEGIN CERTIFICATE------- |
| 2187 | + <cert data> |
| 2188 | + -------END CERTIFICATE------- |
| 2189 | """ |
| 2190 | |
| 2191 | from six import StringIO |
| 2192 | @@ -127,7 +130,7 @@ def handle(name, cfg, cloud, log, _args): |
| 2193 | util.write_file(PUPPET_SSL_CERT_PATH, cfg) |
| 2194 | util.chownbyname(PUPPET_SSL_CERT_PATH, 'puppet', 'root') |
| 2195 | else: |
| 2196 | - # Iterate throug the config items, we'll use ConfigParser.set |
| 2197 | + # Iterate through the config items, we'll use ConfigParser.set |
| 2198 | # to overwrite or create new items as needed |
| 2199 | for (o, v) in cfg.items(): |
| 2200 | if o == 'certname': |
| 2201 | diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py |
| 2202 | index ceee952..f774baa 100644 |
| 2203 | --- a/cloudinit/config/cc_resizefs.py |
| 2204 | +++ b/cloudinit/config/cc_resizefs.py |
| 2205 | @@ -6,31 +6,8 @@ |
| 2206 | # |
| 2207 | # This file is part of cloud-init. See LICENSE file for license information. |
| 2208 | |
| 2209 | -""" |
| 2210 | -Resizefs |
| 2211 | --------- |
| 2212 | -**Summary:** resize filesystem |
| 2213 | +"""Resizefs: cloud-config module which resizes the filesystem""" |
| 2214 | |
| 2215 | -Resize a filesystem to use all avaliable space on partition. This module is |
| 2216 | -useful along with ``cc_growpart`` and will ensure that if the root partition |
| 2217 | -has been resized the root filesystem will be resized along with it. By default, |
| 2218 | -``cc_resizefs`` will resize the root partition and will block the boot process |
| 2219 | -while the resize command is running. Optionally, the resize operation can be |
| 2220 | -performed in the background while cloud-init continues running modules. This |
| 2221 | -can be enabled by setting ``resize_rootfs`` to ``true``. This module can be |
| 2222 | -disabled altogether by setting ``resize_rootfs`` to ``false``. |
| 2223 | - |
| 2224 | -**Internal name:** ``cc_resizefs`` |
| 2225 | - |
| 2226 | -**Module frequency:** per always |
| 2227 | - |
| 2228 | -**Supported distros:** all |
| 2229 | - |
| 2230 | -**Config keys**:: |
| 2231 | - |
| 2232 | - resize_rootfs: <true/false/"noblock"> |
| 2233 | - resize_rootfs_tmp: <directory> |
| 2234 | -""" |
| 2235 | |
| 2236 | import errno |
| 2237 | import getopt |
| 2238 | @@ -38,11 +15,47 @@ import os |
| 2239 | import re |
| 2240 | import shlex |
| 2241 | import stat |
| 2242 | +from textwrap import dedent |
| 2243 | |
| 2244 | +from cloudinit.config.schema import ( |
| 2245 | + get_schema_doc, validate_cloudconfig_schema) |
| 2246 | from cloudinit.settings import PER_ALWAYS |
| 2247 | from cloudinit import util |
| 2248 | |
| 2249 | +NOBLOCK = "noblock" |
| 2250 | + |
| 2251 | frequency = PER_ALWAYS |
| 2252 | +distros = ['all'] |
| 2253 | + |
| 2254 | +schema = { |
| 2255 | + 'id': 'cc_resizefs', |
| 2256 | + 'name': 'Resizefs', |
| 2257 | + 'title': 'Resize filesystem', |
| 2258 | + 'description': dedent("""\ |
| 2259 | + Resize a filesystem to use all available space on partition. This |
| 2260 | + module is useful along with ``cc_growpart`` and will ensure that if the |
| 2261 | + root partition has been resized the root filesystem will be resized |
| 2262 | + along with it. By default, ``cc_resizefs`` will resize the root |
| 2263 | + partition and will block the boot process while the resize command is |
| 2264 | + running. Optionally, the resize operation can be performed in the |
| 2265 | + background while cloud-init continues running modules. This can be |
| 2266 | + enabled by setting ``resize_rootfs`` to ``true``. This module can be |
| 2267 | + disabled altogether by setting ``resize_rootfs`` to ``false``."""), |
| 2268 | + 'distros': distros, |
| 2269 | + 'examples': [ |
| 2270 | + 'resize_rootfs: false # disable root filesystem resize operation'], |
| 2271 | + 'frequency': PER_ALWAYS, |
| 2272 | + 'type': 'object', |
| 2273 | + 'properties': { |
| 2274 | + 'resize_rootfs': { |
| 2275 | + 'enum': [True, False, NOBLOCK], |
| 2276 | + 'description': dedent("""\ |
| 2277 | + Whether to resize the root partition. Default: 'true'""") |
| 2278 | + } |
| 2279 | + } |
| 2280 | +} |
| 2281 | + |
| 2282 | +__doc__ = get_schema_doc(schema) # Supplement python help() |
| 2283 | |
| 2284 | |
| 2285 | def _resize_btrfs(mount_point, devpth): |
| 2286 | @@ -54,7 +67,7 @@ def _resize_ext(mount_point, devpth): |
| 2287 | |
| 2288 | |
| 2289 | def _resize_xfs(mount_point, devpth): |
| 2290 | - return ('xfs_growfs', devpth) |
| 2291 | + return ('xfs_growfs', mount_point) |
| 2292 | |
| 2293 | |
| 2294 | def _resize_ufs(mount_point, devpth): |
| 2295 | @@ -131,8 +144,6 @@ RESIZE_FS_PRECHECK_CMDS = { |
| 2296 | 'ufs': _can_skip_resize_ufs |
| 2297 | } |
| 2298 | |
| 2299 | -NOBLOCK = "noblock" |
| 2300 | - |
| 2301 | |
| 2302 | def rootdev_from_cmdline(cmdline): |
| 2303 | found = None |
| 2304 | @@ -161,71 +172,77 @@ def can_skip_resize(fs_type, resize_what, devpth): |
| 2305 | return False |
| 2306 | |
| 2307 | |
| 2308 | -def handle(name, cfg, _cloud, log, args): |
| 2309 | - if len(args) != 0: |
| 2310 | - resize_root = args[0] |
| 2311 | - else: |
| 2312 | - resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True) |
| 2313 | +def is_device_path_writable_block(devpath, info, log): |
| 2314 | + """Return True if devpath is a writable block device. |
| 2315 | |
| 2316 | - if not util.translate_bool(resize_root, addons=[NOBLOCK]): |
| 2317 | - log.debug("Skipping module named %s, resizing disabled", name) |
| 2318 | - return |
| 2319 | - |
| 2320 | - # TODO(harlowja) is the directory ok to be used?? |
| 2321 | - resize_root_d = util.get_cfg_option_str(cfg, "resize_rootfs_tmp", "/run") |
| 2322 | - util.ensure_dir(resize_root_d) |
| 2323 | - |
| 2324 | - # TODO(harlowja): allow what is to be resized to be configurable?? |
| 2325 | - resize_what = "/" |
| 2326 | - result = util.get_mount_info(resize_what, log) |
| 2327 | - if not result: |
| 2328 | - log.warn("Could not determine filesystem type of %s", resize_what) |
| 2329 | - return |
| 2330 | - |
| 2331 | - (devpth, fs_type, mount_point) = result |
| 2332 | - |
| 2333 | - info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what) |
| 2334 | - log.debug("resize_info: %s" % info) |
| 2335 | + @param devpath: Path to the root device we want to resize. |
| 2336 | + @param info: String representing information about the requested device. |
| 2337 | + @param log: Logger to which logs will be added upon error. |
| 2338 | |
| 2339 | + @returns Boolean True if block device is writable |
| 2340 | + """ |
| 2341 | container = util.is_container() |
| 2342 | |
| 2343 | # Ensure the path is a block device. |
| 2344 | - if (devpth == "/dev/root" and not os.path.exists(devpth) and |
| 2345 | + if (devpath == "/dev/root" and not os.path.exists(devpath) and |
| 2346 | not container): |
| 2347 | - devpth = util.rootdev_from_cmdline(util.get_cmdline()) |
| 2348 | - if devpth is None: |
| 2349 | + devpath = util.rootdev_from_cmdline(util.get_cmdline()) |
| 2350 | + if devpath is None: |
| 2351 | log.warn("Unable to find device '/dev/root'") |
| 2352 | - return |
| 2353 | - log.debug("Converted /dev/root to '%s' per kernel cmdline", devpth) |
| 2354 | + return False |
| 2355 | + log.debug("Converted /dev/root to '%s' per kernel cmdline", devpath) |
| 2356 | + |
| 2357 | + if devpath == 'overlayroot': |
| 2358 | + log.debug("Not attempting to resize devpath '%s': %s", devpath, info) |
| 2359 | + return False |
| 2360 | |
| 2361 | try: |
| 2362 | - statret = os.stat(devpth) |
| 2363 | + statret = os.stat(devpath) |
| 2364 | except OSError as exc: |
| 2365 | if container and exc.errno == errno.ENOENT: |
| 2366 | log.debug("Device '%s' did not exist in container. " |
| 2367 | - "cannot resize: %s", devpth, info) |
| 2368 | + "cannot resize: %s", devpath, info) |
| 2369 | elif exc.errno == errno.ENOENT: |
| 2370 | log.warn("Device '%s' did not exist. cannot resize: %s", |
| 2371 | - devpth, info) |
| 2372 | + devpath, info) |
| 2373 | else: |
| 2374 | raise exc |
| 2375 | - return |
| 2376 | - |
| 2377 | - if not os.access(devpth, os.W_OK): |
| 2378 | - if container: |
| 2379 | - log.debug("'%s' not writable in container. cannot resize: %s", |
| 2380 | - devpth, info) |
| 2381 | - else: |
| 2382 | - log.warn("'%s' not writable. cannot resize: %s", devpth, info) |
| 2383 | - return |
| 2384 | + return False |
| 2385 | |
| 2386 | if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode): |
| 2387 | if container: |
| 2388 | log.debug("device '%s' not a block device in container." |
| 2389 | - " cannot resize: %s" % (devpth, info)) |
| 2390 | + " cannot resize: %s" % (devpath, info)) |
| 2391 | else: |
| 2392 | log.warn("device '%s' not a block device. cannot resize: %s" % |
| 2393 | - (devpth, info)) |
| 2394 | + (devpath, info)) |
| 2395 | + return False |
| 2396 | + return True |
| 2397 | + |
| 2398 | + |
| 2399 | +def handle(name, cfg, _cloud, log, args): |
| 2400 | + if len(args) != 0: |
| 2401 | + resize_root = args[0] |
| 2402 | + else: |
| 2403 | + resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True) |
| 2404 | + validate_cloudconfig_schema(cfg, schema) |
| 2405 | + if not util.translate_bool(resize_root, addons=[NOBLOCK]): |
| 2406 | + log.debug("Skipping module named %s, resizing disabled", name) |
| 2407 | + return |
| 2408 | + |
| 2409 | + # TODO(harlowja): allow what is to be resized to be configurable?? |
| 2410 | + resize_what = "/" |
| 2411 | + result = util.get_mount_info(resize_what, log) |
| 2412 | + if not result: |
| 2413 | + log.warn("Could not determine filesystem type of %s", resize_what) |
| 2414 | + return |
| 2415 | + |
| 2416 | + (devpth, fs_type, mount_point) = result |
| 2417 | + |
| 2418 | + info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what) |
| 2419 | + log.debug("resize_info: %s" % info) |
| 2420 | + |
| 2421 | + if not is_device_path_writable_block(devpth, info, log): |
| 2422 | return |
| 2423 | |
| 2424 | resizer = None |
| 2425 | diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py |
| 2426 | index 2548d1f..9812562 100644 |
| 2427 | --- a/cloudinit/config/cc_resolv_conf.py |
| 2428 | +++ b/cloudinit/config/cc_resolv_conf.py |
| 2429 | @@ -55,7 +55,7 @@ LOG = logging.getLogger(__name__) |
| 2430 | |
| 2431 | frequency = PER_INSTANCE |
| 2432 | |
| 2433 | -distros = ['fedora', 'rhel', 'sles'] |
| 2434 | +distros = ['fedora', 'opensuse', 'rhel', 'sles'] |
| 2435 | |
| 2436 | |
| 2437 | def generate_resolv_conf(template_fn, params, target_fname="/etc/resolv.conf"): |
| 2438 | diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py |
| 2439 | index dfa8cb3..449872f 100644 |
| 2440 | --- a/cloudinit/config/cc_runcmd.py |
| 2441 | +++ b/cloudinit/config/cc_runcmd.py |
| 2442 | @@ -6,41 +6,70 @@ |
| 2443 | # |
| 2444 | # This file is part of cloud-init. See LICENSE file for license information. |
| 2445 | |
| 2446 | -""" |
| 2447 | -Runcmd |
| 2448 | ------- |
| 2449 | -**Summary:** run commands |
| 2450 | +"""Runcmd: run arbitrary commands at rc.local with output to the console""" |
| 2451 | |
| 2452 | -Run arbitrary commands at a rc.local like level with output to the console. |
| 2453 | -Each item can be either a list or a string. If the item is a list, it will be |
| 2454 | -properly executed as if passed to ``execve()`` (with the first arg as the |
| 2455 | -command). If the item is a string, it will be written to a file and interpreted |
| 2456 | -using ``sh``. |
| 2457 | - |
| 2458 | -.. note:: |
| 2459 | - all commands must be proper yaml, so you have to quote any characters yaml |
| 2460 | - would eat (':' can be problematic) |
| 2461 | - |
| 2462 | -**Internal name:** ``cc_runcmd`` |
| 2463 | +from cloudinit.config.schema import ( |
| 2464 | + get_schema_doc, validate_cloudconfig_schema) |
| 2465 | +from cloudinit.distros import ALL_DISTROS |
| 2466 | +from cloudinit.settings import PER_INSTANCE |
| 2467 | +from cloudinit import util |
| 2468 | |
| 2469 | -**Module frequency:** per instance |
| 2470 | +import os |
| 2471 | +from textwrap import dedent |
| 2472 | |
| 2473 | -**Supported distros:** all |
| 2474 | |
| 2475 | -**Config keys**:: |
| 2476 | +# The schema definition for each cloud-config module is a strict contract for |
| 2477 | +# describing supported configuration parameters for each cloud-config section. |
| 2478 | +# It allows cloud-config to validate and alert users to invalid or ignored |
| 2479 | +# configuration options before actually attempting to deploy with said |
| 2480 | +# configuration. |
| 2481 | |
| 2482 | - runcmd: |
| 2483 | - - [ ls, -l, / ] |
| 2484 | - - [ sh, -xc, "echo $(date) ': hello world!'" ] |
| 2485 | - - [ sh, -c, echo "=========hello world'=========" ] |
| 2486 | - - ls -l /root |
| 2487 | - - [ wget, "http://example.org", -O, /tmp/index.html ] |
| 2488 | -""" |
| 2489 | +distros = [ALL_DISTROS] |
| 2490 | |
| 2491 | +schema = { |
| 2492 | + 'id': 'cc_runcmd', |
| 2493 | + 'name': 'Runcmd', |
| 2494 | + 'title': 'Run arbitrary commands', |
| 2495 | + 'description': dedent("""\ |
| 2496 | + Run arbitrary commands at a rc.local like level with output to the |
| 2497 | + console. Each item can be either a list or a string. If the item is a |
| 2498 | + list, it will be properly executed as if passed to ``execve()`` (with |
| 2499 | + the first arg as the command). If the item is a string, it will be |
| 2500 | + written to a file and interpreted |
| 2501 | + using ``sh``. |
| 2502 | |
| 2503 | -import os |
| 2504 | + .. note:: |
| 2505 | + all commands must be proper yaml, so you have to quote any characters |
| 2506 | + yaml would eat (':' can be problematic)"""), |
| 2507 | + 'distros': distros, |
| 2508 | + 'examples': [dedent("""\ |
| 2509 | + runcmd: |
| 2510 | + - [ ls, -l, / ] |
| 2511 | + - [ sh, -xc, "echo $(date) ': hello world!'" ] |
| 2512 | + - [ sh, -c, echo "=========hello world'=========" ] |
| 2513 | + - ls -l /root |
| 2514 | + - [ wget, "http://example.org", -O, /tmp/index.html ] |
| 2515 | + """)], |
| 2516 | + 'frequency': PER_INSTANCE, |
| 2517 | + 'type': 'object', |
| 2518 | + 'properties': { |
| 2519 | + 'runcmd': { |
| 2520 | + 'type': 'array', |
| 2521 | + 'items': { |
| 2522 | + 'oneOf': [ |
| 2523 | + {'type': 'array', 'items': {'type': 'string'}}, |
| 2524 | + {'type': 'string'}] |
| 2525 | + }, |
| 2526 | + 'additionalItems': False, # Reject items of non-string non-list |
| 2527 | + 'additionalProperties': False, |
| 2528 | + 'minItems': 1, |
| 2529 | + 'required': [], |
| 2530 | + 'uniqueItems': True |
| 2531 | + } |
| 2532 | + } |
| 2533 | +} |
| 2534 | |
| 2535 | -from cloudinit import util |
| 2536 | +__doc__ = get_schema_doc(schema) # Supplement python help() |
| 2537 | |
| 2538 | |
| 2539 | def handle(name, cfg, cloud, log, _args): |
| 2540 | @@ -49,6 +78,7 @@ def handle(name, cfg, cloud, log, _args): |
| 2541 | " no 'runcmd' key in configuration"), name) |
| 2542 | return |
| 2543 | |
| 2544 | + validate_cloudconfig_schema(cfg, schema) |
| 2545 | out_fn = os.path.join(cloud.get_ipath('scripts'), "runcmd") |
| 2546 | cmd = cfg["runcmd"] |
| 2547 | try: |
| 2548 | diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py |
| 2549 | index a9682f1..eecb817 100644 |
| 2550 | --- a/cloudinit/config/cc_snappy.py |
| 2551 | +++ b/cloudinit/config/cc_snappy.py |
| 2552 | @@ -63,11 +63,11 @@ is ``auto``. Options are: |
| 2553 | |
| 2554 | from cloudinit import log as logging |
| 2555 | from cloudinit.settings import PER_INSTANCE |
| 2556 | +from cloudinit import temp_utils |
| 2557 | from cloudinit import util |
| 2558 | |
| 2559 | import glob |
| 2560 | import os |
| 2561 | -import tempfile |
| 2562 | |
| 2563 | LOG = logging.getLogger(__name__) |
| 2564 | |
| 2565 | @@ -183,7 +183,7 @@ def render_snap_op(op, name, path=None, cfgfile=None, config=None): |
| 2566 | # config |
| 2567 | # Note, however, we do not touch config files on disk. |
| 2568 | nested_cfg = {'config': {shortname: config}} |
| 2569 | - (fd, cfg_tmpf) = tempfile.mkstemp() |
| 2570 | + (fd, cfg_tmpf) = temp_utils.mkstemp() |
| 2571 | os.write(fd, util.yaml_dumps(nested_cfg).encode()) |
| 2572 | os.close(fd) |
| 2573 | cfgfile = cfg_tmpf |
| 2574 | diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py |
| 2575 | index 0066e97..35d8c57 100755 |
| 2576 | --- a/cloudinit/config/cc_ssh_authkey_fingerprints.py |
| 2577 | +++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py |
| 2578 | @@ -28,7 +28,7 @@ the keys can be specified, but defaults to ``md5``. |
| 2579 | import base64 |
| 2580 | import hashlib |
| 2581 | |
| 2582 | -from prettytable import PrettyTable |
| 2583 | +from cloudinit.simpletable import SimpleTable |
| 2584 | |
| 2585 | from cloudinit.distros import ug_util |
| 2586 | from cloudinit import ssh_util |
| 2587 | @@ -74,7 +74,7 @@ def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5', |
| 2588 | return |
| 2589 | tbl_fields = ['Keytype', 'Fingerprint (%s)' % (hash_meth), 'Options', |
| 2590 | 'Comment'] |
| 2591 | - tbl = PrettyTable(tbl_fields) |
| 2592 | + tbl = SimpleTable(tbl_fields) |
| 2593 | for entry in key_entries: |
| 2594 | if _is_printable_key(entry): |
| 2595 | row = [] |
| 2596 | diff --git a/cloudinit/config/cc_ubuntu_init_switch.py b/cloudinit/config/cc_ubuntu_init_switch.py |
| 2597 | deleted file mode 100644 |
| 2598 | index 5dd2690..0000000 |
| 2599 | --- a/cloudinit/config/cc_ubuntu_init_switch.py |
| 2600 | +++ /dev/null |
| 2601 | @@ -1,160 +0,0 @@ |
| 2602 | -# Copyright (C) 2014 Canonical Ltd. |
| 2603 | -# |
| 2604 | -# Author: Scott Moser <scott.moser@canonical.com> |
| 2605 | -# |
| 2606 | -# This file is part of cloud-init. See LICENSE file for license information. |
| 2607 | - |
| 2608 | -""" |
| 2609 | -Ubuntu Init Switch |
| 2610 | ------------------- |
| 2611 | -**Summary:** reboot system into another init. |
| 2612 | - |
| 2613 | -This module provides a way for the user to boot with systemd even if the image |
| 2614 | -is set to boot with upstart. It should be run as one of the first |
| 2615 | -``cloud_init_modules``, and will switch the init system and then issue a |
| 2616 | -reboot. The next boot will come up in the target init system and no action |
| 2617 | -will be taken. This should be inert on non-ubuntu systems, and also |
| 2618 | -exit quickly. |
| 2619 | - |
| 2620 | -.. note:: |
| 2621 | - best effort is made, but it's possible this system will break, and probably |
| 2622 | - won't interact well with any other mechanism you've used to switch the init |
| 2623 | - system. |
| 2624 | - |
| 2625 | -**Internal name:** ``cc_ubuntu_init_switch`` |
| 2626 | - |
| 2627 | -**Module frequency:** once per instance |
| 2628 | - |
| 2629 | -**Supported distros:** ubuntu |
| 2630 | - |
| 2631 | -**Config keys**:: |
| 2632 | - |
| 2633 | - init_switch: |
| 2634 | - target: systemd (can be 'systemd' or 'upstart') |
| 2635 | - reboot: true (reboot if a change was made, or false to not reboot) |
| 2636 | -""" |
| 2637 | - |
| 2638 | -from cloudinit.distros import ubuntu |
| 2639 | -from cloudinit import log as logging |
| 2640 | -from cloudinit.settings import PER_INSTANCE |
| 2641 | -from cloudinit import util |
| 2642 | - |
| 2643 | -import os |
| 2644 | -import time |
| 2645 | - |
| 2646 | -frequency = PER_INSTANCE |
| 2647 | -REBOOT_CMD = ["/sbin/reboot", "--force"] |
| 2648 | - |
| 2649 | -DEFAULT_CONFIG = { |
| 2650 | - 'init_switch': {'target': None, 'reboot': True} |
| 2651 | -} |
| 2652 | - |
| 2653 | -SWITCH_INIT = """ |
| 2654 | -#!/bin/sh |
| 2655 | -# switch_init: [upstart | systemd] |
| 2656 | - |
| 2657 | -is_systemd() { |
| 2658 | - [ "$(dpkg-divert --listpackage /sbin/init)" = "systemd-sysv" ] |
| 2659 | -} |
| 2660 | -debug() { echo "$@" 1>&2; } |
| 2661 | -fail() { echo "$@" 1>&2; exit 1; } |
| 2662 | - |
| 2663 | -if [ "$1" = "systemd" ]; then |
| 2664 | - if is_systemd; then |
| 2665 | - debug "already systemd, nothing to do" |
| 2666 | - else |
| 2667 | - [ -f /lib/systemd/systemd ] || fail "no systemd available"; |
| 2668 | - dpkg-divert --package systemd-sysv --divert /sbin/init.diverted \\ |
| 2669 | - --rename /sbin/init |
| 2670 | - fi |
| 2671 | - [ -f /sbin/init ] || ln /lib/systemd/systemd /sbin/init |
| 2672 | -elif [ "$1" = "upstart" ]; then |
| 2673 | - if is_systemd; then |
| 2674 | - rm -f /sbin/init |
| 2675 | - dpkg-divert --package systemd-sysv --rename --remove /sbin/init |
| 2676 | - else |
| 2677 | - debug "already upstart, nothing to do." |
| 2678 | - fi |
| 2679 | -else |
| 2680 | - fail "Error. expect 'upstart' or 'systemd'" |
| 2681 | -fi |
| 2682 | -""" |
| 2683 | - |
| 2684 | -distros = ['ubuntu'] |
| 2685 | - |
| 2686 | - |
| 2687 | -def handle(name, cfg, cloud, log, args): |
| 2688 | - """Handler method activated by cloud-init.""" |
| 2689 | - |
| 2690 | - if not isinstance(cloud.distro, ubuntu.Distro): |
| 2691 | - log.debug("%s: distro is '%s', not ubuntu. returning", |
| 2692 | - name, cloud.distro.__class__) |
| 2693 | - return |
| 2694 | - |
| 2695 | - cfg = util.mergemanydict([cfg, DEFAULT_CONFIG]) |
| 2696 | - target = cfg['init_switch']['target'] |
| 2697 | - reboot = cfg['init_switch']['reboot'] |
| 2698 | - |
| 2699 | - if len(args) != 0: |
| 2700 | - target = args[0] |
| 2701 | - if len(args) > 1: |
| 2702 | - reboot = util.is_true(args[1]) |
| 2703 | - |
| 2704 | - if not target: |
| 2705 | - log.debug("%s: target=%s. nothing to do", name, target) |
| 2706 | - return |
| 2707 | - |
| 2708 | - if not util.which('dpkg'): |
| 2709 | - log.warn("%s: 'dpkg' not available. Assuming not ubuntu", name) |
| 2710 | - return |
| 2711 | - |
| 2712 | - supported = ('upstart', 'systemd') |
| 2713 | - if target not in supported: |
| 2714 | - log.warn("%s: target set to %s, expected one of: %s", |
| 2715 | - name, target, str(supported)) |
| 2716 | - |
| 2717 | - if os.path.exists("/run/systemd/system"): |
| 2718 | - current = "systemd" |
| 2719 | - else: |
| 2720 | - current = "upstart" |
| 2721 | - |
| 2722 | - if current == target: |
| 2723 | - log.debug("%s: current = target = %s. nothing to do", name, target) |
| 2724 | - return |
| 2725 | - |
| 2726 | - try: |
| 2727 | - util.subp(['sh', '-s', target], data=SWITCH_INIT) |
| 2728 | - except util.ProcessExecutionError as e: |
| 2729 | - log.warn("%s: Failed to switch to init '%s'. %s", name, target, e) |
| 2730 | - return |
| 2731 | - |
| 2732 | - if util.is_false(reboot): |
| 2733 | - log.info("%s: switched '%s' to '%s'. reboot=false, not rebooting.", |
| 2734 | - name, current, target) |
| 2735 | - return |
| 2736 | - |
| 2737 | - try: |
| 2738 | - log.warn("%s: switched '%s' to '%s'. rebooting.", |
| 2739 | - name, current, target) |
| 2740 | - logging.flushLoggers(log) |
| 2741 | - _fire_reboot(log, wait_attempts=4, initial_sleep=4) |
| 2742 | - except Exception as e: |
| 2743 | - util.logexc(log, "Requested reboot did not happen!") |
| 2744 | - raise |
| 2745 | - |
| 2746 | - |
| 2747 | -def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2): |
| 2748 | - util.subp(REBOOT_CMD) |
| 2749 | - start = time.time() |
| 2750 | - wait_time = initial_sleep |
| 2751 | - for _i in range(0, wait_attempts): |
| 2752 | - time.sleep(wait_time) |
| 2753 | - wait_time *= backoff |
| 2754 | - elapsed = time.time() - start |
| 2755 | - log.debug("Rebooted, but still running after %s seconds", int(elapsed)) |
| 2756 | - # If we got here, not good |
| 2757 | - elapsed = time.time() - start |
| 2758 | - raise RuntimeError(("Reboot did not happen" |
| 2759 | - " after %s seconds!") % (int(elapsed))) |
| 2760 | - |
| 2761 | -# vi: ts=4 expandtab |
| 2762 | diff --git a/cloudinit/config/cc_zypper_add_repo.py b/cloudinit/config/cc_zypper_add_repo.py |
| 2763 | new file mode 100644 |
| 2764 | index 0000000..aba2695 |
| 2765 | --- /dev/null |
| 2766 | +++ b/cloudinit/config/cc_zypper_add_repo.py |
| 2767 | @@ -0,0 +1,218 @@ |
| 2768 | +# |
| 2769 | +# Copyright (C) 2017 SUSE LLC. |
| 2770 | +# |
| 2771 | +# This file is part of cloud-init. See LICENSE file for license information. |
| 2772 | + |
| 2773 | +"""zypper_add_repo: Add zyper repositories to the system""" |
| 2774 | + |
| 2775 | +import configobj |
| 2776 | +import os |
| 2777 | +from six import string_types |
| 2778 | +from textwrap import dedent |
| 2779 | + |
| 2780 | +from cloudinit.config.schema import get_schema_doc |
| 2781 | +from cloudinit import log as logging |
| 2782 | +from cloudinit.settings import PER_ALWAYS |
| 2783 | +from cloudinit import util |
| 2784 | + |
| 2785 | +distros = ['opensuse', 'sles'] |
| 2786 | + |
| 2787 | +schema = { |
| 2788 | + 'id': 'cc_zypper_add_repo', |
| 2789 | + 'name': 'ZypperAddRepo', |
| 2790 | + 'title': 'Configure zypper behavior and add zypper repositories', |
| 2791 | + 'description': dedent("""\ |
| 2792 | + Configure zypper behavior by modifying /etc/zypp/zypp.conf. The |
| 2793 | + configuration writer is "dumb" and will simply append the provided |
| 2794 | + configuration options to the configuration file. Option settings |
| 2795 | + that may be duplicate will be resolved by the way the zypp.conf file |
| 2796 | + is parsed. The file is in INI format. |
| 2797 | + Add repositories to the system. No validation is performed on the |
| 2798 | + repository file entries, it is assumed the user is familiar with |
| 2799 | + the zypper repository file format."""), |
| 2800 | + 'distros': distros, |
| 2801 | + 'examples': [dedent("""\ |
| 2802 | + zypper: |
| 2803 | + repos: |
| 2804 | + - id: opensuse-oss |
| 2805 | + name: os-oss |
| 2806 | + baseurl: http://dl.opensuse.org/dist/leap/v/repo/oss/ |
| 2807 | + enabled: 1 |
| 2808 | + autorefresh: 1 |
| 2809 | + - id: opensuse-oss-update |
| 2810 | + name: os-oss-up |
| 2811 | + baseurl: http://dl.opensuse.org/dist/leap/v/update |
| 2812 | + # any setting per |
| 2813 | + # https://en.opensuse.org/openSUSE:Standards_RepoInfo |
| 2814 | + # enable and autorefresh are on by default |
| 2815 | + config: |
| 2816 | + reposdir: /etc/zypp/repos.dir |
| 2817 | + servicesdir: /etc/zypp/services.d |
| 2818 | + download.use_deltarpm: true |
| 2819 | + # any setting in /etc/zypp/zypp.conf |
| 2820 | + """)], |
| 2821 | + 'frequency': PER_ALWAYS, |
| 2822 | + 'type': 'object', |
| 2823 | + 'properties': { |
| 2824 | + 'zypper': { |
| 2825 | + 'type': 'object', |
| 2826 | + 'properties': { |
| 2827 | + 'repos': { |
| 2828 | + 'type': 'array', |
| 2829 | + 'items': { |
| 2830 | + 'type': 'object', |
| 2831 | + 'properties': { |
| 2832 | + 'id': { |
| 2833 | + 'type': 'string', |
| 2834 | + 'description': dedent("""\ |
| 2835 | + The unique id of the repo, used when |
| 2836 | + writing |
| 2837 | + /etc/zypp/repos.d/<id>.repo.""") |
| 2838 | + }, |
| 2839 | + 'baseurl': { |
| 2840 | + 'type': 'string', |
| 2841 | + 'format': 'uri', # built-in format type |
| 2842 | + 'description': 'The base repositoy URL' |
| 2843 | + } |
| 2844 | + }, |
| 2845 | + 'required': ['id', 'baseurl'], |
| 2846 | + 'additionalProperties': True |
| 2847 | + }, |
| 2848 | + 'minItems': 1 |
| 2849 | + }, |
| 2850 | + 'config': { |
| 2851 | + 'type': 'object', |
| 2852 | + 'description': dedent("""\ |
| 2853 | + Any supported zypo.conf key is written to |
| 2854 | + /etc/zypp/zypp.conf'""") |
| 2855 | + } |
| 2856 | + }, |
| 2857 | + 'required': [], |
| 2858 | + 'minProperties': 1, # Either config or repo must be provided |
| 2859 | + 'additionalProperties': False, # only repos and config allowed |
| 2860 | + } |
| 2861 | + } |
| 2862 | +} |
| 2863 | + |
| 2864 | +__doc__ = get_schema_doc(schema) # Supplement python help() |
| 2865 | + |
| 2866 | +LOG = logging.getLogger(__name__) |
| 2867 | + |
| 2868 | + |
| 2869 | +def _canonicalize_id(repo_id): |
| 2870 | + repo_id = repo_id.replace(" ", "_") |
| 2871 | + return repo_id |
| 2872 | + |
| 2873 | + |
| 2874 | +def _format_repo_value(val): |
| 2875 | + if isinstance(val, bool): |
| 2876 | + # zypp prefers 1/0 |
| 2877 | + return 1 if val else 0 |
| 2878 | + if isinstance(val, (list, tuple)): |
| 2879 | + return "\n ".join([_format_repo_value(v) for v in val]) |
| 2880 | + if not isinstance(val, string_types): |
| 2881 | + return str(val) |
| 2882 | + return val |
| 2883 | + |
| 2884 | + |
| 2885 | +def _format_repository_config(repo_id, repo_config): |
| 2886 | + to_be = configobj.ConfigObj() |
| 2887 | + to_be[repo_id] = {} |
| 2888 | + # Do basic translation of the items -> values |
| 2889 | + for (k, v) in repo_config.items(): |
| 2890 | + # For now assume that people using this know the format |
| 2891 | + # of zypper repos and don't verify keys/values further |
| 2892 | + to_be[repo_id][k] = _format_repo_value(v) |
| 2893 | + lines = to_be.write() |
| 2894 | + return "\n".join(lines) |
| 2895 | + |
| 2896 | + |
| 2897 | +def _write_repos(repos, repo_base_path): |
| 2898 | + """Write the user-provided repo definition files |
| 2899 | + @param repos: A list of repo dictionary objects provided by the user's |
| 2900 | + cloud config. |
| 2901 | + @param repo_base_path: The directory path to which repo definitions are |
| 2902 | + written. |
| 2903 | + """ |
| 2904 | + |
| 2905 | + if not repos: |
| 2906 | + return |
| 2907 | + valid_repos = {} |
| 2908 | + for index, user_repo_config in enumerate(repos): |
| 2909 | + # Skip on absent required keys |
| 2910 | + missing_keys = set(['id', 'baseurl']).difference(set(user_repo_config)) |
| 2911 | + if missing_keys: |
| 2912 | + LOG.warning( |
| 2913 | + "Repo config at index %d is missing required config keys: %s", |
| 2914 | + index, ",".join(missing_keys)) |
| 2915 | + continue |
| 2916 | + repo_id = user_repo_config.get('id') |
| 2917 | + canon_repo_id = _canonicalize_id(repo_id) |
| 2918 | + repo_fn_pth = os.path.join(repo_base_path, "%s.repo" % (canon_repo_id)) |
| 2919 | + if os.path.exists(repo_fn_pth): |
| 2920 | + LOG.info("Skipping repo %s, file %s already exists!", |
| 2921 | + repo_id, repo_fn_pth) |
| 2922 | + continue |
| 2923 | + elif repo_id in valid_repos: |
| 2924 | + LOG.info("Skipping repo %s, file %s already pending!", |
| 2925 | + repo_id, repo_fn_pth) |
| 2926 | + continue |
| 2927 | + |
| 2928 | + # Do some basic key formatting |
| 2929 | + repo_config = dict( |
| 2930 | + (k.lower().strip().replace("-", "_"), v) |
| 2931 | + for k, v in user_repo_config.items() |
| 2932 | + if k and k != 'id') |
| 2933 | + |
| 2934 | + # Set defaults if not present |
| 2935 | + for field in ['enabled', 'autorefresh']: |
| 2936 | + if field not in repo_config: |
| 2937 | + repo_config[field] = '1' |
| 2938 | + |
| 2939 | + valid_repos[repo_id] = (repo_fn_pth, repo_config) |
| 2940 | + |
| 2941 | + for (repo_id, repo_data) in valid_repos.items(): |
| 2942 | + repo_blob = _format_repository_config(repo_id, repo_data[-1]) |
| 2943 | + util.write_file(repo_data[0], repo_blob) |
| 2944 | + |
| 2945 | + |
| 2946 | +def _write_zypp_config(zypper_config): |
| 2947 | + """Write to the default zypp configuration file /etc/zypp/zypp.conf""" |
| 2948 | + if not zypper_config: |
| 2949 | + return |
| 2950 | + zypp_config = '/etc/zypp/zypp.conf' |
| 2951 | + zypp_conf_content = util.load_file(zypp_config) |
| 2952 | + new_settings = ['# Added via cloud.cfg'] |
| 2953 | + for setting, value in zypper_config.items(): |
| 2954 | + if setting == 'configdir': |
| 2955 | + msg = 'Changing the location of the zypper configuration is ' |
| 2956 | + msg += 'not supported, skipping "configdir" setting' |
| 2957 | + LOG.warning(msg) |
| 2958 | + continue |
| 2959 | + if value: |
| 2960 | + new_settings.append('%s=%s' % (setting, value)) |
| 2961 | + if len(new_settings) > 1: |
| 2962 | + new_config = zypp_conf_content + '\n'.join(new_settings) |
| 2963 | + else: |
| 2964 | + new_config = zypp_conf_content |
| 2965 | + util.write_file(zypp_config, new_config) |
| 2966 | + |
| 2967 | + |
| 2968 | +def handle(name, cfg, _cloud, log, _args): |
| 2969 | + zypper_section = cfg.get('zypper') |
| 2970 | + if not zypper_section: |
| 2971 | + LOG.debug(("Skipping module named %s," |
| 2972 | + " no 'zypper' relevant configuration found"), name) |
| 2973 | + return |
| 2974 | + repos = zypper_section.get('repos') |
| 2975 | + if not repos: |
| 2976 | + LOG.debug(("Skipping module named %s," |
| 2977 | + " no 'repos' configuration found"), name) |
| 2978 | + return |
| 2979 | + zypper_config = zypper_section.get('config', {}) |
| 2980 | + repo_base_path = zypper_config.get('reposdir', '/etc/zypp/repos.d/') |
| 2981 | + |
| 2982 | + _write_zypp_config(zypper_config) |
| 2983 | + _write_repos(repos, repo_base_path) |
| 2984 | + |
| 2985 | +# vi: ts=4 expandtab |
| 2986 | diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py |
| 2987 | index 6400f00..bb291ff 100644 |
| 2988 | --- a/cloudinit/config/schema.py |
| 2989 | +++ b/cloudinit/config/schema.py |
| 2990 | @@ -3,19 +3,24 @@ |
| 2991 | |
| 2992 | from __future__ import print_function |
| 2993 | |
| 2994 | -from cloudinit.util import read_file_or_url |
| 2995 | +from cloudinit import importer |
| 2996 | +from cloudinit.util import find_modules, read_file_or_url |
| 2997 | |
| 2998 | import argparse |
| 2999 | +from collections import defaultdict |
| 3000 | +from copy import deepcopy |
| 3001 | import logging |
| 3002 | import os |
| 3003 | +import re |
| 3004 | import sys |
| 3005 | import yaml |
| 3006 | |
| 3007 | +_YAML_MAP = {True: 'true', False: 'false', None: 'null'} |
| 3008 | SCHEMA_UNDEFINED = b'UNDEFINED' |
| 3009 | CLOUD_CONFIG_HEADER = b'#cloud-config' |
| 3010 | SCHEMA_DOC_TMPL = """ |
| 3011 | {name} |
| 3012 | ---- |
| 3013 | +{title_underbar} |
| 3014 | **Summary:** {title} |
| 3015 | |
| 3016 | {description} |
| 3017 | @@ -31,6 +36,8 @@ SCHEMA_DOC_TMPL = """ |
| 3018 | {examples} |
| 3019 | """ |
| 3020 | SCHEMA_PROPERTY_TMPL = '{prefix}**{prop_name}:** ({type}) {description}' |
| 3021 | +SCHEMA_EXAMPLES_HEADER = '\n**Examples**::\n\n' |
| 3022 | +SCHEMA_EXAMPLES_SPACER_TEMPLATE = '\n # --- Example{0} ---' |
| 3023 | |
| 3024 | |
| 3025 | class SchemaValidationError(ValueError): |
| 3026 | @@ -83,11 +90,49 @@ def validate_cloudconfig_schema(config, schema, strict=False): |
| 3027 | logging.warning('Invalid config:\n%s', '\n'.join(messages)) |
| 3028 | |
| 3029 | |
| 3030 | -def validate_cloudconfig_file(config_path, schema): |
| 3031 | +def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors): |
| 3032 | + """Return contents of the cloud-config file annotated with schema errors. |
| 3033 | + |
| 3034 | + @param cloudconfig: YAML-loaded object from the original_content. |
| 3035 | + @param original_content: The contents of a cloud-config file |
| 3036 | + @param schema_errors: List of tuples from a JSONSchemaValidationError. The |
| 3037 | + tuples consist of (schemapath, error_message). |
| 3038 | + """ |
| 3039 | + if not schema_errors: |
| 3040 | + return original_content |
| 3041 | + schemapaths = _schemapath_for_cloudconfig(cloudconfig, original_content) |
| 3042 | + errors_by_line = defaultdict(list) |
| 3043 | + error_count = 1 |
| 3044 | + error_footer = [] |
| 3045 | + annotated_content = [] |
| 3046 | + for path, msg in schema_errors: |
| 3047 | + errors_by_line[schemapaths[path]].append(msg) |
| 3048 | + error_footer.append('# E{0}: {1}'.format(error_count, msg)) |
| 3049 | + error_count += 1 |
| 3050 | + lines = original_content.decode().split('\n') |
| 3051 | + error_count = 1 |
| 3052 | + for line_number, line in enumerate(lines): |
| 3053 | + errors = errors_by_line[line_number + 1] |
| 3054 | + if errors: |
| 3055 | + error_label = ','.join( |
| 3056 | + ['E{0}'.format(count + error_count) |
| 3057 | + for count in range(0, len(errors))]) |
| 3058 | + error_count += len(errors) |
| 3059 | + annotated_content.append(line + '\t\t# ' + error_label) |
| 3060 | + else: |
| 3061 | + annotated_content.append(line) |
| 3062 | + annotated_content.append( |
| 3063 | + '# Errors: -------------\n{0}\n\n'.format('\n'.join(error_footer))) |
| 3064 | + return '\n'.join(annotated_content) |
| 3065 | + |
| 3066 | + |
| 3067 | +def validate_cloudconfig_file(config_path, schema, annotate=False): |
| 3068 | """Validate cloudconfig file adheres to a specific jsonschema. |
| 3069 | |
| 3070 | @param config_path: Path to the yaml cloud-config file to parse. |
| 3071 | @param schema: Dict describing a valid jsonschema to validate against. |
| 3072 | + @param annotate: Boolean set True to print original config file with error |
| 3073 | + annotations on the offending lines. |
| 3074 | |
| 3075 | @raises SchemaValidationError containing any of schema_errors encountered. |
| 3076 | @raises RuntimeError when config_path does not exist. |
| 3077 | @@ -108,18 +153,83 @@ def validate_cloudconfig_file(config_path, schema): |
| 3078 | ('format', 'File {0} is not valid yaml. {1}'.format( |
| 3079 | config_path, str(e))),) |
| 3080 | raise SchemaValidationError(errors) |
| 3081 | - validate_cloudconfig_schema( |
| 3082 | - cloudconfig, schema, strict=True) |
| 3083 | + |
| 3084 | + try: |
| 3085 | + validate_cloudconfig_schema( |
| 3086 | + cloudconfig, schema, strict=True) |
| 3087 | + except SchemaValidationError as e: |
| 3088 | + if annotate: |
| 3089 | + print(annotated_cloudconfig_file( |
| 3090 | + cloudconfig, content, e.schema_errors)) |
| 3091 | + raise |
| 3092 | + |
| 3093 | + |
| 3094 | +def _schemapath_for_cloudconfig(config, original_content): |
| 3095 | + """Return a dictionary mapping schemapath to original_content line number. |
| 3096 | + |
| 3097 | + @param config: The yaml.loaded config dictionary of a cloud-config file. |
| 3098 | + @param original_content: The simple file content of the cloud-config file |
| 3099 | + """ |
| 3100 | + # FIXME Doesn't handle multi-line lists or multi-line strings |
| 3101 | + content_lines = original_content.decode().split('\n') |
| 3102 | + schema_line_numbers = {} |
| 3103 | + list_index = 0 |
| 3104 | + RE_YAML_INDENT = r'^(\s*)' |
| 3105 | + scopes = [] |
| 3106 | + for line_number, line in enumerate(content_lines): |
| 3107 | + indent_depth = len(re.match(RE_YAML_INDENT, line).groups()[0]) |
| 3108 | + line = line.strip() |
| 3109 | + if not line or line.startswith('#'): |
| 3110 | + continue |
| 3111 | + if scopes: |
| 3112 | + previous_depth, path_prefix = scopes[-1] |
| 3113 | + else: |
| 3114 | + previous_depth = -1 |
| 3115 | + path_prefix = '' |
| 3116 | + if line.startswith('- '): |
| 3117 | + key = str(list_index) |
| 3118 | + value = line[1:] |
| 3119 | + list_index += 1 |
| 3120 | + else: |
| 3121 | + list_index = 0 |
| 3122 | + key, value = line.split(':', 1) |
| 3123 | + while indent_depth <= previous_depth: |
| 3124 | + if scopes: |
| 3125 | + previous_depth, path_prefix = scopes.pop() |
| 3126 | + else: |
| 3127 | + previous_depth = -1 |
| 3128 | + path_prefix = '' |
| 3129 | + if path_prefix: |
| 3130 | + key = path_prefix + '.' + key |
| 3131 | + scopes.append((indent_depth, key)) |
| 3132 | + if value: |
| 3133 | + value = value.strip() |
| 3134 | + if value.startswith('['): |
| 3135 | + scopes.append((indent_depth + 2, key + '.0')) |
| 3136 | + for inner_list_index in range(0, len(yaml.safe_load(value))): |
| 3137 | + list_key = key + '.' + str(inner_list_index) |
| 3138 | + schema_line_numbers[list_key] = line_number + 1 |
| 3139 | + schema_line_numbers[key] = line_number + 1 |
| 3140 | + return schema_line_numbers |
| 3141 | |
| 3142 | |
| 3143 | def _get_property_type(property_dict): |
| 3144 | """Return a string representing a property type from a given jsonschema.""" |
| 3145 | property_type = property_dict.get('type', SCHEMA_UNDEFINED) |
| 3146 | + if property_type == SCHEMA_UNDEFINED and property_dict.get('enum'): |
| 3147 | + property_type = [ |
| 3148 | + str(_YAML_MAP.get(k, k)) for k in property_dict['enum']] |
| 3149 | if isinstance(property_type, list): |
| 3150 | property_type = '/'.join(property_type) |
| 3151 | - item_type = property_dict.get('items', {}).get('type') |
| 3152 | - if item_type: |
| 3153 | - property_type = '{0} of {1}'.format(property_type, item_type) |
| 3154 | + items = property_dict.get('items', {}) |
| 3155 | + sub_property_type = items.get('type', '') |
| 3156 | + # Collect each item type |
| 3157 | + for sub_item in items.get('oneOf', {}): |
| 3158 | + if sub_property_type: |
| 3159 | + sub_property_type += '/' |
| 3160 | + sub_property_type += '(' + _get_property_type(sub_item) + ')' |
| 3161 | + if sub_property_type: |
| 3162 | + return '{0} of {1}'.format(property_type, sub_property_type) |
| 3163 | return property_type |
| 3164 | |
| 3165 | |
| 3166 | @@ -146,12 +256,14 @@ def _get_schema_examples(schema, prefix=''): |
| 3167 | examples = schema.get('examples') |
| 3168 | if not examples: |
| 3169 | return '' |
| 3170 | - rst_content = '\n**Examples**::\n\n' |
| 3171 | - for example in examples: |
| 3172 | - example_yaml = yaml.dump(example, default_flow_style=False) |
| 3173 | + rst_content = SCHEMA_EXAMPLES_HEADER |
| 3174 | + for count, example in enumerate(examples): |
| 3175 | # Python2.6 is missing textwrapper.indent |
| 3176 | - lines = example_yaml.split('\n') |
| 3177 | + lines = example.split('\n') |
| 3178 | indented_lines = [' {0}'.format(line) for line in lines] |
| 3179 | + if rst_content != SCHEMA_EXAMPLES_HEADER: |
| 3180 | + indented_lines.insert( |
| 3181 | + 0, SCHEMA_EXAMPLES_SPACER_TEMPLATE.format(count + 1)) |
| 3182 | rst_content += '\n'.join(indented_lines) |
| 3183 | return rst_content |
| 3184 | |
| 3185 | @@ -162,61 +274,87 @@ def get_schema_doc(schema): |
| 3186 | @param schema: Dict of jsonschema to render. |
| 3187 | @raise KeyError: If schema lacks an expected key. |
| 3188 | """ |
| 3189 | - schema['property_doc'] = _get_property_doc(schema) |
| 3190 | - schema['examples'] = _get_schema_examples(schema) |
| 3191 | - schema['distros'] = ', '.join(schema['distros']) |
| 3192 | - return SCHEMA_DOC_TMPL.format(**schema) |
| 3193 | - |
| 3194 | - |
| 3195 | -def get_schema(section_key=None): |
| 3196 | - """Return a dict of jsonschema defined in any cc_* module. |
| 3197 | - |
| 3198 | - @param: section_key: Optionally limit schema to a specific top-level key. |
| 3199 | - """ |
| 3200 | - # TODO use util.find_modules in subsequent branch |
| 3201 | - from cloudinit.config.cc_ntp import schema |
| 3202 | - return schema |
| 3203 | + schema_copy = deepcopy(schema) |
| 3204 | + schema_copy['property_doc'] = _get_property_doc(schema) |
| 3205 | + schema_copy['examples'] = _get_schema_examples(schema) |
| 3206 | + schema_copy['distros'] = ', '.join(schema['distros']) |
| 3207 | + # Need an underbar of the same length as the name |
| 3208 | + schema_copy['title_underbar'] = re.sub(r'.', '-', schema['name']) |
| 3209 | + return SCHEMA_DOC_TMPL.format(**schema_copy) |
| 3210 | + |
| 3211 | + |
| 3212 | +FULL_SCHEMA = None |
| 3213 | + |
| 3214 | + |
| 3215 | +def get_schema(): |
| 3216 | + """Return jsonschema coalesced from all cc_* cloud-config module.""" |
| 3217 | + global FULL_SCHEMA |
| 3218 | + if FULL_SCHEMA: |
| 3219 | + return FULL_SCHEMA |
| 3220 | + full_schema = { |
| 3221 | + '$schema': 'http://json-schema.org/draft-04/schema#', |
| 3222 | + 'id': 'cloud-config-schema', 'allOf': []} |
| 3223 | + |
| 3224 | + configs_dir = os.path.dirname(os.path.abspath(__file__)) |
| 3225 | + potential_handlers = find_modules(configs_dir) |
| 3226 | + for (fname, mod_name) in potential_handlers.items(): |
| 3227 | + mod_locs, looked_locs = importer.find_module( |
| 3228 | + mod_name, ['cloudinit.config'], ['schema']) |
| 3229 | + if mod_locs: |
| 3230 | + mod = importer.import_module(mod_locs[0]) |
| 3231 | + full_schema['allOf'].append(mod.schema) |
| 3232 | + FULL_SCHEMA = full_schema |
| 3233 | + return full_schema |
| 3234 | |
| 3235 | |
| 3236 | def error(message): |
| 3237 | print(message, file=sys.stderr) |
| 3238 | - return 1 |
| 3239 | + sys.exit(1) |
| 3240 | |
| 3241 | |
| 3242 | -def get_parser(): |
| 3243 | +def get_parser(parser=None): |
| 3244 | """Return a parser for supported cmdline arguments.""" |
| 3245 | - parser = argparse.ArgumentParser() |
| 3246 | + if not parser: |
| 3247 | + parser = argparse.ArgumentParser( |
| 3248 | + prog='cloudconfig-schema', |
| 3249 | + description='Validate cloud-config files or document schema') |
| 3250 | parser.add_argument('-c', '--config-file', |
| 3251 | help='Path of the cloud-config yaml file to validate') |
| 3252 | parser.add_argument('-d', '--doc', action="store_true", default=False, |
| 3253 | help='Print schema documentation') |
| 3254 | - parser.add_argument('-k', '--key', |
| 3255 | - help='Limit validation or docs to a section key') |
| 3256 | + parser.add_argument('--annotate', action="store_true", default=False, |
| 3257 | + help='Annotate existing cloud-config file with errors') |
| 3258 | return parser |
| 3259 | |
| 3260 | |
| 3261 | -def main(): |
| 3262 | - """Tool to validate schema of a cloud-config file or print schema docs.""" |
| 3263 | - parser = get_parser() |
| 3264 | - args = parser.parse_args() |
| 3265 | +def handle_schema_args(name, args): |
| 3266 | + """Handle provided schema args and perform the appropriate actions.""" |
| 3267 | exclusive_args = [args.config_file, args.doc] |
| 3268 | if not any(exclusive_args) or all(exclusive_args): |
| 3269 | - return error('Expected either --config-file argument or --doc') |
| 3270 | - |
| 3271 | - schema = get_schema() |
| 3272 | + error('Expected either --config-file argument or --doc') |
| 3273 | + full_schema = get_schema() |
| 3274 | if args.config_file: |
| 3275 | try: |
| 3276 | - validate_cloudconfig_file(args.config_file, schema) |
| 3277 | + validate_cloudconfig_file( |
| 3278 | + args.config_file, full_schema, args.annotate) |
| 3279 | except (SchemaValidationError, RuntimeError) as e: |
| 3280 | - return error(str(e)) |
| 3281 | - print("Valid cloud-config file {0}".format(args.config_file)) |
| 3282 | + if not args.annotate: |
| 3283 | + error(str(e)) |
| 3284 | + else: |
| 3285 | + print("Valid cloud-config file {0}".format(args.config_file)) |
| 3286 | if args.doc: |
| 3287 | - print(get_schema_doc(schema)) |
| 3288 | + for subschema in full_schema['allOf']: |
| 3289 | + print(get_schema_doc(subschema)) |
| 3290 | + |
| 3291 | + |
| 3292 | +def main(): |
| 3293 | + """Tool to validate schema of a cloud-config file or print schema docs.""" |
| 3294 | + parser = get_parser() |
| 3295 | + handle_schema_args('cloudconfig-schema', parser.parse_args()) |
| 3296 | return 0 |
| 3297 | |
| 3298 | |
| 3299 | if __name__ == '__main__': |
| 3300 | sys.exit(main()) |
| 3301 | |
| 3302 | - |
| 3303 | # vi: ts=4 expandtab |
| 3304 | diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py |
| 3305 | index 1fd48a7..d5becd1 100755 |
| 3306 | --- a/cloudinit/distros/__init__.py |
| 3307 | +++ b/cloudinit/distros/__init__.py |
| 3308 | @@ -30,12 +30,16 @@ from cloudinit import util |
| 3309 | from cloudinit.distros.parsers import hosts |
| 3310 | |
| 3311 | |
| 3312 | +# Used when a cloud-config module can be run on all cloud-init distributions. |
| 3313 | +# The value 'all' is surfaced in module documentation for distro support. |
| 3314 | +ALL_DISTROS = 'all' |
| 3315 | + |
| 3316 | OSFAMILIES = { |
| 3317 | 'debian': ['debian', 'ubuntu'], |
| 3318 | 'redhat': ['centos', 'fedora', 'rhel'], |
| 3319 | 'gentoo': ['gentoo'], |
| 3320 | 'freebsd': ['freebsd'], |
| 3321 | - 'suse': ['sles'], |
| 3322 | + 'suse': ['opensuse', 'sles'], |
| 3323 | 'arch': ['arch'], |
| 3324 | } |
| 3325 | |
| 3326 | @@ -188,6 +192,9 @@ class Distro(object): |
| 3327 | def _get_localhost_ip(self): |
| 3328 | return "127.0.0.1" |
| 3329 | |
| 3330 | + def get_locale(self): |
| 3331 | + raise NotImplementedError() |
| 3332 | + |
| 3333 | @abc.abstractmethod |
| 3334 | def _read_hostname(self, filename, default=None): |
| 3335 | raise NotImplementedError() |
| 3336 | diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py |
| 3337 | index b4c0ba7..f87a343 100644 |
| 3338 | --- a/cloudinit/distros/arch.py |
| 3339 | +++ b/cloudinit/distros/arch.py |
| 3340 | @@ -14,6 +14,8 @@ from cloudinit.distros.parsers.hostname import HostnameConf |
| 3341 | |
| 3342 | from cloudinit.settings import PER_INSTANCE |
| 3343 | |
| 3344 | +import os |
| 3345 | + |
| 3346 | LOG = logging.getLogger(__name__) |
| 3347 | |
| 3348 | |
| 3349 | @@ -52,31 +54,10 @@ class Distro(distros.Distro): |
| 3350 | entries = net_util.translate_network(settings) |
| 3351 | LOG.debug("Translated ubuntu style network settings %s into %s", |
| 3352 | settings, entries) |
| 3353 | - dev_names = entries.keys() |
| 3354 | - # Format for netctl |
| 3355 | - for (dev, info) in entries.items(): |
| 3356 | - nameservers = [] |
| 3357 | - net_fn = self.network_conf_dir + dev |
| 3358 | - net_cfg = { |
| 3359 | - 'Connection': 'ethernet', |
| 3360 | - 'Interface': dev, |
| 3361 | - 'IP': info.get('bootproto'), |
| 3362 | - 'Address': "('%s/%s')" % (info.get('address'), |
| 3363 | - info.get('netmask')), |
| 3364 | - 'Gateway': info.get('gateway'), |
| 3365 | - 'DNS': str(tuple(info.get('dns-nameservers'))).replace(',', '') |
| 3366 | - } |
| 3367 | - util.write_file(net_fn, convert_netctl(net_cfg)) |
| 3368 | - if info.get('auto'): |
| 3369 | - self._enable_interface(dev) |
| 3370 | - if 'dns-nameservers' in info: |
| 3371 | - nameservers.extend(info['dns-nameservers']) |
| 3372 | - |
| 3373 | - if nameservers: |
| 3374 | - util.write_file(self.resolve_conf_fn, |
| 3375 | - convert_resolv_conf(nameservers)) |
| 3376 | - |
| 3377 | - return dev_names |
| 3378 | + return _render_network( |
| 3379 | + entries, resolv_conf=self.resolve_conf_fn, |
| 3380 | + conf_dir=self.network_conf_dir, |
| 3381 | + enable_func=self._enable_interface) |
| 3382 | |
| 3383 | def _enable_interface(self, device_name): |
| 3384 | cmd = ['netctl', 'reenable', device_name] |
| 3385 | @@ -173,13 +154,60 @@ class Distro(distros.Distro): |
| 3386 | ["-y"], freq=PER_INSTANCE) |
| 3387 | |
| 3388 | |
| 3389 | +def _render_network(entries, target="/", conf_dir="etc/netctl", |
| 3390 | + resolv_conf="etc/resolv.conf", enable_func=None): |
| 3391 | + """Render the translate_network format into netctl files in target. |
| 3392 | + Paths will be rendered under target. |
| 3393 | + """ |
| 3394 | + |
| 3395 | + devs = [] |
| 3396 | + nameservers = [] |
| 3397 | + resolv_conf = util.target_path(target, resolv_conf) |
| 3398 | + conf_dir = util.target_path(target, conf_dir) |
| 3399 | + |
| 3400 | + for (dev, info) in entries.items(): |
| 3401 | + if dev == 'lo': |
| 3402 | + # no configuration should be rendered for 'lo' |
| 3403 | + continue |
| 3404 | + devs.append(dev) |
| 3405 | + net_fn = os.path.join(conf_dir, dev) |
| 3406 | + net_cfg = { |
| 3407 | + 'Connection': 'ethernet', |
| 3408 | + 'Interface': dev, |
| 3409 | + 'IP': info.get('bootproto'), |
| 3410 | + 'Address': "%s/%s" % (info.get('address'), |
| 3411 | + info.get('netmask')), |
| 3412 | + 'Gateway': info.get('gateway'), |
| 3413 | + 'DNS': info.get('dns-nameservers', []), |
| 3414 | + } |
| 3415 | + util.write_file(net_fn, convert_netctl(net_cfg)) |
| 3416 | + if enable_func and info.get('auto'): |
| 3417 | + enable_func(dev) |
| 3418 | + if 'dns-nameservers' in info: |
| 3419 | + nameservers.extend(info['dns-nameservers']) |
| 3420 | + |
| 3421 | + if nameservers: |
| 3422 | + util.write_file(resolv_conf, |
| 3423 | + convert_resolv_conf(nameservers)) |
| 3424 | + return devs |
| 3425 | + |
| 3426 | + |
| 3427 | def convert_netctl(settings): |
| 3428 | - """Returns a settings string formatted for netctl.""" |
| 3429 | - result = '' |
| 3430 | - if isinstance(settings, dict): |
| 3431 | - for k, v in settings.items(): |
| 3432 | - result = result + '%s=%s\n' % (k, v) |
| 3433 | - return result |
| 3434 | + """Given a dictionary, returns a string in netctl profile format. |
| 3435 | + |
| 3436 | + netctl profile is described at: |
| 3437 | + https://git.archlinux.org/netctl.git/tree/docs/netctl.profile.5.txt |
| 3438 | + |
| 3439 | + Note that the 'Special Quoting Rules' are not handled here.""" |
| 3440 | + result = [] |
| 3441 | + for key in sorted(settings): |
| 3442 | + val = settings[key] |
| 3443 | + if val is None: |
| 3444 | + val = "" |
| 3445 | + elif isinstance(val, (tuple, list)): |
| 3446 | + val = "(" + ' '.join("'%s'" % v for v in val) + ")" |
| 3447 | + result.append("%s=%s\n" % (key, val)) |
| 3448 | + return ''.join(result) |
| 3449 | |
| 3450 | |
| 3451 | def convert_resolv_conf(settings): |
| 3452 | diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py |
| 3453 | index abfb81f..33cc0bf 100644 |
| 3454 | --- a/cloudinit/distros/debian.py |
| 3455 | +++ b/cloudinit/distros/debian.py |
| 3456 | @@ -61,11 +61,49 @@ class Distro(distros.Distro): |
| 3457 | # should only happen say once per instance...) |
| 3458 | self._runner = helpers.Runners(paths) |
| 3459 | self.osfamily = 'debian' |
| 3460 | + self.default_locale = 'en_US.UTF-8' |
| 3461 | + self.system_locale = None |
| 3462 | |
| 3463 | - def apply_locale(self, locale, out_fn=None): |
| 3464 | + def get_locale(self): |
| 3465 | + """Return the system locale if set, else the default locale""" |
| 3466 | + |
| 3467 | + # read system locale value |
| 3468 | + if not self.system_locale: |
| 3469 | + self.system_locale = read_system_locale() |
| 3470 | + |
| 3471 | + # Return system_locale setting if valid, else use default locale |
| 3472 | + return (self.system_locale if self.system_locale else |
| 3473 | + self.default_locale) |
| 3474 | + |
| 3475 | + def apply_locale(self, locale, out_fn=None, keyname='LANG'): |
| 3476 | + """Apply specified locale to system, regenerate if specified locale |
| 3477 | + differs from system default.""" |
| 3478 | if not out_fn: |
| 3479 | out_fn = LOCALE_CONF_FN |
| 3480 | - apply_locale(locale, out_fn) |
| 3481 | + |
| 3482 | + if not locale: |
| 3483 | + raise ValueError('Failed to provide locale value.') |
| 3484 | + |
| 3485 | + # Only call locale regeneration if needed |
| 3486 | + # Update system locale config with specified locale if needed |
| 3487 | + distro_locale = self.get_locale() |
| 3488 | + conf_fn_exists = os.path.exists(out_fn) |
| 3489 | + sys_locale_unset = False if self.system_locale else True |
| 3490 | + need_regen = (locale.lower() != distro_locale.lower() or |
| 3491 | + not conf_fn_exists or sys_locale_unset) |
| 3492 | + need_conf = not conf_fn_exists or need_regen or sys_locale_unset |
| 3493 | + |
| 3494 | + if need_regen: |
| 3495 | + regenerate_locale(locale, out_fn, keyname=keyname) |
| 3496 | + else: |
| 3497 | + LOG.debug( |
| 3498 | + "System has '%s=%s' requested '%s', skipping regeneration.", |
| 3499 | + keyname, self.system_locale, locale) |
| 3500 | + |
| 3501 | + if need_conf: |
| 3502 | + update_locale_conf(locale, out_fn, keyname=keyname) |
| 3503 | + # once we've updated the system config, invalidate cache |
| 3504 | + self.system_locale = None |
| 3505 | |
| 3506 | def install_packages(self, pkglist): |
| 3507 | self.update_package_sources() |
| 3508 | @@ -218,37 +256,47 @@ def _maybe_remove_legacy_eth0(path="/etc/network/interfaces.d/eth0.cfg"): |
| 3509 | LOG.warning(msg) |
| 3510 | |
| 3511 | |
| 3512 | -def apply_locale(locale, sys_path=LOCALE_CONF_FN, keyname='LANG'): |
| 3513 | - """Apply the locale. |
| 3514 | - |
| 3515 | - Run locale-gen for the provided locale and set the default |
| 3516 | - system variable `keyname` appropriately in the provided `sys_path`. |
| 3517 | - |
| 3518 | - If sys_path indicates that `keyname` is already set to `locale` |
| 3519 | - then no changes will be made and locale-gen not called. |
| 3520 | - This allows images built with a locale already generated to not re-run |
| 3521 | - locale-gen which can be very heavy. |
| 3522 | - """ |
| 3523 | - if not locale: |
| 3524 | - raise ValueError('Failed to provide locale value.') |
| 3525 | - |
| 3526 | +def read_system_locale(sys_path=LOCALE_CONF_FN, keyname='LANG'): |
| 3527 | + """Read system default locale setting, if present""" |
| 3528 | + sys_val = "" |
| 3529 | if not sys_path: |
| 3530 | raise ValueError('Invalid path: %s' % sys_path) |
| 3531 | |
| 3532 | if os.path.exists(sys_path): |
| 3533 | locale_content = util.load_file(sys_path) |
| 3534 | - # if LANG isn't present, regen |
| 3535 | sys_defaults = util.load_shell_content(locale_content) |
| 3536 | sys_val = sys_defaults.get(keyname, "") |
| 3537 | - if sys_val.lower() == locale.lower(): |
| 3538 | - LOG.debug( |
| 3539 | - "System has '%s=%s' requested '%s', skipping regeneration.", |
| 3540 | - keyname, sys_val, locale) |
| 3541 | - return |
| 3542 | |
| 3543 | - util.subp(['locale-gen', locale], capture=False) |
| 3544 | + return sys_val |
| 3545 | + |
| 3546 | + |
| 3547 | +def update_locale_conf(locale, sys_path, keyname='LANG'): |
| 3548 | + """Update system locale config""" |
| 3549 | + LOG.debug('Updating %s with locale setting %s=%s', |
| 3550 | + sys_path, keyname, locale) |
| 3551 | util.subp( |
| 3552 | ['update-locale', '--locale-file=' + sys_path, |
| 3553 | '%s=%s' % (keyname, locale)], capture=False) |
| 3554 | |
| 3555 | + |
| 3556 | +def regenerate_locale(locale, sys_path, keyname='LANG'): |
| 3557 | + """ |
| 3558 | + Run locale-gen for the provided locale and set the default |
| 3559 | + system variable `keyname` appropriately in the provided `sys_path`. |
| 3560 | + |
| 3561 | + """ |
| 3562 | + # special case for locales which do not require regen |
| 3563 | + # % locale -a |
| 3564 | + # C |
| 3565 | + # C.UTF-8 |
| 3566 | + # POSIX |
| 3567 | + if locale.lower() in ['c', 'c.utf-8', 'posix']: |
| 3568 | + LOG.debug('%s=%s does not require regeneration', keyname, locale) |
| 3569 | + return |
| 3570 | + |
| 3571 | + # finally, trigger regeneration |
| 3572 | + LOG.debug('Generating locales for %s', locale) |
| 3573 | + util.subp(['locale-gen', locale], capture=False) |
| 3574 | + |
| 3575 | + |
| 3576 | # vi: ts=4 expandtab |
| 3577 | diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py |
| 3578 | new file mode 100644 |
| 3579 | index 0000000..a219e9f |
| 3580 | --- /dev/null |
| 3581 | +++ b/cloudinit/distros/opensuse.py |
| 3582 | @@ -0,0 +1,212 @@ |
| 3583 | +# Copyright (C) 2017 SUSE LLC |
| 3584 | +# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. |
| 3585 | +# |
| 3586 | +# Author: Robert Schweikert <rjschwei@suse.com> |
| 3587 | +# Author: Juerg Haefliger <juerg.haefliger@hp.com> |
| 3588 | +# |
| 3589 | +# Leaning very heavily on the RHEL and Debian implementation |
| 3590 | +# |
| 3591 | +# This file is part of cloud-init. See LICENSE file for license information. |
| 3592 | + |
| 3593 | +from cloudinit import distros |
| 3594 | + |
| 3595 | +from cloudinit.distros.parsers.hostname import HostnameConf |
| 3596 | + |
| 3597 | +from cloudinit import helpers |
| 3598 | +from cloudinit import log as logging |
| 3599 | +from cloudinit import util |
| 3600 | + |
| 3601 | +from cloudinit.distros import net_util |
| 3602 | +from cloudinit.distros import rhel_util as rhutil |
| 3603 | +from cloudinit.settings import PER_INSTANCE |
| 3604 | + |
| 3605 | +LOG = logging.getLogger(__name__) |
| 3606 | + |
| 3607 | + |
| 3608 | +class Distro(distros.Distro): |
| 3609 | + clock_conf_fn = '/etc/sysconfig/clock' |
| 3610 | + hostname_conf_fn = '/etc/HOSTNAME' |
| 3611 | + init_cmd = ['service'] |
| 3612 | + locale_conf_fn = '/etc/sysconfig/language' |
| 3613 | + network_conf_fn = '/etc/sysconfig/network' |
| 3614 | + network_script_tpl = '/etc/sysconfig/network/ifcfg-%s' |
| 3615 | + resolve_conf_fn = '/etc/resolv.conf' |
| 3616 | + route_conf_tpl = '/etc/sysconfig/network/ifroute-%s' |
| 3617 | + systemd_hostname_conf_fn = '/etc/hostname' |
| 3618 | + systemd_locale_conf_fn = '/etc/locale.conf' |
| 3619 | + tz_local_fn = '/etc/localtime' |
| 3620 | + |
| 3621 | + def __init__(self, name, cfg, paths): |
| 3622 | + distros.Distro.__init__(self, name, cfg, paths) |
| 3623 | + self._runner = helpers.Runners(paths) |
| 3624 | + self.osfamily = 'suse' |
| 3625 | + cfg['ssh_svcname'] = 'sshd' |
| 3626 | + if self.uses_systemd(): |
| 3627 | + self.init_cmd = ['systemctl'] |
| 3628 | + cfg['ssh_svcname'] = 'sshd.service' |
| 3629 | + |
| 3630 | + def apply_locale(self, locale, out_fn=None): |
| 3631 | + if self.uses_systemd(): |
| 3632 | + if not out_fn: |
| 3633 | + out_fn = self.systemd_locale_conf_fn |
| 3634 | + locale_cfg = {'LANG': locale} |
| 3635 | + else: |
| 3636 | + if not out_fn: |
| 3637 | + out_fn = self.locale_conf_fn |
| 3638 | + locale_cfg = {'RC_LANG': locale} |
| 3639 | + rhutil.update_sysconfig_file(out_fn, locale_cfg) |
| 3640 | + |
| 3641 | + def install_packages(self, pkglist): |
| 3642 | + self.package_command( |
| 3643 | + 'install', |
| 3644 | + args='--auto-agree-with-licenses', |
| 3645 | + pkgs=pkglist |
| 3646 | + ) |
| 3647 | + |
| 3648 | + def package_command(self, command, args=None, pkgs=None): |
| 3649 | + if pkgs is None: |
| 3650 | + pkgs = [] |
| 3651 | + |
| 3652 | + cmd = ['zypper'] |
| 3653 | + # No user interaction possible, enable non-interactive mode |
| 3654 | + cmd.append('--non-interactive') |
| 3655 | + |
| 3656 | + # Command is the operation, such as install |
| 3657 | + if command == 'upgrade': |
| 3658 | + command = 'update' |
| 3659 | + cmd.append(command) |
| 3660 | + |
| 3661 | + # args are the arguments to the command, not global options |
| 3662 | + if args and isinstance(args, str): |
| 3663 | + cmd.append(args) |
| 3664 | + elif args and isinstance(args, list): |
| 3665 | + cmd.extend(args) |
| 3666 | + |
| 3667 | + pkglist = util.expand_package_list('%s-%s', pkgs) |
| 3668 | + cmd.extend(pkglist) |
| 3669 | + |
| 3670 | + # Allow the output of this to flow outwards (ie not be captured) |
| 3671 | + util.subp(cmd, capture=False) |
| 3672 | + |
| 3673 | + def set_timezone(self, tz): |
| 3674 | + tz_file = self._find_tz_file(tz) |
| 3675 | + if self.uses_systemd(): |
| 3676 | + # Currently, timedatectl complains if invoked during startup |
| 3677 | + # so for compatibility, create the link manually. |
| 3678 | + util.del_file(self.tz_local_fn) |
| 3679 | + util.sym_link(tz_file, self.tz_local_fn) |
| 3680 | + else: |
| 3681 | + # Adjust the sysconfig clock zone setting |
| 3682 | + clock_cfg = { |
| 3683 | + 'TIMEZONE': str(tz), |
| 3684 | + } |
| 3685 | + rhutil.update_sysconfig_file(self.clock_conf_fn, clock_cfg) |
| 3686 | + # This ensures that the correct tz will be used for the system |
| 3687 | + util.copy(tz_file, self.tz_local_fn) |
| 3688 | + |
| 3689 | + def update_package_sources(self): |
| 3690 | + self._runner.run("update-sources", self.package_command, |
| 3691 | + ['refresh'], freq=PER_INSTANCE) |
| 3692 | + |
| 3693 | + def _bring_up_interfaces(self, device_names): |
| 3694 | + if device_names and 'all' in device_names: |
| 3695 | + raise RuntimeError(('Distro %s can not translate ' |
| 3696 | + 'the device name "all"') % (self.name)) |
| 3697 | + return distros.Distro._bring_up_interfaces(self, device_names) |
| 3698 | + |
| 3699 | + def _read_hostname(self, filename, default=None): |
| 3700 | + if self.uses_systemd() and filename.endswith('/previous-hostname'): |
| 3701 | + return util.load_file(filename).strip() |
| 3702 | + elif self.uses_systemd(): |
| 3703 | + (out, _err) = util.subp(['hostname']) |
| 3704 | + if len(out): |
| 3705 | + return out |
| 3706 | + else: |
| 3707 | + return default |
| 3708 | + else: |
| 3709 | + try: |
| 3710 | + conf = self._read_hostname_conf(filename) |
| 3711 | + hostname = conf.hostname |
| 3712 | + except IOError: |
| 3713 | + pass |
| 3714 | + if not hostname: |
| 3715 | + return default |
| 3716 | + return hostname |
| 3717 | + |
| 3718 | + def _read_hostname_conf(self, filename): |
| 3719 | + conf = HostnameConf(util.load_file(filename)) |
| 3720 | + conf.parse() |
| 3721 | + return conf |
| 3722 | + |
| 3723 | + def _read_system_hostname(self): |
| 3724 | + if self.uses_systemd(): |
| 3725 | + host_fn = self.systemd_hostname_conf_fn |
| 3726 | + else: |
| 3727 | + host_fn = self.hostname_conf_fn |
| 3728 | + return (host_fn, self._read_hostname(host_fn)) |
| 3729 | + |
| 3730 | + def _write_hostname(self, hostname, out_fn): |
| 3731 | + if self.uses_systemd() and out_fn.endswith('/previous-hostname'): |
| 3732 | + util.write_file(out_fn, hostname) |
| 3733 | + elif self.uses_systemd(): |
| 3734 | + util.subp(['hostnamectl', 'set-hostname', str(hostname)]) |
| 3735 | + else: |
| 3736 | + conf = None |
| 3737 | + try: |
| 3738 | + # Try to update the previous one |
| 3739 | + # so lets see if we can read it first. |
| 3740 | + conf = self._read_hostname_conf(out_fn) |
| 3741 | + except IOError: |
| 3742 | + pass |
| 3743 | + if not conf: |
| 3744 | + conf = HostnameConf('') |
| 3745 | + conf.set_hostname(hostname) |
| 3746 | + util.write_file(out_fn, str(conf), 0o644) |
| 3747 | + |
| 3748 | + def _write_network(self, settings): |
| 3749 | + # Convert debian settings to ifcfg format |
| 3750 | + entries = net_util.translate_network(settings) |
| 3751 | + LOG.debug("Translated ubuntu style network settings %s into %s", |
| 3752 | + settings, entries) |
| 3753 | + # Convert the intermediate format into the suse (sysconfig) format... |
| 3754 | + nameservers = [] |
| 3755 | + searchservers = [] |
| 3756 | + dev_names = entries.keys() |
| 3757 | + for (dev, info) in entries.items(): |
| 3758 | + net_fn = self.network_script_tpl % (dev) |
| 3759 | + route_fn = self.route_conf_tpl % (dev) |
| 3760 | + mode = None |
| 3761 | + if info.get('auto', None): |
| 3762 | + mode = 'auto' |
| 3763 | + else: |
| 3764 | + mode = 'manual' |
| 3765 | + bootproto = info.get('bootproto', None) |
| 3766 | + gateway = info.get('gateway', None) |
| 3767 | + net_cfg = { |
| 3768 | + 'BOOTPROTO': bootproto, |
| 3769 | + 'BROADCAST': info.get('broadcast'), |
| 3770 | + 'GATEWAY': gateway, |
| 3771 | + 'IPADDR': info.get('address'), |
| 3772 | + 'LLADDR': info.get('hwaddress'), |
| 3773 | + 'NETMASK': info.get('netmask'), |
| 3774 | + 'STARTMODE': mode, |
| 3775 | + 'USERCONTROL': 'no' |
| 3776 | + } |
| 3777 | + if dev != 'lo': |
| 3778 | + net_cfg['ETHTOOL_OPTIONS'] = '' |
| 3779 | + else: |
| 3780 | + net_cfg['FIREWALL'] = 'no' |
| 3781 | + rhutil.update_sysconfig_file(net_fn, net_cfg, True) |
| 3782 | + if gateway and bootproto == 'static': |
| 3783 | + default_route = 'default %s' % gateway |
| 3784 | + util.write_file(route_fn, default_route, 0o644) |
| 3785 | + if 'dns-nameservers' in info: |
| 3786 | + nameservers.extend(info['dns-nameservers']) |
| 3787 | + if 'dns-search' in info: |
| 3788 | + searchservers.extend(info['dns-search']) |
| 3789 | + if nameservers or searchservers: |
| 3790 | + rhutil.update_resolve_conf_file(self.resolve_conf_fn, |
| 3791 | + nameservers, searchservers) |
| 3792 | + return dev_names |
| 3793 | + |
| 3794 | +# vi: ts=4 expandtab |
| 3795 | diff --git a/cloudinit/distros/sles.py b/cloudinit/distros/sles.py |
| 3796 | index dbec2ed..6e336cb 100644 |
| 3797 | --- a/cloudinit/distros/sles.py |
| 3798 | +++ b/cloudinit/distros/sles.py |
| 3799 | @@ -1,167 +1,17 @@ |
| 3800 | -# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. |
| 3801 | +# Copyright (C) 2017 SUSE LLC |
| 3802 | # |
| 3803 | -# Author: Juerg Haefliger <juerg.haefliger@hp.com> |
| 3804 | +# Author: Robert Schweikert <rjschwei@suse.com> |
| 3805 | # |
| 3806 | # This file is part of cloud-init. See LICENSE file for license information. |
| 3807 | |
| 3808 | -from cloudinit import distros |
| 3809 | +from cloudinit.distros import opensuse |
| 3810 | |
| 3811 | -from cloudinit.distros.parsers.hostname import HostnameConf |
| 3812 | - |
| 3813 | -from cloudinit import helpers |
| 3814 | from cloudinit import log as logging |
| 3815 | -from cloudinit import util |
| 3816 | - |
| 3817 | -from cloudinit.distros import net_util |
| 3818 | -from cloudinit.distros import rhel_util |
| 3819 | -from cloudinit.settings import PER_INSTANCE |
| 3820 | |
| 3821 | LOG = logging.getLogger(__name__) |
| 3822 | |
| 3823 | |
| 3824 | -class Distro(distros.Distro): |
| 3825 | - clock_conf_fn = '/etc/sysconfig/clock' |
| 3826 | - locale_conf_fn = '/etc/sysconfig/language' |
| 3827 | - network_conf_fn = '/etc/sysconfig/network' |
| 3828 | - hostname_conf_fn = '/etc/HOSTNAME' |
| 3829 | - network_script_tpl = '/etc/sysconfig/network/ifcfg-%s' |
| 3830 | - resolve_conf_fn = '/etc/resolv.conf' |
| 3831 | - tz_local_fn = '/etc/localtime' |
| 3832 | - |
| 3833 | - def __init__(self, name, cfg, paths): |
| 3834 | - distros.Distro.__init__(self, name, cfg, paths) |
| 3835 | - # This will be used to restrict certain |
| 3836 | - # calls from repeatly happening (when they |
| 3837 | - # should only happen say once per instance...) |
| 3838 | - self._runner = helpers.Runners(paths) |
| 3839 | - self.osfamily = 'suse' |
| 3840 | - |
| 3841 | - def install_packages(self, pkglist): |
| 3842 | - self.package_command('install', args='-l', pkgs=pkglist) |
| 3843 | - |
| 3844 | - def _write_network(self, settings): |
| 3845 | - # Convert debian settings to ifcfg format |
| 3846 | - entries = net_util.translate_network(settings) |
| 3847 | - LOG.debug("Translated ubuntu style network settings %s into %s", |
| 3848 | - settings, entries) |
| 3849 | - # Make the intermediate format as the suse format... |
| 3850 | - nameservers = [] |
| 3851 | - searchservers = [] |
| 3852 | - dev_names = entries.keys() |
| 3853 | - for (dev, info) in entries.items(): |
| 3854 | - net_fn = self.network_script_tpl % (dev) |
| 3855 | - mode = info.get('auto') |
| 3856 | - if mode and mode.lower() == 'true': |
| 3857 | - mode = 'auto' |
| 3858 | - else: |
| 3859 | - mode = 'manual' |
| 3860 | - net_cfg = { |
| 3861 | - 'BOOTPROTO': info.get('bootproto'), |
| 3862 | - 'BROADCAST': info.get('broadcast'), |
| 3863 | - 'GATEWAY': info.get('gateway'), |
| 3864 | - 'IPADDR': info.get('address'), |
| 3865 | - 'LLADDR': info.get('hwaddress'), |
| 3866 | - 'NETMASK': info.get('netmask'), |
| 3867 | - 'STARTMODE': mode, |
| 3868 | - 'USERCONTROL': 'no' |
| 3869 | - } |
| 3870 | - if dev != 'lo': |
| 3871 | - net_cfg['ETHERDEVICE'] = dev |
| 3872 | - net_cfg['ETHTOOL_OPTIONS'] = '' |
| 3873 | - else: |
| 3874 | - net_cfg['FIREWALL'] = 'no' |
| 3875 | - rhel_util.update_sysconfig_file(net_fn, net_cfg, True) |
| 3876 | - if 'dns-nameservers' in info: |
| 3877 | - nameservers.extend(info['dns-nameservers']) |
| 3878 | - if 'dns-search' in info: |
| 3879 | - searchservers.extend(info['dns-search']) |
| 3880 | - if nameservers or searchservers: |
| 3881 | - rhel_util.update_resolve_conf_file(self.resolve_conf_fn, |
| 3882 | - nameservers, searchservers) |
| 3883 | - return dev_names |
| 3884 | - |
| 3885 | - def apply_locale(self, locale, out_fn=None): |
| 3886 | - if not out_fn: |
| 3887 | - out_fn = self.locale_conf_fn |
| 3888 | - locale_cfg = { |
| 3889 | - 'RC_LANG': locale, |
| 3890 | - } |
| 3891 | - rhel_util.update_sysconfig_file(out_fn, locale_cfg) |
| 3892 | - |
| 3893 | - def _write_hostname(self, hostname, out_fn): |
| 3894 | - conf = None |
| 3895 | - try: |
| 3896 | - # Try to update the previous one |
| 3897 | - # so lets see if we can read it first. |
| 3898 | - conf = self._read_hostname_conf(out_fn) |
| 3899 | - except IOError: |
| 3900 | - pass |
| 3901 | - if not conf: |
| 3902 | - conf = HostnameConf('') |
| 3903 | - conf.set_hostname(hostname) |
| 3904 | - util.write_file(out_fn, str(conf), 0o644) |
| 3905 | - |
| 3906 | - def _read_system_hostname(self): |
| 3907 | - host_fn = self.hostname_conf_fn |
| 3908 | - return (host_fn, self._read_hostname(host_fn)) |
| 3909 | - |
| 3910 | - def _read_hostname_conf(self, filename): |
| 3911 | - conf = HostnameConf(util.load_file(filename)) |
| 3912 | - conf.parse() |
| 3913 | - return conf |
| 3914 | - |
| 3915 | - def _read_hostname(self, filename, default=None): |
| 3916 | - hostname = None |
| 3917 | - try: |
| 3918 | - conf = self._read_hostname_conf(filename) |
| 3919 | - hostname = conf.hostname |
| 3920 | - except IOError: |
| 3921 | - pass |
| 3922 | - if not hostname: |
| 3923 | - return default |
| 3924 | - return hostname |
| 3925 | - |
| 3926 | - def _bring_up_interfaces(self, device_names): |
| 3927 | - if device_names and 'all' in device_names: |
| 3928 | - raise RuntimeError(('Distro %s can not translate ' |
| 3929 | - 'the device name "all"') % (self.name)) |
| 3930 | - return distros.Distro._bring_up_interfaces(self, device_names) |
| 3931 | - |
| 3932 | - def set_timezone(self, tz): |
| 3933 | - tz_file = self._find_tz_file(tz) |
| 3934 | - # Adjust the sysconfig clock zone setting |
| 3935 | - clock_cfg = { |
| 3936 | - 'TIMEZONE': str(tz), |
| 3937 | - } |
| 3938 | - rhel_util.update_sysconfig_file(self.clock_conf_fn, clock_cfg) |
| 3939 | - # This ensures that the correct tz will be used for the system |
| 3940 | - util.copy(tz_file, self.tz_local_fn) |
| 3941 | - |
| 3942 | - def package_command(self, command, args=None, pkgs=None): |
| 3943 | - if pkgs is None: |
| 3944 | - pkgs = [] |
| 3945 | - |
| 3946 | - cmd = ['zypper'] |
| 3947 | - # No user interaction possible, enable non-interactive mode |
| 3948 | - cmd.append('--non-interactive') |
| 3949 | - |
| 3950 | - # Comand is the operation, such as install |
| 3951 | - cmd.append(command) |
| 3952 | - |
| 3953 | - # args are the arguments to the command, not global options |
| 3954 | - if args and isinstance(args, str): |
| 3955 | - cmd.append(args) |
| 3956 | - elif args and isinstance(args, list): |
| 3957 | - cmd.extend(args) |
| 3958 | - |
| 3959 | - pkglist = util.expand_package_list('%s-%s', pkgs) |
| 3960 | - cmd.extend(pkglist) |
| 3961 | - |
| 3962 | - # Allow the output of this to flow outwards (ie not be captured) |
| 3963 | - util.subp(cmd, capture=False) |
| 3964 | - |
| 3965 | - def update_package_sources(self): |
| 3966 | - self._runner.run("update-sources", self.package_command, |
| 3967 | - ['refresh'], freq=PER_INSTANCE) |
| 3968 | +class Distro(opensuse.Distro): |
| 3969 | + pass |
| 3970 | |
| 3971 | # vi: ts=4 expandtab |
| 3972 | diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py |
| 3973 | index f01021a..1979cd9 100644 |
| 3974 | --- a/cloudinit/helpers.py |
| 3975 | +++ b/cloudinit/helpers.py |
| 3976 | @@ -13,7 +13,7 @@ from time import time |
| 3977 | import contextlib |
| 3978 | import os |
| 3979 | |
| 3980 | -import six |
| 3981 | +from six import StringIO |
| 3982 | from six.moves.configparser import ( |
| 3983 | NoSectionError, NoOptionError, RawConfigParser) |
| 3984 | |
| 3985 | @@ -441,12 +441,12 @@ class DefaultingConfigParser(RawConfigParser): |
| 3986 | |
| 3987 | def stringify(self, header=None): |
| 3988 | contents = '' |
| 3989 | - with six.StringIO() as outputstream: |
| 3990 | - self.write(outputstream) |
| 3991 | - outputstream.flush() |
| 3992 | - contents = outputstream.getvalue() |
| 3993 | - if header: |
| 3994 | - contents = "\n".join([header, contents]) |
| 3995 | + outputstream = StringIO() |
| 3996 | + self.write(outputstream) |
| 3997 | + outputstream.flush() |
| 3998 | + contents = outputstream.getvalue() |
| 3999 | + if header: |
| 4000 | + contents = '\n'.join([header, contents, '']) |
| 4001 | return contents |
| 4002 | |
| 4003 | # vi: ts=4 expandtab |
| 4004 | diff --git a/cloudinit/log.py b/cloudinit/log.py |
| 4005 | index 3861709..1d75c9f 100644 |
| 4006 | --- a/cloudinit/log.py |
| 4007 | +++ b/cloudinit/log.py |
| 4008 | @@ -19,6 +19,8 @@ import sys |
| 4009 | import six |
| 4010 | from six import StringIO |
| 4011 | |
| 4012 | +import time |
| 4013 | + |
| 4014 | # Logging levels for easy access |
| 4015 | CRITICAL = logging.CRITICAL |
| 4016 | FATAL = logging.FATAL |
| 4017 | @@ -32,6 +34,9 @@ NOTSET = logging.NOTSET |
| 4018 | # Default basic format |
| 4019 | DEF_CON_FORMAT = '%(asctime)s - %(filename)s[%(levelname)s]: %(message)s' |
| 4020 | |
| 4021 | +# Always format logging timestamps as UTC time |
| 4022 | +logging.Formatter.converter = time.gmtime |
| 4023 | + |
| 4024 | |
| 4025 | def setupBasicLogging(level=DEBUG): |
| 4026 | root = logging.getLogger() |
| 4027 | diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py |
| 4028 | index 46cb9c8..a1b0db1 100644 |
| 4029 | --- a/cloudinit/net/__init__.py |
| 4030 | +++ b/cloudinit/net/__init__.py |
| 4031 | @@ -175,13 +175,8 @@ def is_disabled_cfg(cfg): |
| 4032 | return cfg.get('config') == "disabled" |
| 4033 | |
| 4034 | |
| 4035 | -def generate_fallback_config(blacklist_drivers=None, config_driver=None): |
| 4036 | - """Determine which attached net dev is most likely to have a connection and |
| 4037 | - generate network state to run dhcp on that interface""" |
| 4038 | - |
| 4039 | - if not config_driver: |
| 4040 | - config_driver = False |
| 4041 | - |
| 4042 | +def find_fallback_nic(blacklist_drivers=None): |
| 4043 | + """Return the name of the 'fallback' network device.""" |
| 4044 | if not blacklist_drivers: |
| 4045 | blacklist_drivers = [] |
| 4046 | |
| 4047 | @@ -233,15 +228,24 @@ def generate_fallback_config(blacklist_drivers=None, config_driver=None): |
| 4048 | if DEFAULT_PRIMARY_INTERFACE in names: |
| 4049 | names.remove(DEFAULT_PRIMARY_INTERFACE) |
| 4050 | names.insert(0, DEFAULT_PRIMARY_INTERFACE) |
| 4051 | - target_name = None |
| 4052 | - target_mac = None |
| 4053 | + |
| 4054 | + # pick the first that has a mac-address |
| 4055 | for name in names: |
| 4056 | - mac = read_sys_net_safe(name, 'address') |
| 4057 | - if mac: |
| 4058 | - target_name = name |
| 4059 | - target_mac = mac |
| 4060 | - break |
| 4061 | - if target_mac and target_name: |
| 4062 | + if read_sys_net_safe(name, 'address'): |
| 4063 | + return name |
| 4064 | + return None |
| 4065 | + |
| 4066 | + |
| 4067 | +def generate_fallback_config(blacklist_drivers=None, config_driver=None): |
| 4068 | + """Determine which attached net dev is most likely to have a connection and |
| 4069 | + generate network state to run dhcp on that interface""" |
| 4070 | + |
| 4071 | + if not config_driver: |
| 4072 | + config_driver = False |
| 4073 | + |
| 4074 | + target_name = find_fallback_nic(blacklist_drivers=blacklist_drivers) |
| 4075 | + if target_name: |
| 4076 | + target_mac = read_sys_net_safe(target_name, 'address') |
| 4077 | nconf = {'config': [], 'version': 1} |
| 4078 | cfg = {'type': 'physical', 'name': target_name, |
| 4079 | 'mac_address': target_mac, 'subnets': [{'type': 'dhcp'}]} |
| 4080 | @@ -511,21 +515,7 @@ def get_interfaces_by_mac(): |
| 4081 | |
| 4082 | Bridges and any devices that have a 'stolen' mac are excluded.""" |
| 4083 | ret = {} |
| 4084 | - devs = get_devicelist() |
| 4085 | - empty_mac = '00:00:00:00:00:00' |
| 4086 | - for name in devs: |
| 4087 | - if not interface_has_own_mac(name): |
| 4088 | - continue |
| 4089 | - if is_bridge(name): |
| 4090 | - continue |
| 4091 | - if is_vlan(name): |
| 4092 | - continue |
| 4093 | - mac = get_interface_mac(name) |
| 4094 | - # some devices may not have a mac (tun0) |
| 4095 | - if not mac: |
| 4096 | - continue |
| 4097 | - if mac == empty_mac and name != 'lo': |
| 4098 | - continue |
| 4099 | + for name, mac, _driver, _devid in get_interfaces(): |
| 4100 | if mac in ret: |
| 4101 | raise RuntimeError( |
| 4102 | "duplicate mac found! both '%s' and '%s' have mac '%s'" % |
| 4103 | @@ -599,6 +589,7 @@ class EphemeralIPv4Network(object): |
| 4104 | self._bringup_router() |
| 4105 | |
| 4106 | def __exit__(self, excp_type, excp_value, excp_traceback): |
| 4107 | + """Teardown anything we set up.""" |
| 4108 | for cmd in self.cleanup_cmds: |
| 4109 | util.subp(cmd, capture=True) |
| 4110 | |
| 4111 | diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py |
| 4112 | new file mode 100644 |
| 4113 | index 0000000..0cba703 |
| 4114 | --- /dev/null |
| 4115 | +++ b/cloudinit/net/dhcp.py |
| 4116 | @@ -0,0 +1,163 @@ |
| 4117 | +# Copyright (C) 2017 Canonical Ltd. |
| 4118 | +# |
| 4119 | +# Author: Chad Smith <chad.smith@canonical.com> |
| 4120 | +# |
| 4121 | +# This file is part of cloud-init. See LICENSE file for license information. |
| 4122 | + |
| 4123 | +import configobj |
| 4124 | +import logging |
| 4125 | +import os |
| 4126 | +import re |
| 4127 | + |
| 4128 | +from cloudinit.net import find_fallback_nic, get_devicelist |
| 4129 | +from cloudinit import temp_utils |
| 4130 | +from cloudinit import util |
| 4131 | +from six import StringIO |
| 4132 | + |
| 4133 | +LOG = logging.getLogger(__name__) |
| 4134 | + |
| 4135 | +NETWORKD_LEASES_DIR = '/run/systemd/netif/leases' |
| 4136 | + |
| 4137 | + |
| 4138 | +class InvalidDHCPLeaseFileError(Exception): |
| 4139 | + """Raised when parsing an empty or invalid dhcp.leases file. |
| 4140 | + |
| 4141 | + Current uses are DataSourceAzure and DataSourceEc2 during ephemeral |
| 4142 | + boot to scrape metadata. |
| 4143 | + """ |
| 4144 | + pass |
| 4145 | + |
| 4146 | + |
| 4147 | +def maybe_perform_dhcp_discovery(nic=None): |
| 4148 | + """Perform dhcp discovery if nic valid and dhclient command exists. |
| 4149 | + |
| 4150 | + If the nic is invalid or undiscoverable or dhclient command is not found, |
| 4151 | + skip dhcp_discovery and return an empty dict. |
| 4152 | + |
| 4153 | + @param nic: Name of the network interface we want to run dhclient on. |
| 4154 | + @return: A dict of dhcp options from the dhclient discovery if run, |
| 4155 | + otherwise an empty dict is returned. |
| 4156 | + """ |
| 4157 | + if nic is None: |
| 4158 | + nic = find_fallback_nic() |
| 4159 | + if nic is None: |
| 4160 | + LOG.debug( |
| 4161 | + 'Skip dhcp_discovery: Unable to find fallback nic.') |
| 4162 | + return {} |
| 4163 | + elif nic not in get_devicelist(): |
| 4164 | + LOG.debug( |
| 4165 | + 'Skip dhcp_discovery: nic %s not found in get_devicelist.', nic) |
| 4166 | + return {} |
| 4167 | + dhclient_path = util.which('dhclient') |
| 4168 | + if not dhclient_path: |
| 4169 | + LOG.debug('Skip dhclient configuration: No dhclient command found.') |
| 4170 | + return {} |
| 4171 | + with temp_utils.tempdir(prefix='cloud-init-dhcp-', needs_exe=True) as tdir: |
| 4172 | + # Use /var/tmp because /run/cloud-init/tmp is mounted noexec |
| 4173 | + return dhcp_discovery(dhclient_path, nic, tdir) |
| 4174 | + |
| 4175 | + |
| 4176 | +def parse_dhcp_lease_file(lease_file): |
| 4177 | + """Parse the given dhcp lease file for the most recent lease. |
| 4178 | + |
| 4179 | + Return a dict of dhcp options as key value pairs for the most recent lease |
| 4180 | + block. |
| 4181 | + |
| 4182 | + @raises: InvalidDHCPLeaseFileError on empty or unparseable leasefile |
| 4183 | + content. |
| 4184 | + """ |
| 4185 | + lease_regex = re.compile(r"lease {(?P<lease>[^}]*)}\n") |
| 4186 | + dhcp_leases = [] |
| 4187 | + lease_content = util.load_file(lease_file) |
| 4188 | + if len(lease_content) == 0: |
| 4189 | + raise InvalidDHCPLeaseFileError( |
| 4190 | + 'Cannot parse empty dhcp lease file {0}'.format(lease_file)) |
| 4191 | + for lease in lease_regex.findall(lease_content): |
| 4192 | + lease_options = [] |
| 4193 | + for line in lease.split(';'): |
| 4194 | + # Strip newlines, double-quotes and option prefix |
| 4195 | + line = line.strip().replace('"', '').replace('option ', '') |
| 4196 | + if not line: |
| 4197 | + continue |
| 4198 | + lease_options.append(line.split(' ', 1)) |
| 4199 | + dhcp_leases.append(dict(lease_options)) |
| 4200 | + if not dhcp_leases: |
| 4201 | + raise InvalidDHCPLeaseFileError( |
| 4202 | + 'Cannot parse dhcp lease file {0}. No leases found'.format( |
| 4203 | + lease_file)) |
| 4204 | + return dhcp_leases |
| 4205 | + |
| 4206 | + |
| 4207 | +def dhcp_discovery(dhclient_cmd_path, interface, cleandir): |
| 4208 | + """Run dhclient on the interface without scripts or filesystem artifacts. |
| 4209 | + |
| 4210 | + @param dhclient_cmd_path: Full path to the dhclient used. |
| 4211 | + @param interface: Name of the network interface on which to dhclient. |
| 4212 | + @param cleandir: The directory from which to run dhclient as well as store |
| 4213 | + dhcp leases. |
| 4214 | + |
| 4215 | + @return: A dict of dhcp options parsed from the dhcp.leases file or empty |
| 4216 | + dict. |
| 4217 | + """ |
| 4218 | + LOG.debug('Performing a dhcp discovery on %s', interface) |
| 4219 | + |
| 4220 | + # XXX We copy dhclient out of /sbin/dhclient to avoid dealing with strict |
| 4221 | + # app armor profiles which disallow running dhclient -sf <our-script-file>. |
| 4222 | + # We want to avoid running /sbin/dhclient-script because of side-effects in |
| 4223 | + # /etc/resolv.conf and any other vendor specific scripts in |
| 4224 | + # /etc/dhcp/dhclient*hooks.d. |
| 4225 | + sandbox_dhclient_cmd = os.path.join(cleandir, 'dhclient') |
| 4226 | + util.copy(dhclient_cmd_path, sandbox_dhclient_cmd) |
| 4227 | + pid_file = os.path.join(cleandir, 'dhclient.pid') |
| 4228 | + lease_file = os.path.join(cleandir, 'dhcp.leases') |
| 4229 | + |
| 4230 | + # ISC dhclient needs the interface up to send initial discovery packets. |
| 4231 | + # Generally dhclient relies on dhclient-script PREINIT action to bring the |
| 4232 | + # link up before attempting discovery. Since we are using -sf /bin/true, |
| 4233 | + # we need to do that "link up" ourselves first. |
| 4234 | + util.subp(['ip', 'link', 'set', 'dev', interface, 'up'], capture=True) |
| 4235 | + cmd = [sandbox_dhclient_cmd, '-1', '-v', '-lf', lease_file, |
| 4236 | + '-pf', pid_file, interface, '-sf', '/bin/true'] |
| 4237 | + util.subp(cmd, capture=True) |
| 4238 | + return parse_dhcp_lease_file(lease_file) |
| 4239 | + |
| 4240 | + |
| 4241 | +def networkd_parse_lease(content): |
| 4242 | + """Parse a systemd lease file content as in /run/systemd/netif/leases/ |
| 4243 | + |
| 4244 | + Parse this (almost) ini style file even though it says: |
| 4245 | + # This is private data. Do not parse. |
| 4246 | + |
| 4247 | + Simply return a dictionary of key/values.""" |
| 4248 | + |
| 4249 | + return dict(configobj.ConfigObj(StringIO(content), list_values=False)) |
| 4250 | + |
| 4251 | + |
| 4252 | +def networkd_load_leases(leases_d=None): |
| 4253 | + """Return a dictionary of dictionaries representing each lease |
| 4254 | + found in leases_d. |
| 4255 | + |
| 4256 | + The top level key will be the filename, which is typically the ifindex.""" |
| 4257 | + |
| 4258 | + if leases_d is None: |
| 4259 | + leases_d = NETWORKD_LEASES_DIR |
| 4260 | + |
| 4261 | + ret = {} |
| 4262 | + if not os.path.isdir(leases_d): |
| 4263 | + return ret |
| 4264 | + for lfile in os.listdir(leases_d): |
| 4265 | + ret[lfile] = networkd_parse_lease( |
| 4266 | + util.load_file(os.path.join(leases_d, lfile))) |
| 4267 | + return ret |
| 4268 | + |
| 4269 | + |
| 4270 | +def networkd_get_option_from_leases(keyname, leases_d=None): |
| 4271 | + if leases_d is None: |
| 4272 | + leases_d = NETWORKD_LEASES_DIR |
| 4273 | + leases = networkd_load_leases(leases_d=leases_d) |
| 4274 | + for ifindex, data in sorted(leases.items()): |
| 4275 | + if data.get(keyname): |
| 4276 | + return data[keyname] |
| 4277 | + return None |
| 4278 | + |
| 4279 | +# vi: ts=4 expandtab |
| 4280 | diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py |
| 4281 | index bb80ec0..c6a71d1 100644 |
| 4282 | --- a/cloudinit/net/eni.py |
| 4283 | +++ b/cloudinit/net/eni.py |
| 4284 | @@ -95,6 +95,9 @@ def _iface_add_attrs(iface, index): |
| 4285 | ignore_map.append('mac_address') |
| 4286 | |
| 4287 | for key, value in iface.items(): |
| 4288 | + # convert bool to string for eni |
| 4289 | + if type(value) == bool: |
| 4290 | + value = 'on' if iface[key] else 'off' |
| 4291 | if not value or key in ignore_map: |
| 4292 | continue |
| 4293 | if key in multiline_keys: |
| 4294 | diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py |
| 4295 | index 9f35b72..d3788af 100644 |
| 4296 | --- a/cloudinit/net/netplan.py |
| 4297 | +++ b/cloudinit/net/netplan.py |
| 4298 | @@ -4,7 +4,7 @@ import copy |
| 4299 | import os |
| 4300 | |
| 4301 | from . import renderer |
| 4302 | -from .network_state import subnet_is_ipv6 |
| 4303 | +from .network_state import subnet_is_ipv6, NET_CONFIG_TO_V2 |
| 4304 | |
| 4305 | from cloudinit import log as logging |
| 4306 | from cloudinit import util |
| 4307 | @@ -27,31 +27,6 @@ network: |
| 4308 | """ |
| 4309 | |
| 4310 | LOG = logging.getLogger(__name__) |
| 4311 | -NET_CONFIG_TO_V2 = { |
| 4312 | - 'bond': {'bond-ad-select': 'ad-select', |
| 4313 | - 'bond-arp-interval': 'arp-interval', |
| 4314 | - 'bond-arp-ip-target': 'arp-ip-target', |
| 4315 | - 'bond-arp-validate': 'arp-validate', |
| 4316 | - 'bond-downdelay': 'down-delay', |
| 4317 | - 'bond-fail-over-mac': 'fail-over-mac-policy', |
| 4318 | - 'bond-lacp-rate': 'lacp-rate', |
| 4319 | - 'bond-miimon': 'mii-monitor-interval', |
| 4320 | - 'bond-min-links': 'min-links', |
| 4321 | - 'bond-mode': 'mode', |
| 4322 | - 'bond-num-grat-arp': 'gratuitious-arp', |
| 4323 | - 'bond-primary-reselect': 'primary-reselect-policy', |
| 4324 | - 'bond-updelay': 'up-delay', |
| 4325 | - 'bond-xmit-hash-policy': 'transmit-hash-policy'}, |
| 4326 | - 'bridge': {'bridge_ageing': 'ageing-time', |
| 4327 | - 'bridge_bridgeprio': 'priority', |
| 4328 | - 'bridge_fd': 'forward-delay', |
| 4329 | - 'bridge_gcint': None, |
| 4330 | - 'bridge_hello': 'hello-time', |
| 4331 | - 'bridge_maxage': 'max-age', |
| 4332 | - 'bridge_maxwait': None, |
| 4333 | - 'bridge_pathcost': 'path-cost', |
| 4334 | - 'bridge_portprio': None, |
| 4335 | - 'bridge_waitport': None}} |
| 4336 | |
| 4337 | |
| 4338 | def _get_params_dict_by_match(config, match): |
| 4339 | @@ -247,6 +222,14 @@ class Renderer(renderer.Renderer): |
| 4340 | util.subp(cmd, capture=True) |
| 4341 | |
| 4342 | def _render_content(self, network_state): |
| 4343 | + |
| 4344 | + # if content already in netplan format, pass it back |
| 4345 | + if network_state.version == 2: |
| 4346 | + LOG.debug('V2 to V2 passthrough') |
| 4347 | + return util.yaml_dumps({'network': network_state.config}, |
| 4348 | + explicit_start=False, |
| 4349 | + explicit_end=False) |
| 4350 | + |
| 4351 | ethernets = {} |
| 4352 | wifis = {} |
| 4353 | bridges = {} |
| 4354 | @@ -261,9 +244,9 @@ class Renderer(renderer.Renderer): |
| 4355 | |
| 4356 | for config in network_state.iter_interfaces(): |
| 4357 | ifname = config.get('name') |
| 4358 | - # filter None entries up front so we can do simple if key in dict |
| 4359 | + # filter None (but not False) entries up front |
| 4360 | ifcfg = dict((key, value) for (key, value) in config.items() |
| 4361 | - if value) |
| 4362 | + if value is not None) |
| 4363 | |
| 4364 | if_type = ifcfg.get('type') |
| 4365 | if if_type == 'physical': |
| 4366 | @@ -335,6 +318,7 @@ class Renderer(renderer.Renderer): |
| 4367 | (port, cost) = costval.split() |
| 4368 | newvalue[port] = int(cost) |
| 4369 | br_config.update({newname: newvalue}) |
| 4370 | + |
| 4371 | if len(br_config) > 0: |
| 4372 | bridge.update({'parameters': br_config}) |
| 4373 | _extract_addresses(ifcfg, bridge) |
| 4374 | diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py |
| 4375 | index 87a7222..0e830ee 100644 |
| 4376 | --- a/cloudinit/net/network_state.py |
| 4377 | +++ b/cloudinit/net/network_state.py |
| 4378 | @@ -23,6 +23,34 @@ NETWORK_V2_KEY_FILTER = [ |
| 4379 | 'match', 'mtu', 'nameservers', 'renderer', 'set-name', 'wakeonlan' |
| 4380 | ] |
| 4381 | |
| 4382 | +NET_CONFIG_TO_V2 = { |
| 4383 | + 'bond': {'bond-ad-select': 'ad-select', |
| 4384 | + 'bond-arp-interval': 'arp-interval', |
| 4385 | + 'bond-arp-ip-target': 'arp-ip-target', |
| 4386 | + 'bond-arp-validate': 'arp-validate', |
| 4387 | + 'bond-downdelay': 'down-delay', |
| 4388 | + 'bond-fail-over-mac': 'fail-over-mac-policy', |
| 4389 | + 'bond-lacp-rate': 'lacp-rate', |
| 4390 | + 'bond-miimon': 'mii-monitor-interval', |
| 4391 | + 'bond-min-links': 'min-links', |
| 4392 | + 'bond-mode': 'mode', |
| 4393 | + 'bond-num-grat-arp': 'gratuitious-arp', |
| 4394 | + 'bond-primary': 'primary', |
| 4395 | + 'bond-primary-reselect': 'primary-reselect-policy', |
| 4396 | + 'bond-updelay': 'up-delay', |
| 4397 | + 'bond-xmit-hash-policy': 'transmit-hash-policy'}, |
| 4398 | + 'bridge': {'bridge_ageing': 'ageing-time', |
| 4399 | + 'bridge_bridgeprio': 'priority', |
| 4400 | + 'bridge_fd': 'forward-delay', |
| 4401 | + 'bridge_gcint': None, |
| 4402 | + 'bridge_hello': 'hello-time', |
| 4403 | + 'bridge_maxage': 'max-age', |
| 4404 | + 'bridge_maxwait': None, |
| 4405 | + 'bridge_pathcost': 'path-cost', |
| 4406 | + 'bridge_portprio': None, |
| 4407 | + 'bridge_stp': 'stp', |
| 4408 | + 'bridge_waitport': None}} |
| 4409 | + |
| 4410 | |
| 4411 | def parse_net_config_data(net_config, skip_broken=True): |
| 4412 | """Parses the config, returns NetworkState object |
| 4413 | @@ -120,6 +148,10 @@ class NetworkState(object): |
| 4414 | self.use_ipv6 = network_state.get('use_ipv6', False) |
| 4415 | |
| 4416 | @property |
| 4417 | + def config(self): |
| 4418 | + return self._network_state['config'] |
| 4419 | + |
| 4420 | + @property |
| 4421 | def version(self): |
| 4422 | return self._version |
| 4423 | |
| 4424 | @@ -166,12 +198,14 @@ class NetworkStateInterpreter(object): |
| 4425 | 'search': [], |
| 4426 | }, |
| 4427 | 'use_ipv6': False, |
| 4428 | + 'config': None, |
| 4429 | } |
| 4430 | |
| 4431 | def __init__(self, version=NETWORK_STATE_VERSION, config=None): |
| 4432 | self._version = version |
| 4433 | self._config = config |
| 4434 | self._network_state = copy.deepcopy(self.initial_network_state) |
| 4435 | + self._network_state['config'] = config |
| 4436 | self._parsed = False |
| 4437 | |
| 4438 | @property |
| 4439 | @@ -432,6 +466,18 @@ class NetworkStateInterpreter(object): |
| 4440 | for param, val in command.get('params', {}).items(): |
| 4441 | iface.update({param: val}) |
| 4442 | |
| 4443 | + # convert value to boolean |
| 4444 | + bridge_stp = iface.get('bridge_stp') |
| 4445 | + if bridge_stp is not None and type(bridge_stp) != bool: |
| 4446 | + if bridge_stp in ['on', '1', 1]: |
| 4447 | + bridge_stp = True |
| 4448 | + elif bridge_stp in ['off', '0', 0]: |
| 4449 | + bridge_stp = False |
| 4450 | + else: |
| 4451 | + raise ValueError("Cannot convert bridge_stp value " |
| 4452 | + "(%s) to boolean", bridge_stp) |
| 4453 | + iface.update({'bridge_stp': bridge_stp}) |
| 4454 | + |
| 4455 | interfaces.update({iface['name']: iface}) |
| 4456 | |
| 4457 | @ensure_command_keys(['address']) |
| 4458 | @@ -460,12 +506,15 @@ class NetworkStateInterpreter(object): |
| 4459 | v2_command = { |
| 4460 | bond0: { |
| 4461 | 'interfaces': ['interface0', 'interface1'], |
| 4462 | - 'miimon': 100, |
| 4463 | - 'mode': '802.3ad', |
| 4464 | - 'xmit_hash_policy': 'layer3+4'}, |
| 4465 | + 'parameters': { |
| 4466 | + 'mii-monitor-interval': 100, |
| 4467 | + 'mode': '802.3ad', |
| 4468 | + 'xmit_hash_policy': 'layer3+4'}}, |
| 4469 | bond1: { |
| 4470 | 'bond-slaves': ['interface2', 'interface7'], |
| 4471 | - 'mode': 1 |
| 4472 | + 'parameters': { |
| 4473 | + 'mode': 1, |
| 4474 | + } |
| 4475 | } |
| 4476 | } |
| 4477 | |
| 4478 | @@ -489,8 +538,8 @@ class NetworkStateInterpreter(object): |
| 4479 | v2_command = { |
| 4480 | br0: { |
| 4481 | 'interfaces': ['interface0', 'interface1'], |
| 4482 | - 'fd': 0, |
| 4483 | - 'stp': 'off', |
| 4484 | + 'forward-delay': 0, |
| 4485 | + 'stp': False, |
| 4486 | 'maxwait': 0, |
| 4487 | } |
| 4488 | } |
| 4489 | @@ -554,6 +603,7 @@ class NetworkStateInterpreter(object): |
| 4490 | if not mac_address: |
| 4491 | LOG.debug('NetworkState Version2: missing "macaddress" info ' |
| 4492 | 'in config entry: %s: %s', eth, str(cfg)) |
| 4493 | + phy_cmd.update({'mac_address': mac_address}) |
| 4494 | |
| 4495 | for key in ['mtu', 'match', 'wakeonlan']: |
| 4496 | if key in cfg: |
| 4497 | @@ -598,8 +648,8 @@ class NetworkStateInterpreter(object): |
| 4498 | self.handle_vlan(vlan_cmd) |
| 4499 | |
| 4500 | def handle_wifis(self, command): |
| 4501 | - raise NotImplementedError("NetworkState V2: " |
| 4502 | - "Skipping wifi configuration") |
| 4503 | + LOG.warning('Wifi configuration is only available to distros with ' |
| 4504 | + 'netplan rendering support.') |
| 4505 | |
| 4506 | def _v2_common(self, cfg): |
| 4507 | LOG.debug('v2_common: handling config:\n%s', cfg) |
| 4508 | @@ -616,6 +666,11 @@ class NetworkStateInterpreter(object): |
| 4509 | |
| 4510 | def _handle_bond_bridge(self, command, cmd_type=None): |
| 4511 | """Common handler for bond and bridge types""" |
| 4512 | + |
| 4513 | + # inverse mapping for v2 keynames to v1 keynames |
| 4514 | + v2key_to_v1 = dict((v, k) for k, v in |
| 4515 | + NET_CONFIG_TO_V2.get(cmd_type).items()) |
| 4516 | + |
| 4517 | for item_name, item_cfg in command.items(): |
| 4518 | item_params = dict((key, value) for (key, value) in |
| 4519 | item_cfg.items() if key not in |
| 4520 | @@ -624,14 +679,20 @@ class NetworkStateInterpreter(object): |
| 4521 | 'type': cmd_type, |
| 4522 | 'name': item_name, |
| 4523 | cmd_type + '_interfaces': item_cfg.get('interfaces'), |
| 4524 | - 'params': item_params, |
| 4525 | + 'params': dict((v2key_to_v1[k], v) for k, v in |
| 4526 | + item_params.get('parameters', {}).items()) |
| 4527 | } |
| 4528 | subnets = self._v2_to_v1_ipcfg(item_cfg) |
| 4529 | if len(subnets) > 0: |
| 4530 | v1_cmd.update({'subnets': subnets}) |
| 4531 | |
| 4532 | - LOG.debug('v2(%ss) -> v1(%s):\n%s', cmd_type, cmd_type, v1_cmd) |
| 4533 | - self.handle_bridge(v1_cmd) |
| 4534 | + LOG.debug('v2(%s) -> v1(%s):\n%s', cmd_type, cmd_type, v1_cmd) |
| 4535 | + if cmd_type == "bridge": |
| 4536 | + self.handle_bridge(v1_cmd) |
| 4537 | + elif cmd_type == "bond": |
| 4538 | + self.handle_bond(v1_cmd) |
| 4539 | + else: |
| 4540 | + raise ValueError('Unknown command type: %s', cmd_type) |
| 4541 | |
| 4542 | def _v2_to_v1_ipcfg(self, cfg): |
| 4543 | """Common ipconfig extraction from v2 to v1 subnets array.""" |
| 4544 | @@ -651,12 +712,6 @@ class NetworkStateInterpreter(object): |
| 4545 | 'address': address, |
| 4546 | } |
| 4547 | |
| 4548 | - routes = [] |
| 4549 | - for route in cfg.get('routes', []): |
| 4550 | - routes.append(_normalize_route( |
| 4551 | - {'address': route.get('to'), 'gateway': route.get('via')})) |
| 4552 | - subnet['routes'] = routes |
| 4553 | - |
| 4554 | if ":" in address: |
| 4555 | if 'gateway6' in cfg and gateway6 is None: |
| 4556 | gateway6 = cfg.get('gateway6') |
| 4557 | @@ -667,6 +722,17 @@ class NetworkStateInterpreter(object): |
| 4558 | subnet.update({'gateway': gateway4}) |
| 4559 | |
| 4560 | subnets.append(subnet) |
| 4561 | + |
| 4562 | + routes = [] |
| 4563 | + for route in cfg.get('routes', []): |
| 4564 | + routes.append(_normalize_route( |
| 4565 | + {'destination': route.get('to'), 'gateway': route.get('via')})) |
| 4566 | + |
| 4567 | + # v2 routes are bound to the interface, in v1 we add them under |
| 4568 | + # the first subnet since there isn't an equivalent interface level. |
| 4569 | + if len(subnets) and len(routes): |
| 4570 | + subnets[0]['routes'] = routes |
| 4571 | + |
| 4572 | return subnets |
| 4573 | |
| 4574 | |
| 4575 | @@ -721,7 +787,7 @@ def _normalize_net_keys(network, address_keys=()): |
| 4576 | elif netmask: |
| 4577 | prefix = mask_to_net_prefix(netmask) |
| 4578 | elif 'prefix' in net: |
| 4579 | - prefix = int(prefix) |
| 4580 | + prefix = int(net['prefix']) |
| 4581 | else: |
| 4582 | prefix = 64 if ipv6 else 24 |
| 4583 | |
| 4584 | diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py |
| 4585 | index a550f97..f572796 100644 |
| 4586 | --- a/cloudinit/net/sysconfig.py |
| 4587 | +++ b/cloudinit/net/sysconfig.py |
| 4588 | @@ -484,7 +484,11 @@ class Renderer(renderer.Renderer): |
| 4589 | content.add_nameserver(nameserver) |
| 4590 | for searchdomain in network_state.dns_searchdomains: |
| 4591 | content.add_search_domain(searchdomain) |
| 4592 | - return "\n".join([_make_header(';'), str(content)]) |
| 4593 | + header = _make_header(';') |
| 4594 | + content_str = str(content) |
| 4595 | + if not content_str.startswith(header): |
| 4596 | + content_str = header + '\n' + content_str |
| 4597 | + return content_str |
| 4598 | |
| 4599 | @staticmethod |
| 4600 | def _render_networkmanager_conf(network_state): |
| 4601 | diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py |
| 4602 | new file mode 100644 |
| 4603 | index 0000000..1c1f504 |
| 4604 | --- /dev/null |
| 4605 | +++ b/cloudinit/net/tests/test_dhcp.py |
| 4606 | @@ -0,0 +1,260 @@ |
| 4607 | +# This file is part of cloud-init. See LICENSE file for license information. |
| 4608 | + |
| 4609 | +import mock |
| 4610 | +import os |
| 4611 | +from textwrap import dedent |
| 4612 | + |
| 4613 | +from cloudinit.net.dhcp import ( |
| 4614 | + InvalidDHCPLeaseFileError, maybe_perform_dhcp_discovery, |
| 4615 | + parse_dhcp_lease_file, dhcp_discovery, networkd_load_leases) |
| 4616 | +from cloudinit.util import ensure_file, write_file |
| 4617 | +from cloudinit.tests.helpers import CiTestCase, wrap_and_call, populate_dir |
| 4618 | + |
| 4619 | + |
| 4620 | +class TestParseDHCPLeasesFile(CiTestCase): |
| 4621 | + |
| 4622 | + def test_parse_empty_lease_file_errors(self): |
| 4623 | + """parse_dhcp_lease_file errors when file content is empty.""" |
| 4624 | + empty_file = self.tmp_path('leases') |
| 4625 | + ensure_file(empty_file) |
| 4626 | + with self.assertRaises(InvalidDHCPLeaseFileError) as context_manager: |
| 4627 | + parse_dhcp_lease_file(empty_file) |
| 4628 | + error = context_manager.exception |
| 4629 | + self.assertIn('Cannot parse empty dhcp lease file', str(error)) |
| 4630 | + |
| 4631 | + def test_parse_malformed_lease_file_content_errors(self): |
| 4632 | + """parse_dhcp_lease_file errors when file content isn't dhcp leases.""" |
| 4633 | + non_lease_file = self.tmp_path('leases') |
| 4634 | + write_file(non_lease_file, 'hi mom.') |
| 4635 | + with self.assertRaises(InvalidDHCPLeaseFileError) as context_manager: |
| 4636 | + parse_dhcp_lease_file(non_lease_file) |
| 4637 | + error = context_manager.exception |
| 4638 | + self.assertIn('Cannot parse dhcp lease file', str(error)) |
| 4639 | + |
| 4640 | + def test_parse_multiple_leases(self): |
| 4641 | + """parse_dhcp_lease_file returns a list of all leases within.""" |
| 4642 | + lease_file = self.tmp_path('leases') |
| 4643 | + content = dedent(""" |
| 4644 | + lease { |
| 4645 | + interface "wlp3s0"; |
| 4646 | + fixed-address 192.168.2.74; |
| 4647 | + option subnet-mask 255.255.255.0; |
| 4648 | + option routers 192.168.2.1; |
| 4649 | + renew 4 2017/07/27 18:02:30; |
| 4650 | + expire 5 2017/07/28 07:08:15; |
| 4651 | + } |
| 4652 | + lease { |
| 4653 | + interface "wlp3s0"; |
| 4654 | + fixed-address 192.168.2.74; |
| 4655 | + option subnet-mask 255.255.255.0; |
| 4656 | + option routers 192.168.2.1; |
| 4657 | + } |
| 4658 | + """) |
| 4659 | + expected = [ |
| 4660 | + {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74', |
| 4661 | + 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1', |
| 4662 | + 'renew': '4 2017/07/27 18:02:30', |
| 4663 | + 'expire': '5 2017/07/28 07:08:15'}, |
| 4664 | + {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74', |
| 4665 | + 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}] |
| 4666 | + write_file(lease_file, content) |
| 4667 | + self.assertItemsEqual(expected, parse_dhcp_lease_file(lease_file)) |
| 4668 | + |
| 4669 | + |
| 4670 | +class TestDHCPDiscoveryClean(CiTestCase): |
| 4671 | + with_logs = True |
| 4672 | + |
| 4673 | + @mock.patch('cloudinit.net.dhcp.find_fallback_nic') |
| 4674 | + def test_no_fallback_nic_found(self, m_fallback_nic): |
| 4675 | + """Log and do nothing when nic is absent and no fallback is found.""" |
| 4676 | + m_fallback_nic.return_value = None # No fallback nic found |
| 4677 | + self.assertEqual({}, maybe_perform_dhcp_discovery()) |
| 4678 | + self.assertIn( |
| 4679 | + 'Skip dhcp_discovery: Unable to find fallback nic.', |
| 4680 | + self.logs.getvalue()) |
| 4681 | + |
| 4682 | + def test_provided_nic_does_not_exist(self): |
| 4683 | + """When the provided nic doesn't exist, log a message and no-op.""" |
| 4684 | + self.assertEqual({}, maybe_perform_dhcp_discovery('idontexist')) |
| 4685 | + self.assertIn( |
| 4686 | + 'Skip dhcp_discovery: nic idontexist not found in get_devicelist.', |
| 4687 | + self.logs.getvalue()) |
| 4688 | + |
| 4689 | + @mock.patch('cloudinit.net.dhcp.util.which') |
| 4690 | + @mock.patch('cloudinit.net.dhcp.find_fallback_nic') |
| 4691 | + def test_absent_dhclient_command(self, m_fallback, m_which): |
| 4692 | + """When dhclient doesn't exist in the OS, log the issue and no-op.""" |
| 4693 | + m_fallback.return_value = 'eth9' |
| 4694 | + m_which.return_value = None # dhclient isn't found |
| 4695 | + self.assertEqual({}, maybe_perform_dhcp_discovery()) |
| 4696 | + self.assertIn( |
| 4697 | + 'Skip dhclient configuration: No dhclient command found.', |
| 4698 | + self.logs.getvalue()) |
| 4699 | + |
| 4700 | + @mock.patch('cloudinit.temp_utils.os.getuid') |
| 4701 | + @mock.patch('cloudinit.net.dhcp.dhcp_discovery') |
| 4702 | + @mock.patch('cloudinit.net.dhcp.util.which') |
| 4703 | + @mock.patch('cloudinit.net.dhcp.find_fallback_nic') |
| 4704 | + def test_dhclient_run_with_tmpdir(self, m_fback, m_which, m_dhcp, m_uid): |
| 4705 | + """maybe_perform_dhcp_discovery passes tmpdir to dhcp_discovery.""" |
| 4706 | + m_uid.return_value = 0 # Fake root user for tmpdir |
| 4707 | + m_fback.return_value = 'eth9' |
| 4708 | + m_which.return_value = '/sbin/dhclient' |
| 4709 | + m_dhcp.return_value = {'address': '192.168.2.2'} |
| 4710 | + retval = wrap_and_call( |
| 4711 | + 'cloudinit.temp_utils', |
| 4712 | + {'_TMPDIR': {'new': None}, |
| 4713 | + 'os.getuid': 0}, |
| 4714 | + maybe_perform_dhcp_discovery) |
| 4715 | + self.assertEqual({'address': '192.168.2.2'}, retval) |
| 4716 | + self.assertEqual( |
| 4717 | + 1, m_dhcp.call_count, 'dhcp_discovery not called once') |
| 4718 | + call = m_dhcp.call_args_list[0] |
| 4719 | + self.assertEqual('/sbin/dhclient', call[0][0]) |
| 4720 | + self.assertEqual('eth9', call[0][1]) |
| 4721 | + self.assertIn('/var/tmp/cloud-init/cloud-init-dhcp-', call[0][2]) |
| 4722 | + |
| 4723 | + @mock.patch('cloudinit.net.dhcp.util.subp') |
| 4724 | + def test_dhcp_discovery_run_in_sandbox(self, m_subp): |
| 4725 | + """dhcp_discovery brings up the interface and runs dhclient. |
| 4726 | + |
| 4727 | + It also returns the parsed dhcp.leases file generated in the sandbox. |
| 4728 | + """ |
| 4729 | + tmpdir = self.tmp_dir() |
| 4730 | + dhclient_script = os.path.join(tmpdir, 'dhclient.orig') |
| 4731 | + script_content = '#!/bin/bash\necho fake-dhclient' |
| 4732 | + write_file(dhclient_script, script_content, mode=0o755) |
| 4733 | + lease_content = dedent(""" |
| 4734 | + lease { |
| 4735 | + interface "eth9"; |
| 4736 | + fixed-address 192.168.2.74; |
| 4737 | + option subnet-mask 255.255.255.0; |
| 4738 | + option routers 192.168.2.1; |
| 4739 | + } |
| 4740 | + """) |
| 4741 | + lease_file = os.path.join(tmpdir, 'dhcp.leases') |
| 4742 | + write_file(lease_file, lease_content) |
| 4743 | + self.assertItemsEqual( |
| 4744 | + [{'interface': 'eth9', 'fixed-address': '192.168.2.74', |
| 4745 | + 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}], |
| 4746 | + dhcp_discovery(dhclient_script, 'eth9', tmpdir)) |
| 4747 | + # dhclient script got copied |
| 4748 | + with open(os.path.join(tmpdir, 'dhclient')) as stream: |
| 4749 | + self.assertEqual(script_content, stream.read()) |
| 4750 | + # Interface was brought up before dhclient called from sandbox |
| 4751 | + m_subp.assert_has_calls([ |
| 4752 | + mock.call( |
| 4753 | + ['ip', 'link', 'set', 'dev', 'eth9', 'up'], capture=True), |
| 4754 | + mock.call( |
| 4755 | + [os.path.join(tmpdir, 'dhclient'), '-1', '-v', '-lf', |
| 4756 | + lease_file, '-pf', os.path.join(tmpdir, 'dhclient.pid'), |
| 4757 | + 'eth9', '-sf', '/bin/true'], capture=True)]) |
| 4758 | + |
| 4759 | + |
class TestSystemdParseLeases(CiTestCase):
    """Tests for networkd_load_leases parsing of systemd-networkd leases."""

    lxd_lease = dedent("""\
        # This is private data. Do not parse.
        ADDRESS=10.75.205.242
        NETMASK=255.255.255.0
        ROUTER=10.75.205.1
        SERVER_ADDRESS=10.75.205.1
        NEXT_SERVER=10.75.205.1
        BROADCAST=10.75.205.255
        T1=1580
        T2=2930
        LIFETIME=3600
        DNS=10.75.205.1
        DOMAINNAME=lxd
        HOSTNAME=a1
        CLIENTID=ffe617693400020000ab110c65a6a0866931c2
        """)

    # Expected parse of lxd_lease: one entry per KEY=VALUE line, the
    # "Do not parse" comment line skipped.
    lxd_parsed = {
        'ADDRESS': '10.75.205.242',
        'NETMASK': '255.255.255.0',
        'ROUTER': '10.75.205.1',
        'SERVER_ADDRESS': '10.75.205.1',
        'NEXT_SERVER': '10.75.205.1',
        'BROADCAST': '10.75.205.255',
        'T1': '1580',
        'T2': '2930',
        'LIFETIME': '3600',
        'DNS': '10.75.205.1',
        'DOMAINNAME': 'lxd',
        'HOSTNAME': 'a1',
        'CLIENTID': 'ffe617693400020000ab110c65a6a0866931c2',
    }

    azure_lease = dedent("""\
        # This is private data. Do not parse.
        ADDRESS=10.132.0.5
        NETMASK=255.255.255.255
        ROUTER=10.132.0.1
        SERVER_ADDRESS=169.254.169.254
        NEXT_SERVER=10.132.0.1
        MTU=1460
        T1=43200
        T2=75600
        LIFETIME=86400
        DNS=169.254.169.254
        NTP=169.254.169.254
        DOMAINNAME=c.ubuntu-foundations.internal
        DOMAIN_SEARCH_LIST=c.ubuntu-foundations.internal google.internal
        HOSTNAME=tribaal-test-171002-1349.c.ubuntu-foundations.internal
        ROUTES=10.132.0.1/32,0.0.0.0 0.0.0.0/0,10.132.0.1
        CLIENTID=ff405663a200020000ab11332859494d7a8b4c
        OPTION_245=624c3620
        """)

    # Expected parse of azure_lease, including the Azure-specific OPTION_245.
    azure_parsed = {
        'ADDRESS': '10.132.0.5',
        'NETMASK': '255.255.255.255',
        'ROUTER': '10.132.0.1',
        'SERVER_ADDRESS': '169.254.169.254',
        'NEXT_SERVER': '10.132.0.1',
        'MTU': '1460',
        'T1': '43200',
        'T2': '75600',
        'LIFETIME': '86400',
        'DNS': '169.254.169.254',
        'NTP': '169.254.169.254',
        'DOMAINNAME': 'c.ubuntu-foundations.internal',
        'DOMAIN_SEARCH_LIST': 'c.ubuntu-foundations.internal google.internal',
        'HOSTNAME': 'tribaal-test-171002-1349.c.ubuntu-foundations.internal',
        'ROUTES': '10.132.0.1/32,0.0.0.0 0.0.0.0/0,10.132.0.1',
        'CLIENTID': 'ff405663a200020000ab11332859494d7a8b4c',
        'OPTION_245': '624c3620'}

    def setUp(self):
        super(TestSystemdParseLeases, self).setUp()
        # Fresh temporary leases directory for each test.
        self.lease_d = self.tmp_dir()

    def test_no_leases_returns_empty_dict(self):
        """An empty leases directory parses to an empty dict."""
        self.assertEqual({}, networkd_load_leases(self.lease_d))

    def test_no_leases_dir_returns_empty_dict(self):
        """A non-existent leases directory also parses to an empty dict."""
        missing_dir = os.path.join(self.lease_d, 'does-not-exist')
        self.assertEqual({}, networkd_load_leases(missing_dir))

    def test_single_leases_file(self):
        """A directory holding one lease file yields just that lease."""
        populate_dir(self.lease_d, {'2': self.lxd_lease})
        expected = {'2': self.lxd_parsed}
        self.assertEqual(expected, networkd_load_leases(self.lease_d))

    def test_single_azure_leases_file(self):
        """On Azure, option 245 should be present, verify it specifically."""
        populate_dir(self.lease_d, {'1': self.azure_lease})
        expected = {'1': self.azure_parsed}
        self.assertEqual(expected, networkd_load_leases(self.lease_d))

    def test_multiple_files(self):
        """Each lease file in the directory is parsed under its own key."""
        self.maxDiff = None
        leases = {'1': self.azure_lease, '9': self.lxd_lease}
        populate_dir(self.lease_d, leases)
        expected = {'1': self.azure_parsed, '9': self.lxd_parsed}
        self.assertEqual(expected, networkd_load_leases(self.lease_d))
| 4867 | diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py |
| 4868 | index 272a6eb..8cb4114 100644 |
| 4869 | --- a/cloudinit/net/tests/test_init.py |
| 4870 | +++ b/cloudinit/net/tests/test_init.py |
| 4871 | @@ -7,7 +7,7 @@ import os |
| 4872 | |
| 4873 | import cloudinit.net as net |
| 4874 | from cloudinit.util import ensure_file, write_file, ProcessExecutionError |
| 4875 | -from tests.unittests.helpers import CiTestCase |
| 4876 | +from cloudinit.tests.helpers import CiTestCase |
| 4877 | |
| 4878 | |
| 4879 | class TestSysDevPath(CiTestCase): |
| 4880 | @@ -414,7 +414,7 @@ class TestEphemeralIPV4Network(CiTestCase): |
| 4881 | self.assertIn('Cannot init network on', str(error)) |
| 4882 | self.assertEqual(0, m_subp.call_count) |
| 4883 | |
| 4884 | - def test_ephemeral_ipv4_network_errors_invalid_mask(self, m_subp): |
| 4885 | + def test_ephemeral_ipv4_network_errors_invalid_mask_prefix(self, m_subp): |
| 4886 | """Raise an error when prefix_or_mask is not a netmask or prefix.""" |
| 4887 | params = { |
| 4888 | 'interface': 'eth0', 'ip': '192.168.2.2', |
| 4889 | diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py |
| 4890 | index 39c79de..8f99d99 100644 |
| 4891 | --- a/cloudinit/netinfo.py |
| 4892 | +++ b/cloudinit/netinfo.py |
| 4893 | @@ -13,7 +13,7 @@ import re |
| 4894 | from cloudinit import log as logging |
| 4895 | from cloudinit import util |
| 4896 | |
| 4897 | -from prettytable import PrettyTable |
| 4898 | +from cloudinit.simpletable import SimpleTable |
| 4899 | |
| 4900 | LOG = logging.getLogger() |
| 4901 | |
| 4902 | @@ -170,7 +170,7 @@ def netdev_pformat(): |
| 4903 | lines.append(util.center("Net device info failed", '!', 80)) |
| 4904 | else: |
| 4905 | fields = ['Device', 'Up', 'Address', 'Mask', 'Scope', 'Hw-Address'] |
| 4906 | - tbl = PrettyTable(fields) |
| 4907 | + tbl = SimpleTable(fields) |
| 4908 | for (dev, d) in netdev.items(): |
| 4909 | tbl.add_row([dev, d["up"], d["addr"], d["mask"], ".", d["hwaddr"]]) |
| 4910 | if d.get('addr6'): |
| 4911 | @@ -194,7 +194,7 @@ def route_pformat(): |
| 4912 | if routes.get('ipv4'): |
| 4913 | fields_v4 = ['Route', 'Destination', 'Gateway', |
| 4914 | 'Genmask', 'Interface', 'Flags'] |
| 4915 | - tbl_v4 = PrettyTable(fields_v4) |
| 4916 | + tbl_v4 = SimpleTable(fields_v4) |
| 4917 | for (n, r) in enumerate(routes.get('ipv4')): |
| 4918 | route_id = str(n) |
| 4919 | tbl_v4.add_row([route_id, r['destination'], |
| 4920 | @@ -207,7 +207,7 @@ def route_pformat(): |
| 4921 | if routes.get('ipv6'): |
| 4922 | fields_v6 = ['Route', 'Proto', 'Recv-Q', 'Send-Q', |
| 4923 | 'Local Address', 'Foreign Address', 'State'] |
| 4924 | - tbl_v6 = PrettyTable(fields_v6) |
| 4925 | + tbl_v6 = SimpleTable(fields_v6) |
| 4926 | for (n, r) in enumerate(routes.get('ipv6')): |
| 4927 | route_id = str(n) |
| 4928 | tbl_v6.add_row([route_id, r['proto'], |
| 4929 | diff --git a/cloudinit/simpletable.py b/cloudinit/simpletable.py |
| 4930 | new file mode 100644 |
| 4931 | index 0000000..9060322 |
| 4932 | --- /dev/null |
| 4933 | +++ b/cloudinit/simpletable.py |
| 4934 | @@ -0,0 +1,62 @@ |
| 4935 | +# Copyright (C) 2017 Amazon.com, Inc. or its affiliates |
| 4936 | +# |
| 4937 | +# Author: Ethan Faust <efaust@amazon.com> |
| 4938 | +# Author: Andrew Jorgensen <ajorgens@amazon.com> |
| 4939 | +# |
| 4940 | +# This file is part of cloud-init. See LICENSE file for license information. |
| 4941 | + |
| 4942 | + |
| 4943 | +class SimpleTable(object): |
| 4944 | + """A minimal implementation of PrettyTable |
| 4945 | + for distribution with cloud-init. |
| 4946 | + """ |
| 4947 | + |
| 4948 | + def __init__(self, fields): |
| 4949 | + self.fields = fields |
| 4950 | + self.rows = [] |
| 4951 | + |
| 4952 | + # initialize list of 0s the same length |
| 4953 | + # as the number of fields |
| 4954 | + self.column_widths = [0] * len(self.fields) |
| 4955 | + self.update_column_widths(fields) |
| 4956 | + |
| 4957 | + def update_column_widths(self, values): |
| 4958 | + for i, value in enumerate(values): |
| 4959 | + self.column_widths[i] = max( |
| 4960 | + len(value), |
| 4961 | + self.column_widths[i]) |
| 4962 | + |
| 4963 | + def add_row(self, values): |
| 4964 | + if len(values) > len(self.fields): |
| 4965 | + raise TypeError('too many values') |
| 4966 | + values = [str(value) for value in values] |
| 4967 | + self.rows.append(values) |
| 4968 | + self.update_column_widths(values) |
| 4969 | + |
| 4970 | + def _hdiv(self): |
| 4971 | + """Returns a horizontal divider for the table.""" |
| 4972 | + return '+' + '+'.join( |
| 4973 | + ['-' * (w + 2) for w in self.column_widths]) + '+' |
| 4974 | + |
| 4975 | + def _row(self, row): |
| 4976 | + """Returns a formatted row.""" |
| 4977 | + return '|' + '|'.join( |
| 4978 | + [col.center(self.column_widths[i] + 2) |
| 4979 | + for i, col in enumerate(row)]) + '|' |
| 4980 | + |
| 4981 | + def __str__(self): |
| 4982 | + """Returns a string representation of the table with lines around. |
| 4983 | + |
| 4984 | + +-----+-----+ |
| 4985 | + | one | two | |
| 4986 | + +-----+-----+ |
| 4987 | + | 1 | 2 | |
| 4988 | + | 01 | 10 | |
| 4989 | + +-----+-----+ |
| 4990 | + """ |
| 4991 | + lines = [self._hdiv(), self._row(self.fields), self._hdiv()] |
| 4992 | + lines += [self._row(r) for r in self.rows] + [self._hdiv()] |
| 4993 | + return '\n'.join(lines) |
| 4994 | + |
| 4995 | + def get_string(self): |
| 4996 | + return repr(self) |
| 4997 | diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py |
| 4998 | index 380e27c..43a7e42 100644 |
| 4999 | --- a/cloudinit/sources/DataSourceAliYun.py |
| 5000 | +++ b/cloudinit/sources/DataSourceAliYun.py |
The diff has been truncated for viewing.


PASSED: Continuous integration, rev:e290426c7f6c3a1aa0796d9dc3d5223a212a2467
https://jenkins.ubuntu.com/server/job/cloud-init-ci/394/
Executed test runs:
SUCCESS: Checkout
SUCCESS: Unit & Style Tests
SUCCESS: Ubuntu LTS: Build
SUCCESS: Ubuntu LTS: Integration
SUCCESS: MAAS Compatibility Testing
IN_PROGRESS: Declarative: Post Actions
Click here to trigger a rebuild:
https://jenkins.ubuntu.com/server/job/cloud-init-ci/394/rebuild