Merge lp:~cloud-init/cloud-init/rework into lp:~cloud-init-dev/cloud-init/trunk

Proposed by Joshua Harlow
Status: Merged
Merged at revision: 564
Proposed branch: lp:~cloud-init/cloud-init/rework
Merge into: lp:~cloud-init-dev/cloud-init/trunk
Diff against target: 17875 lines (+10046/-4803)
113 files modified
ChangeLog (+193/-0)
Makefile (+24/-5)
Requires (+30/-0)
TODO (+27/-4)
bin/cloud-init (+474/-0)
cloud-init-cfg.py (+0/-115)
cloud-init-query.py (+0/-56)
cloud-init.py (+0/-229)
cloudinit/DataSource.py (+0/-214)
cloudinit/UserDataHandler.py (+0/-262)
cloudinit/__init__.py (+4/-650)
cloudinit/cloud.py (+101/-0)
cloudinit/config/__init__.py (+34/-252)
cloudinit/config/cc_apt_pipelining.py (+35/-29)
cloudinit/config/cc_apt_update_upgrade.py (+117/-86)
cloudinit/config/cc_bootcmd.py (+31/-24)
cloudinit/config/cc_byobu.py (+10/-16)
cloudinit/config/cc_ca_certs.py (+34/-25)
cloudinit/config/cc_chef.py (+72/-62)
cloudinit/config/cc_disable_ec2_metadata.py (+17/-11)
cloudinit/config/cc_final_message.py (+44/-34)
cloudinit/config/cc_foo.py (+32/-9)
cloudinit/config/cc_grub_dpkg.py (+15/-12)
cloudinit/config/cc_keys_to_console.py (+28/-17)
cloudinit/config/cc_landscape.py (+42/-22)
cloudinit/config/cc_locale.py (+9/-26)
cloudinit/config/cc_mcollective.py (+55/-63)
cloudinit/config/cc_mounts.py (+57/-36)
cloudinit/config/cc_phone_home.py (+51/-39)
cloudinit/config/cc_puppet.py (+59/-54)
cloudinit/config/cc_resizefs.py (+99/-67)
cloudinit/config/cc_rightscale_userdata.py (+50/-26)
cloudinit/config/cc_rsyslog.py (+32/-31)
cloudinit/config/cc_runcmd.py (+14/-8)
cloudinit/config/cc_salt_minion.py (+30/-26)
cloudinit/config/cc_scripts_per_boot.py (+17/-10)
cloudinit/config/cc_scripts_per_instance.py (+17/-10)
cloudinit/config/cc_scripts_per_once.py (+17/-10)
cloudinit/config/cc_scripts_user.py (+18/-10)
cloudinit/config/cc_set_hostname.py (+10/-17)
cloudinit/config/cc_set_passwords.py (+62/-45)
cloudinit/config/cc_ssh.py (+76/-50)
cloudinit/config/cc_ssh_import_id.py (+19/-16)
cloudinit/config/cc_timezone.py (+10/-38)
cloudinit/config/cc_update_etc_hosts.py (+36/-63)
cloudinit/config/cc_update_hostname.py (+14/-74)
cloudinit/distros/__init__.py (+163/-0)
cloudinit/distros/debian.py (+149/-0)
cloudinit/distros/fedora.py (+31/-0)
cloudinit/distros/rhel.py (+337/-0)
cloudinit/distros/ubuntu.py (+31/-0)
cloudinit/handlers/__init__.py (+222/-0)
cloudinit/handlers/boot_hook.py (+73/-0)
cloudinit/handlers/cloud_config.py (+62/-0)
cloudinit/handlers/shell_script.py (+52/-0)
cloudinit/handlers/upstart_job.py (+66/-0)
cloudinit/helpers.py (+452/-0)
cloudinit/importer.py (+65/-0)
cloudinit/log.py (+133/-0)
cloudinit/netinfo.py (+81/-30)
cloudinit/settings.py (+57/-0)
cloudinit/sources/DataSourceCloudStack.py (+94/-39)
cloudinit/sources/DataSourceConfigDrive.py (+116/-121)
cloudinit/sources/DataSourceEc2.py (+143/-95)
cloudinit/sources/DataSourceMAAS.py (+81/-162)
cloudinit/sources/DataSourceNoCloud.py (+75/-79)
cloudinit/sources/DataSourceOVF.py (+117/-156)
cloudinit/sources/__init__.py (+223/-0)
cloudinit/ssh_util.py (+275/-188)
cloudinit/stages.py (+551/-0)
cloudinit/templater.py (+41/-0)
cloudinit/url_helper.py (+226/-0)
cloudinit/user_data.py (+243/-0)
cloudinit/util.py (+1136/-592)
cloudinit/version.py (+27/-0)
config/cloud.cfg (+36/-4)
config/cloud.cfg.d/05_logging.cfg (+5/-1)
install.sh (+0/-31)
packages/bddeb (+172/-33)
packages/brpm (+216/-0)
packages/debian/changelog (+1/-1)
packages/debian/control (+4/-6)
packages/debian/rules (+3/-15)
packages/make-dist-tarball (+2/-2)
packages/make-tarball (+89/-0)
packages/redhat/cloud-init.spec (+183/-0)
setup.py (+102/-17)
sysvinit/cloud-config (+124/-0)
sysvinit/cloud-final (+124/-0)
sysvinit/cloud-init (+124/-0)
sysvinit/cloud-init-local (+124/-0)
templates/chef_client.rb.tmpl (+4/-4)
templates/default-locale.tmpl (+0/-1)
templates/hosts.redhat.tmpl (+22/-0)
templates/hosts.ubuntu.tmpl (+7/-8)
templates/sources.list.tmpl (+56/-57)
tests/configs/sample1.yaml (+53/-0)
tests/unittests/test__init__.py (+75/-93)
tests/unittests/test_builtin_handlers.py (+54/-0)
tests/unittests/test_datasource/test_maas.py (+33/-37)
tests/unittests/test_handler/test_handler_ca_certs.py (+62/-45)
tests/unittests/test_userdata.py (+90/-53)
tests/unittests/test_util.py (+69/-64)
tools/hacking.py (+175/-0)
tools/mock-meta.py (+444/-0)
tools/read-dependencies (+45/-0)
tools/read-version (+70/-0)
tools/run-pep8 (+35/-0)
tools/run-pylint (+1/-12)
upstart/cloud-config.conf (+1/-1)
upstart/cloud-final.conf (+1/-1)
upstart/cloud-init-local.conf (+1/-1)
upstart/cloud-init.conf (+1/-1)
To merge this branch: bzr merge lp:~cloud-init/cloud-init/rework
Reviewer: cloud-init Commiters
Status: Pending
Review via email: mp+113684@code.launchpad.net
lp:~cloud-init/cloud-init/rework updated
992. By Joshua Harlow

Updated so that if no mirror is found, the module stops running.

993. By Joshua Harlow

Add a comment about keeping track of what people think of the 'read'
and 'write' root; if it confuses them, remove it later and just
recommend a more 'natural' way of doing it (i.e. 'chroot').
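
Revision 993 refers to the path-join wrapper that the reworked 'paths' object provides (see the TODO entry in the diff below). A minimal sketch of the idea, using a hypothetical join() helper rather than the actual cloud-init API, assuming the read/write roots come straight from config:

    import os


    def join(paths_cfg, path, for_write=False):
        # Files being written resolve under 'write_root', files being read
        # resolve under 'read_root'; both default to '/'.
        root = paths_cfg.get('write_root' if for_write else 'read_root', '/')
        return os.path.join(root, path.lstrip('/'))


    # Leaving both roots at '/' behaves like a plain join; pointing
    # write_root at a scratch directory redirects every write there, which
    # is the testing / chroot-like use case the comment is weighing.
    cfg = {'read_root': '/', 'write_root': '/tmp/fake-root'}
    print(join(cfg, '/etc/hosts'))                  # /etc/hosts
    print(join(cfg, '/etc/hosts', for_write=True))  # /tmp/fake-root/etc/hosts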

994. By Scott Moser

setup.py: rename "daemon type" to "init system"

This brings with it other changes, and also makes an install pull in
all of the requisite init files (i.e., cloud-init needs both the -local and
the non-local init files).

995. By Joshua Harlow

Fix the initsys variable; setuptools/distutils automatically assigns
to a variable named 'init_system' instead, because the option name is
'init-system'.
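
The behaviour revision 995 works around is standard setuptools/distutils option handling: an option spelled with a dash on the command line is stored on the command object with an underscore. A minimal sketch of that mapping with a hypothetical install subclass (not the actual cloud-init setup.py):

    from distutils.command.install import install
    from distutils.core import setup


    class InstallWithInitSystem(install):
        # Declared as '--init-system' on the command line...
        user_options = install.user_options + [
            ('init-system=', None, 'init system to install start-up files for'),
        ]

        def initialize_options(self):
            install.initialize_options(self)
            # ...but distutils stores the value as 'init_system', which is why
            # looking for an 'init-system'/'initsys' attribute finds nothing.
            self.init_system = None

        def finalize_options(self):
            install.finalize_options(self)
            print("selected init system: %s" % self.init_system)


    # e.g. 'python setup.py install --init-system=upstart' would print
    # "selected init system: upstart".
    setup(name='example', version='0.1',
          cmdclass={'install': InstallWithInitSystem})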

Preview Diff

=== modified file 'ChangeLog'
--- ChangeLog 2012-06-21 15:37:22 +0000
+++ ChangeLog 2012-07-06 21:16:18 +0000
@@ -1,3 +1,196 @@
0.7.0:
2 - unified binary that activates the various stages
3 - Now using argparse + subcommands to specify the various CLI options
4 - a stage module that clearly separates the stages of the different
5 components (also described how they are used and in what order in the
6 new unified binary)
7 - user_data is now a module that just does user data processing while the
8 actual activation and 'handling' of the processed user data is done via
9 a separate set of files (and modules) with the main 'init' stage being the
10 controller of this
11 - creation of boot_hook, cloud_config, shell_script, upstart_job version 2
12 modules (with classes that perform there functionality) instead of those
13 having functionality that is attached to the cloudinit object (which
14 reduces reuse and limits future functionality, and makes testing harder)
15 - removal of global config that defined paths, shared config, now this is
16 via objects making unit testing testing and global side-effects a non issue
17 - creation of a 'helpers.py'
18 - this contains an abstraction for the 'lock' like objects that the various
19 module/handler running stages use to avoid re-running a given
20 module/handler for a given frequency. this makes it separated from
21 the actual usage of that object (thus helpful for testing and clear lines
22 usage and how the actual job is accomplished)
23 - a common 'runner' class is the main entrypoint using these locks to
24 run function objects passed in (along with there arguments) and there
25 frequency
26 - add in a 'paths' object that provides access to the previously global
27 and/or config based paths (thus providing a single entrypoint object/type
28 that provides path information)
29 - this also adds in the ability to change the path when constructing
30 that path 'object' and adding in additional config that can be used to
31 alter the root paths of 'joins' (useful for testing or possibly useful
32 in chroots?)
33 - config options now avaiable that can alter the 'write_root' and the
34 'read_root' when backing code uses the paths join() function
35 - add a config parser subclass that will automatically add unknown sections
36 and return default values (instead of throwing exceptions for these cases)
37 - a new config merging class that will be the central object that knows
38 how to do the common configuration merging from the various configuration
39 sources. The order is the following:
40 - cli config files override environment config files
41 which override instance configs which override datasource
42 configs which override base configuration which overrides
43 default configuration.
44 - remove the passing around of the 'cloudinit' object as a 'cloud' variable
45 and instead pass around an 'interface' object that can be given to modules
46 and handlers as there cloud access layer while the backing of that
47 object can be varied (good for abstraction and testing)
48 - use a single set of functions to do importing of modules
49 - add a function in which will search for a given set of module names with
50 a given set of attributes and return those which are found
51 - refactor logging so that instead of using a single top level 'log' that
52 instead each component/module can use its own logger (if desired), this
53 should be backwards compatible with handlers and config modules that used
54 the passed in logger (its still passed in)
55 - ensure that all places where exception are caught and where applicable
56 that the util logexc() is called, so that no exceptions that may occur
57 are dropped without first being logged (where it makes sense for this
58 to happen)
59 - add a 'requires' file that lists cloud-init dependencies
60 - applying it in package creation (bdeb and brpm) as well as using it
61 in the modified setup.py to ensure dependencies are installed when
62 using that method of packaging
63 - add a 'version.py' that lists the active version (in code) so that code
64 inside cloud-init can report the version in messaging and other config files
65 - cleanup of subprocess usage so that all subprocess calls go through the
66 subp() utility method, which now has an exception type that will provide
67 detailed information on python 2.6 and 2.7
68 - forced all code loading, moving, chmod, writing files and other system
69 level actions to go through standard set of util functions, this greatly
70 helps in debugging and determining exactly which system actions cloud-init is
71 performing
72 - switching out the templating engine cheetah for tempita since tempita has
73 no external dependencies (minus python) while cheetah has many dependencies
74 which makes it more difficult to adopt cloud-init in distros that may not
75 have those dependencies
76 - adjust url fetching and url trying to go through a single function that
77 reads urls in the new 'url helper' file, this helps in tracing, debugging
78 and knowing which urls are being called and/or posted to from with-in
79 cloud-init code
80 - add in the sending of a 'User-Agent' header for all urls fetched that
81 do not provide there own header mapping, derive this user-agent from
82 the following template, 'Cloud-Init/{version}' where the version is the
83 cloud-init version number
84 - using prettytable for netinfo 'debug' printing since it provides a standard
85 and defined output that should be easier to parse than a custom format
86 - add a set of distro specific classes, that handle distro specific actions
87 that modules and or handler code can use as needed, this is organized into
88 a base abstract class with child classes that implement the shared
89 functionality. config determines exactly which subclass to load, so it can
90 be easily extended as needed.
91 - current functionality
92 - network interface config file writing
93 - hostname setting/updating
94 - locale/timezone/ setting
95 - updating of /etc/hosts (with templates or generically)
96 - package commands (ie installing, removing)/mirror finding
97 - interface up/down activating
98 - implemented a debian + ubuntu subclass
99 - implemented a redhat + fedora subclass
100 - adjust the root 'cloud.cfg' file to now have distrobution/path specific
101 configuration values in it. these special configs are merged as the normal
102 config is, but the system level config is not passed into modules/handlers
103 - modules/handlers must go through the path and distro object instead
104 - have the cloudstack datasource test the url before calling into boto to
105 avoid the long wait for boto to finish retrying and finally fail when
106 the gateway meta-data address is unavailable
107 - add a simple mock ec2 meta-data python based http server that can serve a
108 very simple set of ec2 meta-data back to callers
109 - useful for testing or for understanding what the ec2 meta-data
110 service can provide in terms of data or functionality
111 - for ssh key and authorized key file parsing add in classes and util functions
112 that maintain the state of individual lines, allowing for a clearer
113 separation of parsing and modification (useful for testing and tracing)
114 - add a set of 'base' init.d scripts that can be used on systems that do
115 not have full upstart or systemd support (or support that does not match
116 the standard fedora/ubuntu implementation)
117 - currently these are being tested on RHEL 6.2
118 - separate the datasources into there own subdirectory (instead of being
119 a top-level item), this matches how config 'modules' and user-data 'handlers'
120 are also in there own subdirectory (thus helping new developers and others
121 understand the code layout in a quicker manner)
122 - add the building of rpms based off a new cli tool and template 'spec' file
123 that will templatize and perform the necessary commands to create a source
124 and binary package to be used with a cloud-init install on a 'rpm' supporting
125 system
126 - uses the new standard set of requires and converts those pypi requirements
127 into a local set of package requirments (that are known to exist on RHEL
128 systems but should also exist on fedora systems)
129 - adjust the bdeb builder to be a python script (instead of a shell script) and
130 make its 'control' file a template that takes in the standard set of pypi
131 dependencies and uses a local mapping (known to work on ubuntu) to create the
132 packages set of dependencies (that should also work on ubuntu-like systems)
133 - pythonify a large set of various pieces of code
134 - remove wrapping return statements with () when it has no effect
135 - upper case all constants used
136 - correctly 'case' class and method names (where applicable)
137 - use os.path.join (and similar commands) instead of custom path creation
138 - use 'is None' instead of the frowned upon '== None' which picks up a large
139 set of 'true' cases than is typically desired (ie for objects that have
140 there own equality)
141 - use context managers on locks, tempdir, chdir, file, selinux, umask,
142 unmounting commands so that these actions do not have to be closed and/or
143 cleaned up manually in finally blocks, which is typically not done and will
144 eventually be a bug in the future
145 - use the 'abc' module for abstract classes base where possible
146 - applied in the datasource root class, the distro root class, and the
147 user-data v2 root class
148 - when loading yaml, check that the 'root' type matches a predefined set of
149 valid types (typically just 'dict') and throw a type error if a mismatch
150 occurs, this seems to be a good idea to do when loading user config files
151 - when forking a long running task (ie resizing a filesytem) use a new util
152 function that will fork and then call a callback, instead of having to
153 implement all that code in a non-shared location (thus allowing it to be
154 used by others in the future)
155 - when writing out filenames, go through a util function that will attempt to
156 ensure that the given filename is 'filesystem' safe by replacing '/' with
157 '_' and removing characters which do not match a given whitelist of allowed
158 filename characters
159 - for the varying usages of the 'blkid' command make a function in the util
160 module that can be used as the single point of entry for interaction with
161 that command (and its results) instead of having X separate implementations
162 - place the rfc 8222 time formatting and uptime repeated pieces of code in the
163 util module as a set of function with the name 'time_rfc2822'/'uptime'
164 - separate the pylint+pep8 calling from one tool into two indivudal tools so
165 that they can be called independently, add make file sections that can be
166 used to call these independently
167 - remove the support for the old style config that was previously located in
168 '/etc/ec2-init/ec2-config.cfg', no longer supported!
169 - instead of using a altered config parser that added its own 'dummy' section
170 on in the 'mcollective' module, use configobj which handles the parsing of
171 config without sections better (and it also maintains comments instead of
172 removing them)
173 - use the new defaulting config parser (that will not raise errors on sections
174 that do not exist or return errors when values are fetched that do not exist)
175 in the 'puppet' module
176 - for config 'modules' add in the ability for the module to provide a list of
177 distro names which it is known to work with, if when ran and the distro being
178 used name does not match one of those in this list, a warning will be written
179 out saying that this module may not work correctly on this distrobution
180 - for all dynamically imported modules ensure that they are fixed up before
181 they are used by ensuring that they have certain attributes, if they do not
182 have those attributes they will be set to a sensible set of defaults instead
183 - adjust all 'config' modules and handlers to use the adjusted util functions
184 and the new distro objects where applicable so that those pieces of code can
185 benefit from the unified and enhanced functionality being provided in that
186 util module
187 - fix a potential bug whereby when a #includeonce was encountered it would
188 enable checking of urls against a cache, if later a #include was encountered
189 it would continue checking against that cache, instead of refetching (which
190 would likely be the expected case)
191 - add a openstack/nova based pep8 extension utility ('hacking.py') that allows
192 for custom checks (along with the standard pep8 checks) to occur when running
193 'make pep8' and its derivatives
0.6.4:
 - support relative path in AuthorizedKeysFile (LP: #970071).
 - make apt-get update run with --quiet (suitable for logging) (LP: #1012613)

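The configuration merge order described above (defaults lowest, command-line supplied config files highest) amounts to a cascade of dictionary merges. A minimal sketch of that precedence, not the actual merging class introduced by this branch:

    def merge_cfgs(*cfgs):
        # Later arguments win; nested dict sections are merged recursively.
        merged = {}
        for cfg in cfgs:
            for key, val in cfg.items():
                if isinstance(val, dict) and isinstance(merged.get(key), dict):
                    merged[key] = merge_cfgs(merged[key], val)
                else:
                    merged[key] = val
        return merged


    # Lowest to highest priority, per the entry above: default config, base
    # config, datasource config, instance config, environment config files,
    # then CLI-provided config files.
    default_cfg = {'locale': 'en_US.UTF-8', 'output': {'all': '> /dev/null'}}
    datasource_cfg = {'locale': 'en_GB.UTF-8'}
    cli_cfg = {'output': {'all': '| tee -a /var/log/cloud-init-output.log'}}
    print(merge_cfgs(default_cfg, datasource_cfg, cli_cfg))
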
=== modified file 'Makefile'
--- Makefile 2012-01-12 15:06:27 +0000
+++ Makefile 2012-07-06 21:16:18 +0000
@@ -1,14 +1,33 @@
+CWD=$(shell pwd)
+PY_FILES=$(shell find cloudinit bin -name "*.py")
+PY_FILES+="bin/cloud-init"
 
 all: test
 
+pep8:
+	$(CWD)/tools/run-pep8 $(PY_FILES)
+
 pylint:
-	pylint cloudinit
+	$(CWD)/tools/run-pylint $(PY_FILES)
 
 pyflakes:
-	pyflakes .
+	pyflakes $(PY_FILES)
 
 test:
-	nosetests tests/unittests/
+	nosetests $(noseopts) tests/unittests/
 
-.PHONY: test pylint pyflakes
+2to3:
+	2to3 $(PY_FILES)
+
+clean:
+	rm -rf /var/log/cloud-init.log \
+	   /var/lib/cloud/
+
+rpm:
+	cd packages && ./brpm
+
+deb:
+	cd packages && ./bddeb
+
+.PHONY: test pylint pyflakes 2to3 clean pep8 rpm deb
 
 
=== added file 'Requires'
--- Requires 1970-01-01 00:00:00 +0000
+++ Requires 2012-07-06 21:16:18 +0000
@@ -0,0 +1,30 @@
+# Pypi requirements for cloud-init to work
+
+# Used for templating any files or strings that are considered
+# to be templates, not cheetah since it pulls in alot of extra libs.
+# This one is pretty dinky and does want we want (var substituion)
+Tempita
+
+# This is used for any pretty printing of tabular data.
+PrettyTable
+
+# This one is currently only used by the MAAS datasource. If that
+# datasource is removed, this is no longer needed
+oauth
+
+# This is used to fetch the ec2 metadata into a easily
+# parseable format, instead of having to have cloud-init perform
+# those same fetchs and decodes and signing (...) that ec2 requires.
+boto
+
+# This is only needed for places where we need to support configs in a manner
+# that the built-in config parser is not sufficent (ie
+# when we need to preserve comments, or do not have a top-level
+# section)...
+configobj
+
+# All new style configurations are in the yaml format
+pyyaml
+
+# The new main entrypoint uses argparse instead of optparse
+argparse
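
Tempita, listed above as the Cheetah replacement, provides the simple {{variable}} substitution that the new templater module relies on; a minimal usage sketch, assuming only the stock tempita API:

    import tempita

    # Same {{name}} placeholder style used by the welcome message template
    # in bin/cloud-init.
    tpl = tempita.Template("Cloud-init v. {{version}} running '{{action}}'")
    print(tpl.substitute(version="0.7.0", action="init"))
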
=== modified file 'TODO'
--- TODO 2011-02-17 20:48:41 +0000
+++ TODO 2012-07-06 21:16:18 +0000
@@ -1,14 +1,37 @@
-- consider 'failsafe' DataSource
+- Consider a 'failsafe' DataSource
   If all others fail, setting a default that
   - sets the user password, writing it to console
   - logs to console that this happened
-- consider 'previous' DataSource
+- Consider a 'previous' DataSource
   If no other data source is found, fall back to the 'previous' one
   keep a indication of what instance id that is in /var/lib/cloud
-- rewrite "cloud-init-query"
-  have DataSource and cloudinit expose explicit fields
+- Rewrite "cloud-init-query" (currently not implemented)
+  Possibly have DataSource and cloudinit expose explicit fields
   - instance-id
   - hostname
   - mirror
   - release
   - ssh public keys
+- Remove the conversion of the ubuntu network interface format conversion
+  to a RH/fedora format and replace it with a top level format that uses
+  the netcf libraries format instead (which itself knows how to translate
+  into the specific formats)
+- Replace the 'apt*' modules with variants that now use the distro classes
+  to perform distro independent packaging commands (where possible)
+- Canonicalize the semaphore/lock name for modules and user data handlers
+  a. It is most likely a bug that currently exists that if a module in config
+     alters its name and it has already ran, then it will get ran again since
+     the lock name hasn't be canonicalized
+- Replace some the LOG.debug calls with a LOG.info where appropriate instead
+  of how right now there is really only 2 levels (WARN and DEBUG)
+- Remove the 'cc_' for config modules, either have them fully specified (ie
+  'cloudinit.config.resizefs') or by default only look in the 'cloudinit.config'
+  for these modules (or have a combination of the above), this avoids having
+  to understand where your modules are coming from (which can be altered by
+  the current python inclusion path)
+- Depending on if people think the wrapper around 'os.path.join' provided
+  by the 'paths' object is useful (allowing us to modify based off a 'read'
+  and 'write' configuration based 'root') or is just to confusing, it might be
+  something to remove later, and just recommend using 'chroot' instead (or the X
+  different other options which are similar to 'chroot'), which is might be more
+  natural and less confusing...
 
=== added directory 'bin'
=== added file 'bin/cloud-init'
--- bin/cloud-init 1970-01-01 00:00:00 +0000
+++ bin/cloud-init 2012-07-06 21:16:18 +0000
@@ -0,0 +1,474 @@
1#!/usr/bin/python
2# vi: ts=4 expandtab
3#
4# Copyright (C) 2012 Canonical Ltd.
5# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
6# Copyright (C) 2012 Yahoo! Inc.
7#
8# Author: Scott Moser <scott.moser@canonical.com>
9# Author: Juerg Haefliger <juerg.haefliger@hp.com>
10# Author: Joshua Harlow <harlowja@yahoo-inc.com>
11#
12# This program is free software: you can redistribute it and/or modify
13# it under the terms of the GNU General Public License version 3, as
14# published by the Free Software Foundation.
15#
16# This program is distributed in the hope that it will be useful,
17# but WITHOUT ANY WARRANTY; without even the implied warranty of
18# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19# GNU General Public License for more details.
20#
21# You should have received a copy of the GNU General Public License
22# along with this program. If not, see <http://www.gnu.org/licenses/>.
23
24import argparse
25import os
26import sys
27import traceback
28
29# This is more just for running from the bin folder so that
30# cloud-init binary can find the cloudinit module
31possible_topdir = os.path.normpath(os.path.join(os.path.abspath(
32 sys.argv[0]), os.pardir, os.pardir))
33if os.path.exists(os.path.join(possible_topdir, "cloudinit", "__init__.py")):
34 sys.path.insert(0, possible_topdir)
35
36from cloudinit import log as logging
37from cloudinit import netinfo
38from cloudinit import sources
39from cloudinit import stages
40from cloudinit import templater
41from cloudinit import util
42from cloudinit import version
43
44from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE,
45 CLOUD_CONFIG)
46
47
48# Pretty little welcome message template
49WELCOME_MSG_TPL = ("Cloud-init v. {{version}} running '{{action}}' at "
50 "{{timestamp}}. Up {{uptime}} seconds.")
51
52# Module section template
53MOD_SECTION_TPL = "cloud_%s_modules"
54
55# Things u can query on
56QUERY_DATA_TYPES = [
57 'data',
58 'data_raw',
59 'instance_id',
60]
61
62# Frequency shortname to full name
63# (so users don't have to remember the full name...)
64FREQ_SHORT_NAMES = {
65 'instance': PER_INSTANCE,
66 'always': PER_ALWAYS,
67 'once': PER_ONCE,
68}
69
70LOG = logging.getLogger()
71
72
73# Used for when a logger may not be active
74# and we still want to print exceptions...
75def print_exc(msg=''):
76 if msg:
77 sys.stderr.write("%s\n" % (msg))
78 sys.stderr.write('-' * 60)
79 sys.stderr.write("\n")
80 traceback.print_exc(file=sys.stderr)
81 sys.stderr.write('-' * 60)
82 sys.stderr.write("\n")
83
84
85def welcome(action):
86 tpl_params = {
87 'version': version.version_string(),
88 'uptime': util.uptime(),
89 'timestamp': util.time_rfc2822(),
90 'action': action,
91 }
92 tpl_msg = templater.render_string(WELCOME_MSG_TPL, tpl_params)
93 util.multi_log("%s\n" % (tpl_msg),
94 console=False, stderr=True)
95
96
97def extract_fns(args):
98 # Files are already opened so lets just pass that along
99 # since it would of broke if it couldn't have
100 # read that file already...
101 fn_cfgs = []
102 if args.files:
103 for fh in args.files:
104 # The realpath is more useful in logging
105 # so lets resolve to that...
106 fn_cfgs.append(os.path.realpath(fh.name))
107 return fn_cfgs
108
109
110def run_module_section(mods, action_name, section):
111 full_section_name = MOD_SECTION_TPL % (section)
112 (which_ran, failures) = mods.run_section(full_section_name)
113 total_attempted = len(which_ran) + len(failures)
114 if total_attempted == 0:
115 msg = ("No '%s' modules to run"
116 " under section '%s'") % (action_name, full_section_name)
117 sys.stderr.write("%s\n" % (msg))
118 LOG.debug(msg)
119 return 0
120 else:
121 LOG.debug("Ran %s modules with %s failures",
122 len(which_ran), len(failures))
123 return len(failures)
124
125
126def main_init(name, args):
127 deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK]
128 if args.local:
129 deps = [sources.DEP_FILESYSTEM]
130
131 if not args.local:
132 # See doc/kernel-cmdline.txt
133 #
134 # This is used in maas datasource, in "ephemeral" (read-only root)
135 # environment where the instance netboots to iscsi ro root.
136 # and the entity that controls the pxe config has to configure
137 # the maas datasource.
138 #
139 # Could be used elsewhere, only works on network based (not local).
140 root_name = "%s.d" % (CLOUD_CONFIG)
141 target_fn = os.path.join(root_name, "91_kernel_cmdline_url.cfg")
142 util.read_write_cmdline_url(target_fn)
143
144 # Cloud-init 'init' stage is broken up into the following sub-stages
145 # 1. Ensure that the init object fetches its config without errors
146 # 2. Setup logging/output redirections with resultant config (if any)
147 # 3. Initialize the cloud-init filesystem
148 # 4. Check if we can stop early by looking for various files
149 # 5. Fetch the datasource
150 # 6. Connect to the current instance location + update the cache
151 # 7. Consume the userdata (handlers get activated here)
152 # 8. Construct the modules object
153 # 9. Adjust any subsequent logging/output redirections using
154 # the modules objects configuration
155 # 10. Run the modules for the 'init' stage
156 # 11. Done!
157 welcome(name)
158 init = stages.Init(deps)
159 # Stage 1
160 init.read_cfg(extract_fns(args))
161 # Stage 2
162 outfmt = None
163 errfmt = None
164 try:
165 LOG.debug("Closing stdin")
166 util.close_stdin()
167 (outfmt, errfmt) = util.fixup_output(init.cfg, name)
168 except:
169 util.logexc(LOG, "Failed to setup output redirection!")
170 print_exc("Failed to setup output redirection!")
171 if args.debug:
172 # Reset so that all the debug handlers are closed out
173 LOG.debug(("Logging being reset, this logger may no"
174 " longer be active shortly"))
175 logging.resetLogging()
176 logging.setupLogging(init.cfg)
177 # Stage 3
178 try:
179 init.initialize()
180 except Exception:
181 util.logexc(LOG, "Failed to initialize, likely bad things to come!")
182 # Stage 4
183 path_helper = init.paths
184 if not args.local:
185 sys.stderr.write("%s\n" % (netinfo.debug_info()))
186 LOG.debug(("Checking to see if files that we need already"
187 " exist from a previous run that would allow us"
188 " to stop early."))
189 stop_files = [
190 os.path.join(path_helper.get_cpath("data"), "no-net"),
191 path_helper.get_ipath_cur("obj_pkl"),
192 ]
193 existing_files = []
194 for fn in stop_files:
195 try:
196 c = util.load_file(fn)
197 if len(c):
198 existing_files.append((fn, len(c)))
199 except Exception:
200 pass
201 if existing_files:
202 LOG.debug("Exiting early due to the existence of %s files",
203 existing_files)
204 return 0
205 else:
206 # The cache is not instance specific, so it has to be purged
207 # but we want 'start' to benefit from a cache if
208 # a previous start-local populated one...
209 manual_clean = util.get_cfg_option_bool(init.cfg,
210 'manual_cache_clean', False)
211 if manual_clean:
212 LOG.debug("Not purging instance link, manual cleaning enabled")
213 init.purge_cache(False)
214 else:
215 init.purge_cache()
216 # Delete the non-net file as well
217 util.del_file(os.path.join(path_helper.get_cpath("data"), "no-net"))
218 # Stage 5
219 try:
220 init.fetch()
221 except sources.DataSourceNotFoundException:
222 util.logexc(LOG, ("No instance datasource found!"
223 " Likely bad things to come!"))
224 # In the case of cloud-init (net mode) it is a bit
225 # more likely that the user would consider it
226 # failure if nothing was found. When using
227 # upstart it will also mentions job failure
228 # in console log if exit code is != 0.
229 if not args.force:
230 if args.local:
231 return 0
232 else:
233 return 1
234 # Stage 6
235 iid = init.instancify()
236 LOG.debug("%s will now be targeting instance id: %s", name, iid)
237 init.update()
238 # Stage 7
239 try:
240 # Attempt to consume the data per instance.
241 # This may run user-data handlers and/or perform
242 # url downloads and such as needed.
243 (ran, _results) = init.cloudify().run('consume_userdata',
244 init.consume_userdata,
245 args=[PER_INSTANCE],
246 freq=PER_INSTANCE)
247 if not ran:
248 # Just consume anything that is set to run per-always
249 # if nothing ran in the per-instance code
250 #
251 # See: https://bugs.launchpad.net/bugs/819507 for a little
252 # reason behind this...
253 init.consume_userdata(PER_ALWAYS)
254 except Exception:
255 util.logexc(LOG, "Consuming user data failed!")
256 return 1
257 # Stage 8 - TODO - do we really need to re-extract our configs?
258 mods = stages.Modules(init, extract_fns(args))
259 # Stage 9 - TODO is this really needed??
260 try:
261 outfmt_orig = outfmt
262 errfmt_orig = errfmt
263 (outfmt, errfmt) = util.get_output_cfg(mods.cfg, name)
264 if outfmt_orig != outfmt or errfmt_orig != errfmt:
265 LOG.warn("Stdout, stderr changing to (%s, %s)", outfmt, errfmt)
266 (outfmt, errfmt) = util.fixup_output(mods.cfg, name)
267 except:
268 util.logexc(LOG, "Failed to re-adjust output redirection!")
269 # Stage 10
270 return run_module_section(mods, name, name)
271
272
273def main_modules(action_name, args):
274 name = args.mode
275 # Cloud-init 'modules' stages are broken up into the following sub-stages
276 # 1. Ensure that the init object fetches its config without errors
277 # 2. Get the datasource from the init object, if it does
278 # not exist then that means the main_init stage never
279 # worked, and thus this stage can not run.
280 # 3. Construct the modules object
281 # 4. Adjust any subsequent logging/output redirections using
282 # the modules objects configuration
283 # 5. Run the modules for the given stage name
284 # 6. Done!
285 welcome("%s:%s" % (action_name, name))
286 init = stages.Init(ds_deps=[])
287 # Stage 1
288 init.read_cfg(extract_fns(args))
289 # Stage 2
290 try:
291 init.fetch()
292 except sources.DataSourceNotFoundException:
293 # There was no datasource found, theres nothing to do
294 util.logexc(LOG, ('Can not apply stage %s, '
295 'no datasource found!'
296 " Likely bad things to come!"), name)
297 print_exc(('Can not apply stage %s, '
298 'no datasource found!'
299 " Likely bad things to come!") % (name))
300 if not args.force:
301 return 1
302 # Stage 3
303 mods = stages.Modules(init, extract_fns(args))
304 # Stage 4
305 try:
306 LOG.debug("Closing stdin")
307 util.close_stdin()
308 util.fixup_output(mods.cfg, name)
309 except:
310 util.logexc(LOG, "Failed to setup output redirection!")
311 if args.debug:
312 # Reset so that all the debug handlers are closed out
313 LOG.debug(("Logging being reset, this logger may no"
314 " longer be active shortly"))
315 logging.resetLogging()
316 logging.setupLogging(mods.cfg)
317 # Stage 5
318 return run_module_section(mods, name, name)
319
320
321def main_query(name, _args):
322 raise NotImplementedError(("Action '%s' is not"
323 " currently implemented") % (name))
324
325
326def main_single(name, args):
327 # Cloud-init single stage is broken up into the following sub-stages
328 # 1. Ensure that the init object fetches its config without errors
329 # 2. Attempt to fetch the datasource (warn if it doesn't work)
330 # 3. Construct the modules object
331 # 4. Adjust any subsequent logging/output redirections using
332 # the modules objects configuration
333 # 5. Run the single module
334 # 6. Done!
335 mod_name = args.name
336 welcome("%s:%s" % (name, mod_name))
337 init = stages.Init(ds_deps=[])
338 # Stage 1
339 init.read_cfg(extract_fns(args))
340 # Stage 2
341 try:
342 init.fetch()
343 except sources.DataSourceNotFoundException:
344 # There was no datasource found,
345 # that might be bad (or ok) depending on
346 # the module being ran (so continue on)
347 util.logexc(LOG, ("Failed to fetch your datasource,"
348 " likely bad things to come!"))
349 print_exc(("Failed to fetch your datasource,"
350 " likely bad things to come!"))
351 if not args.force:
352 return 1
353 # Stage 3
354 mods = stages.Modules(init, extract_fns(args))
355 mod_args = args.module_args
356 if mod_args:
357 LOG.debug("Using passed in arguments %s", mod_args)
358 mod_freq = args.frequency
359 if mod_freq:
360 LOG.debug("Using passed in frequency %s", mod_freq)
361 mod_freq = FREQ_SHORT_NAMES.get(mod_freq)
362 # Stage 4
363 try:
364 LOG.debug("Closing stdin")
365 util.close_stdin()
366 util.fixup_output(mods.cfg, None)
367 except:
368 util.logexc(LOG, "Failed to setup output redirection!")
369 if args.debug:
370 # Reset so that all the debug handlers are closed out
371 LOG.debug(("Logging being reset, this logger may no"
372 " longer be active shortly"))
373 logging.resetLogging()
374 logging.setupLogging(mods.cfg)
375 # Stage 5
376 (which_ran, failures) = mods.run_single(mod_name,
377 mod_args,
378 mod_freq)
379 if failures:
380 LOG.warn("Ran %s but it failed!", mod_name)
381 return 1
382 elif not which_ran:
383 LOG.warn("Did not run %s, does it exist?", mod_name)
384 return 1
385 else:
386 # Guess it worked
387 return 0
388
389
390def main():
391 parser = argparse.ArgumentParser()
392
393 # Top level args
394 parser.add_argument('--version', '-v', action='version',
395 version='%(prog)s ' + (version.version_string()))
396 parser.add_argument('--file', '-f', action='append',
397 dest='files',
398 help=('additional yaml configuration'
399 ' files to use'),
400 type=argparse.FileType('rb'))
401 parser.add_argument('--debug', '-d', action='store_true',
402 help=('show additional pre-action'
403 ' logging (default: %(default)s)'),
404 default=False)
405 parser.add_argument('--force', action='store_true',
406 help=('force running even if no datasource is'
407 ' found (use at your own risk)'),
408 dest='force',
409 default=False)
410 subparsers = parser.add_subparsers()
411
412 # Each action and its sub-options (if any)
413 parser_init = subparsers.add_parser('init',
414 help=('initializes cloud-init and'
415 ' performs initial modules'))
416 parser_init.add_argument("--local", '-l', action='store_true',
417 help="start in local mode (default: %(default)s)",
418 default=False)
419 # This is used so that we can know which action is selected +
420 # the functor to use to run this subcommand
421 parser_init.set_defaults(action=('init', main_init))
422
423 # These settings are used for the 'config' and 'final' stages
424 parser_mod = subparsers.add_parser('modules',
425 help=('activates modules '
426 'using a given configuration key'))
427 parser_mod.add_argument("--mode", '-m', action='store',
428 help=("module configuration name "
429 "to use (default: %(default)s)"),
430 default='config',
431 choices=('init', 'config', 'final'))
432 parser_mod.set_defaults(action=('modules', main_modules))
433
434 # These settings are used when you want to query information
435 # stored in the cloud-init data objects/directories/files
436 parser_query = subparsers.add_parser('query',
437 help=('query information stored '
438 'in cloud-init'))
439 parser_query.add_argument("--name", '-n', action="store",
440 help="item name to query on",
441 required=True,
442 choices=QUERY_DATA_TYPES)
443 parser_query.set_defaults(action=('query', main_query))
444
445 # This subcommand allows you to run a single module
446 parser_single = subparsers.add_parser('single',
447 help=('run a single module '))
448 parser_single.set_defaults(action=('single', main_single))
449 parser_single.add_argument("--name", '-n', action="store",
450 help="module name to run",
451 required=True)
452 parser_single.add_argument("--frequency", action="store",
453 help=("frequency of the module"),
454 required=False,
455 choices=list(FREQ_SHORT_NAMES.keys()))
456 parser_single.add_argument("module_args", nargs="*",
457 metavar='argument',
458 help=('any additional arguments to'
459 ' pass to this module'))
460 parser_single.set_defaults(action=('single', main_single))
461
462 args = parser.parse_args()
463
464 # Setup basic logging to start (until reinitialized)
465 # iff in debug mode...
466 if args.debug:
467 logging.setupBasicLogging()
468
469 (name, functor) = args.action
470 return functor(name, args)
471
472
473if __name__ == '__main__':
474 sys.exit(main())
=== removed file 'cloud-init-cfg.py'
--- cloud-init-cfg.py 2012-01-18 14:07:33 +0000
+++ cloud-init-cfg.py 1970-01-01 00:00:00 +0000
@@ -1,115 +0,0 @@
1#!/usr/bin/python
2# vi: ts=4 expandtab
3#
4# Copyright (C) 2009-2010 Canonical Ltd.
5# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
6#
7# Author: Scott Moser <scott.moser@canonical.com>
8# Author: Juerg Haefliger <juerg.haefliger@hp.com>
9#
10# This program is free software: you can redistribute it and/or modify
11# it under the terms of the GNU General Public License version 3, as
12# published by the Free Software Foundation.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <http://www.gnu.org/licenses/>.
21
22import sys
23import cloudinit
24import cloudinit.util as util
25import cloudinit.CloudConfig as CC
26import logging
27import os
28
29
30def Usage(out=sys.stdout):
31 out.write("Usage: %s name\n" % sys.argv[0])
32
33
34def main():
35 # expect to be called with
36 # name [ freq [ args ]
37 # run the cloud-config job 'name' at with given args
38 # or
39 # read cloud config jobs from config (builtin -> system)
40 # and run all in order
41
42 util.close_stdin()
43
44 modename = "config"
45
46 if len(sys.argv) < 2:
47 Usage(sys.stderr)
48 sys.exit(1)
49 if sys.argv[1] == "all":
50 name = "all"
51 if len(sys.argv) > 2:
52 modename = sys.argv[2]
53 else:
54 freq = None
55 run_args = []
56 name = sys.argv[1]
57 if len(sys.argv) > 2:
58 freq = sys.argv[2]
59 if freq == "None":
60 freq = None
61 if len(sys.argv) > 3:
62 run_args = sys.argv[3:]
63
64 cfg_path = cloudinit.get_ipath_cur("cloud_config")
65 cfg_env_name = cloudinit.cfg_env_name
66 if cfg_env_name in os.environ:
67 cfg_path = os.environ[cfg_env_name]
68
69 cloud = cloudinit.CloudInit(ds_deps=[]) # ds_deps=[], get only cached
70 try:
71 cloud.get_data_source()
72 except cloudinit.DataSourceNotFoundException as e:
73 # there was no datasource found, theres nothing to do
74 sys.exit(0)
75
76 cc = CC.CloudConfig(cfg_path, cloud)
77
78 try:
79 (outfmt, errfmt) = CC.get_output_cfg(cc.cfg, modename)
80 CC.redirect_output(outfmt, errfmt)
81 except Exception as e:
82 err("Failed to get and set output config: %s\n" % e)
83
84 cloudinit.logging_set_from_cfg(cc.cfg)
85 log = logging.getLogger()
86 log.info("cloud-init-cfg %s" % sys.argv[1:])
87
88 module_list = []
89 if name == "all":
90 modlist_cfg_name = "cloud_%s_modules" % modename
91 module_list = CC.read_cc_modules(cc.cfg, modlist_cfg_name)
92 if not len(module_list):
93 err("no modules to run in cloud_config [%s]" % modename, log)
94 sys.exit(0)
95 else:
96 module_list.append([name, freq] + run_args)
97
98 failures = CC.run_cc_modules(cc, module_list, log)
99 if len(failures):
100 err("errors running cloud_config [%s]: %s" % (modename, failures), log)
101 sys.exit(len(failures))
102
103
104def err(msg, log=None):
105 if log:
106 log.error(msg)
107 sys.stderr.write(msg + "\n")
108
109
110def fail(msg, log=None):
111 err(msg, log)
112 sys.exit(1)
113
114if __name__ == '__main__':
115 main()
=== removed file 'cloud-init-query.py'
--- cloud-init-query.py 2012-01-18 14:07:33 +0000
+++ cloud-init-query.py 1970-01-01 00:00:00 +0000
@@ -1,56 +0,0 @@
1#!/usr/bin/python
2# vi: ts=4 expandtab
3#
4# Copyright (C) 2009-2010 Canonical Ltd.
5# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
6#
7# Author: Scott Moser <scott.moser@canonical.com>
8# Author: Juerg Haefliger <juerg.haefliger@hp.com>
9#
10# This program is free software: you can redistribute it and/or modify
11# it under the terms of the GNU General Public License version 3, as
12# published by the Free Software Foundation.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <http://www.gnu.org/licenses/>.
21
22import sys
23import cloudinit
24import cloudinit.CloudConfig
25
26
27def Usage(out=sys.stdout):
28 out.write("Usage: %s name\n" % sys.argv[0])
29
30
31def main():
32 # expect to be called with name of item to fetch
33 if len(sys.argv) != 2:
34 Usage(sys.stderr)
35 sys.exit(1)
36
37 cfg_path = cloudinit.get_ipath_cur("cloud_config")
38 cc = cloudinit.CloudConfig.CloudConfig(cfg_path)
39 data = {
40 'user_data': cc.cloud.get_userdata(),
41 'user_data_raw': cc.cloud.get_userdata_raw(),
42 'instance_id': cc.cloud.get_instance_id(),
43 }
44
45 name = sys.argv[1].replace('-', '_')
46
47 if name not in data:
48 sys.stderr.write("unknown name '%s'. Known values are:\n %s\n" %
49 (sys.argv[1], ' '.join(data.keys())))
50 sys.exit(1)
51
52 print data[name]
53 sys.exit(0)
54
55if __name__ == '__main__':
56 main()
=== removed file 'cloud-init.py'
--- cloud-init.py 2012-04-10 20:08:25 +0000
+++ cloud-init.py 1970-01-01 00:00:00 +0000
@@ -1,229 +0,0 @@
1#!/usr/bin/python
2# vi: ts=4 expandtab
3#
4# Copyright (C) 2009-2010 Canonical Ltd.
5# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
6#
7# Author: Scott Moser <scott.moser@canonical.com>
8# Author: Juerg Haefliger <juerg.haefliger@hp.com>
9#
10# This program is free software: you can redistribute it and/or modify
11# it under the terms of the GNU General Public License version 3, as
12# published by the Free Software Foundation.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <http://www.gnu.org/licenses/>.
21
22import subprocess
23import sys
24
25import cloudinit
26import cloudinit.util as util
27import cloudinit.CloudConfig as CC
28import cloudinit.DataSource as ds
29import cloudinit.netinfo as netinfo
30import time
31import traceback
32import logging
33import errno
34import os
35
36
37def warn(wstr):
38 sys.stderr.write("WARN:%s" % wstr)
39
40
41def main():
42 util.close_stdin()
43
44 cmds = ("start", "start-local")
45 deps = {"start": (ds.DEP_FILESYSTEM, ds.DEP_NETWORK),
46 "start-local": (ds.DEP_FILESYSTEM, )}
47
48 cmd = ""
49 if len(sys.argv) > 1:
50 cmd = sys.argv[1]
51
52 cfg_path = None
53 if len(sys.argv) > 2:
54 # this is really for debugging only
55 # but you can invoke on development system with ./config/cloud.cfg
56 cfg_path = sys.argv[2]
57
58 if not cmd in cmds:
59 sys.stderr.write("bad command %s. use one of %s\n" % (cmd, cmds))
60 sys.exit(1)
61
62 now = time.strftime("%a, %d %b %Y %H:%M:%S %z", time.gmtime())
63 try:
64 uptimef = open("/proc/uptime")
65 uptime = uptimef.read().split(" ")[0]
66 uptimef.close()
67 except IOError as e:
68 warn("unable to open /proc/uptime\n")
69 uptime = "na"
70
71 cmdline_msg = None
72 cmdline_exc = None
73 if cmd == "start":
74 target = "%s.d/%s" % (cloudinit.system_config,
75 "91_kernel_cmdline_url.cfg")
76 if os.path.exists(target):
77 cmdline_msg = "cmdline: %s existed" % target
78 else:
79 cmdline = util.get_cmdline()
80 try:
81 (key, url, content) = cloudinit.get_cmdline_url(
82 cmdline=cmdline)
83 if key and content:
84 util.write_file(target, content, mode=0600)
85 cmdline_msg = ("cmdline: wrote %s from %s, %s" %
86 (target, key, url))
87 elif key:
88 cmdline_msg = ("cmdline: %s, %s had no cloud-config" %
89 (key, url))
90 except Exception:
91 cmdline_exc = ("cmdline: '%s' raised exception\n%s" %
92 (cmdline, traceback.format_exc()))
93 warn(cmdline_exc)
94
95 try:
96 cfg = cloudinit.get_base_cfg(cfg_path)
97 except Exception as e:
98 warn("Failed to get base config. falling back to builtin: %s\n" % e)
99 try:
100 cfg = cloudinit.get_builtin_cfg()
101 except Exception as e:
102 warn("Unable to load builtin config\n")
103 raise
104
105 try:
106 (outfmt, errfmt) = CC.get_output_cfg(cfg, "init")
107 CC.redirect_output(outfmt, errfmt)
108 except Exception as e:
109 warn("Failed to get and set output config: %s\n" % e)
110
111 cloudinit.logging_set_from_cfg(cfg)
112 log = logging.getLogger()
113
114 if cmdline_exc:
115 log.debug(cmdline_exc)
116 elif cmdline_msg:
117 log.debug(cmdline_msg)
118
119 try:
120 cloudinit.initfs()
121 except Exception as e:
122 warn("failed to initfs, likely bad things to come: %s\n" % str(e))
123
124 nonet_path = "%s/%s" % (cloudinit.get_cpath("data"), "no-net")
125
126 if cmd == "start":
127 print netinfo.debug_info()
128
129 stop_files = (cloudinit.get_ipath_cur("obj_pkl"), nonet_path)
130 # if starting as the network start, there are cases
131 # where everything is already done for us, and it makes
132 # most sense to exit early and silently
133 for f in stop_files:
134 try:
135 fp = open(f, "r")
136 fp.close()
137 except:
138 continue
139
140 log.debug("no need for cloud-init start to run (%s)\n", f)
141 sys.exit(0)
142 elif cmd == "start-local":
143 # cache is not instance specific, so it has to be purged
144 # but we want 'start' to benefit from a cache if
145 # a previous start-local populated one
146 manclean = util.get_cfg_option_bool(cfg, 'manual_cache_clean', False)
147 if manclean:
148 log.debug("not purging cache, manual_cache_clean = True")
149 cloudinit.purge_cache(not manclean)
150
151 try:
152 os.unlink(nonet_path)
153 except OSError as e:
154 if e.errno != errno.ENOENT:
155 raise
156
157 msg = "cloud-init %s running: %s. up %s seconds" % (cmd, now, uptime)
158 sys.stderr.write(msg + "\n")
159 sys.stderr.flush()
160
161 log.info(msg)
162
163 cloud = cloudinit.CloudInit(ds_deps=deps[cmd])
164
165 try:
166 cloud.get_data_source()
167 except cloudinit.DataSourceNotFoundException as e:
168 sys.stderr.write("no instance data found in %s\n" % cmd)
169 sys.exit(0)
170
171 # set this as the current instance
172 cloud.set_cur_instance()
173
174 # store the metadata
175 cloud.update_cache()
176
177 msg = "found data source: %s" % cloud.datasource
178 sys.stderr.write(msg + "\n")
179 log.debug(msg)
180
181 # parse the user data (ec2-run-userdata.py)
182 try:
183 ran = cloud.sem_and_run("consume_userdata", cloudinit.per_instance,
184 cloud.consume_userdata, [cloudinit.per_instance], False)
185 if not ran:
186 cloud.consume_userdata(cloudinit.per_always)
187 except:
188 warn("consuming user data failed!\n")
189 raise
190
191 cfg_path = cloudinit.get_ipath_cur("cloud_config")
192 cc = CC.CloudConfig(cfg_path, cloud)
193
194 # if the output config changed, update output and err
195 try:
196 outfmt_orig = outfmt
197 errfmt_orig = errfmt
198 (outfmt, errfmt) = CC.get_output_cfg(cc.cfg, "init")
199 if outfmt_orig != outfmt or errfmt_orig != errfmt:
200 warn("stdout, stderr changing to (%s,%s)" % (outfmt, errfmt))
201 CC.redirect_output(outfmt, errfmt)
202 except Exception as e:
203 warn("Failed to get and set output config: %s\n" % e)
204
205 # send the cloud-config ready event
206 cc_path = cloudinit.get_ipath_cur('cloud_config')
207 cc_ready = cc.cfg.get("cc_ready_cmd",
208 ['initctl', 'emit', 'cloud-config',
209 '%s=%s' % (cloudinit.cfg_env_name, cc_path)])
210 if cc_ready:
211 if isinstance(cc_ready, str):
212 cc_ready = ['sh', '-c', cc_ready]
213 subprocess.Popen(cc_ready).communicate()
214
215 module_list = CC.read_cc_modules(cc.cfg, "cloud_init_modules")
216
217 failures = []
218 if len(module_list):
219 failures = CC.run_cc_modules(cc, module_list, log)
220 else:
221 msg = "no cloud_init_modules to run"
222 sys.stderr.write(msg + "\n")
223 log.debug(msg)
224 sys.exit(0)
225
226 sys.exit(len(failures))
227
228if __name__ == '__main__':
229 main()
=== removed file 'cloudinit/DataSource.py'
--- cloudinit/DataSource.py 2012-03-19 17:33:39 +0000
+++ cloudinit/DataSource.py 1970-01-01 00:00:00 +0000
@@ -1,214 +0,0 @@
1# vi: ts=4 expandtab
2#
3# Copyright (C) 2009-2010 Canonical Ltd.
4# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
5#
6# Author: Scott Moser <scott.moser@canonical.com>
7# Author: Juerg Hafliger <juerg.haefliger@hp.com>
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License version 3, as
11# published by the Free Software Foundation.
12#
13# This program is distributed in the hope that it will be useful,
14# but WITHOUT ANY WARRANTY; without even the implied warranty of
15# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16# GNU General Public License for more details.
17#
18# You should have received a copy of the GNU General Public License
19# along with this program. If not, see <http://www.gnu.org/licenses/>.
20
21
22DEP_FILESYSTEM = "FILESYSTEM"
23DEP_NETWORK = "NETWORK"
24
25import cloudinit.UserDataHandler as ud
26import cloudinit.util as util
27import socket
28
29
30class DataSource:
31 userdata = None
32 metadata = None
33 userdata_raw = None
34 cfgname = ""
35 # system config (passed in from cloudinit,
36 # cloud-config before input from the DataSource)
37 sys_cfg = {}
38 # datasource config, the cloud-config['datasource']['__name__']
39 ds_cfg = {} # datasource config
40
41 def __init__(self, sys_cfg=None):
42 if not self.cfgname:
43 name = str(self.__class__).split(".")[-1]
44 if name.startswith("DataSource"):
45 name = name[len("DataSource"):]
46 self.cfgname = name
47 if sys_cfg:
48 self.sys_cfg = sys_cfg
49
50 self.ds_cfg = util.get_cfg_by_path(self.sys_cfg,
51 ("datasource", self.cfgname), self.ds_cfg)
52
53 def get_userdata(self):
54 if self.userdata == None:
55 self.userdata = ud.preprocess_userdata(self.userdata_raw)
56 return self.userdata
57
58 def get_userdata_raw(self):
59 return(self.userdata_raw)
60
61 # the data sources' config_obj is a cloud-config formated
62 # object that came to it from ways other than cloud-config
63 # because cloud-config content would be handled elsewhere
64 def get_config_obj(self):
65 return({})
66
67 def get_public_ssh_keys(self):
68 keys = []
69 if 'public-keys' not in self.metadata:
70 return([])
71
72 if isinstance(self.metadata['public-keys'], str):
73 return(str(self.metadata['public-keys']).splitlines())
74
75 if isinstance(self.metadata['public-keys'], list):
76 return(self.metadata['public-keys'])
77
78 for _keyname, klist in self.metadata['public-keys'].items():
79 # lp:506332 uec metadata service responds with
80 # data that makes boto populate a string for 'klist' rather
81 # than a list.
82 if isinstance(klist, str):
83 klist = [klist]
84 for pkey in klist:
85 # there is an empty string at the end of the keylist, trim it
86 if pkey:
87 keys.append(pkey)
88
89 return(keys)
90
91 def device_name_to_device(self, _name):
92 # translate a 'name' to a device
93 # the primary function at this point is on ec2
94 # to consult metadata service, that has
95 # ephemeral0: sdb
96 # and return 'sdb' for input 'ephemeral0'
97 return(None)
98
99 def get_locale(self):
100 return('en_US.UTF-8')
101
102 def get_local_mirror(self):
103 return None
104
105 def get_instance_id(self):
106 if 'instance-id' not in self.metadata:
107 return "iid-datasource"
108 return(self.metadata['instance-id'])
109
110 def get_hostname(self, fqdn=False):
111 defdomain = "localdomain"
112 defhost = "localhost"
113
114 domain = defdomain
115 if not 'local-hostname' in self.metadata:
116
117 # this is somewhat questionable really.
118 # the cloud datasource was asked for a hostname
119 # and didn't have one. raising error might be more appropriate
120 # but instead, basically look up the existing hostname
121 toks = []
122
123 hostname = socket.gethostname()
124
125 fqdn = util.get_fqdn_from_hosts(hostname)
126
127 if fqdn and fqdn.find(".") > 0:
128 toks = str(fqdn).split(".")
129 elif hostname:
130 toks = [hostname, defdomain]
131 else:
132 toks = [defhost, defdomain]
133
134 else:
135 # if there is an ipv4 address in 'local-hostname', then
136 # make up a hostname (LP: #475354) in format ip-xx.xx.xx.xx
137 lhost = self.metadata['local-hostname']
138 if is_ipv4(lhost):
139 toks = "ip-%s" % lhost.replace(".", "-")
140 else:
141 toks = lhost.split(".")
142
143 if len(toks) > 1:
144 hostname = toks[0]
145 domain = '.'.join(toks[1:])
146 else:
147 hostname = toks[0]
148
149 if fqdn:
150 return "%s.%s" % (hostname, domain)
151 else:
152 return hostname
153
154
155# return a list of classes that have the same depends as 'depends'
156# iterate through cfg_list, loading "DataSourceCollections" modules
157# and calling their "get_datasource_list".
158# return an ordered list of classes that match
159#
160# - modules must be named "DataSource<item>", where 'item' is an entry
161# in cfg_list
162# - if pkglist is given, it will iterate try loading from that package
163# ie, pkglist=[ "foo", "" ]
164# will first try to load foo.DataSource<item>
165# then DataSource<item>
166def list_sources(cfg_list, depends, pkglist=None):
167 if pkglist is None:
168 pkglist = []
169 retlist = []
170 for ds_coll in cfg_list:
171 for pkg in pkglist:
172 if pkg:
173 pkg = "%s." % pkg
174 try:
175 mod = __import__("%sDataSource%s" % (pkg, ds_coll))
176 if pkg:
177 mod = getattr(mod, "DataSource%s" % ds_coll)
178 lister = getattr(mod, "get_datasource_list")
179 retlist.extend(lister(depends))
180 break
181 except:
182 raise
183 return(retlist)
184
185
186# depends is a list of dependencies (DEP_FILESYSTEM)
187# dslist is a list of 2 item lists
188# dslist = [
189# ( class, ( depends-that-this-class-needs ) )
190# ]
191# it returns a list of 'class' that matched these deps exactly
192# it is a helper function for DataSourceCollections
193def list_from_depends(depends, dslist):
194 retlist = []
195 depset = set(depends)
196 for elem in dslist:
197 (cls, deps) = elem
198 if depset == set(deps):
199 retlist.append(cls)
200 return(retlist)
201
202
203def is_ipv4(instr):
204    """ determine if input string is an IPv4 address. return boolean"""
205 toks = instr.split('.')
206 if len(toks) != 4:
207 return False
208
209 try:
210 toks = [x for x in toks if (int(x) < 256 and int(x) > 0)]
211 except:
212 return False
213
214 return (len(toks) == 4)
2150
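
As a rough, self-contained sketch of the lookup convention used by list_sources and list_from_depends above (the datasource names and package prefix below are illustrative, not taken from a real configuration):

# Illustrative sketch only; mirrors the naming/lookup order described above.
cfg_list = ["NoCloud", "Ec2"]
pkglist = ["cloudinit", ""]

candidates = []
for ds_coll in cfg_list:
    for pkg in pkglist:
        prefix = "%s." % pkg if pkg else ""
        candidates.append("%sDataSource%s" % (prefix, ds_coll))

# candidates == ['cloudinit.DataSourceNoCloud', 'DataSourceNoCloud',
#                'cloudinit.DataSourceEc2', 'DataSourceEc2']
# list_sources() imports the first candidate that loads for each entry and
# calls its get_datasource_list(depends); list_from_depends() then keeps
# only the classes whose declared deps exactly match the requested set.
print(candidates)
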
=== removed file 'cloudinit/UserDataHandler.py'
--- cloudinit/UserDataHandler.py 2012-06-21 15:37:22 +0000
+++ cloudinit/UserDataHandler.py 1970-01-01 00:00:00 +0000
@@ -1,262 +0,0 @@
1# vi: ts=4 expandtab
2#
3# Copyright (C) 2009-2010 Canonical Ltd.
4# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
5#
6# Author: Scott Moser <scott.moser@canonical.com>
7# Author: Juerg Haefliger <juerg.haefliger@hp.com>
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License version 3, as
11# published by the Free Software Foundation.
12#
13# This program is distributed in the hope that it will be useful,
14# but WITHOUT ANY WARRANTY; without even the implied warranty of
15# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16# GNU General Public License for more details.
17#
18# You should have received a copy of the GNU General Public License
19# along with this program. If not, see <http://www.gnu.org/licenses/>.
20
21import email
22
23from email.mime.multipart import MIMEMultipart
24from email.mime.text import MIMEText
25from email.mime.base import MIMEBase
26import yaml
27import cloudinit
28import cloudinit.util as util
29import hashlib
30import urllib
31
32
33starts_with_mappings = {
34 '#include': 'text/x-include-url',
35 '#include-once': 'text/x-include-once-url',
36 '#!': 'text/x-shellscript',
37 '#cloud-config': 'text/cloud-config',
38 '#upstart-job': 'text/upstart-job',
39 '#part-handler': 'text/part-handler',
40 '#cloud-boothook': 'text/cloud-boothook',
41 '#cloud-config-archive': 'text/cloud-config-archive',
42}
43
44
45# if 'string' is compressed, return it decompressed; otherwise return it unchanged
46def decomp_str(string):
47 import StringIO
48 import gzip
49 try:
50 uncomp = gzip.GzipFile(None, "rb", 1, StringIO.StringIO(string)).read()
51 return(uncomp)
52 except:
53 return(string)
54
55
56def do_include(content, appendmsg):
57 import os
58 # is just a list of urls, one per line
59 # also support '#include <url here>'
60 includeonce = False
61 for line in content.splitlines():
62 if line == "#include":
63 continue
64 if line == "#include-once":
65 includeonce = True
66 continue
67 if line.startswith("#include-once"):
68 line = line[len("#include-once"):].lstrip()
69 includeonce = True
70 elif line.startswith("#include"):
71 line = line[len("#include"):].lstrip()
72 if line.startswith("#"):
73 continue
74 if line.strip() == "":
75 continue
76
77        # urls cannot have leading or trailing white space
78 msum = hashlib.md5() # pylint: disable=E1101
79 msum.update(line.strip())
80 includeonce_filename = "%s/urlcache/%s" % (
81 cloudinit.get_ipath_cur("data"), msum.hexdigest())
82 try:
83 if includeonce and os.path.isfile(includeonce_filename):
84 with open(includeonce_filename, "r") as fp:
85 content = fp.read()
86 else:
87 content = urllib.urlopen(line).read()
88 if includeonce:
89 util.write_file(includeonce_filename, content, mode=0600)
90 except Exception:
91 raise
92
93 process_includes(message_from_string(decomp_str(content)), appendmsg)
94
95
96def explode_cc_archive(archive, appendmsg):
97 for ent in yaml.safe_load(archive):
98 # ent can be one of:
99 # dict { 'filename' : 'value', 'content' : 'value', 'type' : 'value' }
100        # filename and type may not be present
101 # or
102 # scalar(payload)
103
104 def_type = "text/cloud-config"
105 if isinstance(ent, str):
106 ent = {'content': ent}
107
108 content = ent.get('content', '')
109 mtype = ent.get('type', None)
110 if mtype == None:
111 mtype = type_from_startswith(content, def_type)
112
113 maintype, subtype = mtype.split('/', 1)
114 if maintype == "text":
115 msg = MIMEText(content, _subtype=subtype)
116 else:
117 msg = MIMEBase(maintype, subtype)
118 msg.set_payload(content)
119
120 if 'filename' in ent:
121 msg.add_header('Content-Disposition', 'attachment',
122 filename=ent['filename'])
123
124 for header in ent.keys():
125 if header in ('content', 'filename', 'type'):
126 continue
127 msg.add_header(header, ent['header'])
128
129 _attach_part(appendmsg, msg)
130
131
132def multi_part_count(outermsg, newcount=None):
133 """
134 Return the number of attachments to this MIMEMultipart by looking
135 at its 'Number-Attachments' header.
136 """
137 nfield = 'Number-Attachments'
138 if nfield not in outermsg:
139 outermsg[nfield] = "0"
140
141 if newcount != None:
142 outermsg.replace_header(nfield, str(newcount))
143
144 return(int(outermsg.get('Number-Attachments', 0)))
145
146
147def _attach_part(outermsg, part):
148 """
149    Attach a part to an outer message. outermsg must be a MIMEMultipart.
150 Modifies a header in outermsg to keep track of number of attachments.
151 """
152 cur = multi_part_count(outermsg)
153 if not part.get_filename(None):
154 part.add_header('Content-Disposition', 'attachment',
155 filename='part-%03d' % (cur + 1))
156 outermsg.attach(part)
157 multi_part_count(outermsg, cur + 1)
158
159
160def type_from_startswith(payload, default=None):
161 # slist is sorted longest first
162 slist = sorted(starts_with_mappings.keys(), key=lambda e: 0 - len(e))
163 for sstr in slist:
164 if payload.startswith(sstr):
165 return(starts_with_mappings[sstr])
166 return default
167
168
169def process_includes(msg, appendmsg=None):
170 if appendmsg == None:
171 appendmsg = MIMEMultipart()
172
173 for part in msg.walk():
174 # multipart/* are just containers
175 if part.get_content_maintype() == 'multipart':
176 continue
177
178 ctype = None
179 ctype_orig = part.get_content_type()
180
181 payload = part.get_payload(decode=True)
182
183 if ctype_orig in ("text/plain", "text/x-not-multipart"):
184 ctype = type_from_startswith(payload)
185
186 if ctype is None:
187 ctype = ctype_orig
188
189 if ctype in ('text/x-include-url', 'text/x-include-once-url'):
190 do_include(payload, appendmsg)
191 continue
192
193 if ctype == "text/cloud-config-archive":
194 explode_cc_archive(payload, appendmsg)
195 continue
196
197 if 'Content-Type' in msg:
198 msg.replace_header('Content-Type', ctype)
199 else:
200 msg['Content-Type'] = ctype
201
202 _attach_part(appendmsg, part)
203
204
205def message_from_string(data, headers=None):
206 if headers is None:
207 headers = {}
208 if "mime-version:" in data[0:4096].lower():
209 msg = email.message_from_string(data)
210 for (key, val) in headers.items():
211 if key in msg:
212 msg.replace_header(key, val)
213 else:
214 msg[key] = val
215 else:
216 mtype = headers.get("Content-Type", "text/x-not-multipart")
217 maintype, subtype = mtype.split("/", 1)
218 msg = MIMEBase(maintype, subtype, *headers)
219 msg.set_payload(data)
220
221 return(msg)
222
223
224# this is heavily wasteful, reads through userdata string input
225def preprocess_userdata(data):
226 newmsg = MIMEMultipart()
227 process_includes(message_from_string(decomp_str(data)), newmsg)
228 return(newmsg.as_string())
229
230
231# callback is a function that will be called with (data, content_type,
232# filename, payload)
233def walk_userdata(istr, callback, data=None):
234 partnum = 0
235 for part in message_from_string(istr).walk():
236 # multipart/* are just containers
237 if part.get_content_maintype() == 'multipart':
238 continue
239
240 ctype = part.get_content_type()
241 if ctype is None:
242 ctype = 'application/octet-stream'
243
244 filename = part.get_filename()
245 if not filename:
246 filename = 'part-%03d' % partnum
247
248 callback(data, ctype, filename, part.get_payload(decode=True))
249
250 partnum = partnum + 1
251
252
253if __name__ == "__main__":
254 def main():
255 import sys
256 data = decomp_str(file(sys.argv[1]).read())
257 newmsg = MIMEMultipart()
258 process_includes(message_from_string(data), newmsg)
259 print newmsg
260 print "#found %s parts" % multi_part_count(newmsg)
261
262 main()
2630
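
For quick reference, a standalone sketch of the longest-prefix match that type_from_startswith performs on raw user-data; the mapping mirrors the table above, and the sample payloads are made up:

# Sketch of the longest-prefix lookup used above; not the real module.
STARTS_WITH_MAPPINGS = {
    '#include': 'text/x-include-url',
    '#include-once': 'text/x-include-once-url',
    '#!': 'text/x-shellscript',
    '#cloud-config': 'text/cloud-config',
    '#upstart-job': 'text/upstart-job',
    '#part-handler': 'text/part-handler',
    '#cloud-boothook': 'text/cloud-boothook',
    '#cloud-config-archive': 'text/cloud-config-archive',
}

def type_from_startswith(payload, default=None):
    # check the longest prefixes first so '#include-once' wins over '#include'
    for prefix in sorted(STARTS_WITH_MAPPINGS, key=len, reverse=True):
        if payload.startswith(prefix):
            return STARTS_WITH_MAPPINGS[prefix]
    return default

assert type_from_startswith("#!/bin/sh\necho hi") == 'text/x-shellscript'
assert type_from_startswith("#include-once\nhttp://example.com/x") == \
    'text/x-include-once-url'
assert type_from_startswith("plain text", 'text/plain') == 'text/plain'
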
=== modified file 'cloudinit/__init__.py'
--- cloudinit/__init__.py 2012-06-28 17:10:56 +0000
+++ cloudinit/__init__.py 2012-07-06 21:16:18 +0000
@@ -1,11 +1,12 @@
1# vi: ts=4 expandtab1# vi: ts=4 expandtab
2#2#
3# Common code for the EC2 initialisation scripts in Ubuntu3# Copyright (C) 2012 Canonical Ltd.
4# Copyright (C) 2008-2009 Canonical Ltd
5# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.4# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
5# Copyright (C) 2012 Yahoo! Inc.
6#6#
7# Author: Soren Hansen <soren@canonical.com>7# Author: Scott Moser <scott.moser@canonical.com>
8# Author: Juerg Haefliger <juerg.haefliger@hp.com>8# Author: Juerg Haefliger <juerg.haefliger@hp.com>
9# Author: Joshua Harlow <harlowja@yahoo-inc.com>
9#10#
10# This program is free software: you can redistribute it and/or modify11# This program is free software: you can redistribute it and/or modify
11# it under the terms of the GNU General Public License version 3, as12# it under the terms of the GNU General Public License version 3, as
@@ -18,650 +19,3 @@
18#19#
19# You should have received a copy of the GNU General Public License20# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <http://www.gnu.org/licenses/>.21# along with this program. If not, see <http://www.gnu.org/licenses/>.
21#
22
23varlibdir = '/var/lib/cloud'
24cur_instance_link = varlibdir + "/instance"
25boot_finished = cur_instance_link + "/boot-finished"
26system_config = '/etc/cloud/cloud.cfg'
27seeddir = varlibdir + "/seed"
28cfg_env_name = "CLOUD_CFG"
29
30cfg_builtin = """
31log_cfgs: []
32datasource_list: ["NoCloud", "ConfigDrive", "OVF", "MAAS", "Ec2", "CloudStack"]
33def_log_file: /var/log/cloud-init.log
34syslog_fix_perms: syslog:adm
35"""
36logger_name = "cloudinit"
37
38pathmap = {
39 "handlers": "/handlers",
40 "scripts": "/scripts",
41 "sem": "/sem",
42 "boothooks": "/boothooks",
43 "userdata_raw": "/user-data.txt",
44 "userdata": "/user-data.txt.i",
45 "obj_pkl": "/obj.pkl",
46 "cloud_config": "/cloud-config.txt",
47 "data": "/data",
48 None: "",
49}
50
51per_instance = "once-per-instance"
52per_always = "always"
53per_once = "once"
54
55parsed_cfgs = {}
56
57import os
58
59import cPickle
60import sys
61import os.path
62import errno
63import subprocess
64import yaml
65import logging
66import logging.config
67import StringIO
68import glob
69import traceback
70
71import cloudinit.util as util
72
73
74class NullHandler(logging.Handler):
75 def emit(self, record):
76 pass
77
78
79log = logging.getLogger(logger_name)
80log.addHandler(NullHandler())
81
82
83def logging_set_from_cfg_file(cfg_file=system_config):
84 logging_set_from_cfg(util.get_base_cfg(cfg_file, cfg_builtin, parsed_cfgs))
85
86
87def logging_set_from_cfg(cfg):
88 log_cfgs = []
89 logcfg = util.get_cfg_option_str(cfg, "log_cfg", False)
90 if logcfg:
91 # if there is a 'logcfg' entry in the config, respect
92 # it, it is the old keyname
93 log_cfgs = [logcfg]
94 elif "log_cfgs" in cfg:
95 for cfg in cfg['log_cfgs']:
96 if isinstance(cfg, list):
97 log_cfgs.append('\n'.join(cfg))
98 else:
99 log_cfgs.append()
100
101 if not len(log_cfgs):
102 sys.stderr.write("Warning, no logging configured\n")
103 return
104
105 for logcfg in log_cfgs:
106 try:
107 logging.config.fileConfig(StringIO.StringIO(logcfg))
108 return
109 except:
110 pass
111
112 raise Exception("no valid logging found\n")
113
114
115import cloudinit.DataSource as DataSource
116import cloudinit.UserDataHandler as UserDataHandler
117
118
119class CloudInit:
120 cfg = None
121 part_handlers = {}
122 old_conffile = '/etc/ec2-init/ec2-config.cfg'
123 ds_deps = [DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK]
124 datasource = None
125 cloud_config_str = ''
126 datasource_name = ''
127
128 builtin_handlers = []
129
130 def __init__(self, ds_deps=None, sysconfig=system_config):
131 self.builtin_handlers = [
132 ['text/x-shellscript', self.handle_user_script, per_always],
133 ['text/cloud-config', self.handle_cloud_config, per_always],
134 ['text/upstart-job', self.handle_upstart_job, per_instance],
135 ['text/cloud-boothook', self.handle_cloud_boothook, per_always],
136 ]
137
138 if ds_deps != None:
139 self.ds_deps = ds_deps
140
141 self.sysconfig = sysconfig
142
143 self.cfg = self.read_cfg()
144
145 def read_cfg(self):
146 if self.cfg:
147 return(self.cfg)
148
149 try:
150 conf = util.get_base_cfg(self.sysconfig, cfg_builtin, parsed_cfgs)
151 except Exception:
152 conf = get_builtin_cfg()
153
154 # support reading the old ConfigObj format file and merging
155 # it into the yaml dictionary
156 try:
157 from configobj import ConfigObj
158 oldcfg = ConfigObj(self.old_conffile)
159 if oldcfg is None:
160 oldcfg = {}
161 conf = util.mergedict(conf, oldcfg)
162 except:
163 pass
164
165 return(conf)
166
167 def restore_from_cache(self):
168 try:
169 # we try to restore from a current link and static path
170            # by using the instance link; if purge_cache was called
171            # the file won't exist
172 cache = get_ipath_cur('obj_pkl')
173 f = open(cache, "rb")
174 data = cPickle.load(f)
175 f.close()
176 self.datasource = data
177 return True
178 except:
179 return False
180
181 def write_to_cache(self):
182 cache = self.get_ipath("obj_pkl")
183 try:
184 os.makedirs(os.path.dirname(cache))
185 except OSError as e:
186 if e.errno != errno.EEXIST:
187 return False
188
189 try:
190 f = open(cache, "wb")
191 cPickle.dump(self.datasource, f)
192 f.close()
193 os.chmod(cache, 0400)
194 except:
195 raise
196
197 def get_data_source(self):
198 if self.datasource is not None:
199 return True
200
201 if self.restore_from_cache():
202 log.debug("restored from cache type %s" % self.datasource)
203 return True
204
205 cfglist = self.cfg['datasource_list']
206 dslist = list_sources(cfglist, self.ds_deps)
207 dsnames = [f.__name__ for f in dslist]
208
209 log.debug("searching for data source in %s" % dsnames)
210 for cls in dslist:
211 ds = cls.__name__
212 try:
213 s = cls(sys_cfg=self.cfg)
214 if s.get_data():
215 self.datasource = s
216 self.datasource_name = ds
217 log.debug("found data source %s" % ds)
218 return True
219 except Exception as e:
220 log.warn("get_data of %s raised %s" % (ds, e))
221 util.logexc(log)
222 msg = "Did not find data source. searched classes: %s" % dsnames
223 log.debug(msg)
224 raise DataSourceNotFoundException(msg)
225
226 def set_cur_instance(self):
227 try:
228 os.unlink(cur_instance_link)
229 except OSError as e:
230 if e.errno != errno.ENOENT:
231 raise
232
233 iid = self.get_instance_id()
234 os.symlink("./instances/%s" % iid, cur_instance_link)
235 idir = self.get_ipath()
236 dlist = []
237 for d in ["handlers", "scripts", "sem"]:
238 dlist.append("%s/%s" % (idir, d))
239
240 util.ensure_dirs(dlist)
241
242 ds = "%s: %s\n" % (self.datasource.__class__, str(self.datasource))
243 dp = self.get_cpath('data')
244 util.write_file("%s/%s" % (idir, 'datasource'), ds)
245 util.write_file("%s/%s" % (dp, 'previous-datasource'), ds)
246 util.write_file("%s/%s" % (dp, 'previous-instance-id'), "%s\n" % iid)
247
248 def get_userdata(self):
249 return(self.datasource.get_userdata())
250
251 def get_userdata_raw(self):
252 return(self.datasource.get_userdata_raw())
253
254 def get_instance_id(self):
255 return(self.datasource.get_instance_id())
256
257 def update_cache(self):
258 self.write_to_cache()
259 self.store_userdata()
260
261 def store_userdata(self):
262 util.write_file(self.get_ipath('userdata_raw'),
263 self.datasource.get_userdata_raw(), 0600)
264 util.write_file(self.get_ipath('userdata'),
265 self.datasource.get_userdata(), 0600)
266
267 def sem_getpath(self, name, freq):
268 if freq == 'once-per-instance':
269 return("%s/%s" % (self.get_ipath("sem"), name))
270 return("%s/%s.%s" % (get_cpath("sem"), name, freq))
271
272 def sem_has_run(self, name, freq):
273 if freq == per_always:
274 return False
275 semfile = self.sem_getpath(name, freq)
276 if os.path.exists(semfile):
277 return True
278 return False
279
280 def sem_acquire(self, name, freq):
281 from time import time
282 semfile = self.sem_getpath(name, freq)
283
284 try:
285 os.makedirs(os.path.dirname(semfile))
286 except OSError as e:
287 if e.errno != errno.EEXIST:
288 raise e
289
290 if os.path.exists(semfile) and freq != per_always:
291 return False
292
293 # race condition
294 try:
295 f = open(semfile, "w")
296 f.write("%s\n" % str(time()))
297 f.close()
298 except:
299 return(False)
300 return(True)
301
302 def sem_clear(self, name, freq):
303 semfile = self.sem_getpath(name, freq)
304 try:
305 os.unlink(semfile)
306 except OSError as e:
307 if e.errno != errno.ENOENT:
308 return False
309
310 return True
311
312 # acquire lock on 'name' for given 'freq'
313 # if that does not exist, then call 'func' with given 'args'
314 # if 'clear_on_fail' is True and func throws an exception
315 # then remove the lock (so it would run again)
316 def sem_and_run(self, semname, freq, func, args=None, clear_on_fail=False):
317 if args is None:
318 args = []
319 if self.sem_has_run(semname, freq):
320 log.debug("%s already ran %s", semname, freq)
321 return False
322 try:
323 if not self.sem_acquire(semname, freq):
324 raise Exception("Failed to acquire lock on %s" % semname)
325
326 func(*args)
327 except:
328 if clear_on_fail:
329 self.sem_clear(semname, freq)
330 raise
331
332 return True
333
334 # get_ipath : get the instance path for a name in pathmap
335    # (/var/lib/cloud/instances/<instance>/<name>)
336 def get_ipath(self, name=None):
337 return("%s/instances/%s%s"
338 % (varlibdir, self.get_instance_id(), pathmap[name]))
339
340 def consume_userdata(self, frequency=per_instance):
341 self.get_userdata()
342 data = self
343
344 cdir = get_cpath("handlers")
345 idir = self.get_ipath("handlers")
346
347 # add the path to the plugins dir to the top of our list for import
348 # instance dir should be read before cloud-dir
349 sys.path.insert(0, cdir)
350 sys.path.insert(0, idir)
351
352 part_handlers = {}
353 # add handlers in cdir
354 for fname in glob.glob("%s/*.py" % cdir):
355 if not os.path.isfile(fname):
356 continue
357 modname = os.path.basename(fname)[0:-3]
358 try:
359 mod = __import__(modname)
360 handler_register(mod, part_handlers, data, frequency)
361 log.debug("added handler for [%s] from %s" % (mod.list_types(),
362 fname))
363 except:
364 log.warn("failed to initialize handler in %s" % fname)
365 util.logexc(log)
366
367        # add the internal handlers if their type hasn't already been claimed
368 for (btype, bhand, bfreq) in self.builtin_handlers:
369 if btype in part_handlers:
370 continue
371 handler_register(InternalPartHandler(bhand, [btype], bfreq),
372 part_handlers, data, frequency)
373
374 # walk the data
375 pdata = {'handlers': part_handlers, 'handlerdir': idir,
376 'data': data, 'frequency': frequency}
377 UserDataHandler.walk_userdata(self.get_userdata(),
378 partwalker_callback, data=pdata)
379
380 # give callbacks opportunity to finalize
381 called = []
382 for (_mtype, mod) in part_handlers.iteritems():
383 if mod in called:
384 continue
385 handler_call_end(mod, data, frequency)
386
387 def handle_user_script(self, _data, ctype, filename, payload, _frequency):
388 if ctype == "__end__":
389 return
390 if ctype == "__begin__":
391 # maybe delete existing things here
392 return
393
394 filename = filename.replace(os.sep, '_')
395 scriptsdir = get_ipath_cur('scripts')
396 util.write_file("%s/%s" %
397 (scriptsdir, filename), util.dos2unix(payload), 0700)
398
399 def handle_upstart_job(self, _data, ctype, filename, payload, frequency):
400 # upstart jobs are only written on the first boot
401 if frequency != per_instance:
402 return
403
404 if ctype == "__end__" or ctype == "__begin__":
405 return
406 if not filename.endswith(".conf"):
407 filename = filename + ".conf"
408
409 util.write_file("%s/%s" % ("/etc/init", filename),
410 util.dos2unix(payload), 0644)
411
412 def handle_cloud_config(self, _data, ctype, filename, payload, _frequency):
413 if ctype == "__begin__":
414 self.cloud_config_str = ""
415 return
416 if ctype == "__end__":
417 cloud_config = self.get_ipath("cloud_config")
418 util.write_file(cloud_config, self.cloud_config_str, 0600)
419
420 ## this could merge the cloud config with the system config
421 ## for now, not doing this as it seems somewhat circular
422 ## as CloudConfig does that also, merging it with this cfg
423 ##
424 # ccfg = yaml.safe_load(self.cloud_config_str)
425 # if ccfg is None: ccfg = {}
426 # self.cfg = util.mergedict(ccfg, self.cfg)
427
428 return
429
430 self.cloud_config_str += "\n#%s\n%s" % (filename, payload)
431
432 def handle_cloud_boothook(self, _data, ctype, filename, payload,
433 _frequency):
434 if ctype == "__end__":
435 return
436 if ctype == "__begin__":
437 return
438
439 filename = filename.replace(os.sep, '_')
440 payload = util.dos2unix(payload)
441 prefix = "#cloud-boothook"
442 start = 0
443 if payload.startswith(prefix):
444 start = len(prefix) + 1
445
446 boothooks_dir = self.get_ipath("boothooks")
447 filepath = "%s/%s" % (boothooks_dir, filename)
448 util.write_file(filepath, payload[start:], 0700)
449 try:
450 env = os.environ.copy()
451 env['INSTANCE_ID'] = self.datasource.get_instance_id()
452 subprocess.check_call([filepath], env=env)
453 except subprocess.CalledProcessError as e:
454 log.error("boothooks script %s returned %i" %
455 (filepath, e.returncode))
456 except Exception as e:
457 log.error("boothooks unknown exception %s when running %s" %
458 (e, filepath))
459
460 def get_public_ssh_keys(self):
461 return(self.datasource.get_public_ssh_keys())
462
463 def get_locale(self):
464 return(self.datasource.get_locale())
465
466 def get_mirror(self):
467 return(self.datasource.get_local_mirror())
468
469 def get_hostname(self, fqdn=False):
470 return(self.datasource.get_hostname(fqdn=fqdn))
471
472 def device_name_to_device(self, name):
473 return(self.datasource.device_name_to_device(name))
474
475 # I really don't know if this should be here or not, but
476 # I needed it in cc_update_hostname, where that code had a valid 'cloud'
477 # reference, but did not have a cloudinit handle
478 # (ie, no cloudinit.get_cpath())
479 def get_cpath(self, name=None):
480 return(get_cpath(name))
481
482
483def initfs():
484 subds = ['scripts/per-instance', 'scripts/per-once', 'scripts/per-boot',
485 'seed', 'instances', 'handlers', 'sem', 'data']
486 dlist = []
487 for subd in subds:
488 dlist.append("%s/%s" % (varlibdir, subd))
489 util.ensure_dirs(dlist)
490
491 cfg = util.get_base_cfg(system_config, cfg_builtin, parsed_cfgs)
492 log_file = util.get_cfg_option_str(cfg, 'def_log_file', None)
493 perms = util.get_cfg_option_str(cfg, 'syslog_fix_perms', None)
494 if log_file:
495 fp = open(log_file, "ab")
496 fp.close()
497 if log_file and perms:
498 (u, g) = perms.split(':', 1)
499 if u == "-1" or u == "None":
500 u = None
501 if g == "-1" or g == "None":
502 g = None
503 util.chownbyname(log_file, u, g)
504
505
506def purge_cache(rmcur=True):
507 rmlist = [boot_finished]
508 if rmcur:
509 rmlist.append(cur_instance_link)
510 for f in rmlist:
511 try:
512 os.unlink(f)
513 except OSError as e:
514 if e.errno == errno.ENOENT:
515 continue
516 return(False)
517 except:
518 return(False)
519 return(True)
520
521
522# get_ipath_cur: get the current instance path for an item
523def get_ipath_cur(name=None):
524 return("%s/%s%s" % (varlibdir, "instance", pathmap[name]))
525
526
527# get_cpath : get the "clouddir" (/var/lib/cloud/<name>)
528# for a name in dirmap
529def get_cpath(name=None):
530 return("%s%s" % (varlibdir, pathmap[name]))
531
532
533def get_base_cfg(cfg_path=None):
534 if cfg_path is None:
535 cfg_path = system_config
536 return(util.get_base_cfg(cfg_path, cfg_builtin, parsed_cfgs))
537
538
539def get_builtin_cfg():
540 return(yaml.safe_load(cfg_builtin))
541
542
543class DataSourceNotFoundException(Exception):
544 pass
545
546
547def list_sources(cfg_list, depends):
548 return(DataSource.list_sources(cfg_list, depends, ["cloudinit", ""]))
549
550
551def handler_register(mod, part_handlers, data, frequency=per_instance):
552 if not hasattr(mod, "handler_version"):
553 setattr(mod, "handler_version", 1)
554
555 for mtype in mod.list_types():
556 part_handlers[mtype] = mod
557
558 handler_call_begin(mod, data, frequency)
559 return(mod)
560
561
562def handler_call_begin(mod, data, frequency):
563 handler_handle_part(mod, data, "__begin__", None, None, frequency)
564
565
566def handler_call_end(mod, data, frequency):
567 handler_handle_part(mod, data, "__end__", None, None, frequency)
568
569
570def handler_handle_part(mod, data, ctype, filename, payload, frequency):
571 # only add the handler if the module should run
572 modfreq = getattr(mod, "frequency", per_instance)
573 if not (modfreq == per_always or
574 (frequency == per_instance and modfreq == per_instance)):
575 return
576 try:
577 if mod.handler_version == 1:
578 mod.handle_part(data, ctype, filename, payload)
579 else:
580 mod.handle_part(data, ctype, filename, payload, frequency)
581 except:
582 util.logexc(log)
583 traceback.print_exc(file=sys.stderr)
584
585
586def partwalker_handle_handler(pdata, _ctype, _filename, payload):
587 curcount = pdata['handlercount']
588 modname = 'part-handler-%03d' % curcount
589 frequency = pdata['frequency']
590
591 modfname = modname + ".py"
592 util.write_file("%s/%s" % (pdata['handlerdir'], modfname), payload, 0600)
593
594 try:
595 mod = __import__(modname)
596 handler_register(mod, pdata['handlers'], pdata['data'], frequency)
597 pdata['handlercount'] = curcount + 1
598 except:
599 util.logexc(log)
600 traceback.print_exc(file=sys.stderr)
601
602
603def partwalker_callback(pdata, ctype, filename, payload):
604 # data here is the part_handlers array and then the data to pass through
605 if ctype == "text/part-handler":
606 if 'handlercount' not in pdata:
607 pdata['handlercount'] = 0
608 partwalker_handle_handler(pdata, ctype, filename, payload)
609 return
610 if ctype not in pdata['handlers'] and payload:
611 if ctype == "text/x-not-multipart":
612 # Extract the first line or 24 bytes for displaying in the log
613 start = payload.split("\n", 1)[0][:24]
614 if start < payload:
615 details = "starting '%s...'" % start.encode("string-escape")
616 else:
617 details = repr(payload)
618 log.warning("Unhandled non-multipart userdata %s", details)
619 return
620 handler_handle_part(pdata['handlers'][ctype], pdata['data'],
621 ctype, filename, payload, pdata['frequency'])
622
623
624class InternalPartHandler:
625 freq = per_instance
626 mtypes = []
627 handler_version = 1
628 handler = None
629
630 def __init__(self, handler, mtypes, frequency, version=2):
631 self.handler = handler
632 self.mtypes = mtypes
633 self.frequency = frequency
634 self.handler_version = version
635
636 def __repr__(self):
637 return("InternalPartHandler: [%s]" % self.mtypes)
638
639 def list_types(self):
640 return(self.mtypes)
641
642 def handle_part(self, data, ctype, filename, payload, frequency):
643 return(self.handler(data, ctype, filename, payload, frequency))
644
645
646def get_cmdline_url(names=('cloud-config-url', 'url'),
647 starts="#cloud-config", cmdline=None):
648
649 if cmdline == None:
650 cmdline = util.get_cmdline()
651
652 data = util.keyval_str_to_dict(cmdline)
653 url = None
654 key = None
655 for key in names:
656 if key in data:
657 url = data[key]
658 break
659 if url == None:
660 return (None, None, None)
661
662 contents = util.readurl(url)
663
664 if contents.startswith(starts):
665 return (key, url, contents)
666
667 return (key, url, None)
66822
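
The handler plumbing above (handler_register, handler_call_begin/end, handler_handle_part) expects a part-handler module to expose list_types() and handle_part(), plus optional frequency and handler_version attributes. A minimal sketch of such a handler follows; the MIME type and the body are invented for illustration only:

# Minimal part-handler sketch matching the interface used above.
# Version 2 handlers receive the extra 'frequency' argument.
handler_version = 2
frequency = "always"

def list_types():
    # MIME types this handler claims; 'text/x-example' is hypothetical.
    return ['text/x-example']

def handle_part(data, ctype, filename, payload, frequency):
    if ctype in ('__begin__', '__end__'):
        # called once before and once after the parts are walked
        return
    print("saw part %s (%s), %d bytes" % (filename, ctype, len(payload or '')))
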
=== added file 'cloudinit/cloud.py'
--- cloudinit/cloud.py 1970-01-01 00:00:00 +0000
+++ cloudinit/cloud.py 2012-07-06 21:16:18 +0000
@@ -0,0 +1,101 @@
1# vi: ts=4 expandtab
2#
3# Copyright (C) 2012 Canonical Ltd.
4# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
5# Copyright (C) 2012 Yahoo! Inc.
6#
7# Author: Scott Moser <scott.moser@canonical.com>
8# Author: Juerg Haefliger <juerg.haefliger@hp.com>
9# Author: Joshua Harlow <harlowja@yahoo-inc.com>
10#
11# This program is free software: you can redistribute it and/or modify
12# it under the terms of the GNU General Public License version 3, as
13# published by the Free Software Foundation.
14#
15# This program is distributed in the hope that it will be useful,
16# but WITHOUT ANY WARRANTY; without even the implied warranty of
17# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18# GNU General Public License for more details.
19#
20# You should have received a copy of the GNU General Public License
21# along with this program. If not, see <http://www.gnu.org/licenses/>.
22
23import copy
24import os
25
26from cloudinit import log as logging
27
28LOG = logging.getLogger(__name__)
29
30# This class is the high level wrapper that provides
31# access to cloud-init objects without exposing the stage objects
32# to handler and/or module manipulation. It allows cloud-init
33# to restrict what those types of user-facing code may see
34# and/or adjust (which helps avoid code messing with each other)
35#
36# It also provides util functions that avoid having to know
37# how to get a certain member from its submembers, as well
38# as providing a backwards compatible object that can be maintained
39# while the stages/other objects can be worked on independently...
40
41
42class Cloud(object):
43 def __init__(self, datasource, paths, cfg, distro, runners):
44 self.datasource = datasource
45 self.paths = paths
46 self.distro = distro
47 self._cfg = cfg
48 self._runners = runners
49
50 # If a 'user' manipulates logging or logging services
51 # it is typically useful to cause the logging to be
52 # setup again.
53 def cycle_logging(self):
54 logging.resetLogging()
55 logging.setupLogging(self.cfg)
56
57 @property
58 def cfg(self):
59        # Ensure that it is not indirectly modified
60 return copy.deepcopy(self._cfg)
61
62 def run(self, name, functor, args, freq=None, clear_on_fail=False):
63 return self._runners.run(name, functor, args, freq, clear_on_fail)
64
65 def get_template_filename(self, name):
66 fn = self.paths.template_tpl % (name)
67 if not os.path.isfile(fn):
68 LOG.warn("No template found at %s for template named %s", fn, name)
69 return None
70 return fn
71
72    # The rest of these are just useful proxies
73 def get_userdata(self):
74 return self.datasource.get_userdata()
75
76 def get_instance_id(self):
77 return self.datasource.get_instance_id()
78
79 def get_public_ssh_keys(self):
80 return self.datasource.get_public_ssh_keys()
81
82 def get_locale(self):
83 return self.datasource.get_locale()
84
85 def get_local_mirror(self):
86 return self.datasource.get_local_mirror()
87
88 def get_hostname(self, fqdn=False):
89 return self.datasource.get_hostname(fqdn=fqdn)
90
91 def device_name_to_device(self, name):
92 return self.datasource.device_name_to_device(name)
93
94 def get_ipath_cur(self, name=None):
95 return self.paths.get_ipath_cur(name)
96
97 def get_cpath(self, name=None):
98 return self.paths.get_cpath(name)
99
100 def get_ipath(self, name=None):
101 return self.paths.get_ipath(name)
0102
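
To show how user-facing code is meant to go through this wrapper rather than the stage objects, here is a hedged sketch of a config-module handler; the module name and config key are hypothetical, and only methods defined on Cloud above are used:

# Hypothetical cc_example handler; 'cloud' is a Cloud instance.
def handle(name, cfg, cloud, log, _args):
    if 'example' not in cfg:
        log.debug("Skipping module named %s, no 'example' key", name)
        return
    hostname = cloud.get_hostname(fqdn=True)
    data_dir = cloud.get_ipath_cur('data')
    log.debug("%s running for %s, instance data under %s",
              name, hostname, data_dir)
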
=== renamed directory 'cloudinit/CloudConfig' => 'cloudinit/config'
=== modified file 'cloudinit/config/__init__.py'
--- cloudinit/CloudConfig/__init__.py 2012-06-13 13:11:27 +0000
+++ cloudinit/config/__init__.py 2012-07-06 21:16:18 +0000
@@ -19,256 +19,38 @@
19# along with this program. If not, see <http://www.gnu.org/licenses/>.19# along with this program. If not, see <http://www.gnu.org/licenses/>.
20#20#
2121
22import yaml22from cloudinit.settings import (PER_INSTANCE, FREQUENCIES)
23import cloudinit23
24import cloudinit.util as util24from cloudinit import log as logging
25import sys25
26import traceback26LOG = logging.getLogger(__name__)
27import os27
28import subprocess28# This prefix is used to make it less
29import time29# of a chance that when importing
3030# we will not find something else with the same
31per_instance = cloudinit.per_instance31# name in the lookup path...
32per_always = cloudinit.per_always32MOD_PREFIX = "cc_"
33per_once = cloudinit.per_once33
3434
3535def form_module_name(name):
36class CloudConfig():36 canon_name = name.replace("-", "_")
37 cfgfile = None37 if canon_name.lower().endswith(".py"):
38 cfg = None38 canon_name = canon_name[0:(len(canon_name) - 3)]
3939 canon_name = canon_name.strip()
40 def __init__(self, cfgfile, cloud=None, ds_deps=None):40 if not canon_name:
41 if cloud == None:41 return None
42 self.cloud = cloudinit.CloudInit(ds_deps)42 if not canon_name.startswith(MOD_PREFIX):
43 self.cloud.get_data_source()43 canon_name = '%s%s' % (MOD_PREFIX, canon_name)
44 else:44 return canon_name
45 self.cloud = cloud45
46 self.cfg = self.get_config_obj(cfgfile)46
4747def fixup_module(mod, def_freq=PER_INSTANCE):
48 def get_config_obj(self, cfgfile):48 if not hasattr(mod, 'frequency'):
49 try:49 setattr(mod, 'frequency', def_freq)
50 cfg = util.read_conf(cfgfile)
51 except:
52 # TODO: this 'log' could/should be passed in
53 cloudinit.log.critical("Failed loading of cloud config '%s'. "
54 "Continuing with empty config\n" % cfgfile)
55 cloudinit.log.debug(traceback.format_exc() + "\n")
56 cfg = None
57 if cfg is None:
58 cfg = {}
59
60 try:
61 ds_cfg = self.cloud.datasource.get_config_obj()
62 except:
63 ds_cfg = {}
64
65 cfg = util.mergedict(cfg, ds_cfg)
66 return(util.mergedict(cfg, self.cloud.cfg))
67
68 def handle(self, name, args, freq=None):
69 try:
70 mod = __import__("cc_" + name.replace("-", "_"), globals())
71 def_freq = getattr(mod, "frequency", per_instance)
72 handler = getattr(mod, "handle")
73
74 if not freq:
75 freq = def_freq
76
77 self.cloud.sem_and_run("config-" + name, freq, handler,
78 [name, self.cfg, self.cloud, cloudinit.log, args])
79 except:
80 raise
81
82
83# reads a cloudconfig module list, returns
84# a 2 dimensional array suitable to pass to run_cc_modules
85def read_cc_modules(cfg, name):
86 if name not in cfg:
87 return([])
88 module_list = []
89 # create 'module_list', an array of arrays
90    # where array[0] = config module name
91 # array[1] = freq
92    # array[2:] = arguments
93 for item in cfg[name]:
94 if isinstance(item, str):
95 module_list.append((item,))
96 elif isinstance(item, list):
97 module_list.append(item)
98 else:
99 raise TypeError("failed to read '%s' item in config")
100 return(module_list)
101
102
103def run_cc_modules(cc, module_list, log):
104 failures = []
105 for cfg_mod in module_list:
106 name = cfg_mod[0]
107 freq = None
108 run_args = []
109 if len(cfg_mod) > 1:
110 freq = cfg_mod[1]
111 if len(cfg_mod) > 2:
112 run_args = cfg_mod[2:]
113
114 try:
115 log.debug("handling %s with freq=%s and args=%s" %
116 (name, freq, run_args))
117 cc.handle(name, run_args, freq=freq)
118 except:
119 log.warn(traceback.format_exc())
120 log.error("config handling of %s, %s, %s failed\n" %
121 (name, freq, run_args))
122 failures.append(name)
123
124 return(failures)
125
126
127# always returns well formatted values
128# cfg is expected to have an entry 'output' in it, which is a dictionary
129# that includes entries for 'init', 'config', 'final' or 'all'
130# init: /var/log/cloud.out
131# config: [ ">> /var/log/cloud-config.out", /var/log/cloud-config.err ]
132# final:
133# output: "| logger -p"
134# error: "> /dev/null"
135# this returns the specific 'mode' entry, cleanly formatted, with value
136# None if none is given
137def get_output_cfg(cfg, mode="init"):
138 ret = [None, None]
139 if not 'output' in cfg:
140 return ret
141
142 outcfg = cfg['output']
143 if mode in outcfg:
144 modecfg = outcfg[mode]
145 else:50 else:
146 if 'all' not in outcfg:51 freq = mod.frequency
147 return ret52 if freq and freq not in FREQUENCIES:
148 # if there is a 'all' item in the output list53 LOG.warn("Module %s has an unknown frequency %s", mod, freq)
149 # then it applies to all users of this (init, config, final)54 if not hasattr(mod, 'distros'):
150 modecfg = outcfg['all']55 setattr(mod, 'distros', None)
15156 return mod
152 # if value is a string, it specifies stdout and stderr
153 if isinstance(modecfg, str):
154 ret = [modecfg, modecfg]
155
156 # if its a list, then we expect (stdout, stderr)
157 if isinstance(modecfg, list):
158 if len(modecfg) > 0:
159 ret[0] = modecfg[0]
160 if len(modecfg) > 1:
161 ret[1] = modecfg[1]
162
163    # if it is a dictionary, expect 'output' and 'error'
164 # items, which indicate out and error
165 if isinstance(modecfg, dict):
166 if 'output' in modecfg:
167 ret[0] = modecfg['output']
168 if 'error' in modecfg:
169 ret[1] = modecfg['error']
170
171 # if err's entry == "&1", then make it same as stdout
172 # as in shell syntax of "echo foo >/dev/null 2>&1"
173 if ret[1] == "&1":
174 ret[1] = ret[0]
175
176 swlist = [">>", ">", "|"]
177 for i in range(len(ret)):
178 if not ret[i]:
179 continue
180 val = ret[i].lstrip()
181 found = False
182 for s in swlist:
183 if val.startswith(s):
184 val = "%s %s" % (s, val[len(s):].strip())
185 found = True
186 break
187 if not found:
188 # default behavior is append
189 val = "%s %s" % (">>", val.strip())
190 ret[i] = val
191
192 return(ret)
193
194
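
A small worked example of the normalization above, with illustrative paths; each mode resolves to a (stdout, stderr) pair of redirect strings:

# Illustrative 'output' config and what get_output_cfg() above yields for it.
cfg = {
    'output': {
        'init': "/var/log/cloud.out",
        'config': [">> /var/log/cloud-config.out", "/var/log/cloud-config.err"],
        'final': {'output': "| logger -p", 'error': "> /dev/null"},
    }
}
# get_output_cfg(cfg, "init")   -> [">> /var/log/cloud.out", ">> /var/log/cloud.out"]
#   (a bare path defaults to append and applies to both stdout and stderr)
# get_output_cfg(cfg, "config") -> [">> /var/log/cloud-config.out",
#                                   ">> /var/log/cloud-config.err"]
# get_output_cfg(cfg, "final")  -> ["| logger -p", "> /dev/null"]
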
195# redirect_output(outfmt, errfmt, orig_out, orig_err)
196# replace orig_out and orig_err with filehandles specified in outfmt or errfmt
197# fmt can be:
198# > FILEPATH
199# >> FILEPATH
200# | program [ arg1 [ arg2 [ ... ] ] ]
201#
202# with a '|', arguments are passed to shell, so one level of
203# shell escape is required.
204def redirect_output(outfmt, errfmt, o_out=sys.stdout, o_err=sys.stderr):
205 if outfmt:
206 (mode, arg) = outfmt.split(" ", 1)
207 if mode == ">" or mode == ">>":
208 owith = "ab"
209 if mode == ">":
210 owith = "wb"
211 new_fp = open(arg, owith)
212 elif mode == "|":
213 proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)
214 new_fp = proc.stdin
215 else:
216 raise TypeError("invalid type for outfmt: %s" % outfmt)
217
218 if o_out:
219 os.dup2(new_fp.fileno(), o_out.fileno())
220 if errfmt == outfmt:
221 os.dup2(new_fp.fileno(), o_err.fileno())
222 return
223
224 if errfmt:
225 (mode, arg) = errfmt.split(" ", 1)
226 if mode == ">" or mode == ">>":
227 owith = "ab"
228 if mode == ">":
229 owith = "wb"
230 new_fp = open(arg, owith)
231 elif mode == "|":
232 proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)
233 new_fp = proc.stdin
234 else:
235 raise TypeError("invalid type for outfmt: %s" % outfmt)
236
237 if o_err:
238 os.dup2(new_fp.fileno(), o_err.fileno())
239 return
240
241
242def run_per_instance(name, func, args, clear_on_fail=False):
243 semfile = "%s/%s" % (cloudinit.get_ipath_cur("data"), name)
244 if os.path.exists(semfile):
245 return
246
247 util.write_file(semfile, str(time.time()))
248 try:
249 func(*args)
250 except:
251 if clear_on_fail:
252 os.unlink(semfile)
253 raise
254
255
256# apt_get top level command (install, update...), and args to pass it
257def apt_get(tlc, args=None):
258 if args is None:
259 args = []
260 e = os.environ.copy()
261 e['DEBIAN_FRONTEND'] = 'noninteractive'
262 cmd = ['apt-get', '--option', 'Dpkg::Options::=--force-confold',
263 '--assume-yes', '--quiet', tlc]
264 cmd.extend(args)
265 subprocess.check_call(cmd, env=e)
266
267
268def update_package_sources():
269 run_per_instance("update-sources", apt_get, ("update",))
270
271
272def install_packages(pkglist):
273 update_package_sources()
274 apt_get("install", pkglist)
27557
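
The module-name canonicalisation added in the new config/__init__.py can be summarised with a standalone sketch; the example names are arbitrary:

# Standalone sketch of the cc_ name canonicalisation added above.
MOD_PREFIX = "cc_"

def form_module_name(name):
    canon = name.replace("-", "_")
    if canon.lower().endswith(".py"):
        canon = canon[:-3]
    canon = canon.strip()
    if not canon:
        return None
    if not canon.startswith(MOD_PREFIX):
        canon = MOD_PREFIX + canon
    return canon

assert form_module_name("apt-update-upgrade") == "cc_apt_update_upgrade"
assert form_module_name("cc_final_message.py") == "cc_final_message"
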
=== modified file 'cloudinit/config/cc_apt_pipelining.py'
--- cloudinit/CloudConfig/cc_apt_pipelining.py 2012-03-09 15:26:09 +0000
+++ cloudinit/config/cc_apt_pipelining.py 2012-07-06 21:16:18 +0000
@@ -16,38 +16,44 @@
16# You should have received a copy of the GNU General Public License16# You should have received a copy of the GNU General Public License
17# along with this program. If not, see <http://www.gnu.org/licenses/>.17# along with this program. If not, see <http://www.gnu.org/licenses/>.
1818
19import cloudinit.util as util19from cloudinit import util
20from cloudinit.CloudConfig import per_instance20from cloudinit.settings import PER_INSTANCE
2121
22frequency = per_instance22frequency = PER_INSTANCE
23default_file = "/etc/apt/apt.conf.d/90cloud-init-pipelining"23
2424distros = ['ubuntu', 'debian']
2525
26def handle(_name, cfg, _cloud, log, _args):26DEFAULT_FILE = "/etc/apt/apt.conf.d/90cloud-init-pipelining"
27
28APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n"
29 'Acquire::http::Pipeline-Depth "%s";\n')
30
31# Acquire::http::Pipeline-Depth can be a value
32# from 0 to 5 indicating how many outstanding requests APT should send.
33# A value of zero MUST be specified if the remote host does not properly linger
34# on TCP connections - otherwise data corruption will occur.
35
36
37def handle(_name, cfg, cloud, log, _args):
2738
28 apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", False)39 apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", False)
29 apt_pipe_value = str(apt_pipe_value).lower()40 apt_pipe_value_s = str(apt_pipe_value).lower().strip()
3041
31 if apt_pipe_value == "false":42 if apt_pipe_value_s == "false":
32 write_apt_snippet("0", log)43 write_apt_snippet(cloud, "0", log, DEFAULT_FILE)
3344 elif apt_pipe_value_s in ("none", "unchanged", "os"):
34 elif apt_pipe_value in ("none", "unchanged", "os"):
35 return45 return
3646 elif apt_pipe_value_s in [str(b) for b in xrange(0, 6)]:
37 elif apt_pipe_value in str(range(0, 6)):47 write_apt_snippet(cloud, apt_pipe_value_s, log, DEFAULT_FILE)
38 write_apt_snippet(apt_pipe_value, log)
39
40 else:48 else:
41 log.warn("Invalid option for apt_pipeling: %s" % apt_pipe_value)49 log.warn("Invalid option for apt_pipeling: %s", apt_pipe_value)
4250
4351
44def write_apt_snippet(setting, log, f_name=default_file):52def write_apt_snippet(cloud, setting, log, f_name):
45 """ Writes f_name with apt pipeline depth 'setting' """53 """ Writes f_name with apt pipeline depth 'setting' """
4654
47 acquire_pipeline_depth = 'Acquire::http::Pipeline-Depth "%s";\n'55 file_contents = APT_PIPE_TPL % (setting)
48 file_contents = ("//Written by cloud-init per 'apt_pipelining'\n"56
49 + (acquire_pipeline_depth % setting))57 util.write_file(cloud.paths.join(False, f_name), file_contents)
5058
51 util.write_file(f_name, file_contents)59 log.debug("Wrote %s with apt pipeline depth setting %s", f_name, setting)
52
53 log.debug("Wrote %s with APT pipeline setting" % f_name)
5460
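
Given the APT_PIPE_TPL template above, a valid numeric setting renders to a small apt.conf.d snippet; for example (illustrative value):

# What cc_apt_pipelining writes for 'apt_pipelining: 3' (sketch).
APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n"
                'Acquire::http::Pipeline-Depth "%s";\n')
print(APT_PIPE_TPL % "3")
# //Written by cloud-init per 'apt_pipelining'
# Acquire::http::Pipeline-Depth "3";
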
=== modified file 'cloudinit/config/cc_apt_update_upgrade.py'
--- cloudinit/CloudConfig/cc_apt_update_upgrade.py 2012-01-18 14:07:33 +0000
+++ cloudinit/config/cc_apt_update_upgrade.py 2012-07-06 21:16:18 +0000
@@ -18,50 +18,73 @@
18# You should have received a copy of the GNU General Public License18# You should have received a copy of the GNU General Public License
19# along with this program. If not, see <http://www.gnu.org/licenses/>.19# along with this program. If not, see <http://www.gnu.org/licenses/>.
2020
21import cloudinit.util as util21import glob
22import subprocess
23import traceback
24import os22import os
25import glob23
26import cloudinit.CloudConfig as cc24from cloudinit import templater
2725from cloudinit import util
2826
29def handle(_name, cfg, cloud, log, _args):27distros = ['ubuntu', 'debian']
28
29PROXY_TPL = "Acquire::HTTP::Proxy \"%s\";\n"
30PROXY_FN = "/etc/apt/apt.conf.d/95cloud-init-proxy"
31
32# A temporary shell program to get a given gpg key
33# from a given keyserver
34EXPORT_GPG_KEYID = """
35 k=${1} ks=${2};
36 exec 2>/dev/null
37 [ -n "$k" ] || exit 1;
38 armour=$(gpg --list-keys --armour "${k}")
39 if [ -z "${armour}" ]; then
40 gpg --keyserver ${ks} --recv $k >/dev/null &&
41 armour=$(gpg --export --armour "${k}") &&
42 gpg --batch --yes --delete-keys "${k}"
43 fi
44 [ -n "${armour}" ] && echo "${armour}"
45"""
46
47
48def handle(name, cfg, cloud, log, _args):
30 update = util.get_cfg_option_bool(cfg, 'apt_update', False)49 update = util.get_cfg_option_bool(cfg, 'apt_update', False)
31 upgrade = util.get_cfg_option_bool(cfg, 'apt_upgrade', False)50 upgrade = util.get_cfg_option_bool(cfg, 'apt_upgrade', False)
3251
33 release = get_release()52 release = get_release()
34
35 mirror = find_apt_mirror(cloud, cfg)53 mirror = find_apt_mirror(cloud, cfg)
3654 if not mirror:
37 log.debug("selected mirror at: %s" % mirror)55 log.debug(("Skipping module named %s,"
3856 " no package 'mirror' located"), name)
39 if not util.get_cfg_option_bool(cfg, \57 return
40 'apt_preserve_sources_list', False):58
41 generate_sources_list(release, mirror)59 log.debug("Selected mirror at: %s" % mirror)
42 old_mir = util.get_cfg_option_str(cfg, 'apt_old_mirror', \60
43 "archive.ubuntu.com/ubuntu")61 if not util.get_cfg_option_bool(cfg,
62 'apt_preserve_sources_list', False):
63 generate_sources_list(release, mirror, cloud, log)
64 old_mir = util.get_cfg_option_str(cfg, 'apt_old_mirror',
65 "archive.ubuntu.com/ubuntu")
44 rename_apt_lists(old_mir, mirror)66 rename_apt_lists(old_mir, mirror)
4567
46 # set up proxy68 # Set up any apt proxy
47 proxy = cfg.get("apt_proxy", None)69 proxy = cfg.get("apt_proxy", None)
48 proxy_filename = "/etc/apt/apt.conf.d/95cloud-init-proxy"70 proxy_filename = PROXY_FN
49 if proxy:71 if proxy:
50 try:72 try:
51 contents = "Acquire::HTTP::Proxy \"%s\";\n"73 # See man 'apt.conf'
52 with open(proxy_filename, "w") as fp:74 contents = PROXY_TPL % (proxy)
53 fp.write(contents % proxy)75 util.write_file(cloud.paths.join(False, proxy_filename),
76 contents)
54 except Exception as e:77 except Exception as e:
55 log.warn("Failed to write proxy to %s" % proxy_filename)78 util.logexc(log, "Failed to write proxy to %s", proxy_filename)
56 elif os.path.isfile(proxy_filename):79 elif os.path.isfile(proxy_filename):
57 os.unlink(proxy_filename)80 util.del_file(proxy_filename)
5881
59 # process 'apt_sources'82 # Process 'apt_sources'
60 if 'apt_sources' in cfg:83 if 'apt_sources' in cfg:
61 errors = add_sources(cfg['apt_sources'],84 errors = add_sources(cloud, cfg['apt_sources'],
62 {'MIRROR': mirror, 'RELEASE': release})85 {'MIRROR': mirror, 'RELEASE': release})
63 for e in errors:86 for e in errors:
64 log.warn("Source Error: %s\n" % ':'.join(e))87 log.warn("Source Error: %s", ':'.join(e))
6588
66 dconf_sel = util.get_cfg_option_str(cfg, 'debconf_selections', False)89 dconf_sel = util.get_cfg_option_str(cfg, 'debconf_selections', False)
67 if dconf_sel:90 if dconf_sel:
@@ -69,45 +92,51 @@
69 try:92 try:
70 util.subp(('debconf-set-selections', '-'), dconf_sel)93 util.subp(('debconf-set-selections', '-'), dconf_sel)
71 except:94 except:
72 log.error("Failed to run debconf-set-selections")95 util.logexc(log, "Failed to run debconf-set-selections")
73 log.debug(traceback.format_exc())
7496
75 pkglist = util.get_cfg_option_list_or_str(cfg, 'packages', [])97 pkglist = util.get_cfg_option_list(cfg, 'packages', [])
7698
77 errors = []99 errors = []
78 if update or len(pkglist) or upgrade:100 if update or len(pkglist) or upgrade:
79 try:101 try:
80 cc.update_package_sources()102 cloud.distro.update_package_sources()
81 except subprocess.CalledProcessError as e:103 except Exception as e:
82 log.warn("apt-get update failed")104 util.logexc(log, "Package update failed")
83 log.debug(traceback.format_exc())
84 errors.append(e)105 errors.append(e)
85106
86 if upgrade:107 if upgrade:
87 try:108 try:
88 cc.apt_get("upgrade")109 cloud.distro.package_command("upgrade")
89 except subprocess.CalledProcessError as e:110 except Exception as e:
90 log.warn("apt upgrade failed")111 util.logexc(log, "Package upgrade failed")
91 log.debug(traceback.format_exc())
92 errors.append(e)112 errors.append(e)
93113
94 if len(pkglist):114 if len(pkglist):
95 try:115 try:
96 cc.install_packages(pkglist)116 cloud.distro.install_packages(pkglist)
97 except subprocess.CalledProcessError as e:117 except Exception as e:
98 log.warn("Failed to install packages: %s " % pkglist)118 util.logexc(log, "Failed to install packages: %s ", pkglist)
99 log.debug(traceback.format_exc())
100 errors.append(e)119 errors.append(e)
101120
102 if len(errors):121 if len(errors):
103 raise errors[0]122 log.warn("%s failed with exceptions, re-raising the last one",
104123 len(errors))
105 return(True)124 raise errors[-1]
125
126
127# get gpg keyid from keyserver
128def getkeybyid(keyid, keyserver):
129 with util.ExtendedTemporaryFile(suffix='.sh') as fh:
130 fh.write(EXPORT_GPG_KEYID)
131 fh.flush()
132 cmd = ['/bin/sh', fh.name, keyid, keyserver]
133 (stdout, _stderr) = util.subp(cmd)
134 return stdout.strip()
106135
107136
108def mirror2lists_fileprefix(mirror):137def mirror2lists_fileprefix(mirror):
109 string = mirror138 string = mirror
110 # take of http:// or ftp://139 # take off http:// or ftp://
111 if string.endswith("/"):140 if string.endswith("/"):
112 string = string[0:-1]141 string = string[0:-1]
113 pos = string.find("://")142 pos = string.find("://")
@@ -118,39 +147,44 @@
118147
119148
120def rename_apt_lists(omirror, new_mirror, lists_d="/var/lib/apt/lists"):149def rename_apt_lists(omirror, new_mirror, lists_d="/var/lib/apt/lists"):
121 oprefix = "%s/%s" % (lists_d, mirror2lists_fileprefix(omirror))150 oprefix = os.path.join(lists_d, mirror2lists_fileprefix(omirror))
122 nprefix = "%s/%s" % (lists_d, mirror2lists_fileprefix(new_mirror))151 nprefix = os.path.join(lists_d, mirror2lists_fileprefix(new_mirror))
123 if(oprefix == nprefix):152 if oprefix == nprefix:
124 return153 return
125 olen = len(oprefix)154 olen = len(oprefix)
126 for filename in glob.glob("%s_*" % oprefix):155 for filename in glob.glob("%s_*" % oprefix):
127 os.rename(filename, "%s%s" % (nprefix, filename[olen:]))156 # TODO use the cloud.paths.join...
157 util.rename(filename, "%s%s" % (nprefix, filename[olen:]))
128158
129159
130def get_release():160def get_release():
131 stdout, _stderr = subprocess.Popen(['lsb_release', '-cs'],161 (stdout, _stderr) = util.subp(['lsb_release', '-cs'])
132 stdout=subprocess.PIPE).communicate()162 return stdout.strip()
133 return(str(stdout).strip())163
134164
135165def generate_sources_list(codename, mirror, cloud, log):
136def generate_sources_list(codename, mirror):166 template_fn = cloud.get_template_filename('sources.list')
137 util.render_to_file('sources.list', '/etc/apt/sources.list', \167 if template_fn:
138 {'mirror': mirror, 'codename': codename})168 params = {'mirror': mirror, 'codename': codename}
139169 out_fn = cloud.paths.join(False, '/etc/apt/sources.list')
140170 templater.render_to_file(template_fn, out_fn, params)
141def add_sources(srclist, searchList=None):171 else:
172 log.warn("No template found, not rendering /etc/apt/sources.list")
173
174
175def add_sources(cloud, srclist, template_params=None):
142 """176 """
143 add entries in /etc/apt/sources.list.d for each abbreviated177 add entries in /etc/apt/sources.list.d for each abbreviated
144 sources.list entry in 'srclist'. When rendering template, also178 sources.list entry in 'srclist'. When rendering template, also
145 include the values in dictionary searchList179 include the values in dictionary searchList
146 """180 """
147 if searchList is None:181 if template_params is None:
148 searchList = {}182 template_params = {}
149 elst = []
150183
184 errorlist = []
151 for ent in srclist:185 for ent in srclist:
152 if 'source' not in ent:186 if 'source' not in ent:
153 elst.append(["", "missing source"])187 errorlist.append(["", "missing source"])
154 continue188 continue
155189
156 source = ent['source']190 source = ent['source']
@@ -158,51 +192,48 @@
158 try:192 try:
159 util.subp(["add-apt-repository", source])193 util.subp(["add-apt-repository", source])
160 except:194 except:
161 elst.append([source, "add-apt-repository failed"])195 errorlist.append([source, "add-apt-repository failed"])
162 continue196 continue
163197
164 source = util.render_string(source, searchList)198 source = templater.render_string(source, template_params)
165199
166 if 'filename' not in ent:200 if 'filename' not in ent:
167 ent['filename'] = 'cloud_config_sources.list'201 ent['filename'] = 'cloud_config_sources.list'
168202
169 if not ent['filename'].startswith("/"):203 if not ent['filename'].startswith("/"):
170 ent['filename'] = "%s/%s" % \204 ent['filename'] = os.path.join("/etc/apt/sources.list.d/",
171 ("/etc/apt/sources.list.d/", ent['filename'])205 ent['filename'])
172206
173 if ('keyid' in ent and 'key' not in ent):207 if ('keyid' in ent and 'key' not in ent):
174 ks = "keyserver.ubuntu.com"208 ks = "keyserver.ubuntu.com"
175 if 'keyserver' in ent:209 if 'keyserver' in ent:
176 ks = ent['keyserver']210 ks = ent['keyserver']
177 try:211 try:
178 ent['key'] = util.getkeybyid(ent['keyid'], ks)212 ent['key'] = getkeybyid(ent['keyid'], ks)
179 except:213 except:
180 elst.append([source, "failed to get key from %s" % ks])214 errorlist.append([source, "failed to get key from %s" % ks])
181 continue215 continue
182216
183 if 'key' in ent:217 if 'key' in ent:
184 try:218 try:
185 util.subp(('apt-key', 'add', '-'), ent['key'])219 util.subp(('apt-key', 'add', '-'), ent['key'])
186 except:220 except:
187 elst.append([source, "failed add key"])221 errorlist.append([source, "failed add key"])
188222
189 try:223 try:
190 util.write_file(ent['filename'], source + "\n", omode="ab")224 contents = "%s\n" % (source)
225 util.write_file(cloud.paths.join(False, ent['filename']),
226 contents, omode="ab")
191 except:227 except:
192 elst.append([source, "failed write to file %s" % ent['filename']])228 errorlist.append([source,
229 "failed write to file %s" % ent['filename']])
193230
194 return(elst)231 return errorlist
195232
196233
197def find_apt_mirror(cloud, cfg):234def find_apt_mirror(cloud, cfg):
198 """ find an apt_mirror given the cloud and cfg provided """235 """ find an apt_mirror given the cloud and cfg provided """
199236
200 # TODO: distro and defaults should be configurable
201 distro = "ubuntu"
202 defaults = {
203 'ubuntu': "http://archive.ubuntu.com/ubuntu",
204 'debian': "http://archive.debian.org/debian",
205 }
206 mirror = None237 mirror = None
207238
208 cfg_mirror = cfg.get("apt_mirror", None)239 cfg_mirror = cfg.get("apt_mirror", None)
@@ -211,14 +242,13 @@
211 elif "apt_mirror_search" in cfg:242 elif "apt_mirror_search" in cfg:
212 mirror = util.search_for_mirror(cfg['apt_mirror_search'])243 mirror = util.search_for_mirror(cfg['apt_mirror_search'])
213 else:244 else:
214 if cloud:245 mirror = cloud.get_local_mirror()
215 mirror = cloud.get_mirror()
216246
217 mydom = ""247 mydom = ""
218248
219 doms = []249 doms = []
220250
221 if not mirror and cloud:251 if not mirror:
222 # if we have a fqdn, then search its domain portion first252 # if we have a fqdn, then search its domain portion first
223 (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)253 (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
224 mydom = ".".join(fqdn.split(".")[1:])254 mydom = ".".join(fqdn.split(".")[1:])
@@ -229,13 +259,14 @@
229 doms.extend((".localdomain", "",))259 doms.extend((".localdomain", "",))
230260
231 mirror_list = []261 mirror_list = []
262 distro = cloud.distro.name
232 mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro)263 mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro)
233 for post in doms:264 for post in doms:
234 mirror_list.append(mirrorfmt % post)265 mirror_list.append(mirrorfmt % (post))
235266
236 mirror = util.search_for_mirror(mirror_list)267 mirror = util.search_for_mirror(mirror_list)
237268
238 if not mirror:269 if not mirror:
239 mirror = defaults[distro]270 mirror = cloud.distro.get_package_mirror()
240271
241 return mirror272 return mirror
242273
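
For context, apt_sources entries as add_sources above consumes them; the PPA, archive URL and key id below are made up, and the exact substitution syntax for the MIRROR/RELEASE parameters depends on the templater module:

# Illustrative 'apt_sources' list; keys match what add_sources() handles.
apt_sources = [
    {'source': "ppa:example/ppa"},                 # handed to add-apt-repository
    {'source': "deb $MIRROR $RELEASE multiverse",  # MIRROR/RELEASE are template params
     'filename': "multiverse.list"},
    {'source': "deb http://archive.example.com/ubuntu precise main",
     'keyid': "1234ABCD",                          # fetched via getkeybyid()
     'keyserver': "keyserver.ubuntu.com",
     'filename': "example.list"},
]
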
=== modified file 'cloudinit/config/cc_bootcmd.py'
--- cloudinit/CloudConfig/cc_bootcmd.py 2012-01-18 14:07:33 +0000
+++ cloudinit/config/cc_bootcmd.py 2012-07-06 21:16:18 +0000
@@ -17,32 +17,39 @@
17#17#
18# You should have received a copy of the GNU General Public License18# You should have received a copy of the GNU General Public License
19# along with this program. If not, see <http://www.gnu.org/licenses/>.19# along with this program. If not, see <http://www.gnu.org/licenses/>.
20import cloudinit.util as util20
21import subprocess
22import tempfile
23import os21import os
24from cloudinit.CloudConfig import per_always22
25frequency = per_always23from cloudinit import util
2624from cloudinit.settings import PER_ALWAYS
2725
28def handle(_name, cfg, cloud, log, _args):26frequency = PER_ALWAYS
27
28
29def handle(name, cfg, cloud, log, _args):
30
29 if "bootcmd" not in cfg:31 if "bootcmd" not in cfg:
32 log.debug(("Skipping module named %s,"
33 " no 'bootcmd' key in configuration"), name)
30 return34 return
3135
32 try:36 with util.ExtendedTemporaryFile(suffix=".sh") as tmpf:
33 content = util.shellify(cfg["bootcmd"])37 try:
34 tmpf = tempfile.TemporaryFile()38 content = util.shellify(cfg["bootcmd"])
35 tmpf.write(content)39 tmpf.write(content)
36 tmpf.seek(0)40 tmpf.flush()
37 except:41 except:
38 log.warn("failed to shellify bootcmd")42 util.logexc(log, "Failed to shellify bootcmd")
39 raise43 raise
4044
41 try:45 try:
42 env = os.environ.copy()46 env = os.environ.copy()
43 env['INSTANCE_ID'] = cloud.get_instance_id()47 iid = cloud.get_instance_id()
44 subprocess.check_call(['/bin/sh'], env=env, stdin=tmpf)48 if iid:
45 tmpf.close()49 env['INSTANCE_ID'] = str(iid)
46 except:50 cmd = ['/bin/sh', tmpf.name]
47 log.warn("failed to run commands from bootcmd")51 util.subp(cmd, env=env, capture=False)
48 raise52 except:
53 util.logexc(log,
54 ("Failed to run bootcmd module %s"), name)
55 raise
4956
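The new cc_bootcmd flow is: shellify the 'bootcmd' list into a script, write it to a named temporary file, export INSTANCE_ID into the environment, and run the file through /bin/sh. A minimal standalone sketch of that pattern, with the standard library standing in for util.ExtendedTemporaryFile and util.subp, and a simplified shellify that only handles plain string entries:

    import os
    import subprocess
    import tempfile

    def run_bootcmd(commands, instance_id=None):
        # Rough equivalent of util.shellify() for string-only entries.
        content = "#!/bin/sh\n" + "\n".join(str(c) for c in commands) + "\n"
        with tempfile.NamedTemporaryFile(suffix=".sh") as tmpf:
            tmpf.write(content.encode("utf-8"))
            tmpf.flush()
            env = os.environ.copy()
            if instance_id:
                env["INSTANCE_ID"] = str(instance_id)
            # capture=False in util.subp means output goes straight to the console.
            subprocess.check_call(["/bin/sh", tmpf.name], env=env)

    run_bootcmd(["echo bootcmd ran on $INSTANCE_ID"], instance_id="i-1234")
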
=== modified file 'cloudinit/config/cc_byobu.py'
--- cloudinit/CloudConfig/cc_byobu.py 2012-01-18 14:07:33 +0000
+++ cloudinit/config/cc_byobu.py 2012-07-06 21:16:18 +0000
@@ -18,18 +18,19 @@
18# You should have received a copy of the GNU General Public License18# You should have received a copy of the GNU General Public License
19# along with this program. If not, see <http://www.gnu.org/licenses/>.19# along with this program. If not, see <http://www.gnu.org/licenses/>.
2020
21import cloudinit.util as util21from cloudinit import util
22import subprocess22
23import traceback23distros = ['ubuntu', 'debian']
2424
2525
26def handle(_name, cfg, _cloud, log, args):26def handle(name, cfg, _cloud, log, args):
27 if len(args) != 0:27 if len(args) != 0:
28 value = args[0]28 value = args[0]
29 else:29 else:
30 value = util.get_cfg_option_str(cfg, "byobu_by_default", "")30 value = util.get_cfg_option_str(cfg, "byobu_by_default", "")
3131
32 if not value:32 if not value:
33 log.debug("Skipping module named %s, no 'byobu' values found", name)
33 return34 return
3435
35 if value == "user" or value == "system":36 if value == "user" or value == "system":
@@ -38,7 +39,7 @@
38 valid = ("enable-user", "enable-system", "enable",39 valid = ("enable-user", "enable-system", "enable",
39 "disable-user", "disable-system", "disable")40 "disable-user", "disable-system", "disable")
40 if not value in valid:41 if not value in valid:
41 log.warn("Unknown value %s for byobu_by_default" % value)42 log.warn("Unknown value %s for byobu_by_default", value)
4243
43 mod_user = value.endswith("-user")44 mod_user = value.endswith("-user")
44 mod_sys = value.endswith("-system")45 mod_sys = value.endswith("-system")
@@ -65,13 +66,6 @@
6566
66 cmd = ["/bin/sh", "-c", "%s %s %s" % ("X=0;", shcmd, "exit $X")]67 cmd = ["/bin/sh", "-c", "%s %s %s" % ("X=0;", shcmd, "exit $X")]
6768
68 log.debug("setting byobu to %s" % value)69 log.debug("Setting byobu to %s", value)
6970
70 try:71 util.subp(cmd, capture=False)
71 subprocess.check_call(cmd)
72 except subprocess.CalledProcessError as e:
73 log.debug(traceback.format_exc(e))
74 raise Exception("Cmd returned %s: %s" % (e.returncode, cmd))
75 except OSError as e:
76 log.debug(traceback.format_exc(e))
77 raise Exception("Cmd failed to execute: %s" % (cmd))
7872
=== modified file 'cloudinit/config/cc_ca_certs.py'
--- cloudinit/CloudConfig/cc_ca_certs.py 2012-03-08 12:45:43 +0000
+++ cloudinit/config/cc_ca_certs.py 2012-07-06 21:16:18 +0000
@@ -13,25 +13,27 @@
13#13#
14# You should have received a copy of the GNU General Public License14# You should have received a copy of the GNU General Public License
15# along with this program. If not, see <http://www.gnu.org/licenses/>.15# along with this program. If not, see <http://www.gnu.org/licenses/>.
16
16import os17import os
17from subprocess import check_call18
18from cloudinit.util import (write_file, get_cfg_option_list_or_str,19from cloudinit import util
19 delete_dir_contents, subp)
2020
21CA_CERT_PATH = "/usr/share/ca-certificates/"21CA_CERT_PATH = "/usr/share/ca-certificates/"
22CA_CERT_FILENAME = "cloud-init-ca-certs.crt"22CA_CERT_FILENAME = "cloud-init-ca-certs.crt"
23CA_CERT_CONFIG = "/etc/ca-certificates.conf"23CA_CERT_CONFIG = "/etc/ca-certificates.conf"
24CA_CERT_SYSTEM_PATH = "/etc/ssl/certs/"24CA_CERT_SYSTEM_PATH = "/etc/ssl/certs/"
2525
26distros = ['ubuntu', 'debian']
27
2628
27def update_ca_certs():29def update_ca_certs():
28 """30 """
29 Updates the CA certificate cache on the current machine.31 Updates the CA certificate cache on the current machine.
30 """32 """
31 check_call(["update-ca-certificates"])33 util.subp(["update-ca-certificates"], capture=False)
3234
3335
34def add_ca_certs(certs):36def add_ca_certs(paths, certs):
35 """37 """
36 Adds certificates to the system. To actually apply the new certificates38 Adds certificates to the system. To actually apply the new certificates
37 you must also call L{update_ca_certs}.39 you must also call L{update_ca_certs}.
@@ -39,26 +41,29 @@
39 @param certs: A list of certificate strings.41 @param certs: A list of certificate strings.
40 """42 """
41 if certs:43 if certs:
42 cert_file_contents = "\n".join(certs)44 # First ensure they are strings...
45 cert_file_contents = "\n".join([str(c) for c in certs])
43 cert_file_fullpath = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME)46 cert_file_fullpath = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME)
44 write_file(cert_file_fullpath, cert_file_contents, mode=0644)47 cert_file_fullpath = paths.join(False, cert_file_fullpath)
48 util.write_file(cert_file_fullpath, cert_file_contents, mode=0644)
45 # Append cert filename to CA_CERT_CONFIG file.49 # Append cert filename to CA_CERT_CONFIG file.
46 write_file(CA_CERT_CONFIG, "\n%s" % CA_CERT_FILENAME, omode="a")50 util.write_file(paths.join(False, CA_CERT_CONFIG),
4751 "\n%s" % CA_CERT_FILENAME, omode="ab")
4852
49def remove_default_ca_certs():53
54def remove_default_ca_certs(paths):
50 """55 """
51 Removes all default trusted CA certificates from the system. To actually56 Removes all default trusted CA certificates from the system. To actually
52 apply the change you must also call L{update_ca_certs}.57 apply the change you must also call L{update_ca_certs}.
53 """58 """
54 delete_dir_contents(CA_CERT_PATH)59 util.delete_dir_contents(paths.join(False, CA_CERT_PATH))
55 delete_dir_contents(CA_CERT_SYSTEM_PATH)60 util.delete_dir_contents(paths.join(False, CA_CERT_SYSTEM_PATH))
56 write_file(CA_CERT_CONFIG, "", mode=0644)61 util.write_file(paths.join(False, CA_CERT_CONFIG), "", mode=0644)
57 debconf_sel = "ca-certificates ca-certificates/trust_new_crts select no"62 debconf_sel = "ca-certificates ca-certificates/trust_new_crts select no"
58 subp(('debconf-set-selections', '-'), debconf_sel)63 util.subp(('debconf-set-selections', '-'), debconf_sel)
5964
6065
61def handle(_name, cfg, _cloud, log, _args):66def handle(name, cfg, cloud, log, _args):
62 """67 """
63 Call to handle ca-cert sections in cloud-config file.68 Call to handle ca-cert sections in cloud-config file.
6469
@@ -70,21 +75,25 @@
70 """75 """
71 # If there isn't a ca-certs section in the configuration don't do anything76 # If there isn't a ca-certs section in the configuration don't do anything
72 if "ca-certs" not in cfg:77 if "ca-certs" not in cfg:
78 log.debug(("Skipping module named %s,"
79 " no 'ca-certs' key in configuration"), name)
73 return80 return
81
74 ca_cert_cfg = cfg['ca-certs']82 ca_cert_cfg = cfg['ca-certs']
7583
76 # If there is a remove-defaults option set to true, remove the system84 # If there is a remove-defaults option set to true, remove the system
77 # default trusted CA certs first.85 # default trusted CA certs first.
78 if ca_cert_cfg.get("remove-defaults", False):86 if ca_cert_cfg.get("remove-defaults", False):
79 log.debug("removing default certificates")87 log.debug("Removing default certificates")
80 remove_default_ca_certs()88 remove_default_ca_certs(cloud.paths)
8189
82 # If we are given any new trusted CA certs to add, add them.90 # If we are given any new trusted CA certs to add, add them.
83 if "trusted" in ca_cert_cfg:91 if "trusted" in ca_cert_cfg:
84 trusted_certs = get_cfg_option_list_or_str(ca_cert_cfg, "trusted")92 trusted_certs = util.get_cfg_option_list(ca_cert_cfg, "trusted")
85 if trusted_certs:93 if trusted_certs:
86 log.debug("adding %d certificates" % len(trusted_certs))94 log.debug("Adding %d certificates" % len(trusted_certs))
87 add_ca_certs(trusted_certs)95 add_ca_certs(cloud.paths, trusted_certs)
8896
89 # Update the system with the new cert configuration.97 # Update the system with the new cert configuration.
98 log.debug("Updating certificates")
90 update_ca_certs()99 update_ca_certs()
91100
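For reference, the cfg dictionary this handler consumes (the parsed form of a ca-certs cloud-config section) looks like the following; the certificate body is a placeholder:

    cfg = {
        "ca-certs": {
            # Optional: wipe the distribution-provided trust store first.
            "remove-defaults": True,
            # Each entry is one PEM certificate string.
            "trusted": [
                "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----",
            ],
        },
    }
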
=== modified file 'cloudinit/config/cc_chef.py'
--- cloudinit/CloudConfig/cc_chef.py 2012-03-26 17:49:06 +0000
+++ cloudinit/config/cc_chef.py 2012-07-06 21:16:18 +0000
@@ -18,53 +18,71 @@
18# You should have received a copy of the GNU General Public License18# You should have received a copy of the GNU General Public License
19# along with this program. If not, see <http://www.gnu.org/licenses/>.19# along with this program. If not, see <http://www.gnu.org/licenses/>.
2020
21import json
21import os22import os
22import subprocess23
23import json24from cloudinit import templater
24import cloudinit.CloudConfig as cc25from cloudinit import util
25import cloudinit.util as util26
2627RUBY_VERSION_DEFAULT = "1.8"
27ruby_version_default = "1.8"28
2829
2930def handle(name, cfg, cloud, log, _args):
30def handle(_name, cfg, cloud, log, _args):31
31 # If there isn't a chef key in the configuration don't do anything32 # If there isn't a chef key in the configuration don't do anything
32 if 'chef' not in cfg:33 if 'chef' not in cfg:
34 log.debug(("Skipping module named %s,"
35 " no 'chef' key in configuration"), name)
33 return36 return
34 chef_cfg = cfg['chef']37 chef_cfg = cfg['chef']
3538
36 # ensure the chef directories we use exist39 # Ensure the chef directories we use exist
37 mkdirs(['/etc/chef', '/var/log/chef', '/var/lib/chef',40 c_dirs = [
38 '/var/cache/chef', '/var/backups/chef', '/var/run/chef'])41 '/etc/chef',
42 '/var/log/chef',
43 '/var/lib/chef',
44 '/var/cache/chef',
45 '/var/backups/chef',
46 '/var/run/chef',
47 ]
48 for d in c_dirs:
49 util.ensure_dir(cloud.paths.join(False, d))
3950
40 # set the validation key based on the presence of either 'validation_key'51 # Set the validation key based on the presence of either 'validation_key'
41 # or 'validation_cert'. In the case where both exist, 'validation_key'52 # or 'validation_cert'. In the case where both exist, 'validation_key'
42 # takes precedence53 # takes precedence
43 for key in ('validation_key', 'validation_cert'):54 for key in ('validation_key', 'validation_cert'):
44 if key in chef_cfg and chef_cfg[key]:55 if key in chef_cfg and chef_cfg[key]:
45 with open('/etc/chef/validation.pem', 'w') as validation_key_fh:56 v_fn = cloud.paths.join(False, '/etc/chef/validation.pem')
46 validation_key_fh.write(chef_cfg[key])57 util.write_file(v_fn, chef_cfg[key])
47 break58 break
4859
49 # create the chef config from template60 # Create the chef config from template
50 util.render_to_file('chef_client.rb', '/etc/chef/client.rb',61 template_fn = cloud.get_template_filename('chef_client.rb')
51 {'server_url': chef_cfg['server_url'],62 if template_fn:
52 'node_name': util.get_cfg_option_str(chef_cfg, 'node_name',63 iid = str(cloud.datasource.get_instance_id())
53 cloud.datasource.get_instance_id()),64 params = {
54 'environment': util.get_cfg_option_str(chef_cfg, 'environment',65 'server_url': chef_cfg['server_url'],
55 '_default'),66 'node_name': util.get_cfg_option_str(chef_cfg, 'node_name', iid),
56 'validation_name': chef_cfg['validation_name']})67 'environment': util.get_cfg_option_str(chef_cfg, 'environment',
68 '_default'),
69 'validation_name': chef_cfg['validation_name']
70 }
71 out_fn = cloud.paths.join(False, '/etc/chef/client.rb')
72 templater.render_to_file(template_fn, out_fn, params)
73 else:
74 log.warn("No template found, not rendering to /etc/chef/client.rb")
5775
58 # set the firstboot json76 # set the firstboot json
59 with open('/etc/chef/firstboot.json', 'w') as firstboot_json_fh:77 initial_json = {}
60 initial_json = {}78 if 'run_list' in chef_cfg:
61 if 'run_list' in chef_cfg:79 initial_json['run_list'] = chef_cfg['run_list']
62 initial_json['run_list'] = chef_cfg['run_list']80 if 'initial_attributes' in chef_cfg:
63 if 'initial_attributes' in chef_cfg:81 initial_attributes = chef_cfg['initial_attributes']
64 initial_attributes = chef_cfg['initial_attributes']82 for k in list(initial_attributes.keys()):
65 for k in initial_attributes.keys():83 initial_json[k] = initial_attributes[k]
66 initial_json[k] = initial_attributes[k]84 firstboot_fn = cloud.paths.join(False, '/etc/chef/firstboot.json')
67 firstboot_json_fh.write(json.dumps(initial_json))85 util.write_file(firstboot_fn, json.dumps(initial_json))
6886
69 # If chef is not installed, we install chef based on 'install_type'87 # If chef is not installed, we install chef based on 'install_type'
70 if not os.path.isfile('/usr/bin/chef-client'):88 if not os.path.isfile('/usr/bin/chef-client'):
@@ -74,15 +92,17 @@
74 # this will install and run the chef-client from gems92 # this will install and run the chef-client from gems
75 chef_version = util.get_cfg_option_str(chef_cfg, 'version', None)93 chef_version = util.get_cfg_option_str(chef_cfg, 'version', None)
76 ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version',94 ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version',
77 ruby_version_default)95 RUBY_VERSION_DEFAULT)
78 install_chef_from_gems(ruby_version, chef_version)96 install_chef_from_gems(cloud.distro, ruby_version, chef_version)
79 # and finally, run chef-client97 # and finally, run chef-client
80 log.debug('running chef-client')98 log.debug('Running chef-client')
81 subprocess.check_call(['/usr/bin/chef-client', '-d', '-i', '1800',99 util.subp(['/usr/bin/chef-client',
82 '-s', '20'])100 '-d', '-i', '1800', '-s', '20'], capture=False)
101 elif install_type == 'packages':
102 # this will install and run the chef-client from packages
103 cloud.distro.install_packages(('chef',))
83 else:104 else:
84 # this will install and run the chef-client from packages105 log.warn("Unknown chef install type %s", install_type)
85 cc.install_packages(('chef',))
86106
87107
88def get_ruby_packages(version):108def get_ruby_packages(version):
@@ -90,30 +110,20 @@
90 pkgs = ['ruby%s' % version, 'ruby%s-dev' % version]110 pkgs = ['ruby%s' % version, 'ruby%s-dev' % version]
91 if version == "1.8":111 if version == "1.8":
92 pkgs.extend(('libopenssl-ruby1.8', 'rubygems1.8'))112 pkgs.extend(('libopenssl-ruby1.8', 'rubygems1.8'))
93 return(pkgs)113 return pkgs
94114
95115
96def install_chef_from_gems(ruby_version, chef_version=None):116def install_chef_from_gems(ruby_version, chef_version, distro):
97 cc.install_packages(get_ruby_packages(ruby_version))117 distro.install_packages(get_ruby_packages(ruby_version))
98 if not os.path.exists('/usr/bin/gem'):118 if not os.path.exists('/usr/bin/gem'):
99 os.symlink('/usr/bin/gem%s' % ruby_version, '/usr/bin/gem')119 util.sym_link('/usr/bin/gem%s' % ruby_version, '/usr/bin/gem')
100 if not os.path.exists('/usr/bin/ruby'):120 if not os.path.exists('/usr/bin/ruby'):
101 os.symlink('/usr/bin/ruby%s' % ruby_version, '/usr/bin/ruby')121 util.sym_link('/usr/bin/ruby%s' % ruby_version, '/usr/bin/ruby')
102 if chef_version:122 if chef_version:
103 subprocess.check_call(['/usr/bin/gem', 'install', 'chef',123 util.subp(['/usr/bin/gem', 'install', 'chef',
104 '-v %s' % chef_version, '--no-ri',124 '-v %s' % chef_version, '--no-ri',
105 '--no-rdoc', '--bindir', '/usr/bin', '-q'])125 '--no-rdoc', '--bindir', '/usr/bin', '-q'], capture=False)
106 else:126 else:
107 subprocess.check_call(['/usr/bin/gem', 'install', 'chef',127 util.subp(['/usr/bin/gem', 'install', 'chef',
108 '--no-ri', '--no-rdoc', '--bindir',128 '--no-ri', '--no-rdoc', '--bindir',
109 '/usr/bin', '-q'])129 '/usr/bin', '-q'], capture=False)
110
111
112def ensure_dir(d):
113 if not os.path.exists(d):
114 os.makedirs(d)
115
116
117def mkdirs(dirs):
118 for d in dirs:
119 ensure_dir(d)
120130
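The keys this module reads from the 'chef' section are all visible in the hunk above; gathered into one parsed example, with every value a placeholder:

    cfg = {
        "chef": {
            "server_url": "https://chef.example.com:443",   # accessed directly, so required
            "validation_name": "example-validator",         # also required by the template
            "validation_key": "-----BEGIN RSA PRIVATE KEY-----\n...",
            "node_name": "node-001",                        # defaults to the instance id
            "environment": "_default",
            "run_list": ["recipe[apache2]", "role[web]"],
            "initial_attributes": {"apache": {"listen_ports": ["80"]}},
            "install_type": "gems",                         # or "packages"
            "ruby_version": "1.8",                          # RUBY_VERSION_DEFAULT
            "version": None,                                # optional pinned chef gem version
        },
    }
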
=== modified file 'cloudinit/config/cc_disable_ec2_metadata.py'
--- cloudinit/CloudConfig/cc_disable_ec2_metadata.py 2012-01-18 14:07:33 +0000
+++ cloudinit/config/cc_disable_ec2_metadata.py 2012-07-06 21:16:18 +0000
@@ -17,14 +17,20 @@
17#17#
18# You should have received a copy of the GNU General Public License18# You should have received a copy of the GNU General Public License
19# along with this program. If not, see <http://www.gnu.org/licenses/>.19# along with this program. If not, see <http://www.gnu.org/licenses/>.
20import cloudinit.util as util20
21import subprocess21from cloudinit import util
22from cloudinit.CloudConfig import per_always22
2323from cloudinit.settings import PER_ALWAYS
24frequency = per_always24
2525frequency = PER_ALWAYS
2626
27def handle(_name, cfg, _cloud, _log, _args):27REJECT_CMD = ['route', 'add', '-host', '169.254.169.254', 'reject']
28 if util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False):28
29 fwall = "route add -host 169.254.169.254 reject"29
30 subprocess.call(fwall.split(' '))30def handle(name, cfg, _cloud, log, _args):
31 disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False)
32 if disabled:
33 util.subp(REJECT_CMD, capture=False)
34 else:
35 log.debug(("Skipping module named %s,"
36 " disabling the ec2 route not enabled"), name)
3137
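Stripped of the cloud-init helpers, the behaviour above is one guarded command. A standalone sketch, with subprocess standing in for util.subp:

    import subprocess

    REJECT_CMD = ["route", "add", "-host", "169.254.169.254", "reject"]

    def disable_ec2_metadata(cfg):
        # Only act when the operator explicitly asked for it.
        if cfg.get("disable_ec2_metadata", False):
            subprocess.check_call(REJECT_CMD)

    disable_ec2_metadata({})   # no flag set, so this is a no-op
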
=== modified file 'cloudinit/config/cc_final_message.py'
--- cloudinit/CloudConfig/cc_final_message.py 2012-01-18 14:07:33 +0000
+++ cloudinit/config/cc_final_message.py 2012-07-06 21:16:18 +0000
@@ -18,41 +18,51 @@
18# You should have received a copy of the GNU General Public License18# You should have received a copy of the GNU General Public License
19# along with this program. If not, see <http://www.gnu.org/licenses/>.19# along with this program. If not, see <http://www.gnu.org/licenses/>.
2020
21from cloudinit.CloudConfig import per_always21from cloudinit import templater
22import sys22from cloudinit import util
23from cloudinit import util, boot_finished23from cloudinit import version
24import time24
2525from cloudinit.settings import PER_ALWAYS
26frequency = per_always26
2727frequency = PER_ALWAYS
28final_message = "cloud-init boot finished at $TIMESTAMP. Up $UPTIME seconds"28
2929FINAL_MESSAGE_DEF = ("Cloud-init v. {{version}} finished at {{timestamp}}."
3030 " Up {{uptime}} seconds.")
31def handle(_name, cfg, _cloud, log, args):31
32
33def handle(_name, cfg, cloud, log, args):
34
35 msg_in = None
32 if len(args) != 0:36 if len(args) != 0:
33 msg_in = args[0]37 msg_in = args[0]
34 else:38 else:
35 msg_in = util.get_cfg_option_str(cfg, "final_message", final_message)39 msg_in = util.get_cfg_option_str(cfg, "final_message")
3640
37 try:41 if not msg_in:
38 uptimef = open("/proc/uptime")42 template_fn = cloud.get_template_filename('final_message')
39 uptime = uptimef.read().split(" ")[0]43 if template_fn:
40 uptimef.close()44 msg_in = util.load_file(template_fn)
41 except IOError as e:45
42 log.warn("unable to open /proc/uptime\n")46 if not msg_in:
43 uptime = "na"47 msg_in = FINAL_MESSAGE_DEF
4448
45 try:49 uptime = util.uptime()
46 ts = time.strftime("%a, %d %b %Y %H:%M:%S %z", time.gmtime())50 ts = util.time_rfc2822()
51 cver = version.version_string()
52 try:
53 subs = {
54 'uptime': uptime,
55 'timestamp': ts,
56 'version': cver,
57 }
58 util.multi_log("%s\n" % (templater.render_string(msg_in, subs)),
59 console=False, stderr=True)
60 except Exception:
61 util.logexc(log, "Failed to render final message template")
62
63 boot_fin_fn = cloud.paths.boot_finished
64 try:
65 contents = "%s - %s - v. %s\n" % (uptime, ts, cver)
66 util.write_file(boot_fin_fn, contents)
47 except:67 except:
48 ts = "na"68 util.logexc(log, "Failed to write boot finished file %s", boot_fin_fn)
49
50 try:
51 subs = {'UPTIME': uptime, 'TIMESTAMP': ts}
52 sys.stdout.write("%s\n" % util.render_string(msg_in, subs))
53 except Exception as e:
54 log.warn("failed to render string to stdout: %s" % e)
55
56 fp = open(boot_finished, "wb")
57 fp.write(uptime + "\n")
58 fp.close()
5969
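The final message is now a template with {{version}}, {{timestamp}} and {{uptime}} placeholders instead of the old $UPTIME/$TIMESTAMP form. A rough standalone rendering of the default string; the real substitution is done by cloudinit.templater, approximated here with plain string replacement, and the version value is a placeholder:

    import time

    FINAL_MESSAGE_DEF = ("Cloud-init v. {{version}} finished at {{timestamp}}."
                         " Up {{uptime}} seconds.")

    def render(template, subs):
        # Naive stand-in for templater.render_string().
        out = template
        for key, val in subs.items():
            out = out.replace("{{%s}}" % key, str(val))
        return out

    with open("/proc/uptime") as f:          # what util.uptime() reads
        uptime = f.read().split()[0]

    print(render(FINAL_MESSAGE_DEF, {
        "version": "0.7.0",                  # version.version_string(), placeholder here
        "timestamp": time.strftime("%a, %d %b %Y %H:%M:%S", time.gmtime()),
        "uptime": uptime,
    }))
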
=== modified file 'cloudinit/config/cc_foo.py'
--- cloudinit/CloudConfig/cc_foo.py 2012-01-18 14:07:33 +0000
+++ cloudinit/config/cc_foo.py 2012-07-06 21:16:18 +0000
@@ -18,12 +18,35 @@
18# You should have received a copy of the GNU General Public License18# You should have received a copy of the GNU General Public License
19# along with this program. If not, see <http://www.gnu.org/licenses/>.19# along with this program. If not, see <http://www.gnu.org/licenses/>.
2020
21#import cloudinit21from cloudinit.settings import PER_INSTANCE
22#import cloudinit.util as util22
23from cloudinit.CloudConfig import per_instance23# Modules are expected to have the following attributes.
2424# 1. A required 'handle' method which takes the following params.
25frequency = per_instance25# a) The name will not be this files name, but instead
2626# the name specified in configuration (which is the name
2727# which will be used to find this module).
28def handle(_name, _cfg, _cloud, _log, _args):28# b) A configuration object that is the result of the merging
29 print "hi"29# of cloud configs configuration with legacy configuration
30# as well as any datasource provided configuration
31# c) A cloud object that can be used to access various
32# datasource and paths for the given distro and data provided
33# by the various datasource instance types.
34# d) A argument list that may or may not be empty to this module.
35# Typically those are from module configuration where the module
36# is defined with some extra configuration that will eventually
37# be translated from yaml into arguments to this module.
38# 2. A optional 'frequency' that defines how often this module should be ran.
39# Typically one of PER_INSTANCE, PER_ALWAYS, PER_ONCE. If not
40# provided PER_INSTANCE will be assumed.
41# See settings.py for these constants.
42# 3. A optional 'distros' array/set/tuple that defines the known distros
43# this module will work with (if not all of them). This is used to write
44# a warning out if a module is being ran on a untested distribution for
45# informational purposes. If non existent all distros are assumed and
46# no warning occurs.
47
48frequency = PER_INSTANCE
49
50
51def handle(name, _cfg, _cloud, log, _args):
52 log.debug("Hi from module %s", name)
3053
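The comment block above is effectively the contract for module authors. A minimal skeleton that satisfies it might look like this; cc_example and the 'example' key are hypothetical, only PER_INSTANCE and the handle() signature come from the text above:

    # Hypothetical cc_example.py following the contract documented in cc_foo.py.
    from cloudinit.settings import PER_INSTANCE

    # Optional: how often to run (PER_INSTANCE is assumed when omitted).
    frequency = PER_INSTANCE

    # Optional: distros this module is known to work on.
    distros = ['ubuntu', 'debian']


    def handle(name, cfg, cloud, log, _args):
        # 'name' is the key used in the configuration, not this file's name.
        if 'example' not in cfg:
            log.debug("Skipping module named %s, no 'example' key", name)
            return
        log.debug("Module %s got configuration %s", name, cfg['example'])
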
=== modified file 'cloudinit/config/cc_grub_dpkg.py'
--- cloudinit/CloudConfig/cc_grub_dpkg.py 2012-01-18 14:07:33 +0000
+++ cloudinit/config/cc_grub_dpkg.py 2012-07-06 21:16:18 +0000
@@ -18,10 +18,12 @@
18# You should have received a copy of the GNU General Public License18# You should have received a copy of the GNU General Public License
19# along with this program. If not, see <http://www.gnu.org/licenses/>.19# along with this program. If not, see <http://www.gnu.org/licenses/>.
2020
21import cloudinit.util as util
22import traceback
23import os21import os
2422
23from cloudinit import util
24
25distros = ['ubuntu', 'debian']
26
2527
26def handle(_name, cfg, _cloud, log, _args):28def handle(_name, cfg, _cloud, log, _args):
27 idevs = None29 idevs = None
@@ -35,14 +37,14 @@
3537
36 if ((os.path.exists("/dev/sda1") and not os.path.exists("/dev/sda")) or38 if ((os.path.exists("/dev/sda1") and not os.path.exists("/dev/sda")) or
37 (os.path.exists("/dev/xvda1") and not os.path.exists("/dev/xvda"))):39 (os.path.exists("/dev/xvda1") and not os.path.exists("/dev/xvda"))):
38 if idevs == None:40 if idevs is None:
39 idevs = ""41 idevs = ""
40 if idevs_empty == None:42 if idevs_empty is None:
41 idevs_empty = "true"43 idevs_empty = "true"
42 else:44 else:
43 if idevs_empty == None:45 if idevs_empty is None:
44 idevs_empty = "false"46 idevs_empty = "false"
45 if idevs == None:47 if idevs is None:
46 idevs = "/dev/sda"48 idevs = "/dev/sda"
47 for dev in ("/dev/sda", "/dev/vda", "/dev/sda1", "/dev/vda1"):49 for dev in ("/dev/sda", "/dev/vda", "/dev/sda1", "/dev/vda1"):
48 if os.path.exists(dev):50 if os.path.exists(dev):
@@ -52,13 +54,14 @@
52 # now idevs and idevs_empty are set to determined values54 # now idevs and idevs_empty are set to determined values
53 # or, those set by user55 # or, those set by user
5456
55 dconf_sel = "grub-pc grub-pc/install_devices string %s\n" % idevs + \57 dconf_sel = (("grub-pc grub-pc/install_devices string %s\n"
56 "grub-pc grub-pc/install_devices_empty boolean %s\n" % idevs_empty58 "grub-pc grub-pc/install_devices_empty boolean %s\n") %
57 log.debug("setting grub debconf-set-selections with '%s','%s'" %59 (idevs, idevs_empty))
60
61 log.debug("Setting grub debconf-set-selections with '%s','%s'" %
58 (idevs, idevs_empty))62 (idevs, idevs_empty))
5963
60 try:64 try:
61 util.subp(('debconf-set-selections'), dconf_sel)65 util.subp(['debconf-set-selections'], dconf_sel)
62 except:66 except:
63 log.error("Failed to run debconf-set-selections for grub-dpkg")67 util.logexc(log, "Failed to run debconf-set-selections for grub-dpkg")
64 log.debug(traceback.format_exc())
6568
=== modified file 'cloudinit/config/cc_keys_to_console.py'
--- cloudinit/CloudConfig/cc_keys_to_console.py 2012-01-18 14:07:33 +0000
+++ cloudinit/config/cc_keys_to_console.py 2012-07-06 21:16:18 +0000
@@ -18,25 +18,36 @@
18# You should have received a copy of the GNU General Public License18# You should have received a copy of the GNU General Public License
19# along with this program. If not, see <http://www.gnu.org/licenses/>.19# along with this program. If not, see <http://www.gnu.org/licenses/>.
2020
21from cloudinit.CloudConfig import per_instance21import os
22import cloudinit.util as util22
23import subprocess23from cloudinit.settings import PER_INSTANCE
2424from cloudinit import util
25frequency = per_instance25
2626frequency = PER_INSTANCE
2727
28def handle(_name, cfg, _cloud, log, _args):28# This is a tool that cloud init provides
29 cmd = ['/usr/lib/cloud-init/write-ssh-key-fingerprints']29HELPER_TOOL = '/usr/lib/cloud-init/write-ssh-key-fingerprints'
30 fp_blacklist = util.get_cfg_option_list_or_str(cfg,30
31 "ssh_fp_console_blacklist", [])31
32 key_blacklist = util.get_cfg_option_list_or_str(cfg,32def handle(name, cfg, _cloud, log, _args):
33 "ssh_key_console_blacklist", ["ssh-dss"])33 if not os.path.exists(HELPER_TOOL):
34 log.warn(("Unable to activate module %s,"
35 " helper tool not found at %s"), name, HELPER_TOOL)
36 return
37
38 fp_blacklist = util.get_cfg_option_list(cfg,
39 "ssh_fp_console_blacklist", [])
40 key_blacklist = util.get_cfg_option_list(cfg,
41 "ssh_key_console_blacklist",
42 ["ssh-dss"])
43
34 try:44 try:
35 confp = open('/dev/console', "wb")45 cmd = [HELPER_TOOL]
36 cmd.append(','.join(fp_blacklist))46 cmd.append(','.join(fp_blacklist))
37 cmd.append(','.join(key_blacklist))47 cmd.append(','.join(key_blacklist))
38 subprocess.call(cmd, stdout=confp)48 (stdout, _stderr) = util.subp(cmd)
39 confp.close()49 util.multi_log("%s\n" % (stdout.strip()),
50 stderr=False, console=True)
40 except:51 except:
41 log.warn("writing keys to console value")52 log.warn("Writing keys to the system console failed!")
42 raise53 raise
4354
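The two blacklist options read above, in parsed form; per the defaults in the hunk, the fingerprint blacklist starts empty and 'ssh-dss' keys are suppressed by the key blacklist. The values below are only an illustration:

    cfg = {
        # Key types whose fingerprints should not be written to the console.
        "ssh_fp_console_blacklist": ["ssh-dss"],
        # Key types whose public keys should not be written to the console.
        "ssh_key_console_blacklist": ["ssh-dss"],
    }
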
=== modified file 'cloudinit/config/cc_landscape.py'
--- cloudinit/CloudConfig/cc_landscape.py 2012-04-10 20:22:47 +0000
+++ cloudinit/config/cc_landscape.py 2012-07-06 21:16:18 +0000
@@ -19,16 +19,23 @@
19# along with this program. If not, see <http://www.gnu.org/licenses/>.19# along with this program. If not, see <http://www.gnu.org/licenses/>.
2020
21import os21import os
22import os.path22
23from cloudinit.CloudConfig import per_instance23from StringIO import StringIO
24
24from configobj import ConfigObj25from configobj import ConfigObj
2526
26frequency = per_instance27from cloudinit import util
2728
28lsc_client_cfg_file = "/etc/landscape/client.conf"29from cloudinit.settings import PER_INSTANCE
30
31frequency = PER_INSTANCE
32
33LSC_CLIENT_CFG_FILE = "/etc/landscape/client.conf"
34
35distros = ['ubuntu']
2936
30# defaults taken from stock client.conf in landscape-client 11.07.1.1-0ubuntu237# defaults taken from stock client.conf in landscape-client 11.07.1.1-0ubuntu2
31lsc_builtincfg = {38LSC_BUILTIN_CFG = {
32 'client': {39 'client': {
33 'log_level': "info",40 'log_level': "info",
34 'url': "https://landscape.canonical.com/message-system",41 'url': "https://landscape.canonical.com/message-system",
@@ -38,7 +45,7 @@
38}45}
3946
4047
41def handle(_name, cfg, _cloud, log, _args):48def handle(_name, cfg, cloud, log, _args):
42 """49 """
43 Basically turn a top level 'landscape' entry with a 'client' dict50 Basically turn a top level 'landscape' entry with a 'client' dict
44 and render it to ConfigObj format under '[client]' section in51 and render it to ConfigObj format under '[client]' section in
@@ -47,27 +54,40 @@
4754
48 ls_cloudcfg = cfg.get("landscape", {})55 ls_cloudcfg = cfg.get("landscape", {})
4956
50 if not isinstance(ls_cloudcfg, dict):57 if not isinstance(ls_cloudcfg, (dict)):
51 raise(Exception("'landscape' existed in config, but not a dict"))58 raise RuntimeError(("'landscape' key existed in config,"
5259 " but not a dictionary type,"
53 merged = mergeTogether([lsc_builtincfg, lsc_client_cfg_file, ls_cloudcfg])60 " is a %s instead"), util.obj_name(ls_cloudcfg))
5461
55 if not os.path.isdir(os.path.dirname(lsc_client_cfg_file)):62 merge_data = [
56 os.makedirs(os.path.dirname(lsc_client_cfg_file))63 LSC_BUILTIN_CFG,
5764 cloud.paths.join(True, LSC_CLIENT_CFG_FILE),
58 with open(lsc_client_cfg_file, "w") as fp:65 ls_cloudcfg,
59 merged.write(fp)66 ]
6067 merged = merge_together(merge_data)
61 log.debug("updated %s" % lsc_client_cfg_file)68
6269 lsc_client_fn = cloud.paths.join(False, LSC_CLIENT_CFG_FILE)
6370 lsc_dir = cloud.paths.join(False, os.path.dirname(lsc_client_fn))
64def mergeTogether(objs):71 if not os.path.isdir(lsc_dir):
72 util.ensure_dir(lsc_dir)
73
74 contents = StringIO()
75 merged.write(contents)
76 contents.flush()
77
78 util.write_file(lsc_client_fn, contents.getvalue())
79 log.debug("Wrote landscape config file to %s", lsc_client_fn)
80
81
82def merge_together(objs):
65 """83 """
66 merge together ConfigObj objects or things that ConfigObj() will take in84 merge together ConfigObj objects or things that ConfigObj() will take in
67 later entries override earlier85 later entries override earlier
68 """86 """
69 cfg = ConfigObj({})87 cfg = ConfigObj({})
70 for obj in objs:88 for obj in objs:
89 if not obj:
90 continue
71 if isinstance(obj, ConfigObj):91 if isinstance(obj, ConfigObj):
72 cfg.merge(obj)92 cfg.merge(obj)
73 else:93 else:
7494
=== modified file 'cloudinit/config/cc_locale.py'
--- cloudinit/CloudConfig/cc_locale.py 2012-01-18 14:07:33 +0000
+++ cloudinit/config/cc_locale.py 2012-07-06 21:16:18 +0000
@@ -18,37 +18,20 @@
18# You should have received a copy of the GNU General Public License18# You should have received a copy of the GNU General Public License
19# along with this program. If not, see <http://www.gnu.org/licenses/>.19# along with this program. If not, see <http://www.gnu.org/licenses/>.
2020
21import cloudinit.util as util21from cloudinit import util
22import os.path22
23import subprocess23
24import traceback24def handle(name, cfg, cloud, log, args):
25
26
27def apply_locale(locale, cfgfile):
28 if os.path.exists('/usr/sbin/locale-gen'):
29 subprocess.Popen(['locale-gen', locale]).communicate()
30 if os.path.exists('/usr/sbin/update-locale'):
31 subprocess.Popen(['update-locale', locale]).communicate()
32
33 util.render_to_file('default-locale', cfgfile, {'locale': locale})
34
35
36def handle(_name, cfg, cloud, log, args):
37 if len(args) != 0:25 if len(args) != 0:
38 locale = args[0]26 locale = args[0]
39 else:27 else:
40 locale = util.get_cfg_option_str(cfg, "locale", cloud.get_locale())28 locale = util.get_cfg_option_str(cfg, "locale", cloud.get_locale())
4129
42 locale_cfgfile = util.get_cfg_option_str(cfg, "locale_configfile",
43 "/etc/default/locale")
44
45 if not locale:30 if not locale:
31 log.debug(("Skipping module named %s, "
32 "no 'locale' configuration found"), name)
46 return33 return
4734
48 log.debug("setting locale to %s" % locale)35 log.debug("Setting locale to %s", locale)
4936 locale_cfgfile = util.get_cfg_option_str(cfg, "locale_configfile")
50 try:37 cloud.distro.apply_locale(locale, locale_cfgfile)
51 apply_locale(locale, locale_cfgfile)
52 except Exception as e:
53 log.debug(traceback.format_exc(e))
54 raise Exception("failed to apply locale %s" % locale)
5538
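The two keys consulted above, in parsed form; the handler now delegates the actual work to cloud.distro.apply_locale():

    cfg = {
        "locale": "en_US.UTF-8",
        # Optional override of the locale config file path; the removed code
        # defaulted this to /etc/default/locale.
        "locale_configfile": "/etc/default/locale",
    }
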
=== modified file 'cloudinit/config/cc_mcollective.py'
--- cloudinit/CloudConfig/cc_mcollective.py 2012-01-18 14:07:33 +0000
+++ cloudinit/config/cc_mcollective.py 2012-07-06 21:16:18 +0000
@@ -19,81 +19,73 @@
19# You should have received a copy of the GNU General Public License19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <http://www.gnu.org/licenses/>.20# along with this program. If not, see <http://www.gnu.org/licenses/>.
2121
22import os22from StringIO import StringIO
23import subprocess23
24import StringIO24# Used since this can maintain comments
25import ConfigParser25# and doesn't need a top level section
26import cloudinit.CloudConfig as cc26from configobj import ConfigObj
27import cloudinit.util as util27
2828from cloudinit import util
29pubcert_file = "/etc/mcollective/ssl/server-public.pem"29
30pricert_file = "/etc/mcollective/ssl/server-private.pem"30PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem"
3131PRICERT_FILE = "/etc/mcollective/ssl/server-private.pem"
3232
33# Our fake header section33
34class FakeSecHead(object):34def handle(name, cfg, cloud, log, _args):
35 def __init__(self, fp):35
36 self.fp = fp
37 self.sechead = '[nullsection]\n'
38
39 def readline(self):
40 if self.sechead:
41 try:
42 return self.sechead
43 finally:
44 self.sechead = None
45 else:
46 return self.fp.readline()
47
48
49def handle(_name, cfg, _cloud, _log, _args):
50 # If there isn't a mcollective key in the configuration don't do anything36 # If there isn't a mcollective key in the configuration don't do anything
51 if 'mcollective' not in cfg:37 if 'mcollective' not in cfg:
38 log.debug(("Skipping module named %s, "
39 "no 'mcollective' key in configuration"), name)
52 return40 return
41
53 mcollective_cfg = cfg['mcollective']42 mcollective_cfg = cfg['mcollective']
43
54 # Start by installing the mcollective package ...44 # Start by installing the mcollective package ...
55 cc.install_packages(("mcollective",))45 cloud.distro.install_packages(("mcollective",))
5646
57 # ... and then update the mcollective configuration47 # ... and then update the mcollective configuration
58 if 'conf' in mcollective_cfg:48 if 'conf' in mcollective_cfg:
59 # Create object for reading server.cfg values49 # Read server.cfg values from the
60 mcollective_config = ConfigParser.ConfigParser()50 # original file in order to be able to mix the rest up
61 # Read server.cfg values from original file in order to be able to mix51 server_cfg_fn = cloud.paths.join(True, '/etc/mcollective/server.cfg')
62 # the rest up52 mcollective_config = ConfigObj(server_cfg_fn)
63 mcollective_config.readfp(FakeSecHead(open('/etc/mcollective/'53 # See: http://tiny.cc/jh9agw
64 'server.cfg')))54 for (cfg_name, cfg) in mcollective_cfg['conf'].iteritems():
65 for cfg_name, cfg in mcollective_cfg['conf'].iteritems():
66 if cfg_name == 'public-cert':55 if cfg_name == 'public-cert':
67 util.write_file(pubcert_file, cfg, mode=0644)56 pubcert_fn = cloud.paths.join(True, PUBCERT_FILE)
68 mcollective_config.set(cfg_name,57 util.write_file(pubcert_fn, cfg, mode=0644)
69 'plugin.ssl_server_public', pubcert_file)58 mcollective_config['plugin.ssl_server_public'] = pubcert_fn
70 mcollective_config.set(cfg_name, 'securityprovider', 'ssl')59 mcollective_config['securityprovider'] = 'ssl'
71 elif cfg_name == 'private-cert':60 elif cfg_name == 'private-cert':
72 util.write_file(pricert_file, cfg, mode=0600)61 pricert_fn = cloud.paths.join(True, PRICERT_FILE)
73 mcollective_config.set(cfg_name,62 util.write_file(pricert_fn, cfg, mode=0600)
74 'plugin.ssl_server_private', pricert_file)63 mcollective_config['plugin.ssl_server_private'] = pricert_fn
75 mcollective_config.set(cfg_name, 'securityprovider', 'ssl')64 mcollective_config['securityprovider'] = 'ssl'
76 else:65 else:
77 # Iterate throug the config items, we'll use ConfigParser.set66 if isinstance(cfg, (basestring, str)):
78 # to overwrite or create new items as needed67 # Just set it in the 'main' section
79 for o, v in cfg.iteritems():68 mcollective_config[cfg_name] = cfg
80 mcollective_config.set(cfg_name, o, v)69 elif isinstance(cfg, (dict)):
70 # Iterate throug the config items, create a section
71 # if it is needed and then add/or create items as needed
72 if cfg_name not in mcollective_config.sections:
73 mcollective_config[cfg_name] = {}
74 for (o, v) in cfg.iteritems():
75 mcollective_config[cfg_name][o] = v
76 else:
77 # Otherwise just try to convert it to a string
78 mcollective_config[cfg_name] = str(cfg)
81 # We got all our config as wanted we'll rename79 # We got all our config as wanted we'll rename
82 # the previous server.cfg and create our new one80 # the previous server.cfg and create our new one
83 os.rename('/etc/mcollective/server.cfg',81 old_fn = cloud.paths.join(False, '/etc/mcollective/server.cfg.old')
84 '/etc/mcollective/server.cfg.old')82 util.rename(server_cfg_fn, old_fn)
85 outputfile = StringIO.StringIO()83 # Now we got the whole file, write to disk...
86 mcollective_config.write(outputfile)84 contents = StringIO()
87 # Now we got the whole file, write to disk except first line85 mcollective_config.write(contents)
88 # Note below, that we've just used ConfigParser because it generally86 contents = contents.getvalue()
89 # works. Below, we remove the initial 'nullsection' header87 server_cfg_rw = cloud.paths.join(False, '/etc/mcollective/server.cfg')
90 # and then change 'key = value' to 'key: value'. The global88 util.write_file(server_cfg_rw, contents, mode=0644)
91 # search and replace of '=' with ':' could be problematic though.
92 # this most likely needs fixing.
93 util.write_file('/etc/mcollective/server.cfg',
94 outputfile.getvalue().replace('[nullsection]\n', '').replace(' =',
95 ':'),
96 mode=0644)
9789
98 # Start mcollective90 # Start mcollective
99 subprocess.check_call(['service', 'mcollective', 'start'])91 util.subp(['service', 'mcollective', 'start'], capture=False)
10092
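In parsed form, the 'mcollective' section handled above looks like this: plain string values land at the top of server.cfg, nested dictionaries become sections, and the two cert entries are written to the PEM paths defined at the top of the module. All values here are placeholders:

    cfg = {
        "mcollective": {
            "conf": {
                "public-cert": "-----BEGIN CERTIFICATE-----\n...",
                "private-cert": "-----BEGIN RSA PRIVATE KEY-----\n...",
                "loglevel": "debug",                      # simple value -> main section
                "plugin.stomp.host": "stomp.example.com",
            },
        },
    }
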
=== modified file 'cloudinit/config/cc_mounts.py'
--- cloudinit/CloudConfig/cc_mounts.py 2012-01-18 14:07:33 +0000
+++ cloudinit/config/cc_mounts.py 2012-07-06 21:16:18 +0000
@@ -18,10 +18,16 @@
18# You should have received a copy of the GNU General Public License18# You should have received a copy of the GNU General Public License
19# along with this program. If not, see <http://www.gnu.org/licenses/>.19# along with this program. If not, see <http://www.gnu.org/licenses/>.
2020
21import cloudinit.util as util21from string import whitespace # pylint: disable=W0402
22import os22
23import re23import re
24from string import whitespace # pylint: disable=W040224
25from cloudinit import util
26
27# Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1
28SHORTNAME_FILTER = r"^[x]{0,1}[shv]d[a-z][0-9]*$"
29SHORTNAME = re.compile(SHORTNAME_FILTER)
30WS = re.compile("[%s]+" % (whitespace))
2531
2632
27def is_mdname(name):33def is_mdname(name):
@@ -49,38 +55,46 @@
49 if "mounts" in cfg:55 if "mounts" in cfg:
50 cfgmnt = cfg["mounts"]56 cfgmnt = cfg["mounts"]
5157
52 # shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1
53 shortname_filter = r"^[x]{0,1}[shv]d[a-z][0-9]*$"
54 shortname = re.compile(shortname_filter)
55
56 for i in range(len(cfgmnt)):58 for i in range(len(cfgmnt)):
57 # skip something that wasn't a list59 # skip something that wasn't a list
58 if not isinstance(cfgmnt[i], list):60 if not isinstance(cfgmnt[i], list):
61 log.warn("Mount option %s not a list, got a %s instead",
62 (i + 1), util.obj_name(cfgmnt[i]))
59 continue63 continue
6064
65 startname = str(cfgmnt[i][0])
66 log.debug("Attempting to determine the real name of %s", startname)
67
61 # workaround, allow user to specify 'ephemeral'68 # workaround, allow user to specify 'ephemeral'
62 # rather than more ec2 correct 'ephemeral0'69 # rather than more ec2 correct 'ephemeral0'
63 if cfgmnt[i][0] == "ephemeral":70 if startname == "ephemeral":
64 cfgmnt[i][0] = "ephemeral0"71 cfgmnt[i][0] = "ephemeral0"
72 log.debug(("Adjusted mount option %s "
73 "name from ephemeral to ephemeral0"), (i + 1))
6574
66 if is_mdname(cfgmnt[i][0]):75 if is_mdname(startname):
67 newname = cloud.device_name_to_device(cfgmnt[i][0])76 newname = cloud.device_name_to_device(startname)
68 if not newname:77 if not newname:
69 log.debug("ignoring nonexistant named mount %s" % cfgmnt[i][0])78 log.debug("Ignoring nonexistant named mount %s", startname)
70 cfgmnt[i][1] = None79 cfgmnt[i][1] = None
71 else:80 else:
72 if newname.startswith("/"):81 renamed = newname
73 cfgmnt[i][0] = newname82 if not newname.startswith("/"):
74 else:83 renamed = "/dev/%s" % newname
75 cfgmnt[i][0] = "/dev/%s" % newname84 cfgmnt[i][0] = renamed
85 log.debug("Mapped metadata name %s to %s", startname, renamed)
76 else:86 else:
77 if shortname.match(cfgmnt[i][0]):87 if SHORTNAME.match(startname):
78 cfgmnt[i][0] = "/dev/%s" % cfgmnt[i][0]88 renamed = "/dev/%s" % startname
89 log.debug("Mapped shortname name %s to %s", startname, renamed)
90 cfgmnt[i][0] = renamed
7991
80 # in case the user did not quote a field (likely fs-freq, fs_passno)92 # in case the user did not quote a field (likely fs-freq, fs_passno)
81 # but do not convert None to 'None' (LP: #898365)93 # but do not convert None to 'None' (LP: #898365)
82 for j in range(len(cfgmnt[i])):94 for j in range(len(cfgmnt[i])):
83 if isinstance(cfgmnt[i][j], int):95 if j is None:
96 continue
97 else:
84 cfgmnt[i][j] = str(cfgmnt[i][j])98 cfgmnt[i][j] = str(cfgmnt[i][j])
8599
86 for i in range(len(cfgmnt)):100 for i in range(len(cfgmnt)):
@@ -102,14 +116,18 @@
102 # for each of the "default" mounts, add them only if no other116 # for each of the "default" mounts, add them only if no other
103 # entry has the same device name117 # entry has the same device name
104 for defmnt in defmnts:118 for defmnt in defmnts:
105 devname = cloud.device_name_to_device(defmnt[0])119 startname = defmnt[0]
120 devname = cloud.device_name_to_device(startname)
106 if devname is None:121 if devname is None:
122 log.debug("Ignoring nonexistant named default mount %s", startname)
107 continue123 continue
108 if devname.startswith("/"):124 if devname.startswith("/"):
109 defmnt[0] = devname125 defmnt[0] = devname
110 else:126 else:
111 defmnt[0] = "/dev/%s" % devname127 defmnt[0] = "/dev/%s" % devname
112128
129 log.debug("Mapped default device %s to %s", startname, defmnt[0])
130
113 cfgmnt_has = False131 cfgmnt_has = False
114 for cfgm in cfgmnt:132 for cfgm in cfgmnt:
115 if cfgm[0] == defmnt[0]:133 if cfgm[0] == defmnt[0]:
@@ -117,14 +135,22 @@
117 break135 break
118136
119 if cfgmnt_has:137 if cfgmnt_has:
138 log.debug(("Not including %s, already"
139 " previously included"), startname)
120 continue140 continue
121 cfgmnt.append(defmnt)141 cfgmnt.append(defmnt)
122142
123 # now, each entry in the cfgmnt list has all fstab values143 # now, each entry in the cfgmnt list has all fstab values
124 # if the second field is None (not the string, the value) we skip it144 # if the second field is None (not the string, the value) we skip it
125 actlist = [x for x in cfgmnt if x[1] is not None]145 actlist = []
146 for x in cfgmnt:
147 if x[1] is None:
148 log.debug("Skipping non-existent device named %s", x[0])
149 else:
150 actlist.append(x)
126151
127 if len(actlist) == 0:152 if len(actlist) == 0:
153 log.debug("No modifications to fstab needed.")
128 return154 return
129155
130 comment = "comment=cloudconfig"156 comment = "comment=cloudconfig"
@@ -133,7 +159,7 @@
133 dirs = []159 dirs = []
134 for line in actlist:160 for line in actlist:
135 # write 'comment' in the fs_mntops, entry, claiming this161 # write 'comment' in the fs_mntops, entry, claiming this
136 line[3] = "%s,comment=cloudconfig" % line[3]162 line[3] = "%s,%s" % (line[3], comment)
137 if line[2] == "swap":163 if line[2] == "swap":
138 needswap = True164 needswap = True
139 if line[1].startswith("/"):165 if line[1].startswith("/"):
@@ -141,11 +167,10 @@
141 cc_lines.append('\t'.join(line))167 cc_lines.append('\t'.join(line))
142168
143 fstab_lines = []169 fstab_lines = []
144 fstab = open("/etc/fstab", "r+")170 fstab = util.load_file(cloud.paths.join(True, "/etc/fstab"))
145 ws = re.compile("[%s]+" % whitespace)171 for line in fstab.splitlines():
146 for line in fstab.read().splitlines():
147 try:172 try:
148 toks = ws.split(line)173 toks = WS.split(line)
149 if toks[3].find(comment) != -1:174 if toks[3].find(comment) != -1:
150 continue175 continue
151 except:176 except:
@@ -153,27 +178,23 @@
153 fstab_lines.append(line)178 fstab_lines.append(line)
154179
155 fstab_lines.extend(cc_lines)180 fstab_lines.extend(cc_lines)
156181 contents = "%s\n" % ('\n'.join(fstab_lines))
157 fstab.seek(0)182 util.write_file(cloud.paths.join(False, "/etc/fstab"), contents)
158 fstab.write("%s\n" % '\n'.join(fstab_lines))
159 fstab.truncate()
160 fstab.close()
161183
162 if needswap:184 if needswap:
163 try:185 try:
164 util.subp(("swapon", "-a"))186 util.subp(("swapon", "-a"))
165 except:187 except:
166 log.warn("Failed to enable swap")188 util.logexc(log, "Activating swap via 'swapon -a' failed")
167189
168 for d in dirs:190 for d in dirs:
169 if os.path.exists(d):191 real_dir = cloud.paths.join(False, d)
170 continue
171 try:192 try:
172 os.makedirs(d)193 util.ensure_dir(real_dir)
173 except:194 except:
174 log.warn("Failed to make '%s' config-mount\n", d)195 util.logexc(log, "Failed to make '%s' config-mount", d)
175196
176 try:197 try:
177 util.subp(("mount", "-a"))198 util.subp(("mount", "-a"))
178 except:199 except:
179 log.warn("'mount -a' failed")200 util.logexc(log, "Activating mounts via 'mount -a' failed")
180201
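Each 'mounts' entry is a list shaped like an fstab line; 'ephemeral' is rewritten to 'ephemeral0', metadata names are mapped to real devices, and an entry whose second field is None is dropped. A parsed example; the option strings and the swap override are illustrative:

    cfg = {
        "mounts": [
            # device, mount point, fstype, options, dump, pass
            ["ephemeral0", "/mnt", "auto", "defaults,nobootwait", "0", "2"],
            # A None mount point makes the named device get skipped.
            ["swap", None],
        ],
    }
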
=== modified file 'cloudinit/config/cc_phone_home.py'
--- cloudinit/CloudConfig/cc_phone_home.py 2012-01-18 14:07:33 +0000
+++ cloudinit/config/cc_phone_home.py 2012-07-06 21:16:18 +0000
@@ -17,13 +17,22 @@
17#17#
18# You should have received a copy of the GNU General Public License18# You should have received a copy of the GNU General Public License
19# along with this program. If not, see <http://www.gnu.org/licenses/>.19# along with this program. If not, see <http://www.gnu.org/licenses/>.
20from cloudinit.CloudConfig import per_instance20
21import cloudinit.util as util21from cloudinit import templater
22from time import sleep22from cloudinit import url_helper as uhelp
2323from cloudinit import util
24frequency = per_instance24
25post_list_all = ['pub_key_dsa', 'pub_key_rsa', 'pub_key_ecdsa', 'instance_id',25from cloudinit.settings import PER_INSTANCE
26 'hostname']26
27frequency = PER_INSTANCE
28
29POST_LIST_ALL = [
30 'pub_key_dsa',
31 'pub_key_rsa',
32 'pub_key_ecdsa',
33 'instance_id',
34 'hostname'
35]
2736
2837
29# phone_home:38# phone_home:
@@ -35,29 +44,33 @@
35# url: http://my.foo.bar/$INSTANCE_ID/44# url: http://my.foo.bar/$INSTANCE_ID/
36# post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id45# post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id
37#46#
38def handle(_name, cfg, cloud, log, args):47def handle(name, cfg, cloud, log, args):
39 if len(args) != 0:48 if len(args) != 0:
40 ph_cfg = util.read_conf(args[0])49 ph_cfg = util.read_conf(args[0])
41 else:50 else:
42 if not 'phone_home' in cfg:51 if not 'phone_home' in cfg:
52 log.debug(("Skipping module named %s, "
53 "no 'phone_home' configuration found"), name)
43 return54 return
44 ph_cfg = cfg['phone_home']55 ph_cfg = cfg['phone_home']
4556
46 if 'url' not in ph_cfg:57 if 'url' not in ph_cfg:
47 log.warn("no 'url' token in phone_home")58 log.warn(("Skipping module named %s, "
59 "no 'url' found in 'phone_home' configuration"), name)
48 return60 return
4961
50 url = ph_cfg['url']62 url = ph_cfg['url']
51 post_list = ph_cfg.get('post', 'all')63 post_list = ph_cfg.get('post', 'all')
52 tries = ph_cfg.get('tries', 10)64 tries = ph_cfg.get('tries')
53 try:65 try:
54 tries = int(tries)66 tries = int(tries)
55 except:67 except:
56 log.warn("tries is not an integer. using 10")
57 tries = 1068 tries = 10
69 util.logexc(log, ("Configuration entry 'tries'"
70 " is not an integer, using %s instead"), tries)
5871
59 if post_list == "all":72 if post_list == "all":
60 post_list = post_list_all73 post_list = POST_LIST_ALL
6174
62 all_keys = {}75 all_keys = {}
63 all_keys['instance_id'] = cloud.get_instance_id()76 all_keys['instance_id'] = cloud.get_instance_id()
@@ -69,38 +82,37 @@
69 'pub_key_ecdsa': '/etc/ssh/ssh_host_ecdsa_key.pub',82 'pub_key_ecdsa': '/etc/ssh/ssh_host_ecdsa_key.pub',
70 }83 }
7184
72 for n, path in pubkeys.iteritems():85 for (n, path) in pubkeys.iteritems():
73 try:86 try:
74 fp = open(path, "rb")87 all_keys[n] = util.load_file(cloud.paths.join(True, path))
75 all_keys[n] = fp.read()
76 fp.close()
77 except:88 except:
78 log.warn("%s: failed to open in phone_home" % path)89 util.logexc(log, ("%s: failed to open, can not"
90 " phone home that data"), path)
7991
80 submit_keys = {}92 submit_keys = {}
81 for k in post_list:93 for k in post_list:
82 if k in all_keys:94 if k in all_keys:
83 submit_keys[k] = all_keys[k]95 submit_keys[k] = all_keys[k]
84 else:96 else:
85 submit_keys[k] = "N/A"97 submit_keys[k] = None
86 log.warn("requested key %s from 'post' list not available")98 log.warn(("Requested key %s from 'post'"
8799 " configuration list not available"), k)
88 url = util.render_string(url, {'INSTANCE_ID': all_keys['instance_id']})100
89101 # Get them read to be posted
90 null_exc = object()102 real_submit_keys = {}
91 last_e = null_exc103 for (k, v) in submit_keys.iteritems():
92 for i in range(0, tries):104 if v is None:
93 try:105 real_submit_keys[k] = 'N/A'
94 util.readurl(url, submit_keys)106 else:
95 log.debug("succeeded submit to %s on try %i" % (url, i + 1))107 real_submit_keys[k] = str(v)
96 return108
97 except Exception as e:109 # Incase the url is parameterized
98 log.debug("failed to post to %s on try %i" % (url, i + 1))110 url_params = {
99 last_e = e111 'INSTANCE_ID': all_keys['instance_id'],
100 sleep(3)112 }
101113 url = templater.render_string(url, url_params)
102 log.warn("failed to post to %s in %i tries" % (url, tries))114 try:
103 if last_e is not null_exc:115 uhelp.readurl(url, data=real_submit_keys, retries=tries, sec_between=3)
104 raise(last_e)116 except:
105117 util.logexc(log, ("Failed to post phone home data to"
106 return118 " %s in %s tries"), url, tries)
107119
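The phone_home section in parsed form; $INSTANCE_ID in the URL is templated with the real instance id before posting, and 'post' may be the literal string "all" instead of a list:

    cfg = {
        "phone_home": {
            "url": "http://my.example.com/$INSTANCE_ID/",
            "post": ["pub_key_dsa", "pub_key_rsa", "pub_key_ecdsa",
                     "instance_id", "hostname"],          # or simply "all"
            "tries": 10,
        },
    }
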
=== modified file 'cloudinit/config/cc_puppet.py'
--- cloudinit/CloudConfig/cc_puppet.py 2012-01-18 14:07:33 +0000
+++ cloudinit/config/cc_puppet.py 2012-07-06 21:16:18 +0000
@@ -18,91 +18,96 @@
18# You should have received a copy of the GNU General Public License18# You should have received a copy of the GNU General Public License
19# along with this program. If not, see <http://www.gnu.org/licenses/>.19# along with this program. If not, see <http://www.gnu.org/licenses/>.
2020
21from StringIO import StringIO
22
21import os23import os
22import os.path
23import pwd24import pwd
24import socket25import socket
25import subprocess26
26import StringIO27from cloudinit import helpers
27import ConfigParser28from cloudinit import util
28import cloudinit.CloudConfig as cc29
29import cloudinit.util as util30
3031def handle(name, cfg, cloud, log, _args):
31
32def handle(_name, cfg, cloud, log, _args):
33 # If there isn't a puppet key in the configuration don't do anything32 # If there isn't a puppet key in the configuration don't do anything
34 if 'puppet' not in cfg:33 if 'puppet' not in cfg:
34 log.debug(("Skipping module named %s,"
35 " no 'puppet' configuration found"), name)
35 return36 return
37
36 puppet_cfg = cfg['puppet']38 puppet_cfg = cfg['puppet']
39
37 # Start by installing the puppet package ...40 # Start by installing the puppet package ...
38 cc.install_packages(("puppet",))41 cloud.distro.install_packages(["puppet"])
3942
40 # ... and then update the puppet configuration43 # ... and then update the puppet configuration
41 if 'conf' in puppet_cfg:44 if 'conf' in puppet_cfg:
42 # Add all sections from the conf object to puppet.conf45 # Add all sections from the conf object to puppet.conf
43 puppet_conf_fh = open('/etc/puppet/puppet.conf', 'r')46 puppet_conf_fn = cloud.paths.join(True, '/etc/puppet/puppet.conf')
47 contents = util.load_file(puppet_conf_fn)
         # Create object for reading puppet.conf values
-        puppet_config = ConfigParser.ConfigParser()
+        puppet_config = helpers.DefaultingConfigParser()
         # Read puppet.conf values from original file in order to be able to
-        # mix the rest up
-        puppet_config.readfp(StringIO.StringIO(''.join(i.lstrip() for i in
-                                                        puppet_conf_fh.readlines())))
-        # Close original file, no longer needed
-        puppet_conf_fh.close()
-        for cfg_name, cfg in puppet_cfg['conf'].iteritems():
-            # ca_cert configuration is a special case
-            # Dump the puppetmaster ca certificate in the correct place
+        # mix the rest up. First clean them up (TODO is this really needed??)
+        cleaned_lines = [i.lstrip() for i in contents.splitlines()]
+        cleaned_contents = '\n'.join(cleaned_lines)
+        puppet_config.readfp(StringIO(cleaned_contents),
+                             filename=puppet_conf_fn)
+        for (cfg_name, cfg) in puppet_cfg['conf'].iteritems():
+            # Cert configuration is a special case
+            # Dump the puppet master ca certificate in the correct place
             if cfg_name == 'ca_cert':
                 # Puppet ssl sub-directory isn't created yet
                 # Create it with the proper permissions and ownership
-                os.makedirs('/var/lib/puppet/ssl')
-                os.chmod('/var/lib/puppet/ssl', 0771)
-                os.chown('/var/lib/puppet/ssl',
-                         pwd.getpwnam('puppet').pw_uid, 0)
-                os.makedirs('/var/lib/puppet/ssl/certs/')
-                os.chown('/var/lib/puppet/ssl/certs/',
-                         pwd.getpwnam('puppet').pw_uid, 0)
-                ca_fh = open('/var/lib/puppet/ssl/certs/ca.pem', 'w')
-                ca_fh.write(cfg)
-                ca_fh.close()
-                os.chown('/var/lib/puppet/ssl/certs/ca.pem',
-                         pwd.getpwnam('puppet').pw_uid, 0)
-                util.restorecon_if_possible('/var/lib/puppet', recursive=True)
+                pp_ssl_dir = cloud.paths.join(False, '/var/lib/puppet/ssl')
+                util.ensure_dir(pp_ssl_dir, 0771)
+                util.chownbyid(pp_ssl_dir,
+                               pwd.getpwnam('puppet').pw_uid, 0)
+                pp_ssl_certs = cloud.paths.join(False,
+                                                '/var/lib/puppet/ssl/certs/')
+                util.ensure_dir(pp_ssl_certs)
+                util.chownbyid(pp_ssl_certs,
+                               pwd.getpwnam('puppet').pw_uid, 0)
+                pp_ssl_ca_certs = cloud.paths.join(False,
+                                                   ('/var/lib/puppet/'
+                                                    'ssl/certs/ca.pem'))
+                util.write_file(pp_ssl_ca_certs, cfg)
+                util.chownbyid(pp_ssl_ca_certs,
+                               pwd.getpwnam('puppet').pw_uid, 0)
             else:
-                #puppet_conf_fh.write("\n[%s]\n" % (cfg_name))
-                # If puppet.conf already has this section we don't want to
-                # write it again
-                if puppet_config.has_section(cfg_name) == False:
-                    puppet_config.add_section(cfg_name)
                 # Iterate throug the config items, we'll use ConfigParser.set
                 # to overwrite or create new items as needed
-                for o, v in cfg.iteritems():
+                for (o, v) in cfg.iteritems():
                     if o == 'certname':
                         # Expand %f as the fqdn
+                        # TODO should this use the cloud fqdn??
                         v = v.replace("%f", socket.getfqdn())
                         # Expand %i as the instance id
-                        v = v.replace("%i",
-                                      cloud.datasource.get_instance_id())
-                        # certname needs to be downcase
+                        v = v.replace("%i", cloud.get_instance_id())
+                        # certname needs to be downcased
                         v = v.lower()
                     puppet_config.set(cfg_name, o, v)
-                    #puppet_conf_fh.write("%s=%s\n" % (o, v))
         # We got all our config as wanted we'll rename
         # the previous puppet.conf and create our new one
-        os.rename('/etc/puppet/puppet.conf', '/etc/puppet/puppet.conf.old')
-        with open('/etc/puppet/puppet.conf', 'wb') as configfile:
-            puppet_config.write(configfile)
-        util.restorecon_if_possible('/etc/puppet/puppet.conf')
+        conf_old_fn = cloud.paths.join(False,
+                                       '/etc/puppet/puppet.conf.old')
+        util.rename(puppet_conf_fn, conf_old_fn)
+        puppet_conf_rw = cloud.paths.join(False, '/etc/puppet/puppet.conf')
+        util.write_file(puppet_conf_rw, puppet_config.stringify())
+
     # Set puppet to automatically start
     if os.path.exists('/etc/default/puppet'):
-        subprocess.check_call(['sed', '-i',
-                               '-e', 's/^START=.*/START=yes/',
-                               '/etc/default/puppet'])
+        util.subp(['sed', '-i',
+                   '-e', 's/^START=.*/START=yes/',
+                   '/etc/default/puppet'], capture=False)
     elif os.path.exists('/bin/systemctl'):
-        subprocess.check_call(['/bin/systemctl', 'enable', 'puppet.service'])
+        util.subp(['/bin/systemctl', 'enable', 'puppet.service'],
+                   capture=False)
     elif os.path.exists('/sbin/chkconfig'):
-        subprocess.check_call(['/sbin/chkconfig', 'puppet', 'on'])
+        util.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False)
     else:
-        log.warn("Do not know how to enable puppet service on this system")
+        log.warn(("Sorry we do not know how to enable"
+                  " puppet services on this system"))
+
     # Start puppetd
-    subprocess.check_call(['service', 'puppet', 'start'])
+    util.subp(['service', 'puppet', 'start'], capture=False)
 
=== modified file 'cloudinit/config/cc_resizefs.py'
--- cloudinit/CloudConfig/cc_resizefs.py 2012-03-21 20:41:50 +0000
+++ cloudinit/config/cc_resizefs.py 2012-07-06 21:16:18 +0000
@@ -18,91 +18,123 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-import cloudinit.util as util
-import subprocess
 import os
 import stat
-import sys
 import time
-import tempfile
-from cloudinit.CloudConfig import per_always
-
-frequency = per_always
-
+
+from cloudinit import util
+from cloudinit.settings import PER_ALWAYS
+
+frequency = PER_ALWAYS
 
-def handle(_name, cfg, _cloud, log, args):
-    if len(args) != 0:
-        resize_root = False
-        if str(args[0]).lower() in ['true', '1', 'on', 'yes']:
-            resize_root = True
-    else:
-        resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True)
-
-    if str(resize_root).lower() in ['false', '0']:
-        return
-
-    # we use mktemp rather than mkstemp because early in boot nothing
-    # else should be able to race us for this, and we need to mknod.
-    devpth = tempfile.mktemp(prefix="cloudinit.resizefs.", dir="/run")
-
+RESIZE_FS_PREFIXES_CMDS = [
+    ('ext', 'resize2fs'),
+    ('xfs', 'xfs_growfs'),
+]
+
+
+def nodeify_path(devpth, where, log):
     try:
-        st_dev = os.stat("/").st_dev
+        st_dev = os.stat(where).st_dev
         dev = os.makedev(os.major(st_dev), os.minor(st_dev))
         os.mknod(devpth, 0400 | stat.S_IFBLK, dev)
+        return st_dev
     except:
         if util.is_container():
-            log.debug("inside container, ignoring mknod failure in resizefs")
+            log.debug("Inside container, ignoring mknod failure in resizefs")
             return
-        log.warn("Failed to make device node to resize /")
+        log.warn("Failed to make device node to resize %s at %s",
+                 where, devpth)
         raise
 
-    cmd = ['blkid', '-c', '/dev/null', '-sTYPE', '-ovalue', devpth]
+
+def get_fs_type(st_dev, path, log):
     try:
-        (fstype, _err) = util.subp(cmd)
-    except subprocess.CalledProcessError as e:
-        log.warn("Failed to get filesystem type of maj=%s, min=%s via: %s" %
-                 (os.major(st_dev), os.minor(st_dev), cmd))
-        log.warn("output=%s\nerror=%s\n", e.output[0], e.output[1])
-        os.unlink(devpth)
+        dev_entries = util.find_devs_with(tag='TYPE', oformat='value',
+                                          no_cache=True, path=path)
+        if not dev_entries:
+            return None
+        return dev_entries[0].strip()
+    except util.ProcessExecutionError:
+        util.logexc(log, ("Failed to get filesystem type"
+                          " of maj=%s, min=%s for path %s"),
+                    os.major(st_dev), os.minor(st_dev), path)
         raise
 
-    if str(fstype).startswith("ext"):
-        resize_cmd = ['resize2fs', devpth]
-    elif fstype == "xfs":
-        resize_cmd = ['xfs_growfs', devpth]
+
+def handle(name, cfg, cloud, log, args):
+    if len(args) != 0:
+        resize_root = args[0]
     else:
-        os.unlink(devpth)
-        log.debug("not resizing unknown filesystem %s" % fstype)
+        resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True)
+
+    if not util.translate_bool(resize_root):
+        log.debug("Skipping module named %s, resizing disabled", name)
         return
 
+    # TODO is the directory ok to be used??
+    resize_root_d = util.get_cfg_option_str(cfg, "resize_rootfs_tmp", "/run")
+    resize_root_d = cloud.paths.join(False, resize_root_d)
+    util.ensure_dir(resize_root_d)
+
+    # TODO: allow what is to be resized to be configurable??
+    resize_what = cloud.paths.join(False, "/")
+    with util.ExtendedTemporaryFile(prefix="cloudinit.resizefs.",
+                                    dir=resize_root_d, delete=True) as tfh:
+        devpth = tfh.name
+
+        # Delete the file so that mknod will work
+        # but don't change the file handle to know that its
+        # removed so that when a later call that recreates
+        # occurs this temporary file will still benefit from
+        # auto deletion
+        tfh.unlink_now()
+
+        st_dev = nodeify_path(devpth, resize_what, log)
+        fs_type = get_fs_type(st_dev, devpth, log)
+        if not fs_type:
+            log.warn("Could not determine filesystem type of %s", resize_what)
+            return
+
+        resizer = None
+        fstype_lc = fs_type.lower()
+        for (pfix, root_cmd) in RESIZE_FS_PREFIXES_CMDS:
+            if fstype_lc.startswith(pfix):
+                resizer = root_cmd
+                break
+
+        if not resizer:
+            log.warn("Not resizing unknown filesystem type %s for %s",
+                     fs_type, resize_what)
+            return
+
+        log.debug("Resizing %s (%s) using %s", resize_what, fs_type, resizer)
+        resize_cmd = [resizer, devpth]
+
+        if resize_root == "noblock":
+            # Fork to a child that will run
+            # the resize command
+            util.fork_cb(do_resize, resize_cmd, log)
+            # Don't delete the file now in the parent
+            tfh.delete = False
+        else:
+            do_resize(resize_cmd, log)
+
+    action = 'Resized'
     if resize_root == "noblock":
-        fid = os.fork()
-        if fid == 0:
-            try:
-                do_resize(resize_cmd, devpth, log)
-                os._exit(0)  # pylint: disable=W0212
-            except Exception as exc:
-                sys.stderr.write("Failed: %s" % exc)
-                os._exit(1)  # pylint: disable=W0212
-    else:
-        do_resize(resize_cmd, devpth, log)
-
-    log.debug("resizing root filesystem (type=%s, maj=%i, min=%i, val=%s)" %
-             (str(fstype).rstrip("\n"), os.major(st_dev), os.minor(st_dev),
-              resize_root))
-
-    return
-
-
-def do_resize(resize_cmd, devpth, log):
+        action = 'Resizing (via forking)'
+    log.debug("%s root filesystem (type=%s, maj=%i, min=%i, val=%s)",
+              action, fs_type, os.major(st_dev), os.minor(st_dev), resize_root)
+
+
+def do_resize(resize_cmd, log):
+    start = time.time()
     try:
-        start = time.time()
         util.subp(resize_cmd)
-    except subprocess.CalledProcessError as e:
-        log.warn("Failed to resize filesystem (%s)" % resize_cmd)
-        log.warn("output=%s\nerror=%s\n", e.output[0], e.output[1])
-        os.unlink(devpth)
+    except util.ProcessExecutionError:
+        util.logexc(log, "Failed to resize filesystem (cmd=%s)", resize_cmd)
         raise
-
-    os.unlink(devpth)
-    log.debug("resize took %s seconds" % (time.time() - start))
+    tot_time = int(time.time() - start)
+    log.debug("Resizing took %s seconds", tot_time)
+    # TODO: Should we add a fsck check after this to make
+    #  sure we didn't corrupt anything?
 
=== modified file 'cloudinit/config/cc_rightscale_userdata.py'
--- cloudinit/CloudConfig/cc_rightscale_userdata.py 2012-01-18 14:07:33 +0000
+++ cloudinit/config/cc_rightscale_userdata.py 2012-07-06 21:16:18 +0000
@@ -35,44 +35,68 @@
 ##
 ##
 
-import cloudinit.util as util
-from cloudinit.CloudConfig import per_instance
-from cloudinit import get_ipath_cur
+import os
+
+from cloudinit import url_helper as uhelp
+from cloudinit import util
+from cloudinit.settings import PER_INSTANCE
+
 from urlparse import parse_qs
 
-frequency = per_instance
-my_name = "cc_rightscale_userdata"
-my_hookname = 'CLOUD_INIT_REMOTE_HOOK'
-
+frequency = PER_INSTANCE
+
+MY_NAME = "cc_rightscale_userdata"
+MY_HOOKNAME = 'CLOUD_INIT_REMOTE_HOOK'
 
-def handle(_name, _cfg, cloud, log, _args):
+
+def handle(name, _cfg, cloud, log, _args):
     try:
         ud = cloud.get_userdata_raw()
     except:
-        log.warn("failed to get raw userdata in %s" % my_name)
+        log.warn("Failed to get raw userdata in module %s", name)
         return
 
     try:
         mdict = parse_qs(ud)
-        if not my_hookname in mdict:
+        if not mdict or not MY_HOOKNAME in mdict:
+            log.debug(("Skipping module %s, "
+                       "did not find %s in parsed"
+                       " raw userdata"), name, MY_HOOKNAME)
             return
     except:
-        log.warn("failed to urlparse.parse_qa(userdata_raw())")
+        util.logexc(log, ("Failed to parse query string %s"
+                          " into a dictionary"), ud)
         raise
 
-    scripts_d = get_ipath_cur('scripts')
-    i = 0
-    first_e = None
-    for url in mdict[my_hookname]:
-        fname = "%s/rightscale-%02i" % (scripts_d, i)
-        i = i + 1
+    wrote_fns = []
+    captured_excps = []
+
+    # These will eventually be then ran by the cc_scripts_user
+    # TODO: maybe this should just be a new user data handler??
+    # Instead of a late module that acts like a user data handler?
+    scripts_d = cloud.get_ipath_cur('scripts')
+    urls = mdict[MY_HOOKNAME]
+    for (i, url) in enumerate(urls):
+        fname = os.path.join(scripts_d, "rightscale-%02i" % (i))
         try:
-            content = util.readurl(url)
-            util.write_file(fname, content, mode=0700)
+            resp = uhelp.readurl(url)
+            # Ensure its a valid http response (and something gotten)
+            if resp.ok() and resp.contents:
+                util.write_file(fname, str(resp), mode=0700)
+                wrote_fns.append(fname)
         except Exception as e:
-            if not first_e:
-                first_e = None
-            log.warn("%s failed to read %s: %s" % (my_name, url, e))
-
-    if first_e:
-        raise(e)
+            captured_excps.append(e)
+            util.logexc(log, "%s failed to read %s and write %s",
+                        MY_NAME, url, fname)
+
+    if wrote_fns:
+        log.debug("Wrote out rightscale userdata to %s files", len(wrote_fns))
+
+    if len(wrote_fns) != len(urls):
+        skipped = len(urls) - len(wrote_fns)
+        log.debug("%s urls were skipped or failed", skipped)
+
+    if captured_excps:
+        log.warn("%s failed with exceptions, re-raising the last one",
+                 len(captured_excps))
+        raise captured_excps[-1]
 
=== modified file 'cloudinit/config/cc_rsyslog.py'
--- cloudinit/CloudConfig/cc_rsyslog.py 2012-01-18 14:07:33 +0000
+++ cloudinit/config/cc_rsyslog.py 2012-07-06 21:16:18 +0000
@@ -18,16 +18,15 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-import cloudinit
-import logging
-import cloudinit.util as util
-import traceback
+import os
+
+from cloudinit import util
 
 DEF_FILENAME = "20-cloud-config.conf"
 DEF_DIR = "/etc/rsyslog.d"
 
 
-def handle(_name, cfg, _cloud, log, _args):
+def handle(name, cfg, cloud, log, _args):
     # rsyslog:
     # - "*.* @@192.158.1.1"
     # - content: "*.* @@192.0.2.1:10514"
@@ -37,17 +36,18 @@
 
     # process 'rsyslog'
     if not 'rsyslog' in cfg:
+        log.debug(("Skipping module named %s,"
+                   " no 'rsyslog' key in configuration"), name)
         return
 
The diff has been truncated for viewing.