Merge lp:~cloud-init/cloud-init/rework into lp:~cloud-init-dev/cloud-init/trunk
- rework
- Merge into trunk
Proposed by
Joshua Harlow
Status: | Merged |
---|---|
Merged at revision: | 564 |
Proposed branch: | lp:~cloud-init/cloud-init/rework |
Merge into: | lp:~cloud-init-dev/cloud-init/trunk |
Diff against target: |
17875 lines (+10046/-4803) 113 files modified
ChangeLog (+193/-0) Makefile (+24/-5) Requires (+30/-0) TODO (+27/-4) bin/cloud-init (+474/-0) cloud-init-cfg.py (+0/-115) cloud-init-query.py (+0/-56) cloud-init.py (+0/-229) cloudinit/DataSource.py (+0/-214) cloudinit/UserDataHandler.py (+0/-262) cloudinit/__init__.py (+4/-650) cloudinit/cloud.py (+101/-0) cloudinit/config/__init__.py (+34/-252) cloudinit/config/cc_apt_pipelining.py (+35/-29) cloudinit/config/cc_apt_update_upgrade.py (+117/-86) cloudinit/config/cc_bootcmd.py (+31/-24) cloudinit/config/cc_byobu.py (+10/-16) cloudinit/config/cc_ca_certs.py (+34/-25) cloudinit/config/cc_chef.py (+72/-62) cloudinit/config/cc_disable_ec2_metadata.py (+17/-11) cloudinit/config/cc_final_message.py (+44/-34) cloudinit/config/cc_foo.py (+32/-9) cloudinit/config/cc_grub_dpkg.py (+15/-12) cloudinit/config/cc_keys_to_console.py (+28/-17) cloudinit/config/cc_landscape.py (+42/-22) cloudinit/config/cc_locale.py (+9/-26) cloudinit/config/cc_mcollective.py (+55/-63) cloudinit/config/cc_mounts.py (+57/-36) cloudinit/config/cc_phone_home.py (+51/-39) cloudinit/config/cc_puppet.py (+59/-54) cloudinit/config/cc_resizefs.py (+99/-67) cloudinit/config/cc_rightscale_userdata.py (+50/-26) cloudinit/config/cc_rsyslog.py (+32/-31) cloudinit/config/cc_runcmd.py (+14/-8) cloudinit/config/cc_salt_minion.py (+30/-26) cloudinit/config/cc_scripts_per_boot.py (+17/-10) cloudinit/config/cc_scripts_per_instance.py (+17/-10) cloudinit/config/cc_scripts_per_once.py (+17/-10) cloudinit/config/cc_scripts_user.py (+18/-10) cloudinit/config/cc_set_hostname.py (+10/-17) cloudinit/config/cc_set_passwords.py (+62/-45) cloudinit/config/cc_ssh.py (+76/-50) cloudinit/config/cc_ssh_import_id.py (+19/-16) cloudinit/config/cc_timezone.py (+10/-38) cloudinit/config/cc_update_etc_hosts.py (+36/-63) cloudinit/config/cc_update_hostname.py (+14/-74) cloudinit/distros/__init__.py (+163/-0) cloudinit/distros/debian.py (+149/-0) cloudinit/distros/fedora.py (+31/-0) cloudinit/distros/rhel.py (+337/-0) 
cloudinit/distros/ubuntu.py (+31/-0) cloudinit/handlers/__init__.py (+222/-0) cloudinit/handlers/boot_hook.py (+73/-0) cloudinit/handlers/cloud_config.py (+62/-0) cloudinit/handlers/shell_script.py (+52/-0) cloudinit/handlers/upstart_job.py (+66/-0) cloudinit/helpers.py (+452/-0) cloudinit/importer.py (+65/-0) cloudinit/log.py (+133/-0) cloudinit/netinfo.py (+81/-30) cloudinit/settings.py (+57/-0) cloudinit/sources/DataSourceCloudStack.py (+94/-39) cloudinit/sources/DataSourceConfigDrive.py (+116/-121) cloudinit/sources/DataSourceEc2.py (+143/-95) cloudinit/sources/DataSourceMAAS.py (+81/-162) cloudinit/sources/DataSourceNoCloud.py (+75/-79) cloudinit/sources/DataSourceOVF.py (+117/-156) cloudinit/sources/__init__.py (+223/-0) cloudinit/ssh_util.py (+275/-188) cloudinit/stages.py (+551/-0) cloudinit/templater.py (+41/-0) cloudinit/url_helper.py (+226/-0) cloudinit/user_data.py (+243/-0) cloudinit/util.py (+1136/-592) cloudinit/version.py (+27/-0) config/cloud.cfg (+36/-4) config/cloud.cfg.d/05_logging.cfg (+5/-1) install.sh (+0/-31) packages/bddeb (+172/-33) packages/brpm (+216/-0) packages/debian/changelog (+1/-1) packages/debian/control (+4/-6) packages/debian/rules (+3/-15) packages/make-dist-tarball (+2/-2) packages/make-tarball (+89/-0) packages/redhat/cloud-init.spec (+183/-0) setup.py (+102/-17) sysvinit/cloud-config (+124/-0) sysvinit/cloud-final (+124/-0) sysvinit/cloud-init (+124/-0) sysvinit/cloud-init-local (+124/-0) templates/chef_client.rb.tmpl (+4/-4) templates/default-locale.tmpl (+0/-1) templates/hosts.redhat.tmpl (+22/-0) templates/hosts.ubuntu.tmpl (+7/-8) templates/sources.list.tmpl (+56/-57) tests/configs/sample1.yaml (+53/-0) tests/unittests/test__init__.py (+75/-93) tests/unittests/test_builtin_handlers.py (+54/-0) tests/unittests/test_datasource/test_maas.py (+33/-37) tests/unittests/test_handler/test_handler_ca_certs.py (+62/-45) tests/unittests/test_userdata.py (+90/-53) tests/unittests/test_util.py (+69/-64) tools/hacking.py (+175/-0) 
tools/mock-meta.py (+444/-0) tools/read-dependencies (+45/-0) tools/read-version (+70/-0) tools/run-pep8 (+35/-0) tools/run-pylint (+1/-12) upstart/cloud-config.conf (+1/-1) upstart/cloud-final.conf (+1/-1) upstart/cloud-init-local.conf (+1/-1) upstart/cloud-init.conf (+1/-1) |
To merge this branch: | bzr merge lp:~cloud-init/cloud-init/rework |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
cloud-init Commiters | Pending | ||
Review via email: mp+113684@code.launchpad.net |
Commit message
Description of the change
To post a comment you must log in.
lp:~cloud-init/cloud-init/rework
updated
- 992. By Joshua Harlow
-
Updated so that if no mirror is found, the module stops running.
- 993. By Joshua Harlow
-
Add comment about keeping track of what people think about the 'read'
and 'write' root, and if it confuses them, remove it later and just
recommend a more 'natural' way of doing it (ie 'chroot'). - 994. By Scott Moser
-
setup.py: rename "daemon type" to "init system"
This brings with it other changes, and also makes an install
install all of the requisite init files. (ie, cloud-init needs the -local and
the non-local) - 995. By Joshua Harlow
-
Fix the initsys variable, setuptools/distools will automatically assign
to a variable of the name 'init_system' instead due to the param name being
'init-system'.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'ChangeLog' |
2 | --- ChangeLog 2012-06-21 15:37:22 +0000 |
3 | +++ ChangeLog 2012-07-06 21:16:18 +0000 |
4 | @@ -1,3 +1,196 @@ |
5 | +0.7.0: |
6 | + - unified binary that activates the various stages |
7 | + - Now using argparse + subcommands to specify the various CLI options |
8 | + - a stage module that clearly separates the stages of the different |
9 | + components (also described how they are used and in what order in the |
10 | + new unified binary) |
11 | + - user_data is now a module that just does user data processing while the |
12 | + actual activation and 'handling' of the processed user data is done via |
13 | + a separate set of files (and modules) with the main 'init' stage being the |
14 | + controller of this |
15 | + - creation of boot_hook, cloud_config, shell_script, upstart_job version 2 |
16 | + modules (with classes that perform their functionality) instead of those |
17 | + having functionality that is attached to the cloudinit object (which |
18 | + reduces reuse and limits future functionality, and makes testing harder) |
19 | + - removal of global config that defined paths, shared config, now this is |
20 | + via objects making unit testing testing and global side-effects a non issue |
21 | + - creation of a 'helpers.py' |
22 | + - this contains an abstraction for the 'lock' like objects that the various |
23 | + module/handler running stages use to avoid re-running a given |
24 | + module/handler for a given frequency. this makes it separated from |
25 | + the actual usage of that object (thus helpful for testing and clear lines |
26 | + usage and how the actual job is accomplished) |
27 | + - a common 'runner' class is the main entrypoint using these locks to |
28 | + run function objects passed in (along with their arguments) and their |
29 | + frequency |
30 | + - add in a 'paths' object that provides access to the previously global |
31 | + and/or config based paths (thus providing a single entrypoint object/type |
32 | + that provides path information) |
33 | + - this also adds in the ability to change the path when constructing |
34 | + that path 'object' and adding in additional config that can be used to |
35 | + alter the root paths of 'joins' (useful for testing or possibly useful |
36 | + in chroots?) |
37 | + - config options now available that can alter the 'write_root' and the |
38 | + 'read_root' when backing code uses the paths join() function |
39 | + - add a config parser subclass that will automatically add unknown sections |
40 | + and return default values (instead of throwing exceptions for these cases) |
41 | + - a new config merging class that will be the central object that knows |
42 | + how to do the common configuration merging from the various configuration |
43 | + sources. The order is the following: |
44 | + - cli config files override environment config files |
45 | + which override instance configs which override datasource |
46 | + configs which override base configuration which overrides |
47 | + default configuration. |
48 | + - remove the passing around of the 'cloudinit' object as a 'cloud' variable |
49 | + and instead pass around an 'interface' object that can be given to modules |
50 | + and handlers as their cloud access layer while the backing of that |
51 | + object can be varied (good for abstraction and testing) |
52 | + - use a single set of functions to do importing of modules |
53 | + - add a function in which will search for a given set of module names with |
54 | + a given set of attributes and return those which are found |
55 | + - refactor logging so that instead of using a single top level 'log' that |
56 | + instead each component/module can use its own logger (if desired), this |
57 | + should be backwards compatible with handlers and config modules that used |
58 | + the passed in logger (its still passed in) |
59 | + - ensure that all places where exception are caught and where applicable |
60 | + that the util logexc() is called, so that no exceptions that may occur |
61 | + are dropped without first being logged (where it makes sense for this |
62 | + to happen) |
63 | + - add a 'requires' file that lists cloud-init dependencies |
64 | + - applying it in package creation (bdeb and brpm) as well as using it |
65 | + in the modified setup.py to ensure dependencies are installed when |
66 | + using that method of packaging |
67 | + - add a 'version.py' that lists the active version (in code) so that code |
68 | + inside cloud-init can report the version in messaging and other config files |
69 | + - cleanup of subprocess usage so that all subprocess calls go through the |
70 | + subp() utility method, which now has an exception type that will provide |
71 | + detailed information on python 2.6 and 2.7 |
72 | + - forced all code loading, moving, chmod, writing files and other system |
73 | + level actions to go through standard set of util functions, this greatly |
74 | + helps in debugging and determining exactly which system actions cloud-init is |
75 | + performing |
76 | + - switching out the templating engine cheetah for tempita since tempita has |
77 | + no external dependencies (minus python) while cheetah has many dependencies |
78 | + which makes it more difficult to adopt cloud-init in distros that may not |
79 | + have those dependencies |
80 | + - adjust url fetching and url trying to go through a single function that |
81 | + reads urls in the new 'url helper' file, this helps in tracing, debugging |
82 | + and knowing which urls are being called and/or posted to from with-in |
83 | + cloud-init code |
84 | + - add in the sending of a 'User-Agent' header for all urls fetched that |
85 | + do not provide their own header mapping, derive this user-agent from |
86 | + the following template, 'Cloud-Init/{version}' where the version is the |
87 | + cloud-init version number |
88 | + - using prettytable for netinfo 'debug' printing since it provides a standard |
89 | + and defined output that should be easier to parse than a custom format |
90 | + - add a set of distro specific classes, that handle distro specific actions |
91 | + that modules and or handler code can use as needed, this is organized into |
92 | + a base abstract class with child classes that implement the shared |
93 | + functionality. config determines exactly which subclass to load, so it can |
94 | + be easily extended as needed. |
95 | + - current functionality |
96 | + - network interface config file writing |
97 | + - hostname setting/updating |
98 | + - locale/timezone/ setting |
99 | + - updating of /etc/hosts (with templates or generically) |
100 | + - package commands (ie installing, removing)/mirror finding |
101 | + - interface up/down activating |
102 | + - implemented a debian + ubuntu subclass |
103 | + - implemented a redhat + fedora subclass |
104 | + - adjust the root 'cloud.cfg' file to now have distribution/path specific |
105 | + configuration values in it. these special configs are merged as the normal |
106 | + config is, but the system level config is not passed into modules/handlers |
107 | + - modules/handlers must go through the path and distro object instead |
108 | + - have the cloudstack datasource test the url before calling into boto to |
109 | + avoid the long wait for boto to finish retrying and finally fail when |
110 | + the gateway meta-data address is unavailable |
111 | + - add a simple mock ec2 meta-data python based http server that can serve a |
112 | + very simple set of ec2 meta-data back to callers |
113 | + - useful for testing or for understanding what the ec2 meta-data |
114 | + service can provide in terms of data or functionality |
115 | + - for ssh key and authorized key file parsing add in classes and util functions |
116 | + that maintain the state of individual lines, allowing for a clearer |
117 | + separation of parsing and modification (useful for testing and tracing) |
118 | + - add a set of 'base' init.d scripts that can be used on systems that do |
119 | + not have full upstart or systemd support (or support that does not match |
120 | + the standard fedora/ubuntu implementation) |
121 | + - currently these are being tested on RHEL 6.2 |
122 | + - separate the datasources into their own subdirectory (instead of being |
123 | + a top-level item), this matches how config 'modules' and user-data 'handlers' |
124 | + are also in their own subdirectory (thus helping new developers and others |
125 | + understand the code layout in a quicker manner) |
126 | + - add the building of rpms based off a new cli tool and template 'spec' file |
127 | + that will templatize and perform the necessary commands to create a source |
128 | + and binary package to be used with a cloud-init install on a 'rpm' supporting |
129 | + system |
130 | + - uses the new standard set of requires and converts those pypi requirements |
131 | + into a local set of package requirements (that are known to exist on RHEL |
132 | + systems but should also exist on fedora systems) |
133 | + - adjust the bdeb builder to be a python script (instead of a shell script) and |
134 | + make its 'control' file a template that takes in the standard set of pypi |
135 | + dependencies and uses a local mapping (known to work on ubuntu) to create the |
136 | + packages set of dependencies (that should also work on ubuntu-like systems) |
137 | + - pythonify a large set of various pieces of code |
138 | + - remove wrapping return statements with () when it has no effect |
139 | + - upper case all constants used |
140 | + - correctly 'case' class and method names (where applicable) |
141 | + - use os.path.join (and similar commands) instead of custom path creation |
142 | + - use 'is None' instead of the frowned upon '== None' which picks up a larger |
143 | + set of 'true' cases than is typically desired (ie for objects that have |
144 | + their own equality) |
145 | + - use context managers on locks, tempdir, chdir, file, selinux, umask, |
146 | + unmounting commands so that these actions do not have to be closed and/or |
147 | + cleaned up manually in finally blocks, which is typically not done and will |
148 | + eventually be a bug in the future |
149 | + - use the 'abc' module for abstract classes base where possible |
150 | + - applied in the datasource root class, the distro root class, and the |
151 | + user-data v2 root class |
152 | + - when loading yaml, check that the 'root' type matches a predefined set of |
153 | + valid types (typically just 'dict') and throw a type error if a mismatch |
154 | + occurs, this seems to be a good idea to do when loading user config files |
155 | + - when forking a long running task (ie resizing a filesystem) use a new util |
156 | + function that will fork and then call a callback, instead of having to |
157 | + implement all that code in a non-shared location (thus allowing it to be |
158 | + used by others in the future) |
159 | + - when writing out filenames, go through a util function that will attempt to |
160 | + ensure that the given filename is 'filesystem' safe by replacing '/' with |
161 | + '_' and removing characters which do not match a given whitelist of allowed |
162 | + filename characters |
163 | + - for the varying usages of the 'blkid' command make a function in the util |
164 | + module that can be used as the single point of entry for interaction with |
165 | + that command (and its results) instead of having X separate implementations |
166 | + - place the rfc 2822 time formatting and uptime repeated pieces of code in the |
167 | + util module as a set of function with the name 'time_rfc2822'/'uptime' |
168 | + - separate the pylint+pep8 calling from one tool into two individual tools so |
169 | + that they can be called independently, add make file sections that can be |
170 | + used to call these independently |
171 | + - remove the support for the old style config that was previously located in |
172 | + '/etc/ec2-init/ec2-config.cfg', no longer supported! |
173 | + - instead of using a altered config parser that added its own 'dummy' section |
174 | + on in the 'mcollective' module, use configobj which handles the parsing of |
175 | + config without sections better (and it also maintains comments instead of |
176 | + removing them) |
177 | + - use the new defaulting config parser (that will not raise errors on sections |
178 | + that do not exist or return errors when values are fetched that do not exist) |
179 | + in the 'puppet' module |
180 | + - for config 'modules' add in the ability for the module to provide a list of |
181 | + distro names which it is known to work with, if when ran and the distro being |
182 | + used name does not match one of those in this list, a warning will be written |
183 | + out saying that this module may not work correctly on this distribution |
184 | + - for all dynamically imported modules ensure that they are fixed up before |
185 | + they are used by ensuring that they have certain attributes, if they do not |
186 | + have those attributes they will be set to a sensible set of defaults instead |
187 | + - adjust all 'config' modules and handlers to use the adjusted util functions |
188 | + and the new distro objects where applicable so that those pieces of code can |
189 | + benefit from the unified and enhanced functionality being provided in that |
190 | + util module |
191 | + - fix a potential bug whereby when a #includeonce was encountered it would |
192 | + enable checking of urls against a cache, if later a #include was encountered |
193 | + it would continue checking against that cache, instead of refetching (which |
194 | + would likely be the expected case) |
195 | + - add a openstack/nova based pep8 extension utility ('hacking.py') that allows |
196 | + for custom checks (along with the standard pep8 checks) to occur when running |
197 | + 'make pep8' and its derivatives |
198 | 0.6.4: |
199 | - support relative path in AuthorizedKeysFile (LP: #970071). |
200 | - make apt-get update run with --quiet (suitable for logging) (LP: #1012613) |
201 | |
202 | === modified file 'Makefile' |
203 | --- Makefile 2012-01-12 15:06:27 +0000 |
204 | +++ Makefile 2012-07-06 21:16:18 +0000 |
205 | @@ -1,14 +1,33 @@ |
206 | +CWD=$(shell pwd) |
207 | +PY_FILES=$(shell find cloudinit bin -name "*.py") |
208 | +PY_FILES+="bin/cloud-init" |
209 | |
210 | all: test |
211 | |
212 | +pep8: |
213 | + $(CWD)/tools/run-pep8 $(PY_FILES) |
214 | + |
215 | pylint: |
216 | - pylint cloudinit |
217 | + $(CWD)/tools/run-pylint $(PY_FILES) |
218 | |
219 | pyflakes: |
220 | - pyflakes . |
221 | + pyflakes $(PY_FILES) |
222 | |
223 | test: |
224 | - nosetests tests/unittests/ |
225 | - |
226 | -.PHONY: test pylint pyflakes |
227 | + nosetests $(noseopts) tests/unittests/ |
228 | + |
229 | +2to3: |
230 | + 2to3 $(PY_FILES) |
231 | + |
232 | +clean: |
233 | + rm -rf /var/log/cloud-init.log \ |
234 | + /var/lib/cloud/ |
235 | + |
236 | +rpm: |
237 | + cd packages && ./brpm |
238 | + |
239 | +deb: |
240 | + cd packages && ./bddeb |
241 | + |
242 | +.PHONY: test pylint pyflakes 2to3 clean pep8 rpm deb |
243 | |
244 | |
245 | === added file 'Requires' |
246 | --- Requires 1970-01-01 00:00:00 +0000 |
247 | +++ Requires 2012-07-06 21:16:18 +0000 |
248 | @@ -0,0 +1,30 @@ |
249 | +# Pypi requirements for cloud-init to work |
250 | + |
251 | +# Used for templating any files or strings that are considered |
252 | +# to be templates, not cheetah since it pulls in a lot of extra libs. |
253 | +# This one is pretty dinky and does what we want (var substitution) |
254 | +Tempita |
255 | + |
256 | +# This is used for any pretty printing of tabular data. |
257 | +PrettyTable |
258 | + |
259 | +# This one is currently only used by the MAAS datasource. If that |
260 | +# datasource is removed, this is no longer needed |
261 | +oauth |
262 | + |
263 | +# This is used to fetch the ec2 metadata into a easily |
264 | +# parseable format, instead of having to have cloud-init perform |
265 | +# those same fetches and decodes and signing (...) that ec2 requires. |
266 | +boto |
267 | + |
268 | +# This is only needed for places where we need to support configs in a manner |
269 | +# that the built-in config parser is not sufficient (ie |
270 | +# when we need to preserve comments, or do not have a top-level |
271 | +# section)... |
272 | +configobj |
273 | + |
274 | +# All new style configurations are in the yaml format |
275 | +pyyaml |
276 | + |
277 | +# The new main entrypoint uses argparse instead of optparse |
278 | +argparse |
279 | |
280 | === modified file 'TODO' |
281 | --- TODO 2011-02-17 20:48:41 +0000 |
282 | +++ TODO 2012-07-06 21:16:18 +0000 |
283 | @@ -1,14 +1,37 @@ |
284 | -- consider 'failsafe' DataSource |
285 | +- Consider a 'failsafe' DataSource |
286 | If all others fail, setting a default that |
287 | - sets the user password, writing it to console |
288 | - logs to console that this happened |
289 | -- consider 'previous' DataSource |
290 | +- Consider a 'previous' DataSource |
291 | If no other data source is found, fall back to the 'previous' one |
292 | keep a indication of what instance id that is in /var/lib/cloud |
293 | -- rewrite "cloud-init-query" |
294 | - have DataSource and cloudinit expose explicit fields |
295 | +- Rewrite "cloud-init-query" (currently not implemented) |
296 | + Possibly have DataSource and cloudinit expose explicit fields |
297 | - instance-id |
298 | - hostname |
299 | - mirror |
300 | - release |
301 | - ssh public keys |
302 | +- Remove the conversion of the ubuntu network interface format conversion |
303 | + to a RH/fedora format and replace it with a top level format that uses |
304 | + the netcf libraries format instead (which itself knows how to translate |
305 | + into the specific formats) |
306 | +- Replace the 'apt*' modules with variants that now use the distro classes |
307 | + to perform distro independent packaging commands (where possible) |
308 | +- Canonicalize the semaphore/lock name for modules and user data handlers |
309 | + a. It is most likely a bug that currently exists that if a module in config |
310 | + alters its name and it has already ran, then it will get ran again since |
311 | + the lock name hasn't been canonicalized |
312 | +- Replace some of the LOG.debug calls with a LOG.info where appropriate instead |
313 | + of how right now there are really only 2 levels (WARN and DEBUG) |
314 | +- Remove the 'cc_' for config modules, either have them fully specified (ie |
315 | + 'cloudinit.config.resizefs') or by default only look in the 'cloudinit.config' |
316 | + for these modules (or have a combination of the above), this avoids having |
317 | + to understand where your modules are coming from (which can be altered by |
318 | + the current python inclusion path) |
319 | +- Depending on if people think the wrapper around 'os.path.join' provided |
320 | + by the 'paths' object is useful (allowing us to modify based off a 'read' |
321 | + and 'write' configuration based 'root') or is just too confusing, it might be |
322 | + something to remove later, and just recommend using 'chroot' instead (or the X |
323 | + different other options which are similar to 'chroot'), which might be more |
324 | + natural and less confusing... |
325 | |
326 | === added directory 'bin' |
327 | === added file 'bin/cloud-init' |
328 | --- bin/cloud-init 1970-01-01 00:00:00 +0000 |
329 | +++ bin/cloud-init 2012-07-06 21:16:18 +0000 |
330 | @@ -0,0 +1,474 @@ |
331 | +#!/usr/bin/python |
332 | +# vi: ts=4 expandtab |
333 | +# |
334 | +# Copyright (C) 2012 Canonical Ltd. |
335 | +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. |
336 | +# Copyright (C) 2012 Yahoo! Inc. |
337 | +# |
338 | +# Author: Scott Moser <scott.moser@canonical.com> |
339 | +# Author: Juerg Haefliger <juerg.haefliger@hp.com> |
340 | +# Author: Joshua Harlow <harlowja@yahoo-inc.com> |
341 | +# |
342 | +# This program is free software: you can redistribute it and/or modify |
343 | +# it under the terms of the GNU General Public License version 3, as |
344 | +# published by the Free Software Foundation. |
345 | +# |
346 | +# This program is distributed in the hope that it will be useful, |
347 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
348 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
349 | +# GNU General Public License for more details. |
350 | +# |
351 | +# You should have received a copy of the GNU General Public License |
352 | +# along with this program. If not, see <http://www.gnu.org/licenses/>. |
353 | + |
354 | +import argparse |
355 | +import os |
356 | +import sys |
357 | +import traceback |
358 | + |
359 | +# This is more just for running from the bin folder so that |
360 | +# cloud-init binary can find the cloudinit module |
361 | +possible_topdir = os.path.normpath(os.path.join(os.path.abspath( |
362 | + sys.argv[0]), os.pardir, os.pardir)) |
363 | +if os.path.exists(os.path.join(possible_topdir, "cloudinit", "__init__.py")): |
364 | + sys.path.insert(0, possible_topdir) |
365 | + |
366 | +from cloudinit import log as logging |
367 | +from cloudinit import netinfo |
368 | +from cloudinit import sources |
369 | +from cloudinit import stages |
370 | +from cloudinit import templater |
371 | +from cloudinit import util |
372 | +from cloudinit import version |
373 | + |
374 | +from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE, |
375 | + CLOUD_CONFIG) |
376 | + |
377 | + |
378 | +# Pretty little welcome message template |
379 | +WELCOME_MSG_TPL = ("Cloud-init v. {{version}} running '{{action}}' at " |
380 | + "{{timestamp}}. Up {{uptime}} seconds.") |
381 | + |
382 | +# Module section template |
383 | +MOD_SECTION_TPL = "cloud_%s_modules" |
384 | + |
385 | +# Things u can query on |
386 | +QUERY_DATA_TYPES = [ |
387 | + 'data', |
388 | + 'data_raw', |
389 | + 'instance_id', |
390 | +] |
391 | + |
392 | +# Frequency shortname to full name |
393 | +# (so users don't have to remember the full name...) |
394 | +FREQ_SHORT_NAMES = { |
395 | + 'instance': PER_INSTANCE, |
396 | + 'always': PER_ALWAYS, |
397 | + 'once': PER_ONCE, |
398 | +} |
399 | + |
400 | +LOG = logging.getLogger() |
401 | + |
402 | + |
403 | +# Used for when a logger may not be active |
404 | +# and we still want to print exceptions... |
405 | +def print_exc(msg=''): |
406 | + if msg: |
407 | + sys.stderr.write("%s\n" % (msg)) |
408 | + sys.stderr.write('-' * 60) |
409 | + sys.stderr.write("\n") |
410 | + traceback.print_exc(file=sys.stderr) |
411 | + sys.stderr.write('-' * 60) |
412 | + sys.stderr.write("\n") |
413 | + |
414 | + |
415 | +def welcome(action): |
416 | + tpl_params = { |
417 | + 'version': version.version_string(), |
418 | + 'uptime': util.uptime(), |
419 | + 'timestamp': util.time_rfc2822(), |
420 | + 'action': action, |
421 | + } |
422 | + tpl_msg = templater.render_string(WELCOME_MSG_TPL, tpl_params) |
423 | + util.multi_log("%s\n" % (tpl_msg), |
424 | + console=False, stderr=True) |
425 | + |
426 | + |
427 | +def extract_fns(args): |
428 | + # Files are already opened so lets just pass that along |
429 | + # since it would of broke if it couldn't have |
430 | + # read that file already... |
431 | + fn_cfgs = [] |
432 | + if args.files: |
433 | + for fh in args.files: |
434 | + # The realpath is more useful in logging |
435 | + # so lets resolve to that... |
436 | + fn_cfgs.append(os.path.realpath(fh.name)) |
437 | + return fn_cfgs |
438 | + |
439 | + |
440 | +def run_module_section(mods, action_name, section): |
441 | + full_section_name = MOD_SECTION_TPL % (section) |
442 | + (which_ran, failures) = mods.run_section(full_section_name) |
443 | + total_attempted = len(which_ran) + len(failures) |
444 | + if total_attempted == 0: |
445 | + msg = ("No '%s' modules to run" |
446 | + " under section '%s'") % (action_name, full_section_name) |
447 | + sys.stderr.write("%s\n" % (msg)) |
448 | + LOG.debug(msg) |
449 | + return 0 |
450 | + else: |
451 | + LOG.debug("Ran %s modules with %s failures", |
452 | + len(which_ran), len(failures)) |
453 | + return len(failures) |
454 | + |
455 | + |
456 | +def main_init(name, args): |
457 | + deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK] |
458 | + if args.local: |
459 | + deps = [sources.DEP_FILESYSTEM] |
460 | + |
461 | + if not args.local: |
462 | + # See doc/kernel-cmdline.txt |
463 | + # |
464 | + # This is used in maas datasource, in "ephemeral" (read-only root) |
465 | + # environment where the instance netboots to iscsi ro root. |
466 | + # and the entity that controls the pxe config has to configure |
467 | + # the maas datasource. |
468 | + # |
469 | + # Could be used elsewhere, only works on network based (not local). |
470 | + root_name = "%s.d" % (CLOUD_CONFIG) |
471 | + target_fn = os.path.join(root_name, "91_kernel_cmdline_url.cfg") |
472 | + util.read_write_cmdline_url(target_fn) |
473 | + |
474 | + # Cloud-init 'init' stage is broken up into the following sub-stages |
475 | + # 1. Ensure that the init object fetches its config without errors |
476 | + # 2. Setup logging/output redirections with resultant config (if any) |
477 | + # 3. Initialize the cloud-init filesystem |
478 | + # 4. Check if we can stop early by looking for various files |
479 | + # 5. Fetch the datasource |
480 | + # 6. Connect to the current instance location + update the cache |
481 | + # 7. Consume the userdata (handlers get activated here) |
482 | + # 8. Construct the modules object |
483 | + # 9. Adjust any subsequent logging/output redirections using |
484 | + # the modules objects configuration |
485 | + # 10. Run the modules for the 'init' stage |
486 | + # 11. Done! |
487 | + welcome(name) |
488 | + init = stages.Init(deps) |
489 | + # Stage 1 |
490 | + init.read_cfg(extract_fns(args)) |
491 | + # Stage 2 |
492 | + outfmt = None |
493 | + errfmt = None |
494 | + try: |
495 | + LOG.debug("Closing stdin") |
496 | + util.close_stdin() |
497 | + (outfmt, errfmt) = util.fixup_output(init.cfg, name) |
498 | + except: |
499 | + util.logexc(LOG, "Failed to setup output redirection!") |
500 | + print_exc("Failed to setup output redirection!") |
501 | + if args.debug: |
502 | + # Reset so that all the debug handlers are closed out |
503 | + LOG.debug(("Logging being reset, this logger may no" |
504 | + " longer be active shortly")) |
505 | + logging.resetLogging() |
506 | + logging.setupLogging(init.cfg) |
507 | + # Stage 3 |
508 | + try: |
509 | + init.initialize() |
510 | + except Exception: |
511 | + util.logexc(LOG, "Failed to initialize, likely bad things to come!") |
512 | + # Stage 4 |
513 | + path_helper = init.paths |
514 | + if not args.local: |
515 | + sys.stderr.write("%s\n" % (netinfo.debug_info())) |
516 | + LOG.debug(("Checking to see if files that we need already" |
517 | + " exist from a previous run that would allow us" |
518 | + " to stop early.")) |
519 | + stop_files = [ |
520 | + os.path.join(path_helper.get_cpath("data"), "no-net"), |
521 | + path_helper.get_ipath_cur("obj_pkl"), |
522 | + ] |
523 | + existing_files = [] |
524 | + for fn in stop_files: |
525 | + try: |
526 | + c = util.load_file(fn) |
527 | + if len(c): |
528 | + existing_files.append((fn, len(c))) |
529 | + except Exception: |
530 | + pass |
531 | + if existing_files: |
532 | + LOG.debug("Exiting early due to the existence of %s files", |
533 | + existing_files) |
534 | + return 0 |
535 | + else: |
536 | + # The cache is not instance specific, so it has to be purged |
537 | + # but we want 'start' to benefit from a cache if |
538 | + # a previous start-local populated one... |
539 | + manual_clean = util.get_cfg_option_bool(init.cfg, |
540 | + 'manual_cache_clean', False) |
541 | + if manual_clean: |
542 | + LOG.debug("Not purging instance link, manual cleaning enabled") |
543 | + init.purge_cache(False) |
544 | + else: |
545 | + init.purge_cache() |
546 | + # Delete the non-net file as well |
547 | + util.del_file(os.path.join(path_helper.get_cpath("data"), "no-net")) |
548 | + # Stage 5 |
549 | + try: |
550 | + init.fetch() |
551 | + except sources.DataSourceNotFoundException: |
552 | + util.logexc(LOG, ("No instance datasource found!" |
553 | + " Likely bad things to come!")) |
554 | + # In the case of cloud-init (net mode) it is a bit |
555 | + # more likely that the user would consider it |
556 | + # failure if nothing was found. When using |
557 | + # upstart it will also mentions job failure |
558 | + # in console log if exit code is != 0. |
559 | + if not args.force: |
560 | + if args.local: |
561 | + return 0 |
562 | + else: |
563 | + return 1 |
564 | + # Stage 6 |
565 | + iid = init.instancify() |
566 | + LOG.debug("%s will now be targeting instance id: %s", name, iid) |
567 | + init.update() |
568 | + # Stage 7 |
569 | + try: |
570 | + # Attempt to consume the data per instance. |
571 | + # This may run user-data handlers and/or perform |
572 | + # url downloads and such as needed. |
573 | + (ran, _results) = init.cloudify().run('consume_userdata', |
574 | + init.consume_userdata, |
575 | + args=[PER_INSTANCE], |
576 | + freq=PER_INSTANCE) |
577 | + if not ran: |
578 | + # Just consume anything that is set to run per-always |
579 | + # if nothing ran in the per-instance code |
580 | + # |
581 | + # See: https://bugs.launchpad.net/bugs/819507 for a little |
582 | + # reason behind this... |
583 | + init.consume_userdata(PER_ALWAYS) |
584 | + except Exception: |
585 | + util.logexc(LOG, "Consuming user data failed!") |
586 | + return 1 |
587 | + # Stage 8 - TODO - do we really need to re-extract our configs? |
588 | + mods = stages.Modules(init, extract_fns(args)) |
589 | + # Stage 9 - TODO is this really needed?? |
590 | + try: |
591 | + outfmt_orig = outfmt |
592 | + errfmt_orig = errfmt |
593 | + (outfmt, errfmt) = util.get_output_cfg(mods.cfg, name) |
594 | + if outfmt_orig != outfmt or errfmt_orig != errfmt: |
595 | + LOG.warn("Stdout, stderr changing to (%s, %s)", outfmt, errfmt) |
596 | + (outfmt, errfmt) = util.fixup_output(mods.cfg, name) |
597 | + except: |
598 | + util.logexc(LOG, "Failed to re-adjust output redirection!") |
599 | + # Stage 10 |
600 | + return run_module_section(mods, name, name) |
601 | + |
602 | + |
603 | +def main_modules(action_name, args): |
604 | + name = args.mode |
605 | + # Cloud-init 'modules' stages are broken up into the following sub-stages |
606 | + # 1. Ensure that the init object fetches its config without errors |
607 | + # 2. Get the datasource from the init object, if it does |
608 | + # not exist then that means the main_init stage never |
609 | + # worked, and thus this stage can not run. |
610 | + # 3. Construct the modules object |
611 | + # 4. Adjust any subsequent logging/output redirections using |
612 | + # the modules objects configuration |
613 | + # 5. Run the modules for the given stage name |
614 | + # 6. Done! |
615 | + welcome("%s:%s" % (action_name, name)) |
616 | + init = stages.Init(ds_deps=[]) |
617 | + # Stage 1 |
618 | + init.read_cfg(extract_fns(args)) |
619 | + # Stage 2 |
620 | + try: |
621 | + init.fetch() |
622 | + except sources.DataSourceNotFoundException: |
623 | + # There was no datasource found, theres nothing to do |
624 | + util.logexc(LOG, ('Can not apply stage %s, ' |
625 | + 'no datasource found!' |
626 | + " Likely bad things to come!"), name) |
627 | + print_exc(('Can not apply stage %s, ' |
628 | + 'no datasource found!' |
629 | + " Likely bad things to come!") % (name)) |
630 | + if not args.force: |
631 | + return 1 |
632 | + # Stage 3 |
633 | + mods = stages.Modules(init, extract_fns(args)) |
634 | + # Stage 4 |
635 | + try: |
636 | + LOG.debug("Closing stdin") |
637 | + util.close_stdin() |
638 | + util.fixup_output(mods.cfg, name) |
639 | + except: |
640 | + util.logexc(LOG, "Failed to setup output redirection!") |
641 | + if args.debug: |
642 | + # Reset so that all the debug handlers are closed out |
643 | + LOG.debug(("Logging being reset, this logger may no" |
644 | + " longer be active shortly")) |
645 | + logging.resetLogging() |
646 | + logging.setupLogging(mods.cfg) |
647 | + # Stage 5 |
648 | + return run_module_section(mods, name, name) |
649 | + |
650 | + |
651 | +def main_query(name, _args): |
652 | + raise NotImplementedError(("Action '%s' is not" |
653 | + " currently implemented") % (name)) |
654 | + |
655 | + |
656 | +def main_single(name, args): |
657 | + # Cloud-init single stage is broken up into the following sub-stages |
658 | + # 1. Ensure that the init object fetches its config without errors |
659 | + # 2. Attempt to fetch the datasource (warn if it doesn't work) |
660 | + # 3. Construct the modules object |
661 | + # 4. Adjust any subsequent logging/output redirections using |
662 | + # the modules objects configuration |
663 | + # 5. Run the single module |
664 | + # 6. Done! |
665 | + mod_name = args.name |
666 | + welcome("%s:%s" % (name, mod_name)) |
667 | + init = stages.Init(ds_deps=[]) |
668 | + # Stage 1 |
669 | + init.read_cfg(extract_fns(args)) |
670 | + # Stage 2 |
671 | + try: |
672 | + init.fetch() |
673 | + except sources.DataSourceNotFoundException: |
674 | + # There was no datasource found, |
675 | + # that might be bad (or ok) depending on |
676 | + # the module being ran (so continue on) |
677 | + util.logexc(LOG, ("Failed to fetch your datasource," |
678 | + " likely bad things to come!")) |
679 | + print_exc(("Failed to fetch your datasource," |
680 | + " likely bad things to come!")) |
681 | + if not args.force: |
682 | + return 1 |
683 | + # Stage 3 |
684 | + mods = stages.Modules(init, extract_fns(args)) |
685 | + mod_args = args.module_args |
686 | + if mod_args: |
687 | + LOG.debug("Using passed in arguments %s", mod_args) |
688 | + mod_freq = args.frequency |
689 | + if mod_freq: |
690 | + LOG.debug("Using passed in frequency %s", mod_freq) |
691 | + mod_freq = FREQ_SHORT_NAMES.get(mod_freq) |
692 | + # Stage 4 |
693 | + try: |
694 | + LOG.debug("Closing stdin") |
695 | + util.close_stdin() |
696 | + util.fixup_output(mods.cfg, None) |
697 | + except: |
698 | + util.logexc(LOG, "Failed to setup output redirection!") |
699 | + if args.debug: |
700 | + # Reset so that all the debug handlers are closed out |
701 | + LOG.debug(("Logging being reset, this logger may no" |
702 | + " longer be active shortly")) |
703 | + logging.resetLogging() |
704 | + logging.setupLogging(mods.cfg) |
705 | + # Stage 5 |
706 | + (which_ran, failures) = mods.run_single(mod_name, |
707 | + mod_args, |
708 | + mod_freq) |
709 | + if failures: |
710 | + LOG.warn("Ran %s but it failed!", mod_name) |
711 | + return 1 |
712 | + elif not which_ran: |
713 | + LOG.warn("Did not run %s, does it exist?", mod_name) |
714 | + return 1 |
715 | + else: |
716 | + # Guess it worked |
717 | + return 0 |
718 | + |
719 | + |
720 | +def main(): |
721 | + parser = argparse.ArgumentParser() |
722 | + |
723 | + # Top level args |
724 | + parser.add_argument('--version', '-v', action='version', |
725 | + version='%(prog)s ' + (version.version_string())) |
726 | + parser.add_argument('--file', '-f', action='append', |
727 | + dest='files', |
728 | + help=('additional yaml configuration' |
729 | + ' files to use'), |
730 | + type=argparse.FileType('rb')) |
731 | + parser.add_argument('--debug', '-d', action='store_true', |
732 | + help=('show additional pre-action' |
733 | + ' logging (default: %(default)s)'), |
734 | + default=False) |
735 | + parser.add_argument('--force', action='store_true', |
736 | + help=('force running even if no datasource is' |
737 | + ' found (use at your own risk)'), |
738 | + dest='force', |
739 | + default=False) |
740 | + subparsers = parser.add_subparsers() |
741 | + |
742 | + # Each action and its sub-options (if any) |
743 | + parser_init = subparsers.add_parser('init', |
744 | + help=('initializes cloud-init and' |
745 | + ' performs initial modules')) |
746 | + parser_init.add_argument("--local", '-l', action='store_true', |
747 | + help="start in local mode (default: %(default)s)", |
748 | + default=False) |
749 | + # This is used so that we can know which action is selected + |
750 | + # the functor to use to run this subcommand |
751 | + parser_init.set_defaults(action=('init', main_init)) |
752 | + |
753 | + # These settings are used for the 'config' and 'final' stages |
754 | + parser_mod = subparsers.add_parser('modules', |
755 | + help=('activates modules ' |
756 | + 'using a given configuration key')) |
757 | + parser_mod.add_argument("--mode", '-m', action='store', |
758 | + help=("module configuration name " |
759 | + "to use (default: %(default)s)"), |
760 | + default='config', |
761 | + choices=('init', 'config', 'final')) |
762 | + parser_mod.set_defaults(action=('modules', main_modules)) |
763 | + |
764 | + # These settings are used when you want to query information |
765 | + # stored in the cloud-init data objects/directories/files |
766 | + parser_query = subparsers.add_parser('query', |
767 | + help=('query information stored ' |
768 | + 'in cloud-init')) |
769 | + parser_query.add_argument("--name", '-n', action="store", |
770 | + help="item name to query on", |
771 | + required=True, |
772 | + choices=QUERY_DATA_TYPES) |
773 | + parser_query.set_defaults(action=('query', main_query)) |
774 | + |
775 | + # This subcommand allows you to run a single module |
776 | + parser_single = subparsers.add_parser('single', |
777 | + help=('run a single module ')) |
778 | + parser_single.set_defaults(action=('single', main_single)) |
779 | + parser_single.add_argument("--name", '-n', action="store", |
780 | + help="module name to run", |
781 | + required=True) |
782 | + parser_single.add_argument("--frequency", action="store", |
783 | + help=("frequency of the module"), |
784 | + required=False, |
785 | + choices=list(FREQ_SHORT_NAMES.keys())) |
786 | + parser_single.add_argument("module_args", nargs="*", |
787 | + metavar='argument', |
788 | + help=('any additional arguments to' |
789 | + ' pass to this module')) |
790 | + parser_single.set_defaults(action=('single', main_single)) |
791 | + |
792 | + args = parser.parse_args() |
793 | + |
794 | + # Setup basic logging to start (until reinitialized) |
795 | + # iff in debug mode... |
796 | + if args.debug: |
797 | + logging.setupBasicLogging() |
798 | + |
799 | + (name, functor) = args.action |
800 | + return functor(name, args) |
801 | + |
802 | + |
803 | +if __name__ == '__main__': |
804 | + sys.exit(main()) |
805 | |
806 | === removed file 'cloud-init-cfg.py' |
807 | --- cloud-init-cfg.py 2012-01-18 14:07:33 +0000 |
808 | +++ cloud-init-cfg.py 1970-01-01 00:00:00 +0000 |
809 | @@ -1,115 +0,0 @@ |
810 | -#!/usr/bin/python |
811 | -# vi: ts=4 expandtab |
812 | -# |
813 | -# Copyright (C) 2009-2010 Canonical Ltd. |
814 | -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. |
815 | -# |
816 | -# Author: Scott Moser <scott.moser@canonical.com> |
817 | -# Author: Juerg Haefliger <juerg.haefliger@hp.com> |
818 | -# |
819 | -# This program is free software: you can redistribute it and/or modify |
820 | -# it under the terms of the GNU General Public License version 3, as |
821 | -# published by the Free Software Foundation. |
822 | -# |
823 | -# This program is distributed in the hope that it will be useful, |
824 | -# but WITHOUT ANY WARRANTY; without even the implied warranty of |
825 | -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
826 | -# GNU General Public License for more details. |
827 | -# |
828 | -# You should have received a copy of the GNU General Public License |
829 | -# along with this program. If not, see <http://www.gnu.org/licenses/>. |
830 | - |
831 | -import sys |
832 | -import cloudinit |
833 | -import cloudinit.util as util |
834 | -import cloudinit.CloudConfig as CC |
835 | -import logging |
836 | -import os |
837 | - |
838 | - |
839 | -def Usage(out=sys.stdout): |
840 | - out.write("Usage: %s name\n" % sys.argv[0]) |
841 | - |
842 | - |
843 | -def main(): |
844 | - # expect to be called with |
845 | - # name [ freq [ args ] |
846 | - # run the cloud-config job 'name' at with given args |
847 | - # or |
848 | - # read cloud config jobs from config (builtin -> system) |
849 | - # and run all in order |
850 | - |
851 | - util.close_stdin() |
852 | - |
853 | - modename = "config" |
854 | - |
855 | - if len(sys.argv) < 2: |
856 | - Usage(sys.stderr) |
857 | - sys.exit(1) |
858 | - if sys.argv[1] == "all": |
859 | - name = "all" |
860 | - if len(sys.argv) > 2: |
861 | - modename = sys.argv[2] |
862 | - else: |
863 | - freq = None |
864 | - run_args = [] |
865 | - name = sys.argv[1] |
866 | - if len(sys.argv) > 2: |
867 | - freq = sys.argv[2] |
868 | - if freq == "None": |
869 | - freq = None |
870 | - if len(sys.argv) > 3: |
871 | - run_args = sys.argv[3:] |
872 | - |
873 | - cfg_path = cloudinit.get_ipath_cur("cloud_config") |
874 | - cfg_env_name = cloudinit.cfg_env_name |
875 | - if cfg_env_name in os.environ: |
876 | - cfg_path = os.environ[cfg_env_name] |
877 | - |
878 | - cloud = cloudinit.CloudInit(ds_deps=[]) # ds_deps=[], get only cached |
879 | - try: |
880 | - cloud.get_data_source() |
881 | - except cloudinit.DataSourceNotFoundException as e: |
882 | - # there was no datasource found, theres nothing to do |
883 | - sys.exit(0) |
884 | - |
885 | - cc = CC.CloudConfig(cfg_path, cloud) |
886 | - |
887 | - try: |
888 | - (outfmt, errfmt) = CC.get_output_cfg(cc.cfg, modename) |
889 | - CC.redirect_output(outfmt, errfmt) |
890 | - except Exception as e: |
891 | - err("Failed to get and set output config: %s\n" % e) |
892 | - |
893 | - cloudinit.logging_set_from_cfg(cc.cfg) |
894 | - log = logging.getLogger() |
895 | - log.info("cloud-init-cfg %s" % sys.argv[1:]) |
896 | - |
897 | - module_list = [] |
898 | - if name == "all": |
899 | - modlist_cfg_name = "cloud_%s_modules" % modename |
900 | - module_list = CC.read_cc_modules(cc.cfg, modlist_cfg_name) |
901 | - if not len(module_list): |
902 | - err("no modules to run in cloud_config [%s]" % modename, log) |
903 | - sys.exit(0) |
904 | - else: |
905 | - module_list.append([name, freq] + run_args) |
906 | - |
907 | - failures = CC.run_cc_modules(cc, module_list, log) |
908 | - if len(failures): |
909 | - err("errors running cloud_config [%s]: %s" % (modename, failures), log) |
910 | - sys.exit(len(failures)) |
911 | - |
912 | - |
913 | -def err(msg, log=None): |
914 | - if log: |
915 | - log.error(msg) |
916 | - sys.stderr.write(msg + "\n") |
917 | - |
918 | - |
919 | -def fail(msg, log=None): |
920 | - err(msg, log) |
921 | - sys.exit(1) |
922 | - |
923 | -if __name__ == '__main__': |
924 | - main() |
925 | |
926 | === removed file 'cloud-init-query.py' |
927 | --- cloud-init-query.py 2012-01-18 14:07:33 +0000 |
928 | +++ cloud-init-query.py 1970-01-01 00:00:00 +0000 |
929 | @@ -1,56 +0,0 @@ |
930 | -#!/usr/bin/python |
931 | -# vi: ts=4 expandtab |
932 | -# |
933 | -# Copyright (C) 2009-2010 Canonical Ltd. |
934 | -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. |
935 | -# |
936 | -# Author: Scott Moser <scott.moser@canonical.com> |
937 | -# Author: Juerg Haefliger <juerg.haefliger@hp.com> |
938 | -# |
939 | -# This program is free software: you can redistribute it and/or modify |
940 | -# it under the terms of the GNU General Public License version 3, as |
941 | -# published by the Free Software Foundation. |
942 | -# |
943 | -# This program is distributed in the hope that it will be useful, |
944 | -# but WITHOUT ANY WARRANTY; without even the implied warranty of |
945 | -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
946 | -# GNU General Public License for more details. |
947 | -# |
948 | -# You should have received a copy of the GNU General Public License |
949 | -# along with this program. If not, see <http://www.gnu.org/licenses/>. |
950 | - |
951 | -import sys |
952 | -import cloudinit |
953 | -import cloudinit.CloudConfig |
954 | - |
955 | - |
956 | -def Usage(out=sys.stdout): |
957 | - out.write("Usage: %s name\n" % sys.argv[0]) |
958 | - |
959 | - |
960 | -def main(): |
961 | - # expect to be called with name of item to fetch |
962 | - if len(sys.argv) != 2: |
963 | - Usage(sys.stderr) |
964 | - sys.exit(1) |
965 | - |
966 | - cfg_path = cloudinit.get_ipath_cur("cloud_config") |
967 | - cc = cloudinit.CloudConfig.CloudConfig(cfg_path) |
968 | - data = { |
969 | - 'user_data': cc.cloud.get_userdata(), |
970 | - 'user_data_raw': cc.cloud.get_userdata_raw(), |
971 | - 'instance_id': cc.cloud.get_instance_id(), |
972 | - } |
973 | - |
974 | - name = sys.argv[1].replace('-', '_') |
975 | - |
976 | - if name not in data: |
977 | - sys.stderr.write("unknown name '%s'. Known values are:\n %s\n" % |
978 | - (sys.argv[1], ' '.join(data.keys()))) |
979 | - sys.exit(1) |
980 | - |
981 | - print data[name] |
982 | - sys.exit(0) |
983 | - |
984 | -if __name__ == '__main__': |
985 | - main() |
986 | |
987 | === removed file 'cloud-init.py' |
988 | --- cloud-init.py 2012-04-10 20:08:25 +0000 |
989 | +++ cloud-init.py 1970-01-01 00:00:00 +0000 |
990 | @@ -1,229 +0,0 @@ |
991 | -#!/usr/bin/python |
992 | -# vi: ts=4 expandtab |
993 | -# |
994 | -# Copyright (C) 2009-2010 Canonical Ltd. |
995 | -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. |
996 | -# |
997 | -# Author: Scott Moser <scott.moser@canonical.com> |
998 | -# Author: Juerg Haefliger <juerg.haefliger@hp.com> |
999 | -# |
1000 | -# This program is free software: you can redistribute it and/or modify |
1001 | -# it under the terms of the GNU General Public License version 3, as |
1002 | -# published by the Free Software Foundation. |
1003 | -# |
1004 | -# This program is distributed in the hope that it will be useful, |
1005 | -# but WITHOUT ANY WARRANTY; without even the implied warranty of |
1006 | -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
1007 | -# GNU General Public License for more details. |
1008 | -# |
1009 | -# You should have received a copy of the GNU General Public License |
1010 | -# along with this program. If not, see <http://www.gnu.org/licenses/>. |
1011 | - |
1012 | -import subprocess |
1013 | -import sys |
1014 | - |
1015 | -import cloudinit |
1016 | -import cloudinit.util as util |
1017 | -import cloudinit.CloudConfig as CC |
1018 | -import cloudinit.DataSource as ds |
1019 | -import cloudinit.netinfo as netinfo |
1020 | -import time |
1021 | -import traceback |
1022 | -import logging |
1023 | -import errno |
1024 | -import os |
1025 | - |
1026 | - |
1027 | -def warn(wstr): |
1028 | - sys.stderr.write("WARN:%s" % wstr) |
1029 | - |
1030 | - |
1031 | -def main(): |
1032 | - util.close_stdin() |
1033 | - |
1034 | - cmds = ("start", "start-local") |
1035 | - deps = {"start": (ds.DEP_FILESYSTEM, ds.DEP_NETWORK), |
1036 | - "start-local": (ds.DEP_FILESYSTEM, )} |
1037 | - |
1038 | - cmd = "" |
1039 | - if len(sys.argv) > 1: |
1040 | - cmd = sys.argv[1] |
1041 | - |
1042 | - cfg_path = None |
1043 | - if len(sys.argv) > 2: |
1044 | - # this is really for debugging only |
1045 | - # but you can invoke on development system with ./config/cloud.cfg |
1046 | - cfg_path = sys.argv[2] |
1047 | - |
1048 | - if not cmd in cmds: |
1049 | - sys.stderr.write("bad command %s. use one of %s\n" % (cmd, cmds)) |
1050 | - sys.exit(1) |
1051 | - |
1052 | - now = time.strftime("%a, %d %b %Y %H:%M:%S %z", time.gmtime()) |
1053 | - try: |
1054 | - uptimef = open("/proc/uptime") |
1055 | - uptime = uptimef.read().split(" ")[0] |
1056 | - uptimef.close() |
1057 | - except IOError as e: |
1058 | - warn("unable to open /proc/uptime\n") |
1059 | - uptime = "na" |
1060 | - |
1061 | - cmdline_msg = None |
1062 | - cmdline_exc = None |
1063 | - if cmd == "start": |
1064 | - target = "%s.d/%s" % (cloudinit.system_config, |
1065 | - "91_kernel_cmdline_url.cfg") |
1066 | - if os.path.exists(target): |
1067 | - cmdline_msg = "cmdline: %s existed" % target |
1068 | - else: |
1069 | - cmdline = util.get_cmdline() |
1070 | - try: |
1071 | - (key, url, content) = cloudinit.get_cmdline_url( |
1072 | - cmdline=cmdline) |
1073 | - if key and content: |
1074 | - util.write_file(target, content, mode=0600) |
1075 | - cmdline_msg = ("cmdline: wrote %s from %s, %s" % |
1076 | - (target, key, url)) |
1077 | - elif key: |
1078 | - cmdline_msg = ("cmdline: %s, %s had no cloud-config" % |
1079 | - (key, url)) |
1080 | - except Exception: |
1081 | - cmdline_exc = ("cmdline: '%s' raised exception\n%s" % |
1082 | - (cmdline, traceback.format_exc())) |
1083 | - warn(cmdline_exc) |
1084 | - |
1085 | - try: |
1086 | - cfg = cloudinit.get_base_cfg(cfg_path) |
1087 | - except Exception as e: |
1088 | - warn("Failed to get base config. falling back to builtin: %s\n" % e) |
1089 | - try: |
1090 | - cfg = cloudinit.get_builtin_cfg() |
1091 | - except Exception as e: |
1092 | - warn("Unable to load builtin config\n") |
1093 | - raise |
1094 | - |
1095 | - try: |
1096 | - (outfmt, errfmt) = CC.get_output_cfg(cfg, "init") |
1097 | - CC.redirect_output(outfmt, errfmt) |
1098 | - except Exception as e: |
1099 | - warn("Failed to get and set output config: %s\n" % e) |
1100 | - |
1101 | - cloudinit.logging_set_from_cfg(cfg) |
1102 | - log = logging.getLogger() |
1103 | - |
1104 | - if cmdline_exc: |
1105 | - log.debug(cmdline_exc) |
1106 | - elif cmdline_msg: |
1107 | - log.debug(cmdline_msg) |
1108 | - |
1109 | - try: |
1110 | - cloudinit.initfs() |
1111 | - except Exception as e: |
1112 | - warn("failed to initfs, likely bad things to come: %s\n" % str(e)) |
1113 | - |
1114 | - nonet_path = "%s/%s" % (cloudinit.get_cpath("data"), "no-net") |
1115 | - |
1116 | - if cmd == "start": |
1117 | - print netinfo.debug_info() |
1118 | - |
1119 | - stop_files = (cloudinit.get_ipath_cur("obj_pkl"), nonet_path) |
1120 | - # if starting as the network start, there are cases |
1121 | - # where everything is already done for us, and it makes |
1122 | - # most sense to exit early and silently |
1123 | - for f in stop_files: |
1124 | - try: |
1125 | - fp = open(f, "r") |
1126 | - fp.close() |
1127 | - except: |
1128 | - continue |
1129 | - |
1130 | - log.debug("no need for cloud-init start to run (%s)\n", f) |
1131 | - sys.exit(0) |
1132 | - elif cmd == "start-local": |
1133 | - # cache is not instance specific, so it has to be purged |
1134 | - # but we want 'start' to benefit from a cache if |
1135 | - # a previous start-local populated one |
1136 | - manclean = util.get_cfg_option_bool(cfg, 'manual_cache_clean', False) |
1137 | - if manclean: |
1138 | - log.debug("not purging cache, manual_cache_clean = True") |
1139 | - cloudinit.purge_cache(not manclean) |
1140 | - |
1141 | - try: |
1142 | - os.unlink(nonet_path) |
1143 | - except OSError as e: |
1144 | - if e.errno != errno.ENOENT: |
1145 | - raise |
1146 | - |
1147 | - msg = "cloud-init %s running: %s. up %s seconds" % (cmd, now, uptime) |
1148 | - sys.stderr.write(msg + "\n") |
1149 | - sys.stderr.flush() |
1150 | - |
1151 | - log.info(msg) |
1152 | - |
1153 | - cloud = cloudinit.CloudInit(ds_deps=deps[cmd]) |
1154 | - |
1155 | - try: |
1156 | - cloud.get_data_source() |
1157 | - except cloudinit.DataSourceNotFoundException as e: |
1158 | - sys.stderr.write("no instance data found in %s\n" % cmd) |
1159 | - sys.exit(0) |
1160 | - |
1161 | - # set this as the current instance |
1162 | - cloud.set_cur_instance() |
1163 | - |
1164 | - # store the metadata |
1165 | - cloud.update_cache() |
1166 | - |
1167 | - msg = "found data source: %s" % cloud.datasource |
1168 | - sys.stderr.write(msg + "\n") |
1169 | - log.debug(msg) |
1170 | - |
1171 | - # parse the user data (ec2-run-userdata.py) |
1172 | - try: |
1173 | - ran = cloud.sem_and_run("consume_userdata", cloudinit.per_instance, |
1174 | - cloud.consume_userdata, [cloudinit.per_instance], False) |
1175 | - if not ran: |
1176 | - cloud.consume_userdata(cloudinit.per_always) |
1177 | - except: |
1178 | - warn("consuming user data failed!\n") |
1179 | - raise |
1180 | - |
1181 | - cfg_path = cloudinit.get_ipath_cur("cloud_config") |
1182 | - cc = CC.CloudConfig(cfg_path, cloud) |
1183 | - |
1184 | - # if the output config changed, update output and err |
1185 | - try: |
1186 | - outfmt_orig = outfmt |
1187 | - errfmt_orig = errfmt |
1188 | - (outfmt, errfmt) = CC.get_output_cfg(cc.cfg, "init") |
1189 | - if outfmt_orig != outfmt or errfmt_orig != errfmt: |
1190 | - warn("stdout, stderr changing to (%s,%s)" % (outfmt, errfmt)) |
1191 | - CC.redirect_output(outfmt, errfmt) |
1192 | - except Exception as e: |
1193 | - warn("Failed to get and set output config: %s\n" % e) |
1194 | - |
1195 | - # send the cloud-config ready event |
1196 | - cc_path = cloudinit.get_ipath_cur('cloud_config') |
1197 | - cc_ready = cc.cfg.get("cc_ready_cmd", |
1198 | - ['initctl', 'emit', 'cloud-config', |
1199 | - '%s=%s' % (cloudinit.cfg_env_name, cc_path)]) |
1200 | - if cc_ready: |
1201 | - if isinstance(cc_ready, str): |
1202 | - cc_ready = ['sh', '-c', cc_ready] |
1203 | - subprocess.Popen(cc_ready).communicate() |
1204 | - |
1205 | - module_list = CC.read_cc_modules(cc.cfg, "cloud_init_modules") |
1206 | - |
1207 | - failures = [] |
1208 | - if len(module_list): |
1209 | - failures = CC.run_cc_modules(cc, module_list, log) |
1210 | - else: |
1211 | - msg = "no cloud_init_modules to run" |
1212 | - sys.stderr.write(msg + "\n") |
1213 | - log.debug(msg) |
1214 | - sys.exit(0) |
1215 | - |
1216 | - sys.exit(len(failures)) |
1217 | - |
1218 | -if __name__ == '__main__': |
1219 | - main() |
1220 | |
1221 | === removed file 'cloudinit/DataSource.py' |
1222 | --- cloudinit/DataSource.py 2012-03-19 17:33:39 +0000 |
1223 | +++ cloudinit/DataSource.py 1970-01-01 00:00:00 +0000 |
1224 | @@ -1,214 +0,0 @@ |
1225 | -# vi: ts=4 expandtab |
1226 | -# |
1227 | -# Copyright (C) 2009-2010 Canonical Ltd. |
1228 | -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. |
1229 | -# |
1230 | -# Author: Scott Moser <scott.moser@canonical.com> |
1231 | -# Author: Juerg Hafliger <juerg.haefliger@hp.com> |
1232 | -# |
1233 | -# This program is free software: you can redistribute it and/or modify |
1234 | -# it under the terms of the GNU General Public License version 3, as |
1235 | -# published by the Free Software Foundation. |
1236 | -# |
1237 | -# This program is distributed in the hope that it will be useful, |
1238 | -# but WITHOUT ANY WARRANTY; without even the implied warranty of |
1239 | -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
1240 | -# GNU General Public License for more details. |
1241 | -# |
1242 | -# You should have received a copy of the GNU General Public License |
1243 | -# along with this program. If not, see <http://www.gnu.org/licenses/>. |
1244 | - |
1245 | - |
1246 | -DEP_FILESYSTEM = "FILESYSTEM" |
1247 | -DEP_NETWORK = "NETWORK" |
1248 | - |
1249 | -import cloudinit.UserDataHandler as ud |
1250 | -import cloudinit.util as util |
1251 | -import socket |
1252 | - |
1253 | - |
1254 | -class DataSource: |
1255 | - userdata = None |
1256 | - metadata = None |
1257 | - userdata_raw = None |
1258 | - cfgname = "" |
1259 | - # system config (passed in from cloudinit, |
1260 | - # cloud-config before input from the DataSource) |
1261 | - sys_cfg = {} |
1262 | - # datasource config, the cloud-config['datasource']['__name__'] |
1263 | - ds_cfg = {} # datasource config |
1264 | - |
1265 | - def __init__(self, sys_cfg=None): |
1266 | - if not self.cfgname: |
1267 | - name = str(self.__class__).split(".")[-1] |
1268 | - if name.startswith("DataSource"): |
1269 | - name = name[len("DataSource"):] |
1270 | - self.cfgname = name |
1271 | - if sys_cfg: |
1272 | - self.sys_cfg = sys_cfg |
1273 | - |
1274 | - self.ds_cfg = util.get_cfg_by_path(self.sys_cfg, |
1275 | - ("datasource", self.cfgname), self.ds_cfg) |
1276 | - |
1277 | - def get_userdata(self): |
1278 | - if self.userdata == None: |
1279 | - self.userdata = ud.preprocess_userdata(self.userdata_raw) |
1280 | - return self.userdata |
1281 | - |
1282 | - def get_userdata_raw(self): |
1283 | - return(self.userdata_raw) |
1284 | - |
1285 | - # the data sources' config_obj is a cloud-config formated |
1286 | - # object that came to it from ways other than cloud-config |
1287 | - # because cloud-config content would be handled elsewhere |
1288 | - def get_config_obj(self): |
1289 | - return({}) |
1290 | - |
1291 | - def get_public_ssh_keys(self): |
1292 | - keys = [] |
1293 | - if 'public-keys' not in self.metadata: |
1294 | - return([]) |
1295 | - |
1296 | - if isinstance(self.metadata['public-keys'], str): |
1297 | - return(str(self.metadata['public-keys']).splitlines()) |
1298 | - |
1299 | - if isinstance(self.metadata['public-keys'], list): |
1300 | - return(self.metadata['public-keys']) |
1301 | - |
1302 | - for _keyname, klist in self.metadata['public-keys'].items(): |
1303 | - # lp:506332 uec metadata service responds with |
1304 | - # data that makes boto populate a string for 'klist' rather |
1305 | - # than a list. |
1306 | - if isinstance(klist, str): |
1307 | - klist = [klist] |
1308 | - for pkey in klist: |
1309 | - # there is an empty string at the end of the keylist, trim it |
1310 | - if pkey: |
1311 | - keys.append(pkey) |
1312 | - |
1313 | - return(keys) |
1314 | - |
1315 | - def device_name_to_device(self, _name): |
1316 | - # translate a 'name' to a device |
1317 | - # the primary function at this point is on ec2 |
1318 | - # to consult metadata service, that has |
1319 | - # ephemeral0: sdb |
1320 | - # and return 'sdb' for input 'ephemeral0' |
1321 | - return(None) |
1322 | - |
1323 | - def get_locale(self): |
1324 | - return('en_US.UTF-8') |
1325 | - |
1326 | - def get_local_mirror(self): |
1327 | - return None |
1328 | - |
1329 | - def get_instance_id(self): |
1330 | - if 'instance-id' not in self.metadata: |
1331 | - return "iid-datasource" |
1332 | - return(self.metadata['instance-id']) |
1333 | - |
1334 | - def get_hostname(self, fqdn=False): |
1335 | - defdomain = "localdomain" |
1336 | - defhost = "localhost" |
1337 | - |
1338 | - domain = defdomain |
1339 | - if not 'local-hostname' in self.metadata: |
1340 | - |
1341 | - # this is somewhat questionable really. |
1342 | - # the cloud datasource was asked for a hostname |
1343 | - # and didn't have one. raising error might be more appropriate |
1344 | - # but instead, basically look up the existing hostname |
1345 | - toks = [] |
1346 | - |
1347 | - hostname = socket.gethostname() |
1348 | - |
1349 | - fqdn = util.get_fqdn_from_hosts(hostname) |
1350 | - |
1351 | - if fqdn and fqdn.find(".") > 0: |
1352 | - toks = str(fqdn).split(".") |
1353 | - elif hostname: |
1354 | - toks = [hostname, defdomain] |
1355 | - else: |
1356 | - toks = [defhost, defdomain] |
1357 | - |
1358 | - else: |
1359 | - # if there is an ipv4 address in 'local-hostname', then |
1360 | - # make up a hostname (LP: #475354) in format ip-xx.xx.xx.xx |
1361 | - lhost = self.metadata['local-hostname'] |
1362 | - if is_ipv4(lhost): |
1363 | - toks = "ip-%s" % lhost.replace(".", "-") |
1364 | - else: |
1365 | - toks = lhost.split(".") |
1366 | - |
1367 | - if len(toks) > 1: |
1368 | - hostname = toks[0] |
1369 | - domain = '.'.join(toks[1:]) |
1370 | - else: |
1371 | - hostname = toks[0] |
1372 | - |
1373 | - if fqdn: |
1374 | - return "%s.%s" % (hostname, domain) |
1375 | - else: |
1376 | - return hostname |
1377 | - |
1378 | - |
1379 | -# return a list of classes that have the same depends as 'depends' |
1380 | -# iterate through cfg_list, loading "DataSourceCollections" modules |
1381 | -# and calling their "get_datasource_list". |
1382 | -# return an ordered list of classes that match |
1383 | -# |
1384 | -# - modules must be named "DataSource<item>", where 'item' is an entry |
1385 | -# in cfg_list |
1386 | -# - if pkglist is given, it will iterate try loading from that package |
1387 | -# ie, pkglist=[ "foo", "" ] |
1388 | -# will first try to load foo.DataSource<item> |
1389 | -# then DataSource<item> |
1390 | -def list_sources(cfg_list, depends, pkglist=None): |
1391 | - if pkglist is None: |
1392 | - pkglist = [] |
1393 | - retlist = [] |
1394 | - for ds_coll in cfg_list: |
1395 | - for pkg in pkglist: |
1396 | - if pkg: |
1397 | - pkg = "%s." % pkg |
1398 | - try: |
1399 | - mod = __import__("%sDataSource%s" % (pkg, ds_coll)) |
1400 | - if pkg: |
1401 | - mod = getattr(mod, "DataSource%s" % ds_coll) |
1402 | - lister = getattr(mod, "get_datasource_list") |
1403 | - retlist.extend(lister(depends)) |
1404 | - break |
1405 | - except: |
1406 | - raise |
1407 | - return(retlist) |
1408 | - |
1409 | - |
1410 | -# depends is a list of dependencies (DEP_FILESYSTEM) |
1411 | -# dslist is a list of 2 item lists |
1412 | -# dslist = [ |
1413 | -# ( class, ( depends-that-this-class-needs ) ) |
1414 | -# } |
1415 | -# it returns a list of 'class' that matched these deps exactly |
1416 | -# it is a helper function for DataSourceCollections |
1417 | -def list_from_depends(depends, dslist): |
1418 | - retlist = [] |
1419 | - depset = set(depends) |
1420 | - for elem in dslist: |
1421 | - (cls, deps) = elem |
1422 | - if depset == set(deps): |
1423 | - retlist.append(cls) |
1424 | - return(retlist) |
1425 | - |
1426 | - |
1427 | -def is_ipv4(instr): |
1428 | - """ determine if input string is a ipv4 address. return boolean""" |
1429 | - toks = instr.split('.') |
1430 | - if len(toks) != 4: |
1431 | - return False |
1432 | - |
1433 | - try: |
1434 | - toks = [x for x in toks if (int(x) < 256 and int(x) > 0)] |
1435 | - except: |
1436 | - return False |
1437 | - |
1438 | - return (len(toks) == 4) |
1439 | |
1440 | === removed file 'cloudinit/UserDataHandler.py' |
1441 | --- cloudinit/UserDataHandler.py 2012-06-21 15:37:22 +0000 |
1442 | +++ cloudinit/UserDataHandler.py 1970-01-01 00:00:00 +0000 |
1443 | @@ -1,262 +0,0 @@ |
1444 | -# vi: ts=4 expandtab |
1445 | -# |
1446 | -# Copyright (C) 2009-2010 Canonical Ltd. |
1447 | -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. |
1448 | -# |
1449 | -# Author: Scott Moser <scott.moser@canonical.com> |
1450 | -# Author: Juerg Hafliger <juerg.haefliger@hp.com> |
1451 | -# |
1452 | -# This program is free software: you can redistribute it and/or modify |
1453 | -# it under the terms of the GNU General Public License version 3, as |
1454 | -# published by the Free Software Foundation. |
1455 | -# |
1456 | -# This program is distributed in the hope that it will be useful, |
1457 | -# but WITHOUT ANY WARRANTY; without even the implied warranty of |
1458 | -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
1459 | -# GNU General Public License for more details. |
1460 | -# |
1461 | -# You should have received a copy of the GNU General Public License |
1462 | -# along with this program. If not, see <http://www.gnu.org/licenses/>. |
1463 | - |
1464 | -import email |
1465 | - |
1466 | -from email.mime.multipart import MIMEMultipart |
1467 | -from email.mime.text import MIMEText |
1468 | -from email.mime.base import MIMEBase |
1469 | -import yaml |
1470 | -import cloudinit |
1471 | -import cloudinit.util as util |
1472 | -import hashlib |
1473 | -import urllib |
1474 | - |
1475 | - |
1476 | -starts_with_mappings = { |
1477 | - '#include': 'text/x-include-url', |
1478 | - '#include-once': 'text/x-include-once-url', |
1479 | - '#!': 'text/x-shellscript', |
1480 | - '#cloud-config': 'text/cloud-config', |
1481 | - '#upstart-job': 'text/upstart-job', |
1482 | - '#part-handler': 'text/part-handler', |
1483 | - '#cloud-boothook': 'text/cloud-boothook', |
1484 | - '#cloud-config-archive': 'text/cloud-config-archive', |
1485 | -} |
1486 | - |
1487 | - |
1488 | -# if 'string' is compressed return decompressed otherwise return it |
1489 | -def decomp_str(string): |
1490 | - import StringIO |
1491 | - import gzip |
1492 | - try: |
1493 | - uncomp = gzip.GzipFile(None, "rb", 1, StringIO.StringIO(string)).read() |
1494 | - return(uncomp) |
1495 | - except: |
1496 | - return(string) |
1497 | - |
1498 | - |
1499 | -def do_include(content, appendmsg): |
1500 | - import os |
1501 | - # is just a list of urls, one per line |
1502 | - # also support '#include <url here>' |
1503 | - includeonce = False |
1504 | - for line in content.splitlines(): |
1505 | - if line == "#include": |
1506 | - continue |
1507 | - if line == "#include-once": |
1508 | - includeonce = True |
1509 | - continue |
1510 | - if line.startswith("#include-once"): |
1511 | - line = line[len("#include-once"):].lstrip() |
1512 | - includeonce = True |
1513 | - elif line.startswith("#include"): |
1514 | - line = line[len("#include"):].lstrip() |
1515 | - if line.startswith("#"): |
1516 | - continue |
1517 | - if line.strip() == "": |
1518 | - continue |
1519 | - |
1520 | - # urls cannot not have leading or trailing white space |
1521 | - msum = hashlib.md5() # pylint: disable=E1101 |
1522 | - msum.update(line.strip()) |
1523 | - includeonce_filename = "%s/urlcache/%s" % ( |
1524 | - cloudinit.get_ipath_cur("data"), msum.hexdigest()) |
1525 | - try: |
1526 | - if includeonce and os.path.isfile(includeonce_filename): |
1527 | - with open(includeonce_filename, "r") as fp: |
1528 | - content = fp.read() |
1529 | - else: |
1530 | - content = urllib.urlopen(line).read() |
1531 | - if includeonce: |
1532 | - util.write_file(includeonce_filename, content, mode=0600) |
1533 | - except Exception: |
1534 | - raise |
1535 | - |
1536 | - process_includes(message_from_string(decomp_str(content)), appendmsg) |
1537 | - |
1538 | - |
1539 | -def explode_cc_archive(archive, appendmsg): |
1540 | - for ent in yaml.safe_load(archive): |
1541 | - # ent can be one of: |
1542 | - # dict { 'filename' : 'value', 'content' : 'value', 'type' : 'value' } |
1543 | - # filename and type not be present |
1544 | - # or |
1545 | - # scalar(payload) |
1546 | - |
1547 | - def_type = "text/cloud-config" |
1548 | - if isinstance(ent, str): |
1549 | - ent = {'content': ent} |
1550 | - |
1551 | - content = ent.get('content', '') |
1552 | - mtype = ent.get('type', None) |
1553 | - if mtype == None: |
1554 | - mtype = type_from_startswith(content, def_type) |
1555 | - |
1556 | - maintype, subtype = mtype.split('/', 1) |
1557 | - if maintype == "text": |
1558 | - msg = MIMEText(content, _subtype=subtype) |
1559 | - else: |
1560 | - msg = MIMEBase(maintype, subtype) |
1561 | - msg.set_payload(content) |
1562 | - |
1563 | - if 'filename' in ent: |
1564 | - msg.add_header('Content-Disposition', 'attachment', |
1565 | - filename=ent['filename']) |
1566 | - |
1567 | - for header in ent.keys(): |
1568 | - if header in ('content', 'filename', 'type'): |
1569 | - continue |
1570 | - msg.add_header(header, ent['header']) |
1571 | - |
1572 | - _attach_part(appendmsg, msg) |
1573 | - |
1574 | - |
1575 | -def multi_part_count(outermsg, newcount=None): |
1576 | - """ |
1577 | - Return the number of attachments to this MIMEMultipart by looking |
1578 | - at its 'Number-Attachments' header. |
1579 | - """ |
1580 | - nfield = 'Number-Attachments' |
1581 | - if nfield not in outermsg: |
1582 | - outermsg[nfield] = "0" |
1583 | - |
1584 | - if newcount != None: |
1585 | - outermsg.replace_header(nfield, str(newcount)) |
1586 | - |
1587 | - return(int(outermsg.get('Number-Attachments', 0))) |
1588 | - |
1589 | - |
1590 | -def _attach_part(outermsg, part): |
1591 | - """ |
1592 | - Attach an part to an outer message. outermsg must be a MIMEMultipart. |
1593 | - Modifies a header in outermsg to keep track of number of attachments. |
1594 | - """ |
1595 | - cur = multi_part_count(outermsg) |
1596 | - if not part.get_filename(None): |
1597 | - part.add_header('Content-Disposition', 'attachment', |
1598 | - filename='part-%03d' % (cur + 1)) |
1599 | - outermsg.attach(part) |
1600 | - multi_part_count(outermsg, cur + 1) |
1601 | - |
1602 | - |
1603 | -def type_from_startswith(payload, default=None): |
1604 | - # slist is sorted longest first |
1605 | - slist = sorted(starts_with_mappings.keys(), key=lambda e: 0 - len(e)) |
1606 | - for sstr in slist: |
1607 | - if payload.startswith(sstr): |
1608 | - return(starts_with_mappings[sstr]) |
1609 | - return default |
1610 | - |
1611 | - |
1612 | -def process_includes(msg, appendmsg=None): |
1613 | - if appendmsg == None: |
1614 | - appendmsg = MIMEMultipart() |
1615 | - |
1616 | - for part in msg.walk(): |
1617 | - # multipart/* are just containers |
1618 | - if part.get_content_maintype() == 'multipart': |
1619 | - continue |
1620 | - |
1621 | - ctype = None |
1622 | - ctype_orig = part.get_content_type() |
1623 | - |
1624 | - payload = part.get_payload(decode=True) |
1625 | - |
1626 | - if ctype_orig in ("text/plain", "text/x-not-multipart"): |
1627 | - ctype = type_from_startswith(payload) |
1628 | - |
1629 | - if ctype is None: |
1630 | - ctype = ctype_orig |
1631 | - |
1632 | - if ctype in ('text/x-include-url', 'text/x-include-once-url'): |
1633 | - do_include(payload, appendmsg) |
1634 | - continue |
1635 | - |
1636 | - if ctype == "text/cloud-config-archive": |
1637 | - explode_cc_archive(payload, appendmsg) |
1638 | - continue |
1639 | - |
1640 | - if 'Content-Type' in msg: |
1641 | - msg.replace_header('Content-Type', ctype) |
1642 | - else: |
1643 | - msg['Content-Type'] = ctype |
1644 | - |
1645 | - _attach_part(appendmsg, part) |
1646 | - |
1647 | - |
1648 | -def message_from_string(data, headers=None): |
1649 | - if headers is None: |
1650 | - headers = {} |
1651 | - if "mime-version:" in data[0:4096].lower(): |
1652 | - msg = email.message_from_string(data) |
1653 | - for (key, val) in headers.items(): |
1654 | - if key in msg: |
1655 | - msg.replace_header(key, val) |
1656 | - else: |
1657 | - msg[key] = val |
1658 | - else: |
1659 | - mtype = headers.get("Content-Type", "text/x-not-multipart") |
1660 | - maintype, subtype = mtype.split("/", 1) |
1661 | - msg = MIMEBase(maintype, subtype, *headers) |
1662 | - msg.set_payload(data) |
1663 | - |
1664 | - return(msg) |
1665 | - |
1666 | - |
1667 | -# this is heavily wasteful, reads through userdata string input |
1668 | -def preprocess_userdata(data): |
1669 | - newmsg = MIMEMultipart() |
1670 | - process_includes(message_from_string(decomp_str(data)), newmsg) |
1671 | - return(newmsg.as_string()) |
1672 | - |
1673 | - |
1674 | -# callback is a function that will be called with (data, content_type, |
1675 | -# filename, payload) |
1676 | -def walk_userdata(istr, callback, data=None): |
1677 | - partnum = 0 |
1678 | - for part in message_from_string(istr).walk(): |
1679 | - # multipart/* are just containers |
1680 | - if part.get_content_maintype() == 'multipart': |
1681 | - continue |
1682 | - |
1683 | - ctype = part.get_content_type() |
1684 | - if ctype is None: |
1685 | - ctype = 'application/octet-stream' |
1686 | - |
1687 | - filename = part.get_filename() |
1688 | - if not filename: |
1689 | - filename = 'part-%03d' % partnum |
1690 | - |
1691 | - callback(data, ctype, filename, part.get_payload(decode=True)) |
1692 | - |
1693 | - partnum = partnum + 1 |
1694 | - |
1695 | - |
1696 | -if __name__ == "__main__": |
1697 | - def main(): |
1698 | - import sys |
1699 | - data = decomp_str(file(sys.argv[1]).read()) |
1700 | - newmsg = MIMEMultipart() |
1701 | - process_includes(message_from_string(data), newmsg) |
1702 | - print newmsg |
1703 | - print "#found %s parts" % multi_part_count(newmsg) |
1704 | - |
1705 | - main() |
1706 | |
1707 | === modified file 'cloudinit/__init__.py' |
1708 | --- cloudinit/__init__.py 2012-06-28 17:10:56 +0000 |
1709 | +++ cloudinit/__init__.py 2012-07-06 21:16:18 +0000 |
1710 | @@ -1,11 +1,12 @@ |
1711 | # vi: ts=4 expandtab |
1712 | # |
1713 | -# Common code for the EC2 initialisation scripts in Ubuntu |
1714 | -# Copyright (C) 2008-2009 Canonical Ltd |
1715 | +# Copyright (C) 2012 Canonical Ltd. |
1716 | # Copyright (C) 2012 Hewlett-Packard Development Company, L.P. |
1717 | +# Copyright (C) 2012 Yahoo! Inc. |
1718 | # |
1719 | -# Author: Soren Hansen <soren@canonical.com> |
1720 | +# Author: Scott Moser <scott.moser@canonical.com> |
1721 | # Author: Juerg Haefliger <juerg.haefliger@hp.com> |
1722 | +# Author: Joshua Harlow <harlowja@yahoo-inc.com> |
1723 | # |
1724 | # This program is free software: you can redistribute it and/or modify |
1725 | # it under the terms of the GNU General Public License version 3, as |
1726 | @@ -18,650 +19,3 @@ |
1727 | # |
1728 | # You should have received a copy of the GNU General Public License |
1729 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
1730 | -# |
1731 | - |
1732 | -varlibdir = '/var/lib/cloud' |
1733 | -cur_instance_link = varlibdir + "/instance" |
1734 | -boot_finished = cur_instance_link + "/boot-finished" |
1735 | -system_config = '/etc/cloud/cloud.cfg' |
1736 | -seeddir = varlibdir + "/seed" |
1737 | -cfg_env_name = "CLOUD_CFG" |
1738 | - |
1739 | -cfg_builtin = """ |
1740 | -log_cfgs: [] |
1741 | -datasource_list: ["NoCloud", "ConfigDrive", "OVF", "MAAS", "Ec2", "CloudStack"] |
1742 | -def_log_file: /var/log/cloud-init.log |
1743 | -syslog_fix_perms: syslog:adm |
1744 | -""" |
1745 | -logger_name = "cloudinit" |
1746 | - |
1747 | -pathmap = { |
1748 | - "handlers": "/handlers", |
1749 | - "scripts": "/scripts", |
1750 | - "sem": "/sem", |
1751 | - "boothooks": "/boothooks", |
1752 | - "userdata_raw": "/user-data.txt", |
1753 | - "userdata": "/user-data.txt.i", |
1754 | - "obj_pkl": "/obj.pkl", |
1755 | - "cloud_config": "/cloud-config.txt", |
1756 | - "data": "/data", |
1757 | - None: "", |
1758 | -} |
1759 | - |
1760 | -per_instance = "once-per-instance" |
1761 | -per_always = "always" |
1762 | -per_once = "once" |
1763 | - |
1764 | -parsed_cfgs = {} |
1765 | - |
1766 | -import os |
1767 | - |
1768 | -import cPickle |
1769 | -import sys |
1770 | -import os.path |
1771 | -import errno |
1772 | -import subprocess |
1773 | -import yaml |
1774 | -import logging |
1775 | -import logging.config |
1776 | -import StringIO |
1777 | -import glob |
1778 | -import traceback |
1779 | - |
1780 | -import cloudinit.util as util |
1781 | - |
1782 | - |
1783 | -class NullHandler(logging.Handler): |
1784 | - def emit(self, record): |
1785 | - pass |
1786 | - |
1787 | - |
1788 | -log = logging.getLogger(logger_name) |
1789 | -log.addHandler(NullHandler()) |
1790 | - |
1791 | - |
1792 | -def logging_set_from_cfg_file(cfg_file=system_config): |
1793 | - logging_set_from_cfg(util.get_base_cfg(cfg_file, cfg_builtin, parsed_cfgs)) |
1794 | - |
1795 | - |
1796 | -def logging_set_from_cfg(cfg): |
1797 | - log_cfgs = [] |
1798 | - logcfg = util.get_cfg_option_str(cfg, "log_cfg", False) |
1799 | - if logcfg: |
1800 | - # if there is a 'logcfg' entry in the config, respect |
1801 | - # it, it is the old keyname |
1802 | - log_cfgs = [logcfg] |
1803 | - elif "log_cfgs" in cfg: |
1804 | - for cfg in cfg['log_cfgs']: |
1805 | - if isinstance(cfg, list): |
1806 | - log_cfgs.append('\n'.join(cfg)) |
1807 | - else: |
1808 | - log_cfgs.append() |
1809 | - |
1810 | - if not len(log_cfgs): |
1811 | - sys.stderr.write("Warning, no logging configured\n") |
1812 | - return |
1813 | - |
1814 | - for logcfg in log_cfgs: |
1815 | - try: |
1816 | - logging.config.fileConfig(StringIO.StringIO(logcfg)) |
1817 | - return |
1818 | - except: |
1819 | - pass |
1820 | - |
1821 | - raise Exception("no valid logging found\n") |
1822 | - |
1823 | - |
1824 | -import cloudinit.DataSource as DataSource |
1825 | -import cloudinit.UserDataHandler as UserDataHandler |
1826 | - |
1827 | - |
1828 | -class CloudInit: |
1829 | - cfg = None |
1830 | - part_handlers = {} |
1831 | - old_conffile = '/etc/ec2-init/ec2-config.cfg' |
1832 | - ds_deps = [DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK] |
1833 | - datasource = None |
1834 | - cloud_config_str = '' |
1835 | - datasource_name = '' |
1836 | - |
1837 | - builtin_handlers = [] |
1838 | - |
1839 | - def __init__(self, ds_deps=None, sysconfig=system_config): |
1840 | - self.builtin_handlers = [ |
1841 | - ['text/x-shellscript', self.handle_user_script, per_always], |
1842 | - ['text/cloud-config', self.handle_cloud_config, per_always], |
1843 | - ['text/upstart-job', self.handle_upstart_job, per_instance], |
1844 | - ['text/cloud-boothook', self.handle_cloud_boothook, per_always], |
1845 | - ] |
1846 | - |
1847 | - if ds_deps != None: |
1848 | - self.ds_deps = ds_deps |
1849 | - |
1850 | - self.sysconfig = sysconfig |
1851 | - |
1852 | - self.cfg = self.read_cfg() |
1853 | - |
1854 | - def read_cfg(self): |
1855 | - if self.cfg: |
1856 | - return(self.cfg) |
1857 | - |
1858 | - try: |
1859 | - conf = util.get_base_cfg(self.sysconfig, cfg_builtin, parsed_cfgs) |
1860 | - except Exception: |
1861 | - conf = get_builtin_cfg() |
1862 | - |
1863 | - # support reading the old ConfigObj format file and merging |
1864 | - # it into the yaml dictionary |
1865 | - try: |
1866 | - from configobj import ConfigObj |
1867 | - oldcfg = ConfigObj(self.old_conffile) |
1868 | - if oldcfg is None: |
1869 | - oldcfg = {} |
1870 | - conf = util.mergedict(conf, oldcfg) |
1871 | - except: |
1872 | - pass |
1873 | - |
1874 | - return(conf) |
1875 | - |
1876 | - def restore_from_cache(self): |
1877 | - try: |
1878 | - # we try to restore from a current link and static path |
1879 | - # by using the instance link, if purge_cache was called |
1880 | - # the file wont exist |
1881 | - cache = get_ipath_cur('obj_pkl') |
1882 | - f = open(cache, "rb") |
1883 | - data = cPickle.load(f) |
1884 | - f.close() |
1885 | - self.datasource = data |
1886 | - return True |
1887 | - except: |
1888 | - return False |
1889 | - |
1890 | - def write_to_cache(self): |
1891 | - cache = self.get_ipath("obj_pkl") |
1892 | - try: |
1893 | - os.makedirs(os.path.dirname(cache)) |
1894 | - except OSError as e: |
1895 | - if e.errno != errno.EEXIST: |
1896 | - return False |
1897 | - |
1898 | - try: |
1899 | - f = open(cache, "wb") |
1900 | - cPickle.dump(self.datasource, f) |
1901 | - f.close() |
1902 | - os.chmod(cache, 0400) |
1903 | - except: |
1904 | - raise |
1905 | - |
1906 | - def get_data_source(self): |
1907 | - if self.datasource is not None: |
1908 | - return True |
1909 | - |
1910 | - if self.restore_from_cache(): |
1911 | - log.debug("restored from cache type %s" % self.datasource) |
1912 | - return True |
1913 | - |
1914 | - cfglist = self.cfg['datasource_list'] |
1915 | - dslist = list_sources(cfglist, self.ds_deps) |
1916 | - dsnames = [f.__name__ for f in dslist] |
1917 | - |
1918 | - log.debug("searching for data source in %s" % dsnames) |
1919 | - for cls in dslist: |
1920 | - ds = cls.__name__ |
1921 | - try: |
1922 | - s = cls(sys_cfg=self.cfg) |
1923 | - if s.get_data(): |
1924 | - self.datasource = s |
1925 | - self.datasource_name = ds |
1926 | - log.debug("found data source %s" % ds) |
1927 | - return True |
1928 | - except Exception as e: |
1929 | - log.warn("get_data of %s raised %s" % (ds, e)) |
1930 | - util.logexc(log) |
1931 | - msg = "Did not find data source. searched classes: %s" % dsnames |
1932 | - log.debug(msg) |
1933 | - raise DataSourceNotFoundException(msg) |
1934 | - |
1935 | - def set_cur_instance(self): |
1936 | - try: |
1937 | - os.unlink(cur_instance_link) |
1938 | - except OSError as e: |
1939 | - if e.errno != errno.ENOENT: |
1940 | - raise |
1941 | - |
1942 | - iid = self.get_instance_id() |
1943 | - os.symlink("./instances/%s" % iid, cur_instance_link) |
1944 | - idir = self.get_ipath() |
1945 | - dlist = [] |
1946 | - for d in ["handlers", "scripts", "sem"]: |
1947 | - dlist.append("%s/%s" % (idir, d)) |
1948 | - |
1949 | - util.ensure_dirs(dlist) |
1950 | - |
1951 | - ds = "%s: %s\n" % (self.datasource.__class__, str(self.datasource)) |
1952 | - dp = self.get_cpath('data') |
1953 | - util.write_file("%s/%s" % (idir, 'datasource'), ds) |
1954 | - util.write_file("%s/%s" % (dp, 'previous-datasource'), ds) |
1955 | - util.write_file("%s/%s" % (dp, 'previous-instance-id'), "%s\n" % iid) |
1956 | - |
1957 | - def get_userdata(self): |
1958 | - return(self.datasource.get_userdata()) |
1959 | - |
1960 | - def get_userdata_raw(self): |
1961 | - return(self.datasource.get_userdata_raw()) |
1962 | - |
1963 | - def get_instance_id(self): |
1964 | - return(self.datasource.get_instance_id()) |
1965 | - |
1966 | - def update_cache(self): |
1967 | - self.write_to_cache() |
1968 | - self.store_userdata() |
1969 | - |
1970 | - def store_userdata(self): |
1971 | - util.write_file(self.get_ipath('userdata_raw'), |
1972 | - self.datasource.get_userdata_raw(), 0600) |
1973 | - util.write_file(self.get_ipath('userdata'), |
1974 | - self.datasource.get_userdata(), 0600) |
1975 | - |
1976 | - def sem_getpath(self, name, freq): |
1977 | - if freq == 'once-per-instance': |
1978 | - return("%s/%s" % (self.get_ipath("sem"), name)) |
1979 | - return("%s/%s.%s" % (get_cpath("sem"), name, freq)) |
1980 | - |
1981 | - def sem_has_run(self, name, freq): |
1982 | - if freq == per_always: |
1983 | - return False |
1984 | - semfile = self.sem_getpath(name, freq) |
1985 | - if os.path.exists(semfile): |
1986 | - return True |
1987 | - return False |
1988 | - |
1989 | - def sem_acquire(self, name, freq): |
1990 | - from time import time |
1991 | - semfile = self.sem_getpath(name, freq) |
1992 | - |
1993 | - try: |
1994 | - os.makedirs(os.path.dirname(semfile)) |
1995 | - except OSError as e: |
1996 | - if e.errno != errno.EEXIST: |
1997 | - raise e |
1998 | - |
1999 | - if os.path.exists(semfile) and freq != per_always: |
2000 | - return False |
2001 | - |
2002 | - # race condition |
2003 | - try: |
2004 | - f = open(semfile, "w") |
2005 | - f.write("%s\n" % str(time())) |
2006 | - f.close() |
2007 | - except: |
2008 | - return(False) |
2009 | - return(True) |
2010 | - |
2011 | - def sem_clear(self, name, freq): |
2012 | - semfile = self.sem_getpath(name, freq) |
2013 | - try: |
2014 | - os.unlink(semfile) |
2015 | - except OSError as e: |
2016 | - if e.errno != errno.ENOENT: |
2017 | - return False |
2018 | - |
2019 | - return True |
2020 | - |
2021 | - # acquire lock on 'name' for given 'freq' |
2022 | - # if that does not exist, then call 'func' with given 'args' |
2023 | - # if 'clear_on_fail' is True and func throws an exception |
2024 | - # then remove the lock (so it would run again) |
2025 | - def sem_and_run(self, semname, freq, func, args=None, clear_on_fail=False): |
2026 | - if args is None: |
2027 | - args = [] |
2028 | - if self.sem_has_run(semname, freq): |
2029 | - log.debug("%s already ran %s", semname, freq) |
2030 | - return False |
2031 | - try: |
2032 | - if not self.sem_acquire(semname, freq): |
2033 | - raise Exception("Failed to acquire lock on %s" % semname) |
2034 | - |
2035 | - func(*args) |
2036 | - except: |
2037 | - if clear_on_fail: |
2038 | - self.sem_clear(semname, freq) |
2039 | - raise |
2040 | - |
2041 | - return True |
2042 | - |
2043 | - # get_ipath : get the instance path for a name in pathmap |
2044 | - # (/var/lib/cloud/instances/<instance>/name)<name>) |
2045 | - def get_ipath(self, name=None): |
2046 | - return("%s/instances/%s%s" |
2047 | - % (varlibdir, self.get_instance_id(), pathmap[name])) |
2048 | - |
2049 | - def consume_userdata(self, frequency=per_instance): |
2050 | - self.get_userdata() |
2051 | - data = self |
2052 | - |
2053 | - cdir = get_cpath("handlers") |
2054 | - idir = self.get_ipath("handlers") |
2055 | - |
2056 | - # add the path to the plugins dir to the top of our list for import |
2057 | - # instance dir should be read before cloud-dir |
2058 | - sys.path.insert(0, cdir) |
2059 | - sys.path.insert(0, idir) |
2060 | - |
2061 | - part_handlers = {} |
2062 | - # add handlers in cdir |
2063 | - for fname in glob.glob("%s/*.py" % cdir): |
2064 | - if not os.path.isfile(fname): |
2065 | - continue |
2066 | - modname = os.path.basename(fname)[0:-3] |
2067 | - try: |
2068 | - mod = __import__(modname) |
2069 | - handler_register(mod, part_handlers, data, frequency) |
2070 | - log.debug("added handler for [%s] from %s" % (mod.list_types(), |
2071 | - fname)) |
2072 | - except: |
2073 | - log.warn("failed to initialize handler in %s" % fname) |
2074 | - util.logexc(log) |
2075 | - |
2076 | - # add the internal handers if their type hasn't been already claimed |
2077 | - for (btype, bhand, bfreq) in self.builtin_handlers: |
2078 | - if btype in part_handlers: |
2079 | - continue |
2080 | - handler_register(InternalPartHandler(bhand, [btype], bfreq), |
2081 | - part_handlers, data, frequency) |
2082 | - |
2083 | - # walk the data |
2084 | - pdata = {'handlers': part_handlers, 'handlerdir': idir, |
2085 | - 'data': data, 'frequency': frequency} |
2086 | - UserDataHandler.walk_userdata(self.get_userdata(), |
2087 | - partwalker_callback, data=pdata) |
2088 | - |
2089 | - # give callbacks opportunity to finalize |
2090 | - called = [] |
2091 | - for (_mtype, mod) in part_handlers.iteritems(): |
2092 | - if mod in called: |
2093 | - continue |
2094 | - handler_call_end(mod, data, frequency) |
2095 | - |
2096 | - def handle_user_script(self, _data, ctype, filename, payload, _frequency): |
2097 | - if ctype == "__end__": |
2098 | - return |
2099 | - if ctype == "__begin__": |
2100 | - # maybe delete existing things here |
2101 | - return |
2102 | - |
2103 | - filename = filename.replace(os.sep, '_') |
2104 | - scriptsdir = get_ipath_cur('scripts') |
2105 | - util.write_file("%s/%s" % |
2106 | - (scriptsdir, filename), util.dos2unix(payload), 0700) |
2107 | - |
2108 | - def handle_upstart_job(self, _data, ctype, filename, payload, frequency): |
2109 | - # upstart jobs are only written on the first boot |
2110 | - if frequency != per_instance: |
2111 | - return |
2112 | - |
2113 | - if ctype == "__end__" or ctype == "__begin__": |
2114 | - return |
2115 | - if not filename.endswith(".conf"): |
2116 | - filename = filename + ".conf" |
2117 | - |
2118 | - util.write_file("%s/%s" % ("/etc/init", filename), |
2119 | - util.dos2unix(payload), 0644) |
2120 | - |
2121 | - def handle_cloud_config(self, _data, ctype, filename, payload, _frequency): |
2122 | - if ctype == "__begin__": |
2123 | - self.cloud_config_str = "" |
2124 | - return |
2125 | - if ctype == "__end__": |
2126 | - cloud_config = self.get_ipath("cloud_config") |
2127 | - util.write_file(cloud_config, self.cloud_config_str, 0600) |
2128 | - |
2129 | - ## this could merge the cloud config with the system config |
2130 | - ## for now, not doing this as it seems somewhat circular |
2131 | - ## as CloudConfig does that also, merging it with this cfg |
2132 | - ## |
2133 | - # ccfg = yaml.safe_load(self.cloud_config_str) |
2134 | - # if ccfg is None: ccfg = {} |
2135 | - # self.cfg = util.mergedict(ccfg, self.cfg) |
2136 | - |
2137 | - return |
2138 | - |
2139 | - self.cloud_config_str += "\n#%s\n%s" % (filename, payload) |
2140 | - |
2141 | - def handle_cloud_boothook(self, _data, ctype, filename, payload, |
2142 | - _frequency): |
2143 | - if ctype == "__end__": |
2144 | - return |
2145 | - if ctype == "__begin__": |
2146 | - return |
2147 | - |
2148 | - filename = filename.replace(os.sep, '_') |
2149 | - payload = util.dos2unix(payload) |
2150 | - prefix = "#cloud-boothook" |
2151 | - start = 0 |
2152 | - if payload.startswith(prefix): |
2153 | - start = len(prefix) + 1 |
2154 | - |
2155 | - boothooks_dir = self.get_ipath("boothooks") |
2156 | - filepath = "%s/%s" % (boothooks_dir, filename) |
2157 | - util.write_file(filepath, payload[start:], 0700) |
2158 | - try: |
2159 | - env = os.environ.copy() |
2160 | - env['INSTANCE_ID'] = self.datasource.get_instance_id() |
2161 | - subprocess.check_call([filepath], env=env) |
2162 | - except subprocess.CalledProcessError as e: |
2163 | - log.error("boothooks script %s returned %i" % |
2164 | - (filepath, e.returncode)) |
2165 | - except Exception as e: |
2166 | - log.error("boothooks unknown exception %s when running %s" % |
2167 | - (e, filepath)) |
2168 | - |
2169 | - def get_public_ssh_keys(self): |
2170 | - return(self.datasource.get_public_ssh_keys()) |
2171 | - |
2172 | - def get_locale(self): |
2173 | - return(self.datasource.get_locale()) |
2174 | - |
2175 | - def get_mirror(self): |
2176 | - return(self.datasource.get_local_mirror()) |
2177 | - |
2178 | - def get_hostname(self, fqdn=False): |
2179 | - return(self.datasource.get_hostname(fqdn=fqdn)) |
2180 | - |
2181 | - def device_name_to_device(self, name): |
2182 | - return(self.datasource.device_name_to_device(name)) |
2183 | - |
2184 | - # I really don't know if this should be here or not, but |
2185 | - # I needed it in cc_update_hostname, where that code had a valid 'cloud' |
2186 | - # reference, but did not have a cloudinit handle |
2187 | - # (ie, no cloudinit.get_cpath()) |
2188 | - def get_cpath(self, name=None): |
2189 | - return(get_cpath(name)) |
2190 | - |
2191 | - |
2192 | -def initfs(): |
2193 | - subds = ['scripts/per-instance', 'scripts/per-once', 'scripts/per-boot', |
2194 | - 'seed', 'instances', 'handlers', 'sem', 'data'] |
2195 | - dlist = [] |
2196 | - for subd in subds: |
2197 | - dlist.append("%s/%s" % (varlibdir, subd)) |
2198 | - util.ensure_dirs(dlist) |
2199 | - |
2200 | - cfg = util.get_base_cfg(system_config, cfg_builtin, parsed_cfgs) |
2201 | - log_file = util.get_cfg_option_str(cfg, 'def_log_file', None) |
2202 | - perms = util.get_cfg_option_str(cfg, 'syslog_fix_perms', None) |
2203 | - if log_file: |
2204 | - fp = open(log_file, "ab") |
2205 | - fp.close() |
2206 | - if log_file and perms: |
2207 | - (u, g) = perms.split(':', 1) |
2208 | - if u == "-1" or u == "None": |
2209 | - u = None |
2210 | - if g == "-1" or g == "None": |
2211 | - g = None |
2212 | - util.chownbyname(log_file, u, g) |
2213 | - |
2214 | - |
2215 | -def purge_cache(rmcur=True): |
2216 | - rmlist = [boot_finished] |
2217 | - if rmcur: |
2218 | - rmlist.append(cur_instance_link) |
2219 | - for f in rmlist: |
2220 | - try: |
2221 | - os.unlink(f) |
2222 | - except OSError as e: |
2223 | - if e.errno == errno.ENOENT: |
2224 | - continue |
2225 | - return(False) |
2226 | - except: |
2227 | - return(False) |
2228 | - return(True) |
2229 | - |
2230 | - |
2231 | -# get_ipath_cur: get the current instance path for an item |
2232 | -def get_ipath_cur(name=None): |
2233 | - return("%s/%s%s" % (varlibdir, "instance", pathmap[name])) |
2234 | - |
2235 | - |
2236 | -# get_cpath : get the "clouddir" (/var/lib/cloud/<name>) |
2237 | -# for a name in dirmap |
2238 | -def get_cpath(name=None): |
2239 | - return("%s%s" % (varlibdir, pathmap[name])) |
2240 | - |
2241 | - |
2242 | -def get_base_cfg(cfg_path=None): |
2243 | - if cfg_path is None: |
2244 | - cfg_path = system_config |
2245 | - return(util.get_base_cfg(cfg_path, cfg_builtin, parsed_cfgs)) |
2246 | - |
2247 | - |
2248 | -def get_builtin_cfg(): |
2249 | - return(yaml.safe_load(cfg_builtin)) |
2250 | - |
2251 | - |
2252 | -class DataSourceNotFoundException(Exception): |
2253 | - pass |
2254 | - |
2255 | - |
2256 | -def list_sources(cfg_list, depends): |
2257 | - return(DataSource.list_sources(cfg_list, depends, ["cloudinit", ""])) |
2258 | - |
2259 | - |
2260 | -def handler_register(mod, part_handlers, data, frequency=per_instance): |
2261 | - if not hasattr(mod, "handler_version"): |
2262 | - setattr(mod, "handler_version", 1) |
2263 | - |
2264 | - for mtype in mod.list_types(): |
2265 | - part_handlers[mtype] = mod |
2266 | - |
2267 | - handler_call_begin(mod, data, frequency) |
2268 | - return(mod) |
2269 | - |
2270 | - |
2271 | -def handler_call_begin(mod, data, frequency): |
2272 | - handler_handle_part(mod, data, "__begin__", None, None, frequency) |
2273 | - |
2274 | - |
2275 | -def handler_call_end(mod, data, frequency): |
2276 | - handler_handle_part(mod, data, "__end__", None, None, frequency) |
2277 | - |
2278 | - |
2279 | -def handler_handle_part(mod, data, ctype, filename, payload, frequency): |
2280 | - # only add the handler if the module should run |
2281 | - modfreq = getattr(mod, "frequency", per_instance) |
2282 | - if not (modfreq == per_always or |
2283 | - (frequency == per_instance and modfreq == per_instance)): |
2284 | - return |
2285 | - try: |
2286 | - if mod.handler_version == 1: |
2287 | - mod.handle_part(data, ctype, filename, payload) |
2288 | - else: |
2289 | - mod.handle_part(data, ctype, filename, payload, frequency) |
2290 | - except: |
2291 | - util.logexc(log) |
2292 | - traceback.print_exc(file=sys.stderr) |
2293 | - |
2294 | - |
2295 | -def partwalker_handle_handler(pdata, _ctype, _filename, payload): |
2296 | - curcount = pdata['handlercount'] |
2297 | - modname = 'part-handler-%03d' % curcount |
2298 | - frequency = pdata['frequency'] |
2299 | - |
2300 | - modfname = modname + ".py" |
2301 | - util.write_file("%s/%s" % (pdata['handlerdir'], modfname), payload, 0600) |
2302 | - |
2303 | - try: |
2304 | - mod = __import__(modname) |
2305 | - handler_register(mod, pdata['handlers'], pdata['data'], frequency) |
2306 | - pdata['handlercount'] = curcount + 1 |
2307 | - except: |
2308 | - util.logexc(log) |
2309 | - traceback.print_exc(file=sys.stderr) |
2310 | - |
2311 | - |
2312 | -def partwalker_callback(pdata, ctype, filename, payload): |
2313 | - # data here is the part_handlers array and then the data to pass through |
2314 | - if ctype == "text/part-handler": |
2315 | - if 'handlercount' not in pdata: |
2316 | - pdata['handlercount'] = 0 |
2317 | - partwalker_handle_handler(pdata, ctype, filename, payload) |
2318 | - return |
2319 | - if ctype not in pdata['handlers'] and payload: |
2320 | - if ctype == "text/x-not-multipart": |
2321 | - # Extract the first line or 24 bytes for displaying in the log |
2322 | - start = payload.split("\n", 1)[0][:24] |
2323 | - if start < payload: |
2324 | - details = "starting '%s...'" % start.encode("string-escape") |
2325 | - else: |
2326 | - details = repr(payload) |
2327 | - log.warning("Unhandled non-multipart userdata %s", details) |
2328 | - return |
2329 | - handler_handle_part(pdata['handlers'][ctype], pdata['data'], |
2330 | - ctype, filename, payload, pdata['frequency']) |
2331 | - |
2332 | - |
2333 | -class InternalPartHandler: |
2334 | - freq = per_instance |
2335 | - mtypes = [] |
2336 | - handler_version = 1 |
2337 | - handler = None |
2338 | - |
2339 | - def __init__(self, handler, mtypes, frequency, version=2): |
2340 | - self.handler = handler |
2341 | - self.mtypes = mtypes |
2342 | - self.frequency = frequency |
2343 | - self.handler_version = version |
2344 | - |
2345 | - def __repr__(self): |
2346 | - return("InternalPartHandler: [%s]" % self.mtypes) |
2347 | - |
2348 | - def list_types(self): |
2349 | - return(self.mtypes) |
2350 | - |
2351 | - def handle_part(self, data, ctype, filename, payload, frequency): |
2352 | - return(self.handler(data, ctype, filename, payload, frequency)) |
2353 | - |
2354 | - |
2355 | -def get_cmdline_url(names=('cloud-config-url', 'url'), |
2356 | - starts="#cloud-config", cmdline=None): |
2357 | - |
2358 | - if cmdline == None: |
2359 | - cmdline = util.get_cmdline() |
2360 | - |
2361 | - data = util.keyval_str_to_dict(cmdline) |
2362 | - url = None |
2363 | - key = None |
2364 | - for key in names: |
2365 | - if key in data: |
2366 | - url = data[key] |
2367 | - break |
2368 | - if url == None: |
2369 | - return (None, None, None) |
2370 | - |
2371 | - contents = util.readurl(url) |
2372 | - |
2373 | - if contents.startswith(starts): |
2374 | - return (key, url, contents) |
2375 | - |
2376 | - return (key, url, None) |
2377 | |
2378 | === added file 'cloudinit/cloud.py' |
2379 | --- cloudinit/cloud.py 1970-01-01 00:00:00 +0000 |
2380 | +++ cloudinit/cloud.py 2012-07-06 21:16:18 +0000 |
2381 | @@ -0,0 +1,101 @@ |
2382 | +# vi: ts=4 expandtab |
2383 | +# |
2384 | +# Copyright (C) 2012 Canonical Ltd. |
2385 | +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. |
2386 | +# Copyright (C) 2012 Yahoo! Inc. |
2387 | +# |
2388 | +# Author: Scott Moser <scott.moser@canonical.com> |
2389 | +# Author: Juerg Haefliger <juerg.haefliger@hp.com> |
2390 | +# Author: Joshua Harlow <harlowja@yahoo-inc.com> |
2391 | +# |
2392 | +# This program is free software: you can redistribute it and/or modify |
2393 | +# it under the terms of the GNU General Public License version 3, as |
2394 | +# published by the Free Software Foundation. |
2395 | +# |
2396 | +# This program is distributed in the hope that it will be useful, |
2397 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
2398 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
2399 | +# GNU General Public License for more details. |
2400 | +# |
2401 | +# You should have received a copy of the GNU General Public License |
2402 | +# along with this program. If not, see <http://www.gnu.org/licenses/>. |
2403 | + |
2404 | +import copy |
2405 | +import os |
2406 | + |
2407 | +from cloudinit import log as logging |
2408 | + |
2409 | +LOG = logging.getLogger(__name__) |
2410 | + |
2411 | +# This class is the high level wrapper that provides |
2412 | +# access to cloud-init objects without exposing the stage objects |
2413 | +# to handler and or module manipulation. It allows for cloud |
2414 | +# init to restrict what those types of user facing code may see |
2415 | +# and or adjust (which helps avoid code messing with each other) |
2416 | +# |
2417 | +# It also provides util functions that avoid having to know |
2418 | +# how to get a certain member from its submembers as well |
2419 | +# as providing a backwards compatible object that can be maintained |
2420 | +# while the stages/other objects can be worked on independently... |
2421 | + |
2422 | + |
2423 | +class Cloud(object): |
2424 | + def __init__(self, datasource, paths, cfg, distro, runners): |
2425 | + self.datasource = datasource |
2426 | + self.paths = paths |
2427 | + self.distro = distro |
2428 | + self._cfg = cfg |
2429 | + self._runners = runners |
2430 | + |
2431 | + # If a 'user' manipulates logging or logging services |
2432 | + # it is typically useful to cause the logging to be |
2433 | + # setup again. |
2434 | + def cycle_logging(self): |
2435 | + logging.resetLogging() |
2436 | + logging.setupLogging(self.cfg) |
2437 | + |
2438 | + @property |
2439 | + def cfg(self): |
2440 | + # Ensure that not indirectly modified |
2441 | + return copy.deepcopy(self._cfg) |
2442 | + |
2443 | + def run(self, name, functor, args, freq=None, clear_on_fail=False): |
2444 | + return self._runners.run(name, functor, args, freq, clear_on_fail) |
2445 | + |
2446 | + def get_template_filename(self, name): |
2447 | + fn = self.paths.template_tpl % (name) |
2448 | + if not os.path.isfile(fn): |
2449 | + LOG.warn("No template found at %s for template named %s", fn, name) |
2450 | + return None |
2451 | + return fn |
2452 | + |
2453 | + # The rest of thes are just useful proxies |
2454 | + def get_userdata(self): |
2455 | + return self.datasource.get_userdata() |
2456 | + |
2457 | + def get_instance_id(self): |
2458 | + return self.datasource.get_instance_id() |
2459 | + |
2460 | + def get_public_ssh_keys(self): |
2461 | + return self.datasource.get_public_ssh_keys() |
2462 | + |
2463 | + def get_locale(self): |
2464 | + return self.datasource.get_locale() |
2465 | + |
2466 | + def get_local_mirror(self): |
2467 | + return self.datasource.get_local_mirror() |
2468 | + |
2469 | + def get_hostname(self, fqdn=False): |
2470 | + return self.datasource.get_hostname(fqdn=fqdn) |
2471 | + |
2472 | + def device_name_to_device(self, name): |
2473 | + return self.datasource.device_name_to_device(name) |
2474 | + |
2475 | + def get_ipath_cur(self, name=None): |
2476 | + return self.paths.get_ipath_cur(name) |
2477 | + |
2478 | + def get_cpath(self, name=None): |
2479 | + return self.paths.get_cpath(name) |
2480 | + |
2481 | + def get_ipath(self, name=None): |
2482 | + return self.paths.get_ipath(name) |
2483 | |
2484 | === renamed directory 'cloudinit/CloudConfig' => 'cloudinit/config' |
2485 | === modified file 'cloudinit/config/__init__.py' |
2486 | --- cloudinit/CloudConfig/__init__.py 2012-06-13 13:11:27 +0000 |
2487 | +++ cloudinit/config/__init__.py 2012-07-06 21:16:18 +0000 |
2488 | @@ -19,256 +19,38 @@ |
2489 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
2490 | # |
2491 | |
2492 | -import yaml |
2493 | -import cloudinit |
2494 | -import cloudinit.util as util |
2495 | -import sys |
2496 | -import traceback |
2497 | -import os |
2498 | -import subprocess |
2499 | -import time |
2500 | - |
2501 | -per_instance = cloudinit.per_instance |
2502 | -per_always = cloudinit.per_always |
2503 | -per_once = cloudinit.per_once |
2504 | - |
2505 | - |
2506 | -class CloudConfig(): |
2507 | - cfgfile = None |
2508 | - cfg = None |
2509 | - |
2510 | - def __init__(self, cfgfile, cloud=None, ds_deps=None): |
2511 | - if cloud == None: |
2512 | - self.cloud = cloudinit.CloudInit(ds_deps) |
2513 | - self.cloud.get_data_source() |
2514 | - else: |
2515 | - self.cloud = cloud |
2516 | - self.cfg = self.get_config_obj(cfgfile) |
2517 | - |
2518 | - def get_config_obj(self, cfgfile): |
2519 | - try: |
2520 | - cfg = util.read_conf(cfgfile) |
2521 | - except: |
2522 | - # TODO: this 'log' could/should be passed in |
2523 | - cloudinit.log.critical("Failed loading of cloud config '%s'. " |
2524 | - "Continuing with empty config\n" % cfgfile) |
2525 | - cloudinit.log.debug(traceback.format_exc() + "\n") |
2526 | - cfg = None |
2527 | - if cfg is None: |
2528 | - cfg = {} |
2529 | - |
2530 | - try: |
2531 | - ds_cfg = self.cloud.datasource.get_config_obj() |
2532 | - except: |
2533 | - ds_cfg = {} |
2534 | - |
2535 | - cfg = util.mergedict(cfg, ds_cfg) |
2536 | - return(util.mergedict(cfg, self.cloud.cfg)) |
2537 | - |
2538 | - def handle(self, name, args, freq=None): |
2539 | - try: |
2540 | - mod = __import__("cc_" + name.replace("-", "_"), globals()) |
2541 | - def_freq = getattr(mod, "frequency", per_instance) |
2542 | - handler = getattr(mod, "handle") |
2543 | - |
2544 | - if not freq: |
2545 | - freq = def_freq |
2546 | - |
2547 | - self.cloud.sem_and_run("config-" + name, freq, handler, |
2548 | - [name, self.cfg, self.cloud, cloudinit.log, args]) |
2549 | - except: |
2550 | - raise |
2551 | - |
2552 | - |
2553 | -# reads a cloudconfig module list, returns |
2554 | -# a 2 dimensional array suitable to pass to run_cc_modules |
2555 | -def read_cc_modules(cfg, name): |
2556 | - if name not in cfg: |
2557 | - return([]) |
2558 | - module_list = [] |
2559 | - # create 'module_list', an array of arrays |
2560 | - # where array[0] = config |
2561 | - # array[1] = freq |
2562 | - # array[2:] = arguemnts |
2563 | - for item in cfg[name]: |
2564 | - if isinstance(item, str): |
2565 | - module_list.append((item,)) |
2566 | - elif isinstance(item, list): |
2567 | - module_list.append(item) |
2568 | - else: |
2569 | - raise TypeError("failed to read '%s' item in config") |
2570 | - return(module_list) |
2571 | - |
2572 | - |
2573 | -def run_cc_modules(cc, module_list, log): |
2574 | - failures = [] |
2575 | - for cfg_mod in module_list: |
2576 | - name = cfg_mod[0] |
2577 | - freq = None |
2578 | - run_args = [] |
2579 | - if len(cfg_mod) > 1: |
2580 | - freq = cfg_mod[1] |
2581 | - if len(cfg_mod) > 2: |
2582 | - run_args = cfg_mod[2:] |
2583 | - |
2584 | - try: |
2585 | - log.debug("handling %s with freq=%s and args=%s" % |
2586 | - (name, freq, run_args)) |
2587 | - cc.handle(name, run_args, freq=freq) |
2588 | - except: |
2589 | - log.warn(traceback.format_exc()) |
2590 | - log.error("config handling of %s, %s, %s failed\n" % |
2591 | - (name, freq, run_args)) |
2592 | - failures.append(name) |
2593 | - |
2594 | - return(failures) |
2595 | - |
2596 | - |
2597 | -# always returns well formated values |
2598 | -# cfg is expected to have an entry 'output' in it, which is a dictionary |
2599 | -# that includes entries for 'init', 'config', 'final' or 'all' |
2600 | -# init: /var/log/cloud.out |
2601 | -# config: [ ">> /var/log/cloud-config.out", /var/log/cloud-config.err ] |
2602 | -# final: |
2603 | -# output: "| logger -p" |
2604 | -# error: "> /dev/null" |
2605 | -# this returns the specific 'mode' entry, cleanly formatted, with value |
2606 | -# None if if none is given |
2607 | -def get_output_cfg(cfg, mode="init"): |
2608 | - ret = [None, None] |
2609 | - if not 'output' in cfg: |
2610 | - return ret |
2611 | - |
2612 | - outcfg = cfg['output'] |
2613 | - if mode in outcfg: |
2614 | - modecfg = outcfg[mode] |
2615 | +from cloudinit.settings import (PER_INSTANCE, FREQUENCIES) |
2616 | + |
2617 | +from cloudinit import log as logging |
2618 | + |
2619 | +LOG = logging.getLogger(__name__) |
2620 | + |
2621 | +# This prefix is used to reduce |
2622 | +# the chance that, when importing, |
2623 | +# we will find something else with the same |
2624 | +# name in the lookup path... |
2625 | +MOD_PREFIX = "cc_" |
2626 | + |
2627 | + |
2628 | +def form_module_name(name): |
2629 | + canon_name = name.replace("-", "_") |
2630 | + if canon_name.lower().endswith(".py"): |
2631 | + canon_name = canon_name[0:(len(canon_name) - 3)] |
2632 | + canon_name = canon_name.strip() |
2633 | + if not canon_name: |
2634 | + return None |
2635 | + if not canon_name.startswith(MOD_PREFIX): |
2636 | + canon_name = '%s%s' % (MOD_PREFIX, canon_name) |
2637 | + return canon_name |
2638 | + |
2639 | + |
2640 | +def fixup_module(mod, def_freq=PER_INSTANCE): |
2641 | + if not hasattr(mod, 'frequency'): |
2642 | + setattr(mod, 'frequency', def_freq) |
2643 | else: |
2644 | - if 'all' not in outcfg: |
2645 | - return ret |
2646 | - # if there is a 'all' item in the output list |
2647 | - # then it applies to all users of this (init, config, final) |
2648 | - modecfg = outcfg['all'] |
2649 | - |
2650 | - # if value is a string, it specifies stdout and stderr |
2651 | - if isinstance(modecfg, str): |
2652 | - ret = [modecfg, modecfg] |
2653 | - |
2654 | - # if its a list, then we expect (stdout, stderr) |
2655 | - if isinstance(modecfg, list): |
2656 | - if len(modecfg) > 0: |
2657 | - ret[0] = modecfg[0] |
2658 | - if len(modecfg) > 1: |
2659 | - ret[1] = modecfg[1] |
2660 | - |
2661 | - # if it is a dictionary, expect 'out' and 'error' |
2662 | - # items, which indicate out and error |
2663 | - if isinstance(modecfg, dict): |
2664 | - if 'output' in modecfg: |
2665 | - ret[0] = modecfg['output'] |
2666 | - if 'error' in modecfg: |
2667 | - ret[1] = modecfg['error'] |
2668 | - |
2669 | - # if err's entry == "&1", then make it same as stdout |
2670 | - # as in shell syntax of "echo foo >/dev/null 2>&1" |
2671 | - if ret[1] == "&1": |
2672 | - ret[1] = ret[0] |
2673 | - |
2674 | - swlist = [">>", ">", "|"] |
2675 | - for i in range(len(ret)): |
2676 | - if not ret[i]: |
2677 | - continue |
2678 | - val = ret[i].lstrip() |
2679 | - found = False |
2680 | - for s in swlist: |
2681 | - if val.startswith(s): |
2682 | - val = "%s %s" % (s, val[len(s):].strip()) |
2683 | - found = True |
2684 | - break |
2685 | - if not found: |
2686 | - # default behavior is append |
2687 | - val = "%s %s" % (">>", val.strip()) |
2688 | - ret[i] = val |
2689 | - |
2690 | - return(ret) |
2691 | - |
2692 | - |
2693 | -# redirect_output(outfmt, errfmt, orig_out, orig_err) |
2694 | -# replace orig_out and orig_err with filehandles specified in outfmt or errfmt |
2695 | -# fmt can be: |
2696 | -# > FILEPATH |
2697 | -# >> FILEPATH |
2698 | -# | program [ arg1 [ arg2 [ ... ] ] ] |
2699 | -# |
2700 | -# with a '|', arguments are passed to shell, so one level of |
2701 | -# shell escape is required. |
2702 | -def redirect_output(outfmt, errfmt, o_out=sys.stdout, o_err=sys.stderr): |
2703 | - if outfmt: |
2704 | - (mode, arg) = outfmt.split(" ", 1) |
2705 | - if mode == ">" or mode == ">>": |
2706 | - owith = "ab" |
2707 | - if mode == ">": |
2708 | - owith = "wb" |
2709 | - new_fp = open(arg, owith) |
2710 | - elif mode == "|": |
2711 | - proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE) |
2712 | - new_fp = proc.stdin |
2713 | - else: |
2714 | - raise TypeError("invalid type for outfmt: %s" % outfmt) |
2715 | - |
2716 | - if o_out: |
2717 | - os.dup2(new_fp.fileno(), o_out.fileno()) |
2718 | - if errfmt == outfmt: |
2719 | - os.dup2(new_fp.fileno(), o_err.fileno()) |
2720 | - return |
2721 | - |
2722 | - if errfmt: |
2723 | - (mode, arg) = errfmt.split(" ", 1) |
2724 | - if mode == ">" or mode == ">>": |
2725 | - owith = "ab" |
2726 | - if mode == ">": |
2727 | - owith = "wb" |
2728 | - new_fp = open(arg, owith) |
2729 | - elif mode == "|": |
2730 | - proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE) |
2731 | - new_fp = proc.stdin |
2732 | - else: |
2733 | - raise TypeError("invalid type for outfmt: %s" % outfmt) |
2734 | - |
2735 | - if o_err: |
2736 | - os.dup2(new_fp.fileno(), o_err.fileno()) |
2737 | - return |
2738 | - |
2739 | - |
2740 | -def run_per_instance(name, func, args, clear_on_fail=False): |
2741 | - semfile = "%s/%s" % (cloudinit.get_ipath_cur("data"), name) |
2742 | - if os.path.exists(semfile): |
2743 | - return |
2744 | - |
2745 | - util.write_file(semfile, str(time.time())) |
2746 | - try: |
2747 | - func(*args) |
2748 | - except: |
2749 | - if clear_on_fail: |
2750 | - os.unlink(semfile) |
2751 | - raise |
2752 | - |
2753 | - |
2754 | -# apt_get top level command (install, update...), and args to pass it |
2755 | -def apt_get(tlc, args=None): |
2756 | - if args is None: |
2757 | - args = [] |
2758 | - e = os.environ.copy() |
2759 | - e['DEBIAN_FRONTEND'] = 'noninteractive' |
2760 | - cmd = ['apt-get', '--option', 'Dpkg::Options::=--force-confold', |
2761 | - '--assume-yes', '--quiet', tlc] |
2762 | - cmd.extend(args) |
2763 | - subprocess.check_call(cmd, env=e) |
2764 | - |
2765 | - |
2766 | -def update_package_sources(): |
2767 | - run_per_instance("update-sources", apt_get, ("update",)) |
2768 | - |
2769 | - |
2770 | -def install_packages(pkglist): |
2771 | - update_package_sources() |
2772 | - apt_get("install", pkglist) |
2773 | + freq = mod.frequency |
2774 | + if freq and freq not in FREQUENCIES: |
2775 | + LOG.warn("Module %s has an unknown frequency %s", mod, freq) |
2776 | + if not hasattr(mod, 'distros'): |
2777 | + setattr(mod, 'distros', None) |
2778 | + return mod |
2779 | |
2780 | === modified file 'cloudinit/config/cc_apt_pipelining.py' |
2781 | --- cloudinit/CloudConfig/cc_apt_pipelining.py 2012-03-09 15:26:09 +0000 |
2782 | +++ cloudinit/config/cc_apt_pipelining.py 2012-07-06 21:16:18 +0000 |
2783 | @@ -16,38 +16,44 @@ |
2784 | # You should have received a copy of the GNU General Public License |
2785 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
2786 | |
2787 | -import cloudinit.util as util |
2788 | -from cloudinit.CloudConfig import per_instance |
2789 | - |
2790 | -frequency = per_instance |
2791 | -default_file = "/etc/apt/apt.conf.d/90cloud-init-pipelining" |
2792 | - |
2793 | - |
2794 | -def handle(_name, cfg, _cloud, log, _args): |
2795 | +from cloudinit import util |
2796 | +from cloudinit.settings import PER_INSTANCE |
2797 | + |
2798 | +frequency = PER_INSTANCE |
2799 | + |
2800 | +distros = ['ubuntu', 'debian'] |
2801 | + |
2802 | +DEFAULT_FILE = "/etc/apt/apt.conf.d/90cloud-init-pipelining" |
2803 | + |
2804 | +APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n" |
2805 | + 'Acquire::http::Pipeline-Depth "%s";\n') |
2806 | + |
2807 | +# Acquire::http::Pipeline-Depth can be a value |
2808 | +# from 0 to 5 indicating how many outstanding requests APT should send. |
2809 | +# A value of zero MUST be specified if the remote host does not properly linger |
2810 | +# on TCP connections - otherwise data corruption will occur. |
2811 | + |
2812 | + |
2813 | +def handle(_name, cfg, cloud, log, _args): |
2814 | |
2815 | apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", False) |
2816 | - apt_pipe_value = str(apt_pipe_value).lower() |
2817 | - |
2818 | - if apt_pipe_value == "false": |
2819 | - write_apt_snippet("0", log) |
2820 | - |
2821 | - elif apt_pipe_value in ("none", "unchanged", "os"): |
2822 | + apt_pipe_value_s = str(apt_pipe_value).lower().strip() |
2823 | + |
2824 | + if apt_pipe_value_s == "false": |
2825 | + write_apt_snippet(cloud, "0", log, DEFAULT_FILE) |
2826 | + elif apt_pipe_value_s in ("none", "unchanged", "os"): |
2827 | return |
2828 | - |
2829 | - elif apt_pipe_value in str(range(0, 6)): |
2830 | - write_apt_snippet(apt_pipe_value, log) |
2831 | - |
2832 | + elif apt_pipe_value_s in [str(b) for b in xrange(0, 6)]: |
2833 | + write_apt_snippet(cloud, apt_pipe_value_s, log, DEFAULT_FILE) |
2834 | else: |
2835 | - log.warn("Invalid option for apt_pipeling: %s" % apt_pipe_value) |
2836 | - |
2837 | - |
2838 | -def write_apt_snippet(setting, log, f_name=default_file): |
2839 | + log.warn("Invalid option for apt_pipeling: %s", apt_pipe_value) |
2840 | + |
2841 | + |
2842 | +def write_apt_snippet(cloud, setting, log, f_name): |
2843 | """ Writes f_name with apt pipeline depth 'setting' """ |
2844 | |
2845 | - acquire_pipeline_depth = 'Acquire::http::Pipeline-Depth "%s";\n' |
2846 | - file_contents = ("//Written by cloud-init per 'apt_pipelining'\n" |
2847 | - + (acquire_pipeline_depth % setting)) |
2848 | - |
2849 | - util.write_file(f_name, file_contents) |
2850 | - |
2851 | - log.debug("Wrote %s with APT pipeline setting" % f_name) |
2852 | + file_contents = APT_PIPE_TPL % (setting) |
2853 | + |
2854 | + util.write_file(cloud.paths.join(False, f_name), file_contents) |
2855 | + |
2856 | + log.debug("Wrote %s with apt pipeline depth setting %s", f_name, setting) |
2857 | |
2858 | === modified file 'cloudinit/config/cc_apt_update_upgrade.py' |
2859 | --- cloudinit/CloudConfig/cc_apt_update_upgrade.py 2012-01-18 14:07:33 +0000 |
2860 | +++ cloudinit/config/cc_apt_update_upgrade.py 2012-07-06 21:16:18 +0000 |
2861 | @@ -18,50 +18,73 @@ |
2862 | # You should have received a copy of the GNU General Public License |
2863 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
2864 | |
2865 | -import cloudinit.util as util |
2866 | -import subprocess |
2867 | -import traceback |
2868 | +import glob |
2869 | import os |
2870 | -import glob |
2871 | -import cloudinit.CloudConfig as cc |
2872 | - |
2873 | - |
2874 | -def handle(_name, cfg, cloud, log, _args): |
2875 | + |
2876 | +from cloudinit import templater |
2877 | +from cloudinit import util |
2878 | + |
2879 | +distros = ['ubuntu', 'debian'] |
2880 | + |
2881 | +PROXY_TPL = "Acquire::HTTP::Proxy \"%s\";\n" |
2882 | +PROXY_FN = "/etc/apt/apt.conf.d/95cloud-init-proxy" |
2883 | + |
2884 | +# A temporary shell program to get a given gpg key |
2885 | +# from a given keyserver |
2886 | +EXPORT_GPG_KEYID = """ |
2887 | + k=${1} ks=${2}; |
2888 | + exec 2>/dev/null |
2889 | + [ -n "$k" ] || exit 1; |
2890 | + armour=$(gpg --list-keys --armour "${k}") |
2891 | + if [ -z "${armour}" ]; then |
2892 | + gpg --keyserver ${ks} --recv $k >/dev/null && |
2893 | + armour=$(gpg --export --armour "${k}") && |
2894 | + gpg --batch --yes --delete-keys "${k}" |
2895 | + fi |
2896 | + [ -n "${armour}" ] && echo "${armour}" |
2897 | +""" |
2898 | + |
2899 | + |
2900 | +def handle(name, cfg, cloud, log, _args): |
2901 | update = util.get_cfg_option_bool(cfg, 'apt_update', False) |
2902 | upgrade = util.get_cfg_option_bool(cfg, 'apt_upgrade', False) |
2903 | |
2904 | release = get_release() |
2905 | - |
2906 | mirror = find_apt_mirror(cloud, cfg) |
2907 | - |
2908 | - log.debug("selected mirror at: %s" % mirror) |
2909 | - |
2910 | - if not util.get_cfg_option_bool(cfg, \ |
2911 | - 'apt_preserve_sources_list', False): |
2912 | - generate_sources_list(release, mirror) |
2913 | - old_mir = util.get_cfg_option_str(cfg, 'apt_old_mirror', \ |
2914 | - "archive.ubuntu.com/ubuntu") |
2915 | + if not mirror: |
2916 | + log.debug(("Skipping module named %s," |
2917 | + " no package 'mirror' located"), name) |
2918 | + return |
2919 | + |
2920 | + log.debug("Selected mirror at: %s" % mirror) |
2921 | + |
2922 | + if not util.get_cfg_option_bool(cfg, |
2923 | + 'apt_preserve_sources_list', False): |
2924 | + generate_sources_list(release, mirror, cloud, log) |
2925 | + old_mir = util.get_cfg_option_str(cfg, 'apt_old_mirror', |
2926 | + "archive.ubuntu.com/ubuntu") |
2927 | rename_apt_lists(old_mir, mirror) |
2928 | |
2929 | - # set up proxy |
2930 | + # Set up any apt proxy |
2931 | proxy = cfg.get("apt_proxy", None) |
2932 | - proxy_filename = "/etc/apt/apt.conf.d/95cloud-init-proxy" |
2933 | + proxy_filename = PROXY_FN |
2934 | if proxy: |
2935 | try: |
2936 | - contents = "Acquire::HTTP::Proxy \"%s\";\n" |
2937 | - with open(proxy_filename, "w") as fp: |
2938 | - fp.write(contents % proxy) |
2939 | + # See man 'apt.conf' |
2940 | + contents = PROXY_TPL % (proxy) |
2941 | + util.write_file(cloud.paths.join(False, proxy_filename), |
2942 | + contents) |
2943 | except Exception as e: |
2944 | - log.warn("Failed to write proxy to %s" % proxy_filename) |
2945 | + util.logexc(log, "Failed to write proxy to %s", proxy_filename) |
2946 | elif os.path.isfile(proxy_filename): |
2947 | - os.unlink(proxy_filename) |
2948 | + util.del_file(proxy_filename) |
2949 | |
2950 | - # process 'apt_sources' |
2951 | + # Process 'apt_sources' |
2952 | if 'apt_sources' in cfg: |
2953 | - errors = add_sources(cfg['apt_sources'], |
2954 | + errors = add_sources(cloud, cfg['apt_sources'], |
2955 | {'MIRROR': mirror, 'RELEASE': release}) |
2956 | for e in errors: |
2957 | - log.warn("Source Error: %s\n" % ':'.join(e)) |
2958 | + log.warn("Source Error: %s", ':'.join(e)) |
2959 | |
2960 | dconf_sel = util.get_cfg_option_str(cfg, 'debconf_selections', False) |
2961 | if dconf_sel: |
2962 | @@ -69,45 +92,51 @@ |
2963 | try: |
2964 | util.subp(('debconf-set-selections', '-'), dconf_sel) |
2965 | except: |
2966 | - log.error("Failed to run debconf-set-selections") |
2967 | - log.debug(traceback.format_exc()) |
2968 | + util.logexc(log, "Failed to run debconf-set-selections") |
2969 | |
2970 | - pkglist = util.get_cfg_option_list_or_str(cfg, 'packages', []) |
2971 | + pkglist = util.get_cfg_option_list(cfg, 'packages', []) |
2972 | |
2973 | errors = [] |
2974 | if update or len(pkglist) or upgrade: |
2975 | try: |
2976 | - cc.update_package_sources() |
2977 | - except subprocess.CalledProcessError as e: |
2978 | - log.warn("apt-get update failed") |
2979 | - log.debug(traceback.format_exc()) |
2980 | + cloud.distro.update_package_sources() |
2981 | + except Exception as e: |
2982 | + util.logexc(log, "Package update failed") |
2983 | errors.append(e) |
2984 | |
2985 | if upgrade: |
2986 | try: |
2987 | - cc.apt_get("upgrade") |
2988 | - except subprocess.CalledProcessError as e: |
2989 | - log.warn("apt upgrade failed") |
2990 | - log.debug(traceback.format_exc()) |
2991 | + cloud.distro.package_command("upgrade") |
2992 | + except Exception as e: |
2993 | + util.logexc(log, "Package upgrade failed") |
2994 | errors.append(e) |
2995 | |
2996 | if len(pkglist): |
2997 | try: |
2998 | - cc.install_packages(pkglist) |
2999 | - except subprocess.CalledProcessError as e: |
3000 | - log.warn("Failed to install packages: %s " % pkglist) |
3001 | - log.debug(traceback.format_exc()) |
3002 | + cloud.distro.install_packages(pkglist) |
3003 | + except Exception as e: |
3004 | + util.logexc(log, "Failed to install packages: %s ", pkglist) |
3005 | errors.append(e) |
3006 | |
3007 | if len(errors): |
3008 | - raise errors[0] |
3009 | - |
3010 | - return(True) |
3011 | + log.warn("%s failed with exceptions, re-raising the last one", |
3012 | + len(errors)) |
3013 | + raise errors[-1] |
3014 | + |
3015 | + |
3016 | +# get gpg keyid from keyserver |
3017 | +def getkeybyid(keyid, keyserver): |
3018 | + with util.ExtendedTemporaryFile(suffix='.sh') as fh: |
3019 | + fh.write(EXPORT_GPG_KEYID) |
3020 | + fh.flush() |
3021 | + cmd = ['/bin/sh', fh.name, keyid, keyserver] |
3022 | + (stdout, _stderr) = util.subp(cmd) |
3023 | + return stdout.strip() |
3024 | |
3025 | |
3026 | def mirror2lists_fileprefix(mirror): |
3027 | string = mirror |
3028 | - # take of http:// or ftp:// |
3029 | + # take off http:// or ftp:// |
3030 | if string.endswith("/"): |
3031 | string = string[0:-1] |
3032 | pos = string.find("://") |
3033 | @@ -118,39 +147,44 @@ |
3034 | |
3035 | |
3036 | def rename_apt_lists(omirror, new_mirror, lists_d="/var/lib/apt/lists"): |
3037 | - oprefix = "%s/%s" % (lists_d, mirror2lists_fileprefix(omirror)) |
3038 | - nprefix = "%s/%s" % (lists_d, mirror2lists_fileprefix(new_mirror)) |
3039 | - if(oprefix == nprefix): |
3040 | + oprefix = os.path.join(lists_d, mirror2lists_fileprefix(omirror)) |
3041 | + nprefix = os.path.join(lists_d, mirror2lists_fileprefix(new_mirror)) |
3042 | + if oprefix == nprefix: |
3043 | return |
3044 | olen = len(oprefix) |
3045 | for filename in glob.glob("%s_*" % oprefix): |
3046 | - os.rename(filename, "%s%s" % (nprefix, filename[olen:])) |
3047 | + # TODO use the cloud.paths.join... |
3048 | + util.rename(filename, "%s%s" % (nprefix, filename[olen:])) |
3049 | |
3050 | |
3051 | def get_release(): |
3052 | - stdout, _stderr = subprocess.Popen(['lsb_release', '-cs'], |
3053 | - stdout=subprocess.PIPE).communicate() |
3054 | - return(str(stdout).strip()) |
3055 | - |
3056 | - |
3057 | -def generate_sources_list(codename, mirror): |
3058 | - util.render_to_file('sources.list', '/etc/apt/sources.list', \ |
3059 | - {'mirror': mirror, 'codename': codename}) |
3060 | - |
3061 | - |
3062 | -def add_sources(srclist, searchList=None): |
3063 | + (stdout, _stderr) = util.subp(['lsb_release', '-cs']) |
3064 | + return stdout.strip() |
3065 | + |
3066 | + |
3067 | +def generate_sources_list(codename, mirror, cloud, log): |
3068 | + template_fn = cloud.get_template_filename('sources.list') |
3069 | + if template_fn: |
3070 | + params = {'mirror': mirror, 'codename': codename} |
3071 | + out_fn = cloud.paths.join(False, '/etc/apt/sources.list') |
3072 | + templater.render_to_file(template_fn, out_fn, params) |
3073 | + else: |
3074 | + log.warn("No template found, not rendering /etc/apt/sources.list") |
3075 | + |
3076 | + |
3077 | +def add_sources(cloud, srclist, template_params=None): |
3078 | """ |
3079 | add entries in /etc/apt/sources.list.d for each abbreviated |
3080 | sources.list entry in 'srclist'. When rendering template, also |
3081 | include the values in dictionary searchList |
3082 | """ |
3083 | - if searchList is None: |
3084 | - searchList = {} |
3085 | - elst = [] |
3086 | + if template_params is None: |
3087 | + template_params = {} |
3088 | |
3089 | + errorlist = [] |
3090 | for ent in srclist: |
3091 | if 'source' not in ent: |
3092 | - elst.append(["", "missing source"]) |
3093 | + errorlist.append(["", "missing source"]) |
3094 | continue |
3095 | |
3096 | source = ent['source'] |
3097 | @@ -158,51 +192,48 @@ |
3098 | try: |
3099 | util.subp(["add-apt-repository", source]) |
3100 | except: |
3101 | - elst.append([source, "add-apt-repository failed"]) |
3102 | + errorlist.append([source, "add-apt-repository failed"]) |
3103 | continue |
3104 | |
3105 | - source = util.render_string(source, searchList) |
3106 | + source = templater.render_string(source, template_params) |
3107 | |
3108 | if 'filename' not in ent: |
3109 | ent['filename'] = 'cloud_config_sources.list' |
3110 | |
3111 | if not ent['filename'].startswith("/"): |
3112 | - ent['filename'] = "%s/%s" % \ |
3113 | - ("/etc/apt/sources.list.d/", ent['filename']) |
3114 | + ent['filename'] = os.path.join("/etc/apt/sources.list.d/", |
3115 | + ent['filename']) |
3116 | |
3117 | if ('keyid' in ent and 'key' not in ent): |
3118 | ks = "keyserver.ubuntu.com" |
3119 | if 'keyserver' in ent: |
3120 | ks = ent['keyserver'] |
3121 | try: |
3122 | - ent['key'] = util.getkeybyid(ent['keyid'], ks) |
3123 | + ent['key'] = getkeybyid(ent['keyid'], ks) |
3124 | except: |
3125 | - elst.append([source, "failed to get key from %s" % ks]) |
3126 | + errorlist.append([source, "failed to get key from %s" % ks]) |
3127 | continue |
3128 | |
3129 | if 'key' in ent: |
3130 | try: |
3131 | util.subp(('apt-key', 'add', '-'), ent['key']) |
3132 | except: |
3133 | - elst.append([source, "failed add key"]) |
3134 | + errorlist.append([source, "failed add key"]) |
3135 | |
3136 | try: |
3137 | - util.write_file(ent['filename'], source + "\n", omode="ab") |
3138 | + contents = "%s\n" % (source) |
3139 | + util.write_file(cloud.paths.join(False, ent['filename']), |
3140 | + contents, omode="ab") |
3141 | except: |
3142 | - elst.append([source, "failed write to file %s" % ent['filename']]) |
3143 | + errorlist.append([source, |
3144 | + "failed write to file %s" % ent['filename']]) |
3145 | |
3146 | - return(elst) |
3147 | + return errorlist |
3148 | |
3149 | |
3150 | def find_apt_mirror(cloud, cfg): |
3151 | """ find an apt_mirror given the cloud and cfg provided """ |
3152 | |
3153 | - # TODO: distro and defaults should be configurable |
3154 | - distro = "ubuntu" |
3155 | - defaults = { |
3156 | - 'ubuntu': "http://archive.ubuntu.com/ubuntu", |
3157 | - 'debian': "http://archive.debian.org/debian", |
3158 | - } |
3159 | mirror = None |
3160 | |
3161 | cfg_mirror = cfg.get("apt_mirror", None) |
3162 | @@ -211,14 +242,13 @@ |
3163 | elif "apt_mirror_search" in cfg: |
3164 | mirror = util.search_for_mirror(cfg['apt_mirror_search']) |
3165 | else: |
3166 | - if cloud: |
3167 | - mirror = cloud.get_mirror() |
3168 | + mirror = cloud.get_local_mirror() |
3169 | |
3170 | mydom = "" |
3171 | |
3172 | doms = [] |
3173 | |
3174 | - if not mirror and cloud: |
3175 | + if not mirror: |
3176 | # if we have a fqdn, then search its domain portion first |
3177 | (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) |
3178 | mydom = ".".join(fqdn.split(".")[1:]) |
3179 | @@ -229,13 +259,14 @@ |
3180 | doms.extend((".localdomain", "",)) |
3181 | |
3182 | mirror_list = [] |
3183 | + distro = cloud.distro.name |
3184 | mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro) |
3185 | for post in doms: |
3186 | - mirror_list.append(mirrorfmt % post) |
3187 | + mirror_list.append(mirrorfmt % (post)) |
3188 | |
3189 | mirror = util.search_for_mirror(mirror_list) |
3190 | |
3191 | if not mirror: |
3192 | - mirror = defaults[distro] |
3193 | + mirror = cloud.distro.get_package_mirror() |
3194 | |
3195 | return mirror |
3196 | |
3197 | === modified file 'cloudinit/config/cc_bootcmd.py' |
3198 | --- cloudinit/CloudConfig/cc_bootcmd.py 2012-01-18 14:07:33 +0000 |
3199 | +++ cloudinit/config/cc_bootcmd.py 2012-07-06 21:16:18 +0000 |
3200 | @@ -17,32 +17,39 @@ |
3201 | # |
3202 | # You should have received a copy of the GNU General Public License |
3203 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
3204 | -import cloudinit.util as util |
3205 | -import subprocess |
3206 | -import tempfile |
3207 | + |
3208 | import os |
3209 | -from cloudinit.CloudConfig import per_always |
3210 | -frequency = per_always |
3211 | - |
3212 | - |
3213 | -def handle(_name, cfg, cloud, log, _args): |
3214 | + |
3215 | +from cloudinit import util |
3216 | +from cloudinit.settings import PER_ALWAYS |
3217 | + |
3218 | +frequency = PER_ALWAYS |
3219 | + |
3220 | + |
3221 | +def handle(name, cfg, cloud, log, _args): |
3222 | + |
3223 | if "bootcmd" not in cfg: |
3224 | + log.debug(("Skipping module named %s," |
3225 | + " no 'bootcmd' key in configuration"), name) |
3226 | return |
3227 | |
3228 | - try: |
3229 | - content = util.shellify(cfg["bootcmd"]) |
3230 | - tmpf = tempfile.TemporaryFile() |
3231 | - tmpf.write(content) |
3232 | - tmpf.seek(0) |
3233 | - except: |
3234 | - log.warn("failed to shellify bootcmd") |
3235 | - raise |
3236 | + with util.ExtendedTemporaryFile(suffix=".sh") as tmpf: |
3237 | + try: |
3238 | + content = util.shellify(cfg["bootcmd"]) |
3239 | + tmpf.write(content) |
3240 | + tmpf.flush() |
3241 | + except: |
3242 | + util.logexc(log, "Failed to shellify bootcmd") |
3243 | + raise |
3244 | |
3245 | - try: |
3246 | - env = os.environ.copy() |
3247 | - env['INSTANCE_ID'] = cloud.get_instance_id() |
3248 | - subprocess.check_call(['/bin/sh'], env=env, stdin=tmpf) |
3249 | - tmpf.close() |
3250 | - except: |
3251 | - log.warn("failed to run commands from bootcmd") |
3252 | - raise |
3253 | + try: |
3254 | + env = os.environ.copy() |
3255 | + iid = cloud.get_instance_id() |
3256 | + if iid: |
3257 | + env['INSTANCE_ID'] = str(iid) |
3258 | + cmd = ['/bin/sh', tmpf.name] |
3259 | + util.subp(cmd, env=env, capture=False) |
3260 | + except: |
3261 | + util.logexc(log, |
3262 | + ("Failed to run bootcmd module %s"), name) |
3263 | + raise |
3264 | |
3265 | === modified file 'cloudinit/config/cc_byobu.py' |
3266 | --- cloudinit/CloudConfig/cc_byobu.py 2012-01-18 14:07:33 +0000 |
3267 | +++ cloudinit/config/cc_byobu.py 2012-07-06 21:16:18 +0000 |
3268 | @@ -18,18 +18,19 @@ |
3269 | # You should have received a copy of the GNU General Public License |
3270 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
3271 | |
3272 | -import cloudinit.util as util |
3273 | -import subprocess |
3274 | -import traceback |
3275 | - |
3276 | - |
3277 | -def handle(_name, cfg, _cloud, log, args): |
3278 | +from cloudinit import util |
3279 | + |
3280 | +distros = ['ubuntu', 'debian'] |
3281 | + |
3282 | + |
3283 | +def handle(name, cfg, _cloud, log, args): |
3284 | if len(args) != 0: |
3285 | value = args[0] |
3286 | else: |
3287 | value = util.get_cfg_option_str(cfg, "byobu_by_default", "") |
3288 | |
3289 | if not value: |
3290 | + log.debug("Skipping module named %s, no 'byobu' values found", name) |
3291 | return |
3292 | |
3293 | if value == "user" or value == "system": |
3294 | @@ -38,7 +39,7 @@ |
3295 | valid = ("enable-user", "enable-system", "enable", |
3296 | "disable-user", "disable-system", "disable") |
3297 | if not value in valid: |
3298 | - log.warn("Unknown value %s for byobu_by_default" % value) |
3299 | + log.warn("Unknown value %s for byobu_by_default", value) |
3300 | |
3301 | mod_user = value.endswith("-user") |
3302 | mod_sys = value.endswith("-system") |
3303 | @@ -65,13 +66,6 @@ |
3304 | |
3305 | cmd = ["/bin/sh", "-c", "%s %s %s" % ("X=0;", shcmd, "exit $X")] |
3306 | |
3307 | - log.debug("setting byobu to %s" % value) |
3308 | + log.debug("Setting byobu to %s", value) |
3309 | |
3310 | - try: |
3311 | - subprocess.check_call(cmd) |
3312 | - except subprocess.CalledProcessError as e: |
3313 | - log.debug(traceback.format_exc(e)) |
3314 | - raise Exception("Cmd returned %s: %s" % (e.returncode, cmd)) |
3315 | - except OSError as e: |
3316 | - log.debug(traceback.format_exc(e)) |
3317 | - raise Exception("Cmd failed to execute: %s" % (cmd)) |
3318 | + util.subp(cmd, capture=False) |
3319 | |
3320 | === modified file 'cloudinit/config/cc_ca_certs.py' |
3321 | --- cloudinit/CloudConfig/cc_ca_certs.py 2012-03-08 12:45:43 +0000 |
3322 | +++ cloudinit/config/cc_ca_certs.py 2012-07-06 21:16:18 +0000 |
3323 | @@ -13,25 +13,27 @@ |
3324 | # |
3325 | # You should have received a copy of the GNU General Public License |
3326 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
3327 | + |
3328 | import os |
3329 | -from subprocess import check_call |
3330 | -from cloudinit.util import (write_file, get_cfg_option_list_or_str, |
3331 | - delete_dir_contents, subp) |
3332 | + |
3333 | +from cloudinit import util |
3334 | |
3335 | CA_CERT_PATH = "/usr/share/ca-certificates/" |
3336 | CA_CERT_FILENAME = "cloud-init-ca-certs.crt" |
3337 | CA_CERT_CONFIG = "/etc/ca-certificates.conf" |
3338 | CA_CERT_SYSTEM_PATH = "/etc/ssl/certs/" |
3339 | |
3340 | +distros = ['ubuntu', 'debian'] |
3341 | + |
3342 | |
3343 | def update_ca_certs(): |
3344 | """ |
3345 | Updates the CA certificate cache on the current machine. |
3346 | """ |
3347 | - check_call(["update-ca-certificates"]) |
3348 | - |
3349 | - |
3350 | -def add_ca_certs(certs): |
3351 | + util.subp(["update-ca-certificates"], capture=False) |
3352 | + |
3353 | + |
3354 | +def add_ca_certs(paths, certs): |
3355 | """ |
3356 | Adds certificates to the system. To actually apply the new certificates |
3357 | you must also call L{update_ca_certs}. |
3358 | @@ -39,26 +41,29 @@ |
3359 | @param certs: A list of certificate strings. |
3360 | """ |
3361 | if certs: |
3362 | - cert_file_contents = "\n".join(certs) |
3363 | + # First ensure they are strings... |
3364 | + cert_file_contents = "\n".join([str(c) for c in certs]) |
3365 | cert_file_fullpath = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME) |
3366 | - write_file(cert_file_fullpath, cert_file_contents, mode=0644) |
3367 | + cert_file_fullpath = paths.join(False, cert_file_fullpath) |
3368 | + util.write_file(cert_file_fullpath, cert_file_contents, mode=0644) |
3369 | # Append cert filename to CA_CERT_CONFIG file. |
3370 | - write_file(CA_CERT_CONFIG, "\n%s" % CA_CERT_FILENAME, omode="a") |
3371 | - |
3372 | - |
3373 | -def remove_default_ca_certs(): |
3374 | + util.write_file(paths.join(False, CA_CERT_CONFIG), |
3375 | + "\n%s" % CA_CERT_FILENAME, omode="ab") |
3376 | + |
3377 | + |
3378 | +def remove_default_ca_certs(paths): |
3379 | """ |
3380 | Removes all default trusted CA certificates from the system. To actually |
3381 | apply the change you must also call L{update_ca_certs}. |
3382 | """ |
3383 | - delete_dir_contents(CA_CERT_PATH) |
3384 | - delete_dir_contents(CA_CERT_SYSTEM_PATH) |
3385 | - write_file(CA_CERT_CONFIG, "", mode=0644) |
3386 | + util.delete_dir_contents(paths.join(False, CA_CERT_PATH)) |
3387 | + util.delete_dir_contents(paths.join(False, CA_CERT_SYSTEM_PATH)) |
3388 | + util.write_file(paths.join(False, CA_CERT_CONFIG), "", mode=0644) |
3389 | debconf_sel = "ca-certificates ca-certificates/trust_new_crts select no" |
3390 | - subp(('debconf-set-selections', '-'), debconf_sel) |
3391 | - |
3392 | - |
3393 | -def handle(_name, cfg, _cloud, log, _args): |
3394 | + util.subp(('debconf-set-selections', '-'), debconf_sel) |
3395 | + |
3396 | + |
3397 | +def handle(name, cfg, cloud, log, _args): |
3398 | """ |
3399 | Call to handle ca-cert sections in cloud-config file. |
3400 | |
3401 | @@ -70,21 +75,25 @@ |
3402 | """ |
3403 | # If there isn't a ca-certs section in the configuration don't do anything |
3404 | if "ca-certs" not in cfg: |
3405 | + log.debug(("Skipping module named %s," |
3406 | + " no 'ca-certs' key in configuration"), name) |
3407 | return |
3408 | + |
3409 | ca_cert_cfg = cfg['ca-certs'] |
3410 | |
3411 | # If there is a remove-defaults option set to true, remove the system |
3412 | # default trusted CA certs first. |
3413 | if ca_cert_cfg.get("remove-defaults", False): |
3414 | - log.debug("removing default certificates") |
3415 | - remove_default_ca_certs() |
3416 | + log.debug("Removing default certificates") |
3417 | + remove_default_ca_certs(cloud.paths) |
3418 | |
3419 | # If we are given any new trusted CA certs to add, add them. |
3420 | if "trusted" in ca_cert_cfg: |
3421 | - trusted_certs = get_cfg_option_list_or_str(ca_cert_cfg, "trusted") |
3422 | + trusted_certs = util.get_cfg_option_list(ca_cert_cfg, "trusted") |
3423 | if trusted_certs: |
3424 | - log.debug("adding %d certificates" % len(trusted_certs)) |
3425 | - add_ca_certs(trusted_certs) |
3426 | + log.debug("Adding %d certificates" % len(trusted_certs)) |
3427 | + add_ca_certs(cloud.paths, trusted_certs) |
3428 | |
3429 | # Update the system with the new cert configuration. |
3430 | + log.debug("Updating certificates") |
3431 | update_ca_certs() |
3432 | |
3433 | === modified file 'cloudinit/config/cc_chef.py' |
3434 | --- cloudinit/CloudConfig/cc_chef.py 2012-03-26 17:49:06 +0000 |
3435 | +++ cloudinit/config/cc_chef.py 2012-07-06 21:16:18 +0000 |
3436 | @@ -18,53 +18,71 @@ |
3437 | # You should have received a copy of the GNU General Public License |
3438 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
3439 | |
3440 | +import json |
3441 | import os |
3442 | -import subprocess |
3443 | -import json |
3444 | -import cloudinit.CloudConfig as cc |
3445 | -import cloudinit.util as util |
3446 | - |
3447 | -ruby_version_default = "1.8" |
3448 | - |
3449 | - |
3450 | -def handle(_name, cfg, cloud, log, _args): |
3451 | + |
3452 | +from cloudinit import templater |
3453 | +from cloudinit import util |
3454 | + |
3455 | +RUBY_VERSION_DEFAULT = "1.8" |
3456 | + |
3457 | + |
3458 | +def handle(name, cfg, cloud, log, _args): |
3459 | + |
3460 | # If there isn't a chef key in the configuration don't do anything |
3461 | if 'chef' not in cfg: |
3462 | + log.debug(("Skipping module named %s," |
3463 | + " no 'chef' key in configuration"), name) |
3464 | return |
3465 | chef_cfg = cfg['chef'] |
3466 | |
3467 | - # ensure the chef directories we use exist |
3468 | - mkdirs(['/etc/chef', '/var/log/chef', '/var/lib/chef', |
3469 | - '/var/cache/chef', '/var/backups/chef', '/var/run/chef']) |
3470 | + # Ensure the chef directories we use exist |
3471 | + c_dirs = [ |
3472 | + '/etc/chef', |
3473 | + '/var/log/chef', |
3474 | + '/var/lib/chef', |
3475 | + '/var/cache/chef', |
3476 | + '/var/backups/chef', |
3477 | + '/var/run/chef', |
3478 | + ] |
3479 | + for d in c_dirs: |
3480 | + util.ensure_dir(cloud.paths.join(False, d)) |
3481 | |
3482 | - # set the validation key based on the presence of either 'validation_key' |
3483 | + # Set the validation key based on the presence of either 'validation_key' |
3484 | # or 'validation_cert'. In the case where both exist, 'validation_key' |
3485 | # takes precedence |
3486 | for key in ('validation_key', 'validation_cert'): |
3487 | if key in chef_cfg and chef_cfg[key]: |
3488 | - with open('/etc/chef/validation.pem', 'w') as validation_key_fh: |
3489 | - validation_key_fh.write(chef_cfg[key]) |
3490 | + v_fn = cloud.paths.join(False, '/etc/chef/validation.pem') |
3491 | + util.write_file(v_fn, chef_cfg[key]) |
3492 | break |
3493 | |
3494 | - # create the chef config from template |
3495 | - util.render_to_file('chef_client.rb', '/etc/chef/client.rb', |
3496 | - {'server_url': chef_cfg['server_url'], |
3497 | - 'node_name': util.get_cfg_option_str(chef_cfg, 'node_name', |
3498 | - cloud.datasource.get_instance_id()), |
3499 | - 'environment': util.get_cfg_option_str(chef_cfg, 'environment', |
3500 | - '_default'), |
3501 | - 'validation_name': chef_cfg['validation_name']}) |
3502 | + # Create the chef config from template |
3503 | + template_fn = cloud.get_template_filename('chef_client.rb') |
3504 | + if template_fn: |
3505 | + iid = str(cloud.datasource.get_instance_id()) |
3506 | + params = { |
3507 | + 'server_url': chef_cfg['server_url'], |
3508 | + 'node_name': util.get_cfg_option_str(chef_cfg, 'node_name', iid), |
3509 | + 'environment': util.get_cfg_option_str(chef_cfg, 'environment', |
3510 | + '_default'), |
3511 | + 'validation_name': chef_cfg['validation_name'] |
3512 | + } |
3513 | + out_fn = cloud.paths.join(False, '/etc/chef/client.rb') |
3514 | + templater.render_to_file(template_fn, out_fn, params) |
3515 | + else: |
3516 | + log.warn("No template found, not rendering to /etc/chef/client.rb") |
3517 | |
3518 | # set the firstboot json |
3519 | - with open('/etc/chef/firstboot.json', 'w') as firstboot_json_fh: |
3520 | - initial_json = {} |
3521 | - if 'run_list' in chef_cfg: |
3522 | - initial_json['run_list'] = chef_cfg['run_list'] |
3523 | - if 'initial_attributes' in chef_cfg: |
3524 | - initial_attributes = chef_cfg['initial_attributes'] |
3525 | - for k in initial_attributes.keys(): |
3526 | - initial_json[k] = initial_attributes[k] |
3527 | - firstboot_json_fh.write(json.dumps(initial_json)) |
3528 | + initial_json = {} |
3529 | + if 'run_list' in chef_cfg: |
3530 | + initial_json['run_list'] = chef_cfg['run_list'] |
3531 | + if 'initial_attributes' in chef_cfg: |
3532 | + initial_attributes = chef_cfg['initial_attributes'] |
3533 | + for k in list(initial_attributes.keys()): |
3534 | + initial_json[k] = initial_attributes[k] |
3535 | + firstboot_fn = cloud.paths.join(False, '/etc/chef/firstboot.json') |
3536 | + util.write_file(firstboot_fn, json.dumps(initial_json)) |
3537 | |
3538 | # If chef is not installed, we install chef based on 'install_type' |
3539 | if not os.path.isfile('/usr/bin/chef-client'): |
3540 | @@ -74,15 +92,17 @@ |
3541 | # this will install and run the chef-client from gems |
3542 | chef_version = util.get_cfg_option_str(chef_cfg, 'version', None) |
3543 | ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version', |
3544 | - ruby_version_default) |
3545 | - install_chef_from_gems(ruby_version, chef_version) |
3546 | + RUBY_VERSION_DEFAULT) |
3547 | + install_chef_from_gems(ruby_version, chef_version, cloud.distro) |
3548 | # and finally, run chef-client |
3549 | - log.debug('running chef-client') |
3550 | - subprocess.check_call(['/usr/bin/chef-client', '-d', '-i', '1800', |
3551 | - '-s', '20']) |
3552 | + log.debug('Running chef-client') |
3553 | + util.subp(['/usr/bin/chef-client', |
3554 | + '-d', '-i', '1800', '-s', '20'], capture=False) |
3555 | + elif install_type == 'packages': |
3556 | + # this will install and run the chef-client from packages |
3557 | + cloud.distro.install_packages(('chef',)) |
3558 | else: |
3559 | - # this will install and run the chef-client from packages |
3560 | - cc.install_packages(('chef',)) |
3561 | + log.warn("Unknown chef install type %s", install_type) |
3562 | |
3563 | |
3564 | def get_ruby_packages(version): |
3565 | @@ -90,30 +110,20 @@ |
3566 | pkgs = ['ruby%s' % version, 'ruby%s-dev' % version] |
3567 | if version == "1.8": |
3568 | pkgs.extend(('libopenssl-ruby1.8', 'rubygems1.8')) |
3569 | - return(pkgs) |
3570 | - |
3571 | - |
3572 | -def install_chef_from_gems(ruby_version, chef_version=None): |
3573 | - cc.install_packages(get_ruby_packages(ruby_version)) |
3574 | + return pkgs |
3575 | + |
3576 | + |
3577 | +def install_chef_from_gems(ruby_version, chef_version, distro): |
3578 | + distro.install_packages(get_ruby_packages(ruby_version)) |
3579 | if not os.path.exists('/usr/bin/gem'): |
3580 | - os.symlink('/usr/bin/gem%s' % ruby_version, '/usr/bin/gem') |
3581 | + util.sym_link('/usr/bin/gem%s' % ruby_version, '/usr/bin/gem') |
3582 | if not os.path.exists('/usr/bin/ruby'): |
3583 | - os.symlink('/usr/bin/ruby%s' % ruby_version, '/usr/bin/ruby') |
3584 | + util.sym_link('/usr/bin/ruby%s' % ruby_version, '/usr/bin/ruby') |
3585 | if chef_version: |
3586 | - subprocess.check_call(['/usr/bin/gem', 'install', 'chef', |
3587 | - '-v %s' % chef_version, '--no-ri', |
3588 | - '--no-rdoc', '--bindir', '/usr/bin', '-q']) |
3589 | + util.subp(['/usr/bin/gem', 'install', 'chef', |
3590 | + '-v %s' % chef_version, '--no-ri', |
3591 | + '--no-rdoc', '--bindir', '/usr/bin', '-q'], capture=False) |
3592 | else: |
3593 | - subprocess.check_call(['/usr/bin/gem', 'install', 'chef', |
3594 | - '--no-ri', '--no-rdoc', '--bindir', |
3595 | - '/usr/bin', '-q']) |
3596 | - |
3597 | - |
3598 | -def ensure_dir(d): |
3599 | - if not os.path.exists(d): |
3600 | - os.makedirs(d) |
3601 | - |
3602 | - |
3603 | -def mkdirs(dirs): |
3604 | - for d in dirs: |
3605 | - ensure_dir(d) |
3606 | + util.subp(['/usr/bin/gem', 'install', 'chef', |
3607 | + '--no-ri', '--no-rdoc', '--bindir', |
3608 | + '/usr/bin', '-q'], capture=False) |
3609 | |
3610 | === modified file 'cloudinit/config/cc_disable_ec2_metadata.py' |
3611 | --- cloudinit/CloudConfig/cc_disable_ec2_metadata.py 2012-01-18 14:07:33 +0000 |
3612 | +++ cloudinit/config/cc_disable_ec2_metadata.py 2012-07-06 21:16:18 +0000 |
3613 | @@ -17,14 +17,20 @@ |
3614 | # |
3615 | # You should have received a copy of the GNU General Public License |
3616 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
3617 | -import cloudinit.util as util |
3618 | -import subprocess |
3619 | -from cloudinit.CloudConfig import per_always |
3620 | - |
3621 | -frequency = per_always |
3622 | - |
3623 | - |
3624 | -def handle(_name, cfg, _cloud, _log, _args): |
3625 | - if util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False): |
3626 | - fwall = "route add -host 169.254.169.254 reject" |
3627 | - subprocess.call(fwall.split(' ')) |
3628 | + |
3629 | +from cloudinit import util |
3630 | + |
3631 | +from cloudinit.settings import PER_ALWAYS |
3632 | + |
3633 | +frequency = PER_ALWAYS |
3634 | + |
3635 | +REJECT_CMD = ['route', 'add', '-host', '169.254.169.254', 'reject'] |
3636 | + |
3637 | + |
3638 | +def handle(name, cfg, _cloud, log, _args): |
3639 | + disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False) |
3640 | + if disabled: |
3641 | + util.subp(REJECT_CMD, capture=False) |
3642 | + else: |
3643 | + log.debug(("Skipping module named %s," |
3644 | + " disabling the ec2 route not enabled"), name) |
3645 | |
3646 | === modified file 'cloudinit/config/cc_final_message.py' |
3647 | --- cloudinit/CloudConfig/cc_final_message.py 2012-01-18 14:07:33 +0000 |
3648 | +++ cloudinit/config/cc_final_message.py 2012-07-06 21:16:18 +0000 |
3649 | @@ -18,41 +18,51 @@ |
3650 | # You should have received a copy of the GNU General Public License |
3651 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
3652 | |
3653 | -from cloudinit.CloudConfig import per_always |
3654 | -import sys |
3655 | -from cloudinit import util, boot_finished |
3656 | -import time |
3657 | - |
3658 | -frequency = per_always |
3659 | - |
3660 | -final_message = "cloud-init boot finished at $TIMESTAMP. Up $UPTIME seconds" |
3661 | - |
3662 | - |
3663 | -def handle(_name, cfg, _cloud, log, args): |
3664 | +from cloudinit import templater |
3665 | +from cloudinit import util |
3666 | +from cloudinit import version |
3667 | + |
3668 | +from cloudinit.settings import PER_ALWAYS |
3669 | + |
3670 | +frequency = PER_ALWAYS |
3671 | + |
3672 | +FINAL_MESSAGE_DEF = ("Cloud-init v. {{version}} finished at {{timestamp}}." |
3673 | + " Up {{uptime}} seconds.") |
3674 | + |
3675 | + |
3676 | +def handle(_name, cfg, cloud, log, args): |
3677 | + |
3678 | + msg_in = None |
3679 | if len(args) != 0: |
3680 | msg_in = args[0] |
3681 | else: |
3682 | - msg_in = util.get_cfg_option_str(cfg, "final_message", final_message) |
3683 | - |
3684 | - try: |
3685 | - uptimef = open("/proc/uptime") |
3686 | - uptime = uptimef.read().split(" ")[0] |
3687 | - uptimef.close() |
3688 | - except IOError as e: |
3689 | - log.warn("unable to open /proc/uptime\n") |
3690 | - uptime = "na" |
3691 | - |
3692 | - try: |
3693 | - ts = time.strftime("%a, %d %b %Y %H:%M:%S %z", time.gmtime()) |
3694 | + msg_in = util.get_cfg_option_str(cfg, "final_message") |
3695 | + |
3696 | + if not msg_in: |
3697 | + template_fn = cloud.get_template_filename('final_message') |
3698 | + if template_fn: |
3699 | + msg_in = util.load_file(template_fn) |
3700 | + |
3701 | + if not msg_in: |
3702 | + msg_in = FINAL_MESSAGE_DEF |
3703 | + |
3704 | + uptime = util.uptime() |
3705 | + ts = util.time_rfc2822() |
3706 | + cver = version.version_string() |
3707 | + try: |
3708 | + subs = { |
3709 | + 'uptime': uptime, |
3710 | + 'timestamp': ts, |
3711 | + 'version': cver, |
3712 | + } |
3713 | + util.multi_log("%s\n" % (templater.render_string(msg_in, subs)), |
3714 | + console=False, stderr=True) |
3715 | + except Exception: |
3716 | + util.logexc(log, "Failed to render final message template") |
3717 | + |
3718 | + boot_fin_fn = cloud.paths.boot_finished |
3719 | + try: |
3720 | + contents = "%s - %s - v. %s\n" % (uptime, ts, cver) |
3721 | + util.write_file(boot_fin_fn, contents) |
3722 | except: |
3723 | - ts = "na" |
3724 | - |
3725 | - try: |
3726 | - subs = {'UPTIME': uptime, 'TIMESTAMP': ts} |
3727 | - sys.stdout.write("%s\n" % util.render_string(msg_in, subs)) |
3728 | - except Exception as e: |
3729 | - log.warn("failed to render string to stdout: %s" % e) |
3730 | - |
3731 | - fp = open(boot_finished, "wb") |
3732 | - fp.write(uptime + "\n") |
3733 | - fp.close() |
3734 | + util.logexc(log, "Failed to write boot finished file %s", boot_fin_fn) |
3735 | |
3736 | === modified file 'cloudinit/config/cc_foo.py' |
3737 | --- cloudinit/CloudConfig/cc_foo.py 2012-01-18 14:07:33 +0000 |
3738 | +++ cloudinit/config/cc_foo.py 2012-07-06 21:16:18 +0000 |
3739 | @@ -18,12 +18,35 @@ |
3740 | # You should have received a copy of the GNU General Public License |
3741 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
3742 | |
3743 | -#import cloudinit |
3744 | -#import cloudinit.util as util |
3745 | -from cloudinit.CloudConfig import per_instance |
3746 | - |
3747 | -frequency = per_instance |
3748 | - |
3749 | - |
3750 | -def handle(_name, _cfg, _cloud, _log, _args): |
3751 | - print "hi" |
3752 | +from cloudinit.settings import PER_INSTANCE |
3753 | + |
3754 | +# Modules are expected to have the following attributes. |
3755 | +# 1. A required 'handle' method which takes the following params. |
3756 | +# a) The name will not be this files name, but instead |
3757 | +# the name specified in configuration (which is the name |
3758 | +# which will be used to find this module). |
3759 | +# b) A configuration object that is the result of the merging |
3760 | +# of cloud configs configuration with legacy configuration |
3761 | +# as well as any datasource provided configuration |
3762 | +# c) A cloud object that can be used to access various |
3763 | +# datasource and paths for the given distro and data provided |
3764 | +# by the various datasource instance types. |
3765 | +# d) A argument list that may or may not be empty to this module. |
3766 | +# Typically those are from module configuration where the module |
3767 | +# is defined with some extra configuration that will eventually |
3768 | +# be translated from yaml into arguments to this module. |
3769 | +# 2. A optional 'frequency' that defines how often this module should be ran. |
3770 | +# Typically one of PER_INSTANCE, PER_ALWAYS, PER_ONCE. If not |
3771 | +# provided PER_INSTANCE will be assumed. |
3772 | +# See settings.py for these constants. |
3773 | +# 3. A optional 'distros' array/set/tuple that defines the known distros |
3774 | +# this module will work with (if not all of them). This is used to write |
3775 | +# a warning out if a module is being ran on a untested distribution for |
3776 | +# informational purposes. If non existent all distros are assumed and |
3777 | +# no warning occurs. |
3778 | + |
3779 | +frequency = PER_INSTANCE |
3780 | + |
3781 | + |
3782 | +def handle(name, _cfg, _cloud, log, _args): |
3783 | + log.debug("Hi from module %s", name) |
3784 | |
3785 | === modified file 'cloudinit/config/cc_grub_dpkg.py' |
3786 | --- cloudinit/CloudConfig/cc_grub_dpkg.py 2012-01-18 14:07:33 +0000 |
3787 | +++ cloudinit/config/cc_grub_dpkg.py 2012-07-06 21:16:18 +0000 |
3788 | @@ -18,10 +18,12 @@ |
3789 | # You should have received a copy of the GNU General Public License |
3790 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
3791 | |
3792 | -import cloudinit.util as util |
3793 | -import traceback |
3794 | import os |
3795 | |
3796 | +from cloudinit import util |
3797 | + |
3798 | +distros = ['ubuntu', 'debian'] |
3799 | + |
3800 | |
3801 | def handle(_name, cfg, _cloud, log, _args): |
3802 | idevs = None |
3803 | @@ -35,14 +37,14 @@ |
3804 | |
3805 | if ((os.path.exists("/dev/sda1") and not os.path.exists("/dev/sda")) or |
3806 | (os.path.exists("/dev/xvda1") and not os.path.exists("/dev/xvda"))): |
3807 | - if idevs == None: |
3808 | + if idevs is None: |
3809 | idevs = "" |
3810 | - if idevs_empty == None: |
3811 | + if idevs_empty is None: |
3812 | idevs_empty = "true" |
3813 | else: |
3814 | - if idevs_empty == None: |
3815 | + if idevs_empty is None: |
3816 | idevs_empty = "false" |
3817 | - if idevs == None: |
3818 | + if idevs is None: |
3819 | idevs = "/dev/sda" |
3820 | for dev in ("/dev/sda", "/dev/vda", "/dev/sda1", "/dev/vda1"): |
3821 | if os.path.exists(dev): |
3822 | @@ -52,13 +54,14 @@ |
3823 | # now idevs and idevs_empty are set to determined values |
3824 | # or, those set by user |
3825 | |
3826 | - dconf_sel = "grub-pc grub-pc/install_devices string %s\n" % idevs + \ |
3827 | - "grub-pc grub-pc/install_devices_empty boolean %s\n" % idevs_empty |
3828 | - log.debug("setting grub debconf-set-selections with '%s','%s'" % |
3829 | + dconf_sel = (("grub-pc grub-pc/install_devices string %s\n" |
3830 | + "grub-pc grub-pc/install_devices_empty boolean %s\n") % |
3831 | + (idevs, idevs_empty)) |
3832 | + |
3833 | + log.debug("Setting grub debconf-set-selections with '%s','%s'" % |
3834 | (idevs, idevs_empty)) |
3835 | |
3836 | try: |
3837 | - util.subp(('debconf-set-selections'), dconf_sel) |
3838 | + util.subp(['debconf-set-selections'], dconf_sel) |
3839 | except: |
3840 | - log.error("Failed to run debconf-set-selections for grub-dpkg") |
3841 | - log.debug(traceback.format_exc()) |
3842 | + util.logexc(log, "Failed to run debconf-set-selections for grub-dpkg") |
3843 | |
3844 | === modified file 'cloudinit/config/cc_keys_to_console.py' |
3845 | --- cloudinit/CloudConfig/cc_keys_to_console.py 2012-01-18 14:07:33 +0000 |
3846 | +++ cloudinit/config/cc_keys_to_console.py 2012-07-06 21:16:18 +0000 |
3847 | @@ -18,25 +18,36 @@ |
3848 | # You should have received a copy of the GNU General Public License |
3849 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
3850 | |
3851 | -from cloudinit.CloudConfig import per_instance |
3852 | -import cloudinit.util as util |
3853 | -import subprocess |
3854 | - |
3855 | -frequency = per_instance |
3856 | - |
3857 | - |
3858 | -def handle(_name, cfg, _cloud, log, _args): |
3859 | - cmd = ['/usr/lib/cloud-init/write-ssh-key-fingerprints'] |
3860 | - fp_blacklist = util.get_cfg_option_list_or_str(cfg, |
3861 | - "ssh_fp_console_blacklist", []) |
3862 | - key_blacklist = util.get_cfg_option_list_or_str(cfg, |
3863 | - "ssh_key_console_blacklist", ["ssh-dss"]) |
3864 | +import os |
3865 | + |
3866 | +from cloudinit.settings import PER_INSTANCE |
3867 | +from cloudinit import util |
3868 | + |
3869 | +frequency = PER_INSTANCE |
3870 | + |
3871 | +# This is a tool that cloud init provides |
3872 | +HELPER_TOOL = '/usr/lib/cloud-init/write-ssh-key-fingerprints' |
3873 | + |
3874 | + |
3875 | +def handle(name, cfg, _cloud, log, _args): |
3876 | + if not os.path.exists(HELPER_TOOL): |
3877 | + log.warn(("Unable to activate module %s," |
3878 | + " helper tool not found at %s"), name, HELPER_TOOL) |
3879 | + return |
3880 | + |
3881 | + fp_blacklist = util.get_cfg_option_list(cfg, |
3882 | + "ssh_fp_console_blacklist", []) |
3883 | + key_blacklist = util.get_cfg_option_list(cfg, |
3884 | + "ssh_key_console_blacklist", |
3885 | + ["ssh-dss"]) |
3886 | + |
3887 | try: |
3888 | - confp = open('/dev/console', "wb") |
3889 | + cmd = [HELPER_TOOL] |
3890 | cmd.append(','.join(fp_blacklist)) |
3891 | cmd.append(','.join(key_blacklist)) |
3892 | - subprocess.call(cmd, stdout=confp) |
3893 | - confp.close() |
3894 | + (stdout, _stderr) = util.subp(cmd) |
3895 | + util.multi_log("%s\n" % (stdout.strip()), |
3896 | + stderr=False, console=True) |
3897 | except: |
3898 | - log.warn("writing keys to console value") |
3899 | + log.warn("Writing keys to the system console failed!") |
3900 | raise |
3901 | |
3902 | === modified file 'cloudinit/config/cc_landscape.py' |
3903 | --- cloudinit/CloudConfig/cc_landscape.py 2012-04-10 20:22:47 +0000 |
3904 | +++ cloudinit/config/cc_landscape.py 2012-07-06 21:16:18 +0000 |
3905 | @@ -19,16 +19,23 @@ |
3906 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
3907 | |
3908 | import os |
3909 | -import os.path |
3910 | -from cloudinit.CloudConfig import per_instance |
3911 | + |
3912 | +from StringIO import StringIO |
3913 | + |
3914 | from configobj import ConfigObj |
3915 | |
3916 | -frequency = per_instance |
3917 | - |
3918 | -lsc_client_cfg_file = "/etc/landscape/client.conf" |
3919 | +from cloudinit import util |
3920 | + |
3921 | +from cloudinit.settings import PER_INSTANCE |
3922 | + |
3923 | +frequency = PER_INSTANCE |
3924 | + |
3925 | +LSC_CLIENT_CFG_FILE = "/etc/landscape/client.conf" |
3926 | + |
3927 | +distros = ['ubuntu'] |
3928 | |
3929 | # defaults taken from stock client.conf in landscape-client 11.07.1.1-0ubuntu2 |
3930 | -lsc_builtincfg = { |
3931 | +LSC_BUILTIN_CFG = { |
3932 | 'client': { |
3933 | 'log_level': "info", |
3934 | 'url': "https://landscape.canonical.com/message-system", |
3935 | @@ -38,7 +45,7 @@ |
3936 | } |
3937 | |
3938 | |
3939 | -def handle(_name, cfg, _cloud, log, _args): |
3940 | +def handle(_name, cfg, cloud, log, _args): |
3941 | """ |
3942 | Basically turn a top level 'landscape' entry with a 'client' dict |
3943 | and render it to ConfigObj format under '[client]' section in |
3944 | @@ -47,27 +54,40 @@ |
3945 | |
3946 | ls_cloudcfg = cfg.get("landscape", {}) |
3947 | |
3948 | - if not isinstance(ls_cloudcfg, dict): |
3949 | - raise(Exception("'landscape' existed in config, but not a dict")) |
3950 | - |
3951 | - merged = mergeTogether([lsc_builtincfg, lsc_client_cfg_file, ls_cloudcfg]) |
3952 | - |
3953 | - if not os.path.isdir(os.path.dirname(lsc_client_cfg_file)): |
3954 | - os.makedirs(os.path.dirname(lsc_client_cfg_file)) |
3955 | - |
3956 | - with open(lsc_client_cfg_file, "w") as fp: |
3957 | - merged.write(fp) |
3958 | - |
3959 | - log.debug("updated %s" % lsc_client_cfg_file) |
3960 | - |
3961 | - |
3962 | -def mergeTogether(objs): |
3963 | + if not isinstance(ls_cloudcfg, (dict)): |
3964 | + raise RuntimeError(("'landscape' key existed in config," |
3965 | + " but not a dictionary type," |
3966 | + " is a %s instead"), util.obj_name(ls_cloudcfg)) |
3967 | + |
3968 | + merge_data = [ |
3969 | + LSC_BUILTIN_CFG, |
3970 | + cloud.paths.join(True, LSC_CLIENT_CFG_FILE), |
3971 | + ls_cloudcfg, |
3972 | + ] |
3973 | + merged = merge_together(merge_data) |
3974 | + |
3975 | + lsc_client_fn = cloud.paths.join(False, LSC_CLIENT_CFG_FILE) |
3976 | + lsc_dir = cloud.paths.join(False, os.path.dirname(lsc_client_fn)) |
3977 | + if not os.path.isdir(lsc_dir): |
3978 | + util.ensure_dir(lsc_dir) |
3979 | + |
3980 | + contents = StringIO() |
3981 | + merged.write(contents) |
3982 | + contents.flush() |
3983 | + |
3984 | + util.write_file(lsc_client_fn, contents.getvalue()) |
3985 | + log.debug("Wrote landscape config file to %s", lsc_client_fn) |
3986 | + |
3987 | + |
3988 | +def merge_together(objs): |
3989 | """ |
3990 | merge together ConfigObj objects or things that ConfigObj() will take in |
3991 | later entries override earlier |
3992 | """ |
3993 | cfg = ConfigObj({}) |
3994 | for obj in objs: |
3995 | + if not obj: |
3996 | + continue |
3997 | if isinstance(obj, ConfigObj): |
3998 | cfg.merge(obj) |
3999 | else: |
4000 | |
4001 | === modified file 'cloudinit/config/cc_locale.py' |
4002 | --- cloudinit/CloudConfig/cc_locale.py 2012-01-18 14:07:33 +0000 |
4003 | +++ cloudinit/config/cc_locale.py 2012-07-06 21:16:18 +0000 |
4004 | @@ -18,37 +18,20 @@ |
4005 | # You should have received a copy of the GNU General Public License |
4006 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
4007 | |
4008 | -import cloudinit.util as util |
4009 | -import os.path |
4010 | -import subprocess |
4011 | -import traceback |
4012 | - |
4013 | - |
4014 | -def apply_locale(locale, cfgfile): |
4015 | - if os.path.exists('/usr/sbin/locale-gen'): |
4016 | - subprocess.Popen(['locale-gen', locale]).communicate() |
4017 | - if os.path.exists('/usr/sbin/update-locale'): |
4018 | - subprocess.Popen(['update-locale', locale]).communicate() |
4019 | - |
4020 | - util.render_to_file('default-locale', cfgfile, {'locale': locale}) |
4021 | - |
4022 | - |
4023 | -def handle(_name, cfg, cloud, log, args): |
4024 | +from cloudinit import util |
4025 | + |
4026 | + |
4027 | +def handle(name, cfg, cloud, log, args): |
4028 | if len(args) != 0: |
4029 | locale = args[0] |
4030 | else: |
4031 | locale = util.get_cfg_option_str(cfg, "locale", cloud.get_locale()) |
4032 | |
4033 | - locale_cfgfile = util.get_cfg_option_str(cfg, "locale_configfile", |
4034 | - "/etc/default/locale") |
4035 | - |
4036 | if not locale: |
4037 | + log.debug(("Skipping module named %s, " |
4038 | + "no 'locale' configuration found"), name) |
4039 | return |
4040 | |
4041 | - log.debug("setting locale to %s" % locale) |
4042 | - |
4043 | - try: |
4044 | - apply_locale(locale, locale_cfgfile) |
4045 | - except Exception as e: |
4046 | - log.debug(traceback.format_exc(e)) |
4047 | - raise Exception("failed to apply locale %s" % locale) |
4048 | + log.debug("Setting locale to %s", locale) |
4049 | + locale_cfgfile = util.get_cfg_option_str(cfg, "locale_configfile") |
4050 | + cloud.distro.apply_locale(locale, locale_cfgfile) |
4051 | |
4052 | === modified file 'cloudinit/config/cc_mcollective.py' |
4053 | --- cloudinit/CloudConfig/cc_mcollective.py 2012-01-18 14:07:33 +0000 |
4054 | +++ cloudinit/config/cc_mcollective.py 2012-07-06 21:16:18 +0000 |
4055 | @@ -19,81 +19,73 @@ |
4056 | # You should have received a copy of the GNU General Public License |
4057 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
4058 | |
4059 | -import os |
4060 | -import subprocess |
4061 | -import StringIO |
4062 | -import ConfigParser |
4063 | -import cloudinit.CloudConfig as cc |
4064 | -import cloudinit.util as util |
4065 | - |
4066 | -pubcert_file = "/etc/mcollective/ssl/server-public.pem" |
4067 | -pricert_file = "/etc/mcollective/ssl/server-private.pem" |
4068 | - |
4069 | - |
4070 | -# Our fake header section |
4071 | -class FakeSecHead(object): |
4072 | - def __init__(self, fp): |
4073 | - self.fp = fp |
4074 | - self.sechead = '[nullsection]\n' |
4075 | - |
4076 | - def readline(self): |
4077 | - if self.sechead: |
4078 | - try: |
4079 | - return self.sechead |
4080 | - finally: |
4081 | - self.sechead = None |
4082 | - else: |
4083 | - return self.fp.readline() |
4084 | - |
4085 | - |
4086 | -def handle(_name, cfg, _cloud, _log, _args): |
4087 | +from StringIO import StringIO |
4088 | + |
4089 | +# Used since this can maintain comments |
4090 | +# and doesn't need a top level section |
4091 | +from configobj import ConfigObj |
4092 | + |
4093 | +from cloudinit import util |
4094 | + |
4095 | +PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem" |
4096 | +PRICERT_FILE = "/etc/mcollective/ssl/server-private.pem" |
4097 | + |
4098 | + |
4099 | +def handle(name, cfg, cloud, log, _args): |
4100 | + |
4101 | # If there isn't a mcollective key in the configuration don't do anything |
4102 | if 'mcollective' not in cfg: |
4103 | + log.debug(("Skipping module named %s, " |
4104 | + "no 'mcollective' key in configuration"), name) |
4105 | return |
4106 | + |
4107 | mcollective_cfg = cfg['mcollective'] |
4108 | + |
4109 | # Start by installing the mcollective package ... |
4110 | - cc.install_packages(("mcollective",)) |
4111 | + cloud.distro.install_packages(("mcollective",)) |
4112 | |
4113 | # ... and then update the mcollective configuration |
4114 | if 'conf' in mcollective_cfg: |
4115 | - # Create object for reading server.cfg values |
4116 | - mcollective_config = ConfigParser.ConfigParser() |
4117 | - # Read server.cfg values from original file in order to be able to mix |
4118 | - # the rest up |
4119 | - mcollective_config.readfp(FakeSecHead(open('/etc/mcollective/' |
4120 | - 'server.cfg'))) |
4121 | - for cfg_name, cfg in mcollective_cfg['conf'].iteritems(): |
4122 | + # Read server.cfg values from the |
4123 | + # original file in order to be able to mix the rest up |
4124 | + server_cfg_fn = cloud.paths.join(True, '/etc/mcollective/server.cfg') |
4125 | + mcollective_config = ConfigObj(server_cfg_fn) |
4126 | + # See: http://tiny.cc/jh9agw |
4127 | + for (cfg_name, cfg) in mcollective_cfg['conf'].iteritems(): |
4128 | if cfg_name == 'public-cert': |
4129 | - util.write_file(pubcert_file, cfg, mode=0644) |
4130 | - mcollective_config.set(cfg_name, |
4131 | - 'plugin.ssl_server_public', pubcert_file) |
4132 | - mcollective_config.set(cfg_name, 'securityprovider', 'ssl') |
4133 | + pubcert_fn = cloud.paths.join(True, PUBCERT_FILE) |
4134 | + util.write_file(pubcert_fn, cfg, mode=0644) |
4135 | + mcollective_config['plugin.ssl_server_public'] = pubcert_fn |
4136 | + mcollective_config['securityprovider'] = 'ssl' |
4137 | elif cfg_name == 'private-cert': |
4138 | - util.write_file(pricert_file, cfg, mode=0600) |
4139 | - mcollective_config.set(cfg_name, |
4140 | - 'plugin.ssl_server_private', pricert_file) |
4141 | - mcollective_config.set(cfg_name, 'securityprovider', 'ssl') |
4142 | + pricert_fn = cloud.paths.join(True, PRICERT_FILE) |
4143 | + util.write_file(pricert_fn, cfg, mode=0600) |
4144 | + mcollective_config['plugin.ssl_server_private'] = pricert_fn |
4145 | + mcollective_config['securityprovider'] = 'ssl' |
4146 | else: |
4147 | - # Iterate throug the config items, we'll use ConfigParser.set |
4148 | - # to overwrite or create new items as needed |
4149 | - for o, v in cfg.iteritems(): |
4150 | - mcollective_config.set(cfg_name, o, v) |
4151 | + if isinstance(cfg, (basestring, str)): |
4152 | + # Just set it in the 'main' section |
4153 | + mcollective_config[cfg_name] = cfg |
4154 | + elif isinstance(cfg, (dict)): |
4155 | +                # Iterate through the config items, create a section |
4156 | +                # if it is needed and then add and/or create items as needed |
4157 | + if cfg_name not in mcollective_config.sections: |
4158 | + mcollective_config[cfg_name] = {} |
4159 | + for (o, v) in cfg.iteritems(): |
4160 | + mcollective_config[cfg_name][o] = v |
4161 | + else: |
4162 | + # Otherwise just try to convert it to a string |
4163 | + mcollective_config[cfg_name] = str(cfg) |
4164 | # We got all our config as wanted we'll rename |
4165 | # the previous server.cfg and create our new one |
4166 | - os.rename('/etc/mcollective/server.cfg', |
4167 | - '/etc/mcollective/server.cfg.old') |
4168 | - outputfile = StringIO.StringIO() |
4169 | - mcollective_config.write(outputfile) |
4170 | - # Now we got the whole file, write to disk except first line |
4171 | - # Note below, that we've just used ConfigParser because it generally |
4172 | - # works. Below, we remove the initial 'nullsection' header |
4173 | - # and then change 'key = value' to 'key: value'. The global |
4174 | - # search and replace of '=' with ':' could be problematic though. |
4175 | - # this most likely needs fixing. |
4176 | - util.write_file('/etc/mcollective/server.cfg', |
4177 | - outputfile.getvalue().replace('[nullsection]\n', '').replace(' =', |
4178 | - ':'), |
4179 | - mode=0644) |
4180 | + old_fn = cloud.paths.join(False, '/etc/mcollective/server.cfg.old') |
4181 | + util.rename(server_cfg_fn, old_fn) |
4182 | + # Now we got the whole file, write to disk... |
4183 | + contents = StringIO() |
4184 | + mcollective_config.write(contents) |
4185 | + contents = contents.getvalue() |
4186 | + server_cfg_rw = cloud.paths.join(False, '/etc/mcollective/server.cfg') |
4187 | + util.write_file(server_cfg_rw, contents, mode=0644) |
4188 | |
4189 | # Start mcollective |
4190 | - subprocess.check_call(['service', 'mcollective', 'start']) |
4191 | + util.subp(['service', 'mcollective', 'start'], capture=False) |
4192 | |
4193 | === modified file 'cloudinit/config/cc_mounts.py' |
4194 | --- cloudinit/CloudConfig/cc_mounts.py 2012-01-18 14:07:33 +0000 |
4195 | +++ cloudinit/config/cc_mounts.py 2012-07-06 21:16:18 +0000 |
4196 | @@ -18,10 +18,16 @@ |
4197 | # You should have received a copy of the GNU General Public License |
4198 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
4199 | |
4200 | -import cloudinit.util as util |
4201 | -import os |
4202 | +from string import whitespace # pylint: disable=W0402 |
4203 | + |
4204 | import re |
4205 | -from string import whitespace # pylint: disable=W0402 |
4206 | + |
4207 | +from cloudinit import util |
4208 | + |
4209 | +# Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1 |
4210 | +SHORTNAME_FILTER = r"^[x]{0,1}[shv]d[a-z][0-9]*$" |
4211 | +SHORTNAME = re.compile(SHORTNAME_FILTER) |
4212 | +WS = re.compile("[%s]+" % (whitespace)) |
4213 | |
4214 | |
4215 | def is_mdname(name): |
4216 | @@ -49,38 +55,46 @@ |
4217 | if "mounts" in cfg: |
4218 | cfgmnt = cfg["mounts"] |
4219 | |
4220 | - # shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1 |
4221 | - shortname_filter = r"^[x]{0,1}[shv]d[a-z][0-9]*$" |
4222 | - shortname = re.compile(shortname_filter) |
4223 | - |
4224 | for i in range(len(cfgmnt)): |
4225 | # skip something that wasn't a list |
4226 | if not isinstance(cfgmnt[i], list): |
4227 | + log.warn("Mount option %s not a list, got a %s instead", |
4228 | + (i + 1), util.obj_name(cfgmnt[i])) |
4229 | continue |
4230 | |
4231 | + startname = str(cfgmnt[i][0]) |
4232 | + log.debug("Attempting to determine the real name of %s", startname) |
4233 | + |
4234 | # workaround, allow user to specify 'ephemeral' |
4235 | # rather than more ec2 correct 'ephemeral0' |
4236 | - if cfgmnt[i][0] == "ephemeral": |
4237 | + if startname == "ephemeral": |
4238 | cfgmnt[i][0] = "ephemeral0" |
4239 | + log.debug(("Adjusted mount option %s " |
4240 | + "name from ephemeral to ephemeral0"), (i + 1)) |
4241 | |
4242 | - if is_mdname(cfgmnt[i][0]): |
4243 | - newname = cloud.device_name_to_device(cfgmnt[i][0]) |
4244 | + if is_mdname(startname): |
4245 | + newname = cloud.device_name_to_device(startname) |
4246 | if not newname: |
4247 | - log.debug("ignoring nonexistant named mount %s" % cfgmnt[i][0]) |
4248 | + log.debug("Ignoring nonexistant named mount %s", startname) |
4249 | cfgmnt[i][1] = None |
4250 | else: |
4251 | - if newname.startswith("/"): |
4252 | - cfgmnt[i][0] = newname |
4253 | - else: |
4254 | - cfgmnt[i][0] = "/dev/%s" % newname |
4255 | + renamed = newname |
4256 | + if not newname.startswith("/"): |
4257 | + renamed = "/dev/%s" % newname |
4258 | + cfgmnt[i][0] = renamed |
4259 | + log.debug("Mapped metadata name %s to %s", startname, renamed) |
4260 | else: |
4261 | - if shortname.match(cfgmnt[i][0]): |
4262 | - cfgmnt[i][0] = "/dev/%s" % cfgmnt[i][0] |
4263 | + if SHORTNAME.match(startname): |
4264 | + renamed = "/dev/%s" % startname |
4265 | + log.debug("Mapped shortname name %s to %s", startname, renamed) |
4266 | + cfgmnt[i][0] = renamed |
4267 | |
4268 | # in case the user did not quote a field (likely fs-freq, fs_passno) |
4269 | # but do not convert None to 'None' (LP: #898365) |
4270 | for j in range(len(cfgmnt[i])): |
4271 | - if isinstance(cfgmnt[i][j], int): |
4272 | + if j is None: |
4273 | + continue |
4274 | + else: |
4275 | cfgmnt[i][j] = str(cfgmnt[i][j]) |
4276 | |
4277 | for i in range(len(cfgmnt)): |
4278 | @@ -102,14 +116,18 @@ |
4279 | # for each of the "default" mounts, add them only if no other |
4280 | # entry has the same device name |
4281 | for defmnt in defmnts: |
4282 | - devname = cloud.device_name_to_device(defmnt[0]) |
4283 | + startname = defmnt[0] |
4284 | + devname = cloud.device_name_to_device(startname) |
4285 | if devname is None: |
4286 | + log.debug("Ignoring nonexistant named default mount %s", startname) |
4287 | continue |
4288 | if devname.startswith("/"): |
4289 | defmnt[0] = devname |
4290 | else: |
4291 | defmnt[0] = "/dev/%s" % devname |
4292 | |
4293 | + log.debug("Mapped default device %s to %s", startname, defmnt[0]) |
4294 | + |
4295 | cfgmnt_has = False |
4296 | for cfgm in cfgmnt: |
4297 | if cfgm[0] == defmnt[0]: |
4298 | @@ -117,14 +135,22 @@ |
4299 | break |
4300 | |
4301 | if cfgmnt_has: |
4302 | + log.debug(("Not including %s, already" |
4303 | + " previously included"), startname) |
4304 | continue |
4305 | cfgmnt.append(defmnt) |
4306 | |
4307 | # now, each entry in the cfgmnt list has all fstab values |
4308 | # if the second field is None (not the string, the value) we skip it |
4309 | - actlist = [x for x in cfgmnt if x[1] is not None] |
4310 | + actlist = [] |
4311 | + for x in cfgmnt: |
4312 | + if x[1] is None: |
4313 | + log.debug("Skipping non-existent device named %s", x[0]) |
4314 | + else: |
4315 | + actlist.append(x) |
4316 | |
4317 | if len(actlist) == 0: |
4318 | + log.debug("No modifications to fstab needed.") |
4319 | return |
4320 | |
4321 | comment = "comment=cloudconfig" |
4322 | @@ -133,7 +159,7 @@ |
4323 | dirs = [] |
4324 | for line in actlist: |
4325 | # write 'comment' in the fs_mntops, entry, claiming this |
4326 | - line[3] = "%s,comment=cloudconfig" % line[3] |
4327 | + line[3] = "%s,%s" % (line[3], comment) |
4328 | if line[2] == "swap": |
4329 | needswap = True |
4330 | if line[1].startswith("/"): |
4331 | @@ -141,11 +167,10 @@ |
4332 | cc_lines.append('\t'.join(line)) |
4333 | |
4334 | fstab_lines = [] |
4335 | - fstab = open("/etc/fstab", "r+") |
4336 | - ws = re.compile("[%s]+" % whitespace) |
4337 | - for line in fstab.read().splitlines(): |
4338 | + fstab = util.load_file(cloud.paths.join(True, "/etc/fstab")) |
4339 | + for line in fstab.splitlines(): |
4340 | try: |
4341 | - toks = ws.split(line) |
4342 | + toks = WS.split(line) |
4343 | if toks[3].find(comment) != -1: |
4344 | continue |
4345 | except: |
4346 | @@ -153,27 +178,23 @@ |
4347 | fstab_lines.append(line) |
4348 | |
4349 | fstab_lines.extend(cc_lines) |
4350 | - |
4351 | - fstab.seek(0) |
4352 | - fstab.write("%s\n" % '\n'.join(fstab_lines)) |
4353 | - fstab.truncate() |
4354 | - fstab.close() |
4355 | + contents = "%s\n" % ('\n'.join(fstab_lines)) |
4356 | + util.write_file(cloud.paths.join(False, "/etc/fstab"), contents) |
4357 | |
4358 | if needswap: |
4359 | try: |
4360 | util.subp(("swapon", "-a")) |
4361 | except: |
4362 | - log.warn("Failed to enable swap") |
4363 | + util.logexc(log, "Activating swap via 'swapon -a' failed") |
4364 | |
4365 | for d in dirs: |
4366 | - if os.path.exists(d): |
4367 | - continue |
4368 | + real_dir = cloud.paths.join(False, d) |
4369 | try: |
4370 | - os.makedirs(d) |
4371 | + util.ensure_dir(real_dir) |
4372 | except: |
4373 | - log.warn("Failed to make '%s' config-mount\n", d) |
4374 | + util.logexc(log, "Failed to make '%s' config-mount", d) |
4375 | |
4376 | try: |
4377 | util.subp(("mount", "-a")) |
4378 | except: |
4379 | - log.warn("'mount -a' failed") |
4380 | + util.logexc(log, "Activating mounts via 'mount -a' failed") |
4381 | |
4382 | === modified file 'cloudinit/config/cc_phone_home.py' |
4383 | --- cloudinit/CloudConfig/cc_phone_home.py 2012-01-18 14:07:33 +0000 |
4384 | +++ cloudinit/config/cc_phone_home.py 2012-07-06 21:16:18 +0000 |
4385 | @@ -17,13 +17,22 @@ |
4386 | # |
4387 | # You should have received a copy of the GNU General Public License |
4388 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
4389 | -from cloudinit.CloudConfig import per_instance |
4390 | -import cloudinit.util as util |
4391 | -from time import sleep |
4392 | - |
4393 | -frequency = per_instance |
4394 | -post_list_all = ['pub_key_dsa', 'pub_key_rsa', 'pub_key_ecdsa', 'instance_id', |
4395 | - 'hostname'] |
4396 | + |
4397 | +from cloudinit import templater |
4398 | +from cloudinit import url_helper as uhelp |
4399 | +from cloudinit import util |
4400 | + |
4401 | +from cloudinit.settings import PER_INSTANCE |
4402 | + |
4403 | +frequency = PER_INSTANCE |
4404 | + |
4405 | +POST_LIST_ALL = [ |
4406 | + 'pub_key_dsa', |
4407 | + 'pub_key_rsa', |
4408 | + 'pub_key_ecdsa', |
4409 | + 'instance_id', |
4410 | + 'hostname' |
4411 | +] |
4412 | |
4413 | |
4414 | # phone_home: |
4415 | @@ -35,29 +44,33 @@ |
4416 | # url: http://my.foo.bar/$INSTANCE_ID/ |
4417 | # post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id |
4418 | # |
4419 | -def handle(_name, cfg, cloud, log, args): |
4420 | +def handle(name, cfg, cloud, log, args): |
4421 | if len(args) != 0: |
4422 | ph_cfg = util.read_conf(args[0]) |
4423 | else: |
4424 | if not 'phone_home' in cfg: |
4425 | + log.debug(("Skipping module named %s, " |
4426 | + "no 'phone_home' configuration found"), name) |
4427 | return |
4428 | ph_cfg = cfg['phone_home'] |
4429 | |
4430 | if 'url' not in ph_cfg: |
4431 | - log.warn("no 'url' token in phone_home") |
4432 | + log.warn(("Skipping module named %s, " |
4433 | + "no 'url' found in 'phone_home' configuration"), name) |
4434 | return |
4435 | |
4436 | url = ph_cfg['url'] |
4437 | post_list = ph_cfg.get('post', 'all') |
4438 | - tries = ph_cfg.get('tries', 10) |
4439 | + tries = ph_cfg.get('tries') |
4440 | try: |
4441 | tries = int(tries) |
4442 | except: |
4443 | - log.warn("tries is not an integer. using 10") |
4444 | tries = 10 |
4445 | + util.logexc(log, ("Configuration entry 'tries'" |
4446 | + " is not an integer, using %s instead"), tries) |
4447 | |
4448 | if post_list == "all": |
4449 | - post_list = post_list_all |
4450 | + post_list = POST_LIST_ALL |
4451 | |
4452 | all_keys = {} |
4453 | all_keys['instance_id'] = cloud.get_instance_id() |
4454 | @@ -69,38 +82,37 @@ |
4455 | 'pub_key_ecdsa': '/etc/ssh/ssh_host_ecdsa_key.pub', |
4456 | } |
4457 | |
4458 | - for n, path in pubkeys.iteritems(): |
4459 | + for (n, path) in pubkeys.iteritems(): |
4460 | try: |
4461 | - fp = open(path, "rb") |
4462 | - all_keys[n] = fp.read() |
4463 | - fp.close() |
4464 | + all_keys[n] = util.load_file(cloud.paths.join(True, path)) |
4465 | except: |
4466 | - log.warn("%s: failed to open in phone_home" % path) |
4467 | + util.logexc(log, ("%s: failed to open, can not" |
4468 | + " phone home that data"), path) |
4469 | |
4470 | submit_keys = {} |
4471 | for k in post_list: |
4472 | if k in all_keys: |
4473 | submit_keys[k] = all_keys[k] |
4474 | else: |
4475 | - submit_keys[k] = "N/A" |
4476 | - log.warn("requested key %s from 'post' list not available") |
4477 | - |
4478 | - url = util.render_string(url, {'INSTANCE_ID': all_keys['instance_id']}) |
4479 | - |
4480 | - null_exc = object() |
4481 | - last_e = null_exc |
4482 | - for i in range(0, tries): |
4483 | - try: |
4484 | - util.readurl(url, submit_keys) |
4485 | - log.debug("succeeded submit to %s on try %i" % (url, i + 1)) |
4486 | - return |
4487 | - except Exception as e: |
4488 | - log.debug("failed to post to %s on try %i" % (url, i + 1)) |
4489 | - last_e = e |
4490 | - sleep(3) |
4491 | - |
4492 | - log.warn("failed to post to %s in %i tries" % (url, tries)) |
4493 | - if last_e is not null_exc: |
4494 | - raise(last_e) |
4495 | - |
4496 | - return |
4497 | + submit_keys[k] = None |
4498 | + log.warn(("Requested key %s from 'post'" |
4499 | + " configuration list not available"), k) |
4500 | + |
4501 | +    # Get them ready to be posted |
4502 | + real_submit_keys = {} |
4503 | + for (k, v) in submit_keys.iteritems(): |
4504 | + if v is None: |
4505 | + real_submit_keys[k] = 'N/A' |
4506 | + else: |
4507 | + real_submit_keys[k] = str(v) |
4508 | + |
4509 | +    # In case the url is parameterized |
4510 | + url_params = { |
4511 | + 'INSTANCE_ID': all_keys['instance_id'], |
4512 | + } |
4513 | + url = templater.render_string(url, url_params) |
4514 | + try: |
4515 | + uhelp.readurl(url, data=real_submit_keys, retries=tries, sec_between=3) |
4516 | + except: |
4517 | + util.logexc(log, ("Failed to post phone home data to" |
4518 | + " %s in %s tries"), url, tries) |
4519 | |
4520 | === modified file 'cloudinit/config/cc_puppet.py' |
4521 | --- cloudinit/CloudConfig/cc_puppet.py 2012-01-18 14:07:33 +0000 |
4522 | +++ cloudinit/config/cc_puppet.py 2012-07-06 21:16:18 +0000 |
4523 | @@ -18,91 +18,96 @@ |
4524 | # You should have received a copy of the GNU General Public License |
4525 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
4526 | |
4527 | +from StringIO import StringIO |
4528 | + |
4529 | import os |
4530 | -import os.path |
4531 | import pwd |
4532 | import socket |
4533 | -import subprocess |
4534 | -import StringIO |
4535 | -import ConfigParser |
4536 | -import cloudinit.CloudConfig as cc |
4537 | -import cloudinit.util as util |
4538 | - |
4539 | - |
4540 | -def handle(_name, cfg, cloud, log, _args): |
4541 | + |
4542 | +from cloudinit import helpers |
4543 | +from cloudinit import util |
4544 | + |
4545 | + |
4546 | +def handle(name, cfg, cloud, log, _args): |
4547 | # If there isn't a puppet key in the configuration don't do anything |
4548 | if 'puppet' not in cfg: |
4549 | + log.debug(("Skipping module named %s," |
4550 | + " no 'puppet' configuration found"), name) |
4551 | return |
4552 | + |
4553 | puppet_cfg = cfg['puppet'] |
4554 | + |
4555 | # Start by installing the puppet package ... |
4556 | - cc.install_packages(("puppet",)) |
4557 | + cloud.distro.install_packages(["puppet"]) |
4558 | |
4559 | # ... and then update the puppet configuration |
4560 | if 'conf' in puppet_cfg: |
4561 | # Add all sections from the conf object to puppet.conf |
4562 | - puppet_conf_fh = open('/etc/puppet/puppet.conf', 'r') |
4563 | + puppet_conf_fn = cloud.paths.join(True, '/etc/puppet/puppet.conf') |
4564 | + contents = util.load_file(puppet_conf_fn) |
4565 | # Create object for reading puppet.conf values |
4566 | - puppet_config = ConfigParser.ConfigParser() |
4567 | + puppet_config = helpers.DefaultingConfigParser() |
4568 | # Read puppet.conf values from original file in order to be able to |
4569 | - # mix the rest up |
4570 | - puppet_config.readfp(StringIO.StringIO(''.join(i.lstrip() for i in |
4571 | - puppet_conf_fh.readlines()))) |
4572 | - # Close original file, no longer needed |
4573 | - puppet_conf_fh.close() |
4574 | - for cfg_name, cfg in puppet_cfg['conf'].iteritems(): |
4575 | - # ca_cert configuration is a special case |
4576 | - # Dump the puppetmaster ca certificate in the correct place |
4577 | + # mix the rest up. First clean them up (TODO is this really needed??) |
4578 | + cleaned_lines = [i.lstrip() for i in contents.splitlines()] |
4579 | + cleaned_contents = '\n'.join(cleaned_lines) |
4580 | + puppet_config.readfp(StringIO(cleaned_contents), |
4581 | + filename=puppet_conf_fn) |
4582 | + for (cfg_name, cfg) in puppet_cfg['conf'].iteritems(): |
4583 | + # Cert configuration is a special case |
4584 | + # Dump the puppet master ca certificate in the correct place |
4585 | if cfg_name == 'ca_cert': |
4586 | # Puppet ssl sub-directory isn't created yet |
4587 | # Create it with the proper permissions and ownership |
4588 | - os.makedirs('/var/lib/puppet/ssl') |
4589 | - os.chmod('/var/lib/puppet/ssl', 0771) |
4590 | - os.chown('/var/lib/puppet/ssl', |
4591 | - pwd.getpwnam('puppet').pw_uid, 0) |
4592 | - os.makedirs('/var/lib/puppet/ssl/certs/') |
4593 | - os.chown('/var/lib/puppet/ssl/certs/', |
4594 | - pwd.getpwnam('puppet').pw_uid, 0) |
4595 | - ca_fh = open('/var/lib/puppet/ssl/certs/ca.pem', 'w') |
4596 | - ca_fh.write(cfg) |
4597 | - ca_fh.close() |
4598 | - os.chown('/var/lib/puppet/ssl/certs/ca.pem', |
4599 | - pwd.getpwnam('puppet').pw_uid, 0) |
4600 | - util.restorecon_if_possible('/var/lib/puppet', recursive=True) |
4601 | + pp_ssl_dir = cloud.paths.join(False, '/var/lib/puppet/ssl') |
4602 | + util.ensure_dir(pp_ssl_dir, 0771) |
4603 | + util.chownbyid(pp_ssl_dir, |
4604 | + pwd.getpwnam('puppet').pw_uid, 0) |
4605 | + pp_ssl_certs = cloud.paths.join(False, |
4606 | + '/var/lib/puppet/ssl/certs/') |
4607 | + util.ensure_dir(pp_ssl_certs) |
4608 | + util.chownbyid(pp_ssl_certs, |
4609 | + pwd.getpwnam('puppet').pw_uid, 0) |
4610 | + pp_ssl_ca_certs = cloud.paths.join(False, |
4611 | + ('/var/lib/puppet/' |
4612 | + 'ssl/certs/ca.pem')) |
4613 | + util.write_file(pp_ssl_ca_certs, cfg) |
4614 | + util.chownbyid(pp_ssl_ca_certs, |
4615 | + pwd.getpwnam('puppet').pw_uid, 0) |
4616 | else: |
4617 | - #puppet_conf_fh.write("\n[%s]\n" % (cfg_name)) |
4618 | - # If puppet.conf already has this section we don't want to |
4619 | - # write it again |
4620 | - if puppet_config.has_section(cfg_name) == False: |
4621 | - puppet_config.add_section(cfg_name) |
4622 | # Iterate throug the config items, we'll use ConfigParser.set |
4623 | # to overwrite or create new items as needed |
4624 | - for o, v in cfg.iteritems(): |
4625 | + for (o, v) in cfg.iteritems(): |
4626 | if o == 'certname': |
4627 | # Expand %f as the fqdn |
4628 | + # TODO should this use the cloud fqdn?? |
4629 | v = v.replace("%f", socket.getfqdn()) |
4630 | # Expand %i as the instance id |
4631 | - v = v.replace("%i", |
4632 | - cloud.datasource.get_instance_id()) |
4633 | - # certname needs to be downcase |
4634 | + v = v.replace("%i", cloud.get_instance_id()) |
4635 | + # certname needs to be downcased |
4636 | v = v.lower() |
4637 | puppet_config.set(cfg_name, o, v) |
4638 | - #puppet_conf_fh.write("%s=%s\n" % (o, v)) |
4639 | # We got all our config as wanted we'll rename |
4640 | # the previous puppet.conf and create our new one |
4641 | - os.rename('/etc/puppet/puppet.conf', '/etc/puppet/puppet.conf.old') |
4642 | - with open('/etc/puppet/puppet.conf', 'wb') as configfile: |
4643 | - puppet_config.write(configfile) |
4644 | - util.restorecon_if_possible('/etc/puppet/puppet.conf') |
4645 | + conf_old_fn = cloud.paths.join(False, |
4646 | + '/etc/puppet/puppet.conf.old') |
4647 | + util.rename(puppet_conf_fn, conf_old_fn) |
4648 | + puppet_conf_rw = cloud.paths.join(False, '/etc/puppet/puppet.conf') |
4649 | + util.write_file(puppet_conf_rw, puppet_config.stringify()) |
4650 | + |
4651 | # Set puppet to automatically start |
4652 | if os.path.exists('/etc/default/puppet'): |
4653 | - subprocess.check_call(['sed', '-i', |
4654 | - '-e', 's/^START=.*/START=yes/', |
4655 | - '/etc/default/puppet']) |
4656 | + util.subp(['sed', '-i', |
4657 | + '-e', 's/^START=.*/START=yes/', |
4658 | + '/etc/default/puppet'], capture=False) |
4659 | elif os.path.exists('/bin/systemctl'): |
4660 | - subprocess.check_call(['/bin/systemctl', 'enable', 'puppet.service']) |
4661 | + util.subp(['/bin/systemctl', 'enable', 'puppet.service'], |
4662 | + capture=False) |
4663 | elif os.path.exists('/sbin/chkconfig'): |
4664 | - subprocess.check_call(['/sbin/chkconfig', 'puppet', 'on']) |
4665 | + util.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False) |
4666 | else: |
4667 | - log.warn("Do not know how to enable puppet service on this system") |
4668 | + log.warn(("Sorry we do not know how to enable" |
4669 | + " puppet services on this system")) |
4670 | + |
4671 | # Start puppetd |
4672 | - subprocess.check_call(['service', 'puppet', 'start']) |
4673 | + util.subp(['service', 'puppet', 'start'], capture=False) |
4674 | |
4675 | === modified file 'cloudinit/config/cc_resizefs.py' |
4676 | --- cloudinit/CloudConfig/cc_resizefs.py 2012-03-21 20:41:50 +0000 |
4677 | +++ cloudinit/config/cc_resizefs.py 2012-07-06 21:16:18 +0000 |
4678 | @@ -18,91 +18,123 @@ |
4679 | # You should have received a copy of the GNU General Public License |
4680 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
4681 | |
4682 | -import cloudinit.util as util |
4683 | -import subprocess |
4684 | import os |
4685 | import stat |
4686 | -import sys |
4687 | import time |
4688 | -import tempfile |
4689 | -from cloudinit.CloudConfig import per_always |
4690 | - |
4691 | -frequency = per_always |
4692 | - |
4693 | - |
4694 | -def handle(_name, cfg, _cloud, log, args): |
4695 | - if len(args) != 0: |
4696 | - resize_root = False |
4697 | - if str(args[0]).lower() in ['true', '1', 'on', 'yes']: |
4698 | - resize_root = True |
4699 | - else: |
4700 | - resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True) |
4701 | - |
4702 | - if str(resize_root).lower() in ['false', '0']: |
4703 | - return |
4704 | - |
4705 | - # we use mktemp rather than mkstemp because early in boot nothing |
4706 | - # else should be able to race us for this, and we need to mknod. |
4707 | - devpth = tempfile.mktemp(prefix="cloudinit.resizefs.", dir="/run") |
4708 | - |
4709 | + |
4710 | +from cloudinit import util |
4711 | +from cloudinit.settings import PER_ALWAYS |
4712 | + |
4713 | +frequency = PER_ALWAYS |
4714 | + |
4715 | +RESIZE_FS_PREFIXES_CMDS = [ |
4716 | + ('ext', 'resize2fs'), |
4717 | + ('xfs', 'xfs_growfs'), |
4718 | +] |
4719 | + |
4720 | + |
4721 | +def nodeify_path(devpth, where, log): |
4722 | try: |
4723 | - st_dev = os.stat("/").st_dev |
4724 | + st_dev = os.stat(where).st_dev |
4725 | dev = os.makedev(os.major(st_dev), os.minor(st_dev)) |
4726 | os.mknod(devpth, 0400 | stat.S_IFBLK, dev) |
4727 | + return st_dev |
4728 | except: |
4729 | if util.is_container(): |
4730 | - log.debug("inside container, ignoring mknod failure in resizefs") |
4731 | + log.debug("Inside container, ignoring mknod failure in resizefs") |
4732 | return |
4733 | - log.warn("Failed to make device node to resize /") |
4734 | + log.warn("Failed to make device node to resize %s at %s", |
4735 | + where, devpth) |
4736 | raise |
4737 | |
4738 | - cmd = ['blkid', '-c', '/dev/null', '-sTYPE', '-ovalue', devpth] |
4739 | + |
4740 | +def get_fs_type(st_dev, path, log): |
4741 | try: |
4742 | - (fstype, _err) = util.subp(cmd) |
4743 | - except subprocess.CalledProcessError as e: |
4744 | - log.warn("Failed to get filesystem type of maj=%s, min=%s via: %s" % |
4745 | - (os.major(st_dev), os.minor(st_dev), cmd)) |
4746 | - log.warn("output=%s\nerror=%s\n", e.output[0], e.output[1]) |
4747 | - os.unlink(devpth) |
4748 | + dev_entries = util.find_devs_with(tag='TYPE', oformat='value', |
4749 | + no_cache=True, path=path) |
4750 | + if not dev_entries: |
4751 | + return None |
4752 | + return dev_entries[0].strip() |
4753 | + except util.ProcessExecutionError: |
4754 | + util.logexc(log, ("Failed to get filesystem type" |
4755 | + " of maj=%s, min=%s for path %s"), |
4756 | + os.major(st_dev), os.minor(st_dev), path) |
4757 | raise |
4758 | |
4759 | - if str(fstype).startswith("ext"): |
4760 | - resize_cmd = ['resize2fs', devpth] |
4761 | - elif fstype == "xfs": |
4762 | - resize_cmd = ['xfs_growfs', devpth] |
4763 | + |
4764 | +def handle(name, cfg, cloud, log, args): |
4765 | + if len(args) != 0: |
4766 | + resize_root = args[0] |
4767 | else: |
4768 | - os.unlink(devpth) |
4769 | - log.debug("not resizing unknown filesystem %s" % fstype) |
4770 | + resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True) |
4771 | + |
4772 | + if not util.translate_bool(resize_root): |
4773 | + log.debug("Skipping module named %s, resizing disabled", name) |
4774 | return |
4775 | |
4776 | + # TODO is the directory ok to be used?? |
4777 | + resize_root_d = util.get_cfg_option_str(cfg, "resize_rootfs_tmp", "/run") |
4778 | + resize_root_d = cloud.paths.join(False, resize_root_d) |
4779 | + util.ensure_dir(resize_root_d) |
4780 | + |
4781 | + # TODO: allow what is to be resized to be configurable?? |
4782 | + resize_what = cloud.paths.join(False, "/") |
4783 | + with util.ExtendedTemporaryFile(prefix="cloudinit.resizefs.", |
4784 | + dir=resize_root_d, delete=True) as tfh: |
4785 | + devpth = tfh.name |
4786 | + |
4787 | + # Delete the file so that mknod will work |
4788 | + # but don't change the file handle to know that its |
4789 | + # removed so that when a later call that recreates |
4790 | + # occurs this temporary file will still benefit from |
4791 | + # auto deletion |
4792 | + tfh.unlink_now() |
4793 | + |
4794 | + st_dev = nodeify_path(devpth, resize_what, log) |
4795 | + fs_type = get_fs_type(st_dev, devpth, log) |
4796 | + if not fs_type: |
4797 | + log.warn("Could not determine filesystem type of %s", resize_what) |
4798 | + return |
4799 | + |
4800 | + resizer = None |
4801 | + fstype_lc = fs_type.lower() |
4802 | + for (pfix, root_cmd) in RESIZE_FS_PREFIXES_CMDS: |
4803 | + if fstype_lc.startswith(pfix): |
4804 | + resizer = root_cmd |
4805 | + break |
4806 | + |
4807 | + if not resizer: |
4808 | + log.warn("Not resizing unknown filesystem type %s for %s", |
4809 | + fs_type, resize_what) |
4810 | + return |
4811 | + |
4812 | + log.debug("Resizing %s (%s) using %s", resize_what, fs_type, resizer) |
4813 | + resize_cmd = [resizer, devpth] |
4814 | + |
4815 | + if resize_root == "noblock": |
4816 | + # Fork to a child that will run |
4817 | + # the resize command |
4818 | + util.fork_cb(do_resize, resize_cmd, log) |
4819 | + # Don't delete the file now in the parent |
4820 | + tfh.delete = False |
4821 | + else: |
4822 | + do_resize(resize_cmd, log) |
4823 | + |
4824 | + action = 'Resized' |
4825 | if resize_root == "noblock": |
4826 | - fid = os.fork() |
4827 | - if fid == 0: |
4828 | - try: |
4829 | - do_resize(resize_cmd, devpth, log) |
4830 | - os._exit(0) # pylint: disable=W0212 |
4831 | - except Exception as exc: |
4832 | - sys.stderr.write("Failed: %s" % exc) |
4833 | - os._exit(1) # pylint: disable=W0212 |
4834 | - else: |
4835 | - do_resize(resize_cmd, devpth, log) |
4836 | - |
4837 | - log.debug("resizing root filesystem (type=%s, maj=%i, min=%i, val=%s)" % |
4838 | - (str(fstype).rstrip("\n"), os.major(st_dev), os.minor(st_dev), |
4839 | - resize_root)) |
4840 | - |
4841 | - return |
4842 | - |
4843 | - |
4844 | -def do_resize(resize_cmd, devpth, log): |
4845 | + action = 'Resizing (via forking)' |
4846 | + log.debug("%s root filesystem (type=%s, maj=%i, min=%i, val=%s)", |
4847 | + action, fs_type, os.major(st_dev), os.minor(st_dev), resize_root) |
4848 | + |
4849 | + |
4850 | +def do_resize(resize_cmd, log): |
4851 | + start = time.time() |
4852 | try: |
4853 | - start = time.time() |
4854 | util.subp(resize_cmd) |
4855 | - except subprocess.CalledProcessError as e: |
4856 | - log.warn("Failed to resize filesystem (%s)" % resize_cmd) |
4857 | - log.warn("output=%s\nerror=%s\n", e.output[0], e.output[1]) |
4858 | - os.unlink(devpth) |
4859 | + except util.ProcessExecutionError: |
4860 | + util.logexc(log, "Failed to resize filesystem (cmd=%s)", resize_cmd) |
4861 | raise |
4862 | - |
4863 | - os.unlink(devpth) |
4864 | - log.debug("resize took %s seconds" % (time.time() - start)) |
4865 | + tot_time = int(time.time() - start) |
4866 | + log.debug("Resizing took %s seconds", tot_time) |
4867 | + # TODO: Should we add a fsck check after this to make |
4868 | + # sure we didn't corrupt anything? |
4869 | |
4870 | === modified file 'cloudinit/config/cc_rightscale_userdata.py' |
4871 | --- cloudinit/CloudConfig/cc_rightscale_userdata.py 2012-01-18 14:07:33 +0000 |
4872 | +++ cloudinit/config/cc_rightscale_userdata.py 2012-07-06 21:16:18 +0000 |
4873 | @@ -35,44 +35,68 @@ |
4874 | ## |
4875 | ## |
4876 | |
4877 | -import cloudinit.util as util |
4878 | -from cloudinit.CloudConfig import per_instance |
4879 | -from cloudinit import get_ipath_cur |
4880 | +import os |
4881 | + |
4882 | +from cloudinit import url_helper as uhelp |
4883 | +from cloudinit import util |
4884 | +from cloudinit.settings import PER_INSTANCE |
4885 | + |
4886 | from urlparse import parse_qs |
4887 | |
4888 | -frequency = per_instance |
4889 | -my_name = "cc_rightscale_userdata" |
4890 | -my_hookname = 'CLOUD_INIT_REMOTE_HOOK' |
4891 | - |
4892 | - |
4893 | -def handle(_name, _cfg, cloud, log, _args): |
4894 | +frequency = PER_INSTANCE |
4895 | + |
4896 | +MY_NAME = "cc_rightscale_userdata" |
4897 | +MY_HOOKNAME = 'CLOUD_INIT_REMOTE_HOOK' |
4898 | + |
4899 | + |
4900 | +def handle(name, _cfg, cloud, log, _args): |
4901 | try: |
4902 | ud = cloud.get_userdata_raw() |
4903 | except: |
4904 | - log.warn("failed to get raw userdata in %s" % my_name) |
4905 | + log.warn("Failed to get raw userdata in module %s", name) |
4906 | return |
4907 | |
4908 | try: |
4909 | mdict = parse_qs(ud) |
4910 | - if not my_hookname in mdict: |
4911 | + if not mdict or not MY_HOOKNAME in mdict: |
4912 | + log.debug(("Skipping module %s, " |
4913 | + "did not find %s in parsed" |
4914 | + " raw userdata"), name, MY_HOOKNAME) |
4915 | return |
4916 | except: |
4917 | - log.warn("failed to urlparse.parse_qa(userdata_raw())") |
4918 | + util.logexc(log, ("Failed to parse query string %s" |
4919 | + " into a dictionary"), ud) |
4920 | raise |
4921 | |
4922 | - scripts_d = get_ipath_cur('scripts') |
4923 | - i = 0 |
4924 | - first_e = None |
4925 | - for url in mdict[my_hookname]: |
4926 | - fname = "%s/rightscale-%02i" % (scripts_d, i) |
4927 | - i = i + 1 |
4928 | + wrote_fns = [] |
4929 | + captured_excps = [] |
4930 | + |
4931 | + # These will eventually be then ran by the cc_scripts_user |
4932 | + # TODO: maybe this should just be a new user data handler?? |
4933 | + # Instead of a late module that acts like a user data handler? |
4934 | + scripts_d = cloud.get_ipath_cur('scripts') |
4935 | + urls = mdict[MY_HOOKNAME] |
4936 | + for (i, url) in enumerate(urls): |
4937 | + fname = os.path.join(scripts_d, "rightscale-%02i" % (i)) |
4938 | try: |
4939 | - content = util.readurl(url) |
4940 | - util.write_file(fname, content, mode=0700) |
4941 | + resp = uhelp.readurl(url) |
4942 | + # Ensure its a valid http response (and something gotten) |
4943 | + if resp.ok() and resp.contents: |
4944 | + util.write_file(fname, str(resp), mode=0700) |
4945 | + wrote_fns.append(fname) |
4946 | except Exception as e: |
4947 | - if not first_e: |
4948 | - first_e = None |
4949 | - log.warn("%s failed to read %s: %s" % (my_name, url, e)) |
4950 | - |
4951 | - if first_e: |
4952 | - raise(e) |
4953 | + captured_excps.append(e) |
4954 | + util.logexc(log, "%s failed to read %s and write %s", |
4955 | + MY_NAME, url, fname) |
4956 | + |
4957 | + if wrote_fns: |
4958 | + log.debug("Wrote out rightscale userdata to %s files", len(wrote_fns)) |
4959 | + |
4960 | + if len(wrote_fns) != len(urls): |
4961 | + skipped = len(urls) - len(wrote_fns) |
4962 | + log.debug("%s urls were skipped or failed", skipped) |
4963 | + |
4964 | + if captured_excps: |
4965 | + log.warn("%s failed with exceptions, re-raising the last one", |
4966 | + len(captured_excps)) |
4967 | + raise captured_excps[-1] |
4968 | |
4969 | === modified file 'cloudinit/config/cc_rsyslog.py' |
4970 | --- cloudinit/CloudConfig/cc_rsyslog.py 2012-01-18 14:07:33 +0000 |
4971 | +++ cloudinit/config/cc_rsyslog.py 2012-07-06 21:16:18 +0000 |
4972 | @@ -18,16 +18,15 @@ |
4973 | # You should have received a copy of the GNU General Public License |
4974 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
4975 | |
4976 | -import cloudinit |
4977 | -import logging |
4978 | -import cloudinit.util as util |
4979 | -import traceback |
4980 | +import os |
4981 | + |
4982 | +from cloudinit import util |
4983 | |
4984 | DEF_FILENAME = "20-cloud-config.conf" |
4985 | DEF_DIR = "/etc/rsyslog.d" |
4986 | |
4987 | |
4988 | -def handle(_name, cfg, _cloud, log, _args): |
4989 | +def handle(name, cfg, cloud, log, _args): |
4990 | # rsyslog: |
4991 | # - "*.* @@192.158.1.1" |
4992 | # - content: "*.* @@192.0.2.1:10514" |
4993 | @@ -37,17 +36,18 @@ |
4994 | |
4995 | # process 'rsyslog' |
4996 | if not 'rsyslog' in cfg: |
4997 | + log.debug(("Skipping module named %s," |
4998 | + " no 'rsyslog' key in configuration"), name) |
4999 | return |
5000 |
The diff has been truncated for viewing.