Merge lp:~cloud-init/cloud-init/rework into lp:~cloud-init-dev/cloud-init/trunk
- rework
- Merge into trunk
Proposed by
Joshua Harlow
Status: | Merged |
---|---|
Merged at revision: | 564 |
Proposed branch: | lp:~cloud-init/cloud-init/rework |
Merge into: | lp:~cloud-init-dev/cloud-init/trunk |
Diff against target: |
17875 lines (+10046/-4803) 113 files modified
ChangeLog (+193/-0) Makefile (+24/-5) Requires (+30/-0) TODO (+27/-4) bin/cloud-init (+474/-0) cloud-init-cfg.py (+0/-115) cloud-init-query.py (+0/-56) cloud-init.py (+0/-229) cloudinit/DataSource.py (+0/-214) cloudinit/UserDataHandler.py (+0/-262) cloudinit/__init__.py (+4/-650) cloudinit/cloud.py (+101/-0) cloudinit/config/__init__.py (+34/-252) cloudinit/config/cc_apt_pipelining.py (+35/-29) cloudinit/config/cc_apt_update_upgrade.py (+117/-86) cloudinit/config/cc_bootcmd.py (+31/-24) cloudinit/config/cc_byobu.py (+10/-16) cloudinit/config/cc_ca_certs.py (+34/-25) cloudinit/config/cc_chef.py (+72/-62) cloudinit/config/cc_disable_ec2_metadata.py (+17/-11) cloudinit/config/cc_final_message.py (+44/-34) cloudinit/config/cc_foo.py (+32/-9) cloudinit/config/cc_grub_dpkg.py (+15/-12) cloudinit/config/cc_keys_to_console.py (+28/-17) cloudinit/config/cc_landscape.py (+42/-22) cloudinit/config/cc_locale.py (+9/-26) cloudinit/config/cc_mcollective.py (+55/-63) cloudinit/config/cc_mounts.py (+57/-36) cloudinit/config/cc_phone_home.py (+51/-39) cloudinit/config/cc_puppet.py (+59/-54) cloudinit/config/cc_resizefs.py (+99/-67) cloudinit/config/cc_rightscale_userdata.py (+50/-26) cloudinit/config/cc_rsyslog.py (+32/-31) cloudinit/config/cc_runcmd.py (+14/-8) cloudinit/config/cc_salt_minion.py (+30/-26) cloudinit/config/cc_scripts_per_boot.py (+17/-10) cloudinit/config/cc_scripts_per_instance.py (+17/-10) cloudinit/config/cc_scripts_per_once.py (+17/-10) cloudinit/config/cc_scripts_user.py (+18/-10) cloudinit/config/cc_set_hostname.py (+10/-17) cloudinit/config/cc_set_passwords.py (+62/-45) cloudinit/config/cc_ssh.py (+76/-50) cloudinit/config/cc_ssh_import_id.py (+19/-16) cloudinit/config/cc_timezone.py (+10/-38) cloudinit/config/cc_update_etc_hosts.py (+36/-63) cloudinit/config/cc_update_hostname.py (+14/-74) cloudinit/distros/__init__.py (+163/-0) cloudinit/distros/debian.py (+149/-0) cloudinit/distros/fedora.py (+31/-0) cloudinit/distros/rhel.py (+337/-0) 
cloudinit/distros/ubuntu.py (+31/-0) cloudinit/handlers/__init__.py (+222/-0) cloudinit/handlers/boot_hook.py (+73/-0) cloudinit/handlers/cloud_config.py (+62/-0) cloudinit/handlers/shell_script.py (+52/-0) cloudinit/handlers/upstart_job.py (+66/-0) cloudinit/helpers.py (+452/-0) cloudinit/importer.py (+65/-0) cloudinit/log.py (+133/-0) cloudinit/netinfo.py (+81/-30) cloudinit/settings.py (+57/-0) cloudinit/sources/DataSourceCloudStack.py (+94/-39) cloudinit/sources/DataSourceConfigDrive.py (+116/-121) cloudinit/sources/DataSourceEc2.py (+143/-95) cloudinit/sources/DataSourceMAAS.py (+81/-162) cloudinit/sources/DataSourceNoCloud.py (+75/-79) cloudinit/sources/DataSourceOVF.py (+117/-156) cloudinit/sources/__init__.py (+223/-0) cloudinit/ssh_util.py (+275/-188) cloudinit/stages.py (+551/-0) cloudinit/templater.py (+41/-0) cloudinit/url_helper.py (+226/-0) cloudinit/user_data.py (+243/-0) cloudinit/util.py (+1136/-592) cloudinit/version.py (+27/-0) config/cloud.cfg (+36/-4) config/cloud.cfg.d/05_logging.cfg (+5/-1) install.sh (+0/-31) packages/bddeb (+172/-33) packages/brpm (+216/-0) packages/debian/changelog (+1/-1) packages/debian/control (+4/-6) packages/debian/rules (+3/-15) packages/make-dist-tarball (+2/-2) packages/make-tarball (+89/-0) packages/redhat/cloud-init.spec (+183/-0) setup.py (+102/-17) sysvinit/cloud-config (+124/-0) sysvinit/cloud-final (+124/-0) sysvinit/cloud-init (+124/-0) sysvinit/cloud-init-local (+124/-0) templates/chef_client.rb.tmpl (+4/-4) templates/default-locale.tmpl (+0/-1) templates/hosts.redhat.tmpl (+22/-0) templates/hosts.ubuntu.tmpl (+7/-8) templates/sources.list.tmpl (+56/-57) tests/configs/sample1.yaml (+53/-0) tests/unittests/test__init__.py (+75/-93) tests/unittests/test_builtin_handlers.py (+54/-0) tests/unittests/test_datasource/test_maas.py (+33/-37) tests/unittests/test_handler/test_handler_ca_certs.py (+62/-45) tests/unittests/test_userdata.py (+90/-53) tests/unittests/test_util.py (+69/-64) tools/hacking.py (+175/-0) 
tools/mock-meta.py (+444/-0) tools/read-dependencies (+45/-0) tools/read-version (+70/-0) tools/run-pep8 (+35/-0) tools/run-pylint (+1/-12) upstart/cloud-config.conf (+1/-1) upstart/cloud-final.conf (+1/-1) upstart/cloud-init-local.conf (+1/-1) upstart/cloud-init.conf (+1/-1) |
To merge this branch: | bzr merge lp:~cloud-init/cloud-init/rework |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
cloud-init Commiters | Pending | ||
Review via email: mp+113684@code.launchpad.net |
Commit message
Description of the change
To post a comment you must log in.
lp:~cloud-init/cloud-init/rework
updated
- 992. By Joshua Harlow
-
Updated so that if no mirror is found, the module stops running.
- 993. By Joshua Harlow
-
Add comment about keeping track of what people think about the 'read'
and 'write' root, and if it confuses them, remove it later and just
recommend a more 'natural' way of doing it (ie 'chroot'). - 994. By Scott Moser
-
setup.py: rename "daemon type" to "init system"
This brings with it other changes, and also makes an install
install all of the requisite init files. (ie, cloud-init needs the -local and
the non-local) - 995. By Joshua Harlow
-
Fix the initsys variable, setuptools/distutils will automatically assign
to a variable of the name 'init_system' instead due to the param name being
'init-system'.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'ChangeLog' | |||
2 | --- ChangeLog 2012-06-21 15:37:22 +0000 | |||
3 | +++ ChangeLog 2012-07-06 21:16:18 +0000 | |||
4 | @@ -1,3 +1,196 @@ | |||
5 | 1 | 0.7.0: | ||
6 | 2 | - unified binary that activates the various stages | ||
7 | 3 | - Now using argparse + subcommands to specify the various CLI options | ||
8 | 4 | - a stage module that clearly separates the stages of the different | ||
9 | 5 | components (also described how they are used and in what order in the | ||
10 | 6 | new unified binary) | ||
11 | 7 | - user_data is now a module that just does user data processing while the | ||
12 | 8 | actual activation and 'handling' of the processed user data is done via | ||
13 | 9 | a separate set of files (and modules) with the main 'init' stage being the | ||
14 | 10 | controller of this | ||
15 | 11 | - creation of boot_hook, cloud_config, shell_script, upstart_job version 2 | ||
16 | 12 | modules (with classes that perform their functionality) instead of those | ||
17 | 13 | having functionality that is attached to the cloudinit object (which | ||
18 | 14 | reduces reuse and limits future functionality, and makes testing harder) | ||
19 | 15 | - removal of global config that defined paths, shared config, now this is | ||
20 | 16 | via objects making unit testing and global side-effects a non issue | ||
21 | 17 | - creation of a 'helpers.py' | ||
22 | 18 | - this contains an abstraction for the 'lock' like objects that the various | ||
23 | 19 | module/handler running stages use to avoid re-running a given | ||
24 | 20 | module/handler for a given frequency. this makes it separated from | ||
25 | 21 | the actual usage of that object (thus helpful for testing and clear lines | ||
26 | 22 | usage and how the actual job is accomplished) | ||
27 | 23 | - a common 'runner' class is the main entrypoint using these locks to | ||
28 | 24 | run function objects passed in (along with their arguments) and their | ||
29 | 25 | frequency | ||
30 | 26 | - add in a 'paths' object that provides access to the previously global | ||
31 | 27 | and/or config based paths (thus providing a single entrypoint object/type | ||
32 | 28 | that provides path information) | ||
33 | 29 | - this also adds in the ability to change the path when constructing | ||
34 | 30 | that path 'object' and adding in additional config that can be used to | ||
35 | 31 | alter the root paths of 'joins' (useful for testing or possibly useful | ||
36 | 32 | in chroots?) | ||
37 | 33 | - config options now available that can alter the 'write_root' and the | ||
38 | 34 | 'read_root' when backing code uses the paths join() function | ||
39 | 35 | - add a config parser subclass that will automatically add unknown sections | ||
40 | 36 | and return default values (instead of throwing exceptions for these cases) | ||
41 | 37 | - a new config merging class that will be the central object that knows | ||
42 | 38 | how to do the common configuration merging from the various configuration | ||
43 | 39 | sources. The order is the following: | ||
44 | 40 | - cli config files override environment config files | ||
45 | 41 | which override instance configs which override datasource | ||
46 | 42 | configs which override base configuration which overrides | ||
47 | 43 | default configuration. | ||
48 | 44 | - remove the passing around of the 'cloudinit' object as a 'cloud' variable | ||
49 | 45 | and instead pass around an 'interface' object that can be given to modules | ||
50 | 46 | and handlers as their cloud access layer while the backing of that | ||
51 | 47 | object can be varied (good for abstraction and testing) | ||
52 | 48 | - use a single set of functions to do importing of modules | ||
53 | 49 | - add a function in which will search for a given set of module names with | ||
54 | 50 | a given set of attributes and return those which are found | ||
55 | 51 | - refactor logging so that instead of using a single top level 'log' that | ||
56 | 52 | instead each component/module can use its own logger (if desired), this | ||
57 | 53 | should be backwards compatible with handlers and config modules that used | ||
58 | 54 | the passed in logger (its still passed in) | ||
59 | 55 | - ensure that all places where exception are caught and where applicable | ||
60 | 56 | that the util logexc() is called, so that no exceptions that may occur | ||
61 | 57 | are dropped without first being logged (where it makes sense for this | ||
62 | 58 | to happen) | ||
63 | 59 | - add a 'requires' file that lists cloud-init dependencies | ||
64 | 60 | - applying it in package creation (bdeb and brpm) as well as using it | ||
65 | 61 | in the modified setup.py to ensure dependencies are installed when | ||
66 | 62 | using that method of packaging | ||
67 | 63 | - add a 'version.py' that lists the active version (in code) so that code | ||
68 | 64 | inside cloud-init can report the version in messaging and other config files | ||
69 | 65 | - cleanup of subprocess usage so that all subprocess calls go through the | ||
70 | 66 | subp() utility method, which now has an exception type that will provide | ||
71 | 67 | detailed information on python 2.6 and 2.7 | ||
72 | 68 | - forced all code loading, moving, chmod, writing files and other system | ||
73 | 69 | level actions to go through standard set of util functions, this greatly | ||
74 | 70 | helps in debugging and determining exactly which system actions cloud-init is | ||
75 | 71 | performing | ||
76 | 72 | - switching out the templating engine cheetah for tempita since tempita has | ||
77 | 73 | no external dependencies (minus python) while cheetah has many dependencies | ||
78 | 74 | which makes it more difficult to adopt cloud-init in distros that may not | ||
79 | 75 | have those dependencies | ||
80 | 76 | - adjust url fetching and url trying to go through a single function that | ||
81 | 77 | reads urls in the new 'url helper' file, this helps in tracing, debugging | ||
82 | 78 | and knowing which urls are being called and/or posted to from with-in | ||
83 | 79 | cloud-init code | ||
84 | 80 | - add in the sending of a 'User-Agent' header for all urls fetched that | ||
85 | 81 | do not provide their own header mapping, derive this user-agent from | ||
86 | 82 | the following template, 'Cloud-Init/{version}' where the version is the | ||
87 | 83 | cloud-init version number | ||
88 | 84 | - using prettytable for netinfo 'debug' printing since it provides a standard | ||
89 | 85 | and defined output that should be easier to parse than a custom format | ||
90 | 86 | - add a set of distro specific classes, that handle distro specific actions | ||
91 | 87 | that modules and or handler code can use as needed, this is organized into | ||
92 | 88 | a base abstract class with child classes that implement the shared | ||
93 | 89 | functionality. config determines exactly which subclass to load, so it can | ||
94 | 90 | be easily extended as needed. | ||
95 | 91 | - current functionality | ||
96 | 92 | - network interface config file writing | ||
97 | 93 | - hostname setting/updating | ||
98 | 94 | - locale/timezone/ setting | ||
99 | 95 | - updating of /etc/hosts (with templates or generically) | ||
100 | 96 | - package commands (ie installing, removing)/mirror finding | ||
101 | 97 | - interface up/down activating | ||
102 | 98 | - implemented a debian + ubuntu subclass | ||
103 | 99 | - implemented a redhat + fedora subclass | ||
104 | 100 | - adjust the root 'cloud.cfg' file to now have distribution/path specific | ||
105 | 101 | configuration values in it. these special configs are merged as the normal | ||
106 | 102 | config is, but the system level config is not passed into modules/handlers | ||
107 | 103 | - modules/handlers must go through the path and distro object instead | ||
108 | 104 | - have the cloudstack datasource test the url before calling into boto to | ||
109 | 105 | avoid the long wait for boto to finish retrying and finally fail when | ||
110 | 106 | the gateway meta-data address is unavailable | ||
111 | 107 | - add a simple mock ec2 meta-data python based http server that can serve a | ||
112 | 108 | very simple set of ec2 meta-data back to callers | ||
113 | 109 | - useful for testing or for understanding what the ec2 meta-data | ||
114 | 110 | service can provide in terms of data or functionality | ||
115 | 111 | - for ssh key and authorized key file parsing add in classes and util functions | ||
116 | 112 | that maintain the state of individual lines, allowing for a clearer | ||
117 | 113 | separation of parsing and modification (useful for testing and tracing) | ||
118 | 114 | - add a set of 'base' init.d scripts that can be used on systems that do | ||
119 | 115 | not have full upstart or systemd support (or support that does not match | ||
120 | 116 | the standard fedora/ubuntu implementation) | ||
121 | 117 | - currently these are being tested on RHEL 6.2 | ||
122 | 118 | - separate the datasources into their own subdirectory (instead of being | ||
123 | 119 | a top-level item), this matches how config 'modules' and user-data 'handlers' | ||
124 | 120 | are also in their own subdirectory (thus helping new developers and others | ||
125 | 121 | understand the code layout in a quicker manner) | ||
126 | 122 | - add the building of rpms based off a new cli tool and template 'spec' file | ||
127 | 123 | that will templatize and perform the necessary commands to create a source | ||
128 | 124 | and binary package to be used with a cloud-init install on a 'rpm' supporting | ||
129 | 125 | system | ||
130 | 126 | - uses the new standard set of requires and converts those pypi requirements | ||
131 | 127 | into a local set of package requirements (that are known to exist on RHEL | ||
132 | 128 | systems but should also exist on fedora systems) | ||
133 | 129 | - adjust the bdeb builder to be a python script (instead of a shell script) and | ||
134 | 130 | make its 'control' file a template that takes in the standard set of pypi | ||
135 | 131 | dependencies and uses a local mapping (known to work on ubuntu) to create the | ||
136 | 132 | packages set of dependencies (that should also work on ubuntu-like systems) | ||
137 | 133 | - pythonify a large set of various pieces of code | ||
138 | 134 | - remove wrapping return statements with () when it has no effect | ||
139 | 135 | - upper case all constants used | ||
140 | 136 | - correctly 'case' class and method names (where applicable) | ||
141 | 137 | - use os.path.join (and similar commands) instead of custom path creation | ||
142 | 138 | - use 'is None' instead of the frowned upon '== None' which picks up a larger | ||
143 | 139 | set of 'true' cases than is typically desired (ie for objects that have | ||
144 | 140 | their own equality) | ||
145 | 141 | - use context managers on locks, tempdir, chdir, file, selinux, umask, | ||
146 | 142 | unmounting commands so that these actions do not have to be closed and/or | ||
147 | 143 | cleaned up manually in finally blocks, which is typically not done and will | ||
148 | 144 | eventually be a bug in the future | ||
149 | 145 | - use the 'abc' module for abstract classes base where possible | ||
150 | 146 | - applied in the datasource root class, the distro root class, and the | ||
151 | 147 | user-data v2 root class | ||
152 | 148 | - when loading yaml, check that the 'root' type matches a predefined set of | ||
153 | 149 | valid types (typically just 'dict') and throw a type error if a mismatch | ||
154 | 150 | occurs, this seems to be a good idea to do when loading user config files | ||
155 | 151 | - when forking a long running task (ie resizing a filesystem) use a new util | ||
156 | 152 | function that will fork and then call a callback, instead of having to | ||
157 | 153 | implement all that code in a non-shared location (thus allowing it to be | ||
158 | 154 | used by others in the future) | ||
159 | 155 | - when writing out filenames, go through a util function that will attempt to | ||
160 | 156 | ensure that the given filename is 'filesystem' safe by replacing '/' with | ||
161 | 157 | '_' and removing characters which do not match a given whitelist of allowed | ||
162 | 158 | filename characters | ||
163 | 159 | - for the varying usages of the 'blkid' command make a function in the util | ||
164 | 160 | module that can be used as the single point of entry for interaction with | ||
165 | 161 | that command (and its results) instead of having X separate implementations | ||
166 | 162 | - place the rfc 2822 time formatting and uptime repeated pieces of code in the | ||
167 | 163 | util module as a set of function with the name 'time_rfc2822'/'uptime' | ||
168 | 164 | - separate the pylint+pep8 calling from one tool into two individual tools so | ||
169 | 165 | that they can be called independently, add make file sections that can be | ||
170 | 166 | used to call these independently | ||
171 | 167 | - remove the support for the old style config that was previously located in | ||
172 | 168 | '/etc/ec2-init/ec2-config.cfg', no longer supported! | ||
173 | 169 | - instead of using a altered config parser that added its own 'dummy' section | ||
174 | 170 | on in the 'mcollective' module, use configobj which handles the parsing of | ||
175 | 171 | config without sections better (and it also maintains comments instead of | ||
176 | 172 | removing them) | ||
177 | 173 | - use the new defaulting config parser (that will not raise errors on sections | ||
178 | 174 | that do not exist or return errors when values are fetched that do not exist) | ||
179 | 175 | in the 'puppet' module | ||
180 | 176 | - for config 'modules' add in the ability for the module to provide a list of | ||
181 | 177 | distro names which it is known to work with, if when ran and the distro being | ||
182 | 178 | used name does not match one of those in this list, a warning will be written | ||
183 | 179 | out saying that this module may not work correctly on this distribution | ||
184 | 180 | - for all dynamically imported modules ensure that they are fixed up before | ||
185 | 181 | they are used by ensuring that they have certain attributes, if they do not | ||
186 | 182 | have those attributes they will be set to a sensible set of defaults instead | ||
187 | 183 | - adjust all 'config' modules and handlers to use the adjusted util functions | ||
188 | 184 | and the new distro objects where applicable so that those pieces of code can | ||
189 | 185 | benefit from the unified and enhanced functionality being provided in that | ||
190 | 186 | util module | ||
191 | 187 | - fix a potential bug whereby when a #includeonce was encountered it would | ||
192 | 188 | enable checking of urls against a cache, if later a #include was encountered | ||
193 | 189 | it would continue checking against that cache, instead of refetching (which | ||
194 | 190 | would likely be the expected case) | ||
195 | 191 | - add a openstack/nova based pep8 extension utility ('hacking.py') that allows | ||
196 | 192 | for custom checks (along with the standard pep8 checks) to occur when running | ||
197 | 193 | 'make pep8' and its derivatives | ||
198 | 1 | 0.6.4: | 194 | 0.6.4: |
199 | 2 | - support relative path in AuthorizedKeysFile (LP: #970071). | 195 | - support relative path in AuthorizedKeysFile (LP: #970071). |
200 | 3 | - make apt-get update run with --quiet (suitable for logging) (LP: #1012613) | 196 | - make apt-get update run with --quiet (suitable for logging) (LP: #1012613) |
201 | 4 | 197 | ||
202 | === modified file 'Makefile' | |||
203 | --- Makefile 2012-01-12 15:06:27 +0000 | |||
204 | +++ Makefile 2012-07-06 21:16:18 +0000 | |||
205 | @@ -1,14 +1,33 @@ | |||
206 | 1 | CWD=$(shell pwd) | ||
207 | 2 | PY_FILES=$(shell find cloudinit bin -name "*.py") | ||
208 | 3 | PY_FILES+="bin/cloud-init" | ||
209 | 1 | 4 | ||
210 | 2 | all: test | 5 | all: test |
211 | 3 | 6 | ||
212 | 7 | pep8: | ||
213 | 8 | $(CWD)/tools/run-pep8 $(PY_FILES) | ||
214 | 9 | |||
215 | 4 | pylint: | 10 | pylint: |
217 | 5 | pylint cloudinit | 11 | $(CWD)/tools/run-pylint $(PY_FILES) |
218 | 6 | 12 | ||
219 | 7 | pyflakes: | 13 | pyflakes: |
221 | 8 | pyflakes . | 14 | pyflakes $(PY_FILES) |
222 | 9 | 15 | ||
223 | 10 | test: | 16 | test: |
227 | 11 | nosetests tests/unittests/ | 17 | nosetests $(noseopts) tests/unittests/ |
228 | 12 | 18 | ||
229 | 13 | .PHONY: test pylint pyflakes | 19 | 2to3: |
230 | 20 | 2to3 $(PY_FILES) | ||
231 | 21 | |||
232 | 22 | clean: | ||
233 | 23 | rm -rf /var/log/cloud-init.log \ | ||
234 | 24 | /var/lib/cloud/ | ||
235 | 25 | |||
236 | 26 | rpm: | ||
237 | 27 | cd packages && ./brpm | ||
238 | 28 | |||
239 | 29 | deb: | ||
240 | 30 | cd packages && ./bddeb | ||
241 | 31 | |||
242 | 32 | .PHONY: test pylint pyflakes 2to3 clean pep8 rpm deb | ||
243 | 14 | 33 | ||
244 | 15 | 34 | ||
245 | === added file 'Requires' | |||
246 | --- Requires 1970-01-01 00:00:00 +0000 | |||
247 | +++ Requires 2012-07-06 21:16:18 +0000 | |||
248 | @@ -0,0 +1,30 @@ | |||
249 | 1 | # Pypi requirements for cloud-init to work | ||
250 | 2 | |||
251 | 3 | # Used for templating any files or strings that are considered | ||
252 | 4 | # to be templates, not cheetah since it pulls in a lot of extra libs. | ||
253 | 5 | # This one is pretty dinky and does what we want (var substitution) | ||
254 | 6 | Tempita | ||
255 | 7 | |||
256 | 8 | # This is used for any pretty printing of tabular data. | ||
257 | 9 | PrettyTable | ||
258 | 10 | |||
259 | 11 | # This one is currently only used by the MAAS datasource. If that | ||
260 | 12 | # datasource is removed, this is no longer needed | ||
261 | 13 | oauth | ||
262 | 14 | |||
263 | 15 | # This is used to fetch the ec2 metadata into a easily | ||
264 | 16 | # parseable format, instead of having to have cloud-init perform | ||
265 | 17 | # those same fetches and decodes and signing (...) that ec2 requires. | ||
266 | 18 | boto | ||
267 | 19 | |||
268 | 20 | # This is only needed for places where we need to support configs in a manner | ||
269 | 21 | # that the built-in config parser is not sufficient (ie | ||
270 | 22 | # when we need to preserve comments, or do not have a top-level | ||
271 | 23 | # section)... | ||
272 | 24 | configobj | ||
273 | 25 | |||
274 | 26 | # All new style configurations are in the yaml format | ||
275 | 27 | pyyaml | ||
276 | 28 | |||
277 | 29 | # The new main entrypoint uses argparse instead of optparse | ||
278 | 30 | argparse | ||
279 | 0 | 31 | ||
280 | === modified file 'TODO' | |||
281 | --- TODO 2011-02-17 20:48:41 +0000 | |||
282 | +++ TODO 2012-07-06 21:16:18 +0000 | |||
283 | @@ -1,14 +1,37 @@ | |||
285 | 1 | - consider 'failsafe' DataSource | 1 | - Consider a 'failsafe' DataSource |
286 | 2 | If all others fail, setting a default that | 2 | If all others fail, setting a default that |
287 | 3 | - sets the user password, writing it to console | 3 | - sets the user password, writing it to console |
288 | 4 | - logs to console that this happened | 4 | - logs to console that this happened |
290 | 5 | - consider 'previous' DataSource | 5 | - Consider a 'previous' DataSource |
291 | 6 | If no other data source is found, fall back to the 'previous' one | 6 | If no other data source is found, fall back to the 'previous' one |
292 | 7 | keep a indication of what instance id that is in /var/lib/cloud | 7 | keep a indication of what instance id that is in /var/lib/cloud |
295 | 8 | - rewrite "cloud-init-query" | 8 | - Rewrite "cloud-init-query" (currently not implemented) |
296 | 9 | have DataSource and cloudinit expose explicit fields | 9 | Possibly have DataSource and cloudinit expose explicit fields |
297 | 10 | - instance-id | 10 | - instance-id |
298 | 11 | - hostname | 11 | - hostname |
299 | 12 | - mirror | 12 | - mirror |
300 | 13 | - release | 13 | - release |
301 | 14 | - ssh public keys | 14 | - ssh public keys |
302 | 15 | - Remove the conversion of the ubuntu network interface format conversion | ||
303 | 16 | to a RH/fedora format and replace it with a top level format that uses | ||
304 | 17 | the netcf libraries format instead (which itself knows how to translate | ||
305 | 18 | into the specific formats) | ||
306 | 19 | - Replace the 'apt*' modules with variants that now use the distro classes | ||
307 | 20 | to perform distro independent packaging commands (where possible) | ||
308 | 21 | - Canonicalize the semaphore/lock name for modules and user data handlers | ||
309 | 22 | a. It is most likely a bug that currently exists that if a module in config | ||
310 | 23 | alters its name and it has already ran, then it will get ran again since | ||
311 | 24 | the lock name hasn't been canonicalized | ||
312 | 25 | - Replace some the LOG.debug calls with a LOG.info where appropriate instead | ||
313 | 26 | of how right now there is really only 2 levels (WARN and DEBUG) | ||
314 | 27 | - Remove the 'cc_' for config modules, either have them fully specified (ie | ||
315 | 28 | 'cloudinit.config.resizefs') or by default only look in the 'cloudinit.config' | ||
316 | 29 | for these modules (or have a combination of the above), this avoids having | ||
317 | 30 | to understand where your modules are coming from (which can be altered by | ||
318 | 31 | the current python inclusion path) | ||
319 | 32 | - Depending on if people think the wrapper around 'os.path.join' provided | ||
320 | 33 | by the 'paths' object is useful (allowing us to modify based off a 'read' | ||
321 | 34 | and 'write' configuration based 'root') or is just too confusing, it might be | ||
322 | 35 | something to remove later, and just recommend using 'chroot' instead (or the X | ||
323 | 36 | different other options which are similar to 'chroot'), which might be more | ||
324 | 37 | natural and less confusing... | ||
325 | 15 | 38 | ||
326 | === added directory 'bin' | |||
327 | === added file 'bin/cloud-init' | |||
328 | --- bin/cloud-init 1970-01-01 00:00:00 +0000 | |||
329 | +++ bin/cloud-init 2012-07-06 21:16:18 +0000 | |||
330 | @@ -0,0 +1,474 @@ | |||
331 | 1 | #!/usr/bin/python | ||
332 | 2 | # vi: ts=4 expandtab | ||
333 | 3 | # | ||
334 | 4 | # Copyright (C) 2012 Canonical Ltd. | ||
335 | 5 | # Copyright (C) 2012 Hewlett-Packard Development Company, L.P. | ||
336 | 6 | # Copyright (C) 2012 Yahoo! Inc. | ||
337 | 7 | # | ||
338 | 8 | # Author: Scott Moser <scott.moser@canonical.com> | ||
339 | 9 | # Author: Juerg Haefliger <juerg.haefliger@hp.com> | ||
340 | 10 | # Author: Joshua Harlow <harlowja@yahoo-inc.com> | ||
341 | 11 | # | ||
342 | 12 | # This program is free software: you can redistribute it and/or modify | ||
343 | 13 | # it under the terms of the GNU General Public License version 3, as | ||
344 | 14 | # published by the Free Software Foundation. | ||
345 | 15 | # | ||
346 | 16 | # This program is distributed in the hope that it will be useful, | ||
347 | 17 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
348 | 18 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
349 | 19 | # GNU General Public License for more details. | ||
350 | 20 | # | ||
351 | 21 | # You should have received a copy of the GNU General Public License | ||
352 | 22 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
353 | 23 | |||
354 | 24 | import argparse | ||
355 | 25 | import os | ||
356 | 26 | import sys | ||
357 | 27 | import traceback | ||
358 | 28 | |||
359 | 29 | # This is more just for running from the bin folder so that | ||
360 | 30 | # cloud-init binary can find the cloudinit module | ||
361 | 31 | possible_topdir = os.path.normpath(os.path.join(os.path.abspath( | ||
362 | 32 | sys.argv[0]), os.pardir, os.pardir)) | ||
363 | 33 | if os.path.exists(os.path.join(possible_topdir, "cloudinit", "__init__.py")): | ||
364 | 34 | sys.path.insert(0, possible_topdir) | ||
365 | 35 | |||
366 | 36 | from cloudinit import log as logging | ||
367 | 37 | from cloudinit import netinfo | ||
368 | 38 | from cloudinit import sources | ||
369 | 39 | from cloudinit import stages | ||
370 | 40 | from cloudinit import templater | ||
371 | 41 | from cloudinit import util | ||
372 | 42 | from cloudinit import version | ||
373 | 43 | |||
374 | 44 | from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE, | ||
375 | 45 | CLOUD_CONFIG) | ||
376 | 46 | |||
377 | 47 | |||
378 | 48 | # Pretty little welcome message template | ||
379 | 49 | WELCOME_MSG_TPL = ("Cloud-init v. {{version}} running '{{action}}' at " | ||
380 | 50 | "{{timestamp}}. Up {{uptime}} seconds.") | ||
381 | 51 | |||
382 | 52 | # Module section template | ||
383 | 53 | MOD_SECTION_TPL = "cloud_%s_modules" | ||
384 | 54 | |||
385 | 55 | # Things u can query on | ||
386 | 56 | QUERY_DATA_TYPES = [ | ||
387 | 57 | 'data', | ||
388 | 58 | 'data_raw', | ||
389 | 59 | 'instance_id', | ||
390 | 60 | ] | ||
391 | 61 | |||
392 | 62 | # Frequency shortname to full name | ||
393 | 63 | # (so users don't have to remember the full name...) | ||
394 | 64 | FREQ_SHORT_NAMES = { | ||
395 | 65 | 'instance': PER_INSTANCE, | ||
396 | 66 | 'always': PER_ALWAYS, | ||
397 | 67 | 'once': PER_ONCE, | ||
398 | 68 | } | ||
399 | 69 | |||
400 | 70 | LOG = logging.getLogger() | ||
401 | 71 | |||
402 | 72 | |||
403 | 73 | # Used for when a logger may not be active | ||
404 | 74 | # and we still want to print exceptions... | ||
405 | 75 | def print_exc(msg=''): | ||
406 | 76 | if msg: | ||
407 | 77 | sys.stderr.write("%s\n" % (msg)) | ||
408 | 78 | sys.stderr.write('-' * 60) | ||
409 | 79 | sys.stderr.write("\n") | ||
410 | 80 | traceback.print_exc(file=sys.stderr) | ||
411 | 81 | sys.stderr.write('-' * 60) | ||
412 | 82 | sys.stderr.write("\n") | ||
413 | 83 | |||
414 | 84 | |||
415 | 85 | def welcome(action): | ||
416 | 86 | tpl_params = { | ||
417 | 87 | 'version': version.version_string(), | ||
418 | 88 | 'uptime': util.uptime(), | ||
419 | 89 | 'timestamp': util.time_rfc2822(), | ||
420 | 90 | 'action': action, | ||
421 | 91 | } | ||
422 | 92 | tpl_msg = templater.render_string(WELCOME_MSG_TPL, tpl_params) | ||
423 | 93 | util.multi_log("%s\n" % (tpl_msg), | ||
424 | 94 | console=False, stderr=True) | ||
425 | 95 | |||
426 | 96 | |||
427 | 97 | def extract_fns(args): | ||
428 | 98 | # Files are already opened so lets just pass that along | ||
429 | 99 | # since it would of broke if it couldn't have | ||
430 | 100 | # read that file already... | ||
431 | 101 | fn_cfgs = [] | ||
432 | 102 | if args.files: | ||
433 | 103 | for fh in args.files: | ||
434 | 104 | # The realpath is more useful in logging | ||
435 | 105 | # so lets resolve to that... | ||
436 | 106 | fn_cfgs.append(os.path.realpath(fh.name)) | ||
437 | 107 | return fn_cfgs | ||
438 | 108 | |||
439 | 109 | |||
440 | 110 | def run_module_section(mods, action_name, section): | ||
441 | 111 | full_section_name = MOD_SECTION_TPL % (section) | ||
442 | 112 | (which_ran, failures) = mods.run_section(full_section_name) | ||
443 | 113 | total_attempted = len(which_ran) + len(failures) | ||
444 | 114 | if total_attempted == 0: | ||
445 | 115 | msg = ("No '%s' modules to run" | ||
446 | 116 | " under section '%s'") % (action_name, full_section_name) | ||
447 | 117 | sys.stderr.write("%s\n" % (msg)) | ||
448 | 118 | LOG.debug(msg) | ||
449 | 119 | return 0 | ||
450 | 120 | else: | ||
451 | 121 | LOG.debug("Ran %s modules with %s failures", | ||
452 | 122 | len(which_ran), len(failures)) | ||
453 | 123 | return len(failures) | ||
454 | 124 | |||
455 | 125 | |||
456 | 126 | def main_init(name, args): | ||
457 | 127 | deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK] | ||
458 | 128 | if args.local: | ||
459 | 129 | deps = [sources.DEP_FILESYSTEM] | ||
460 | 130 | |||
461 | 131 | if not args.local: | ||
462 | 132 | # See doc/kernel-cmdline.txt | ||
463 | 133 | # | ||
464 | 134 | # This is used in maas datasource, in "ephemeral" (read-only root) | ||
465 | 135 | # environment where the instance netboots to iscsi ro root. | ||
466 | 136 | # and the entity that controls the pxe config has to configure | ||
467 | 137 | # the maas datasource. | ||
468 | 138 | # | ||
469 | 139 | # Could be used elsewhere, only works on network based (not local). | ||
470 | 140 | root_name = "%s.d" % (CLOUD_CONFIG) | ||
471 | 141 | target_fn = os.path.join(root_name, "91_kernel_cmdline_url.cfg") | ||
472 | 142 | util.read_write_cmdline_url(target_fn) | ||
473 | 143 | |||
474 | 144 | # Cloud-init 'init' stage is broken up into the following sub-stages | ||
475 | 145 | # 1. Ensure that the init object fetches its config without errors | ||
476 | 146 | # 2. Setup logging/output redirections with resultant config (if any) | ||
477 | 147 | # 3. Initialize the cloud-init filesystem | ||
478 | 148 | # 4. Check if we can stop early by looking for various files | ||
479 | 149 | # 5. Fetch the datasource | ||
480 | 150 | # 6. Connect to the current instance location + update the cache | ||
481 | 151 | # 7. Consume the userdata (handlers get activated here) | ||
482 | 152 | # 8. Construct the modules object | ||
483 | 153 | # 9. Adjust any subsequent logging/output redirections using | ||
484 | 154 | # the modules objects configuration | ||
485 | 155 | # 10. Run the modules for the 'init' stage | ||
486 | 156 | # 11. Done! | ||
487 | 157 | welcome(name) | ||
488 | 158 | init = stages.Init(deps) | ||
489 | 159 | # Stage 1 | ||
490 | 160 | init.read_cfg(extract_fns(args)) | ||
491 | 161 | # Stage 2 | ||
492 | 162 | outfmt = None | ||
493 | 163 | errfmt = None | ||
494 | 164 | try: | ||
495 | 165 | LOG.debug("Closing stdin") | ||
496 | 166 | util.close_stdin() | ||
497 | 167 | (outfmt, errfmt) = util.fixup_output(init.cfg, name) | ||
498 | 168 | except: | ||
499 | 169 | util.logexc(LOG, "Failed to setup output redirection!") | ||
500 | 170 | print_exc("Failed to setup output redirection!") | ||
501 | 171 | if args.debug: | ||
502 | 172 | # Reset so that all the debug handlers are closed out | ||
503 | 173 | LOG.debug(("Logging being reset, this logger may no" | ||
504 | 174 | " longer be active shortly")) | ||
505 | 175 | logging.resetLogging() | ||
506 | 176 | logging.setupLogging(init.cfg) | ||
507 | 177 | # Stage 3 | ||
508 | 178 | try: | ||
509 | 179 | init.initialize() | ||
510 | 180 | except Exception: | ||
511 | 181 | util.logexc(LOG, "Failed to initialize, likely bad things to come!") | ||
512 | 182 | # Stage 4 | ||
513 | 183 | path_helper = init.paths | ||
514 | 184 | if not args.local: | ||
515 | 185 | sys.stderr.write("%s\n" % (netinfo.debug_info())) | ||
516 | 186 | LOG.debug(("Checking to see if files that we need already" | ||
517 | 187 | " exist from a previous run that would allow us" | ||
518 | 188 | " to stop early.")) | ||
519 | 189 | stop_files = [ | ||
520 | 190 | os.path.join(path_helper.get_cpath("data"), "no-net"), | ||
521 | 191 | path_helper.get_ipath_cur("obj_pkl"), | ||
522 | 192 | ] | ||
523 | 193 | existing_files = [] | ||
524 | 194 | for fn in stop_files: | ||
525 | 195 | try: | ||
526 | 196 | c = util.load_file(fn) | ||
527 | 197 | if len(c): | ||
528 | 198 | existing_files.append((fn, len(c))) | ||
529 | 199 | except Exception: | ||
530 | 200 | pass | ||
531 | 201 | if existing_files: | ||
532 | 202 | LOG.debug("Exiting early due to the existence of %s files", | ||
533 | 203 | existing_files) | ||
534 | 204 | return 0 | ||
535 | 205 | else: | ||
536 | 206 | # The cache is not instance specific, so it has to be purged | ||
537 | 207 | # but we want 'start' to benefit from a cache if | ||
538 | 208 | # a previous start-local populated one... | ||
539 | 209 | manual_clean = util.get_cfg_option_bool(init.cfg, | ||
540 | 210 | 'manual_cache_clean', False) | ||
541 | 211 | if manual_clean: | ||
542 | 212 | LOG.debug("Not purging instance link, manual cleaning enabled") | ||
543 | 213 | init.purge_cache(False) | ||
544 | 214 | else: | ||
545 | 215 | init.purge_cache() | ||
546 | 216 | # Delete the non-net file as well | ||
547 | 217 | util.del_file(os.path.join(path_helper.get_cpath("data"), "no-net")) | ||
548 | 218 | # Stage 5 | ||
549 | 219 | try: | ||
550 | 220 | init.fetch() | ||
551 | 221 | except sources.DataSourceNotFoundException: | ||
552 | 222 | util.logexc(LOG, ("No instance datasource found!" | ||
553 | 223 | " Likely bad things to come!")) | ||
554 | 224 | # In the case of cloud-init (net mode) it is a bit | ||
555 | 225 | # more likely that the user would consider it | ||
556 | 226 | # failure if nothing was found. When using | ||
557 | 227 | # upstart it will also mentions job failure | ||
558 | 228 | # in console log if exit code is != 0. | ||
559 | 229 | if not args.force: | ||
560 | 230 | if args.local: | ||
561 | 231 | return 0 | ||
562 | 232 | else: | ||
563 | 233 | return 1 | ||
564 | 234 | # Stage 6 | ||
565 | 235 | iid = init.instancify() | ||
566 | 236 | LOG.debug("%s will now be targeting instance id: %s", name, iid) | ||
567 | 237 | init.update() | ||
568 | 238 | # Stage 7 | ||
569 | 239 | try: | ||
570 | 240 | # Attempt to consume the data per instance. | ||
571 | 241 | # This may run user-data handlers and/or perform | ||
572 | 242 | # url downloads and such as needed. | ||
573 | 243 | (ran, _results) = init.cloudify().run('consume_userdata', | ||
574 | 244 | init.consume_userdata, | ||
575 | 245 | args=[PER_INSTANCE], | ||
576 | 246 | freq=PER_INSTANCE) | ||
577 | 247 | if not ran: | ||
578 | 248 | # Just consume anything that is set to run per-always | ||
579 | 249 | # if nothing ran in the per-instance code | ||
580 | 250 | # | ||
581 | 251 | # See: https://bugs.launchpad.net/bugs/819507 for a little | ||
582 | 252 | # reason behind this... | ||
583 | 253 | init.consume_userdata(PER_ALWAYS) | ||
584 | 254 | except Exception: | ||
585 | 255 | util.logexc(LOG, "Consuming user data failed!") | ||
586 | 256 | return 1 | ||
587 | 257 | # Stage 8 - TODO - do we really need to re-extract our configs? | ||
588 | 258 | mods = stages.Modules(init, extract_fns(args)) | ||
589 | 259 | # Stage 9 - TODO is this really needed?? | ||
590 | 260 | try: | ||
591 | 261 | outfmt_orig = outfmt | ||
592 | 262 | errfmt_orig = errfmt | ||
593 | 263 | (outfmt, errfmt) = util.get_output_cfg(mods.cfg, name) | ||
594 | 264 | if outfmt_orig != outfmt or errfmt_orig != errfmt: | ||
595 | 265 | LOG.warn("Stdout, stderr changing to (%s, %s)", outfmt, errfmt) | ||
596 | 266 | (outfmt, errfmt) = util.fixup_output(mods.cfg, name) | ||
597 | 267 | except: | ||
598 | 268 | util.logexc(LOG, "Failed to re-adjust output redirection!") | ||
599 | 269 | # Stage 10 | ||
600 | 270 | return run_module_section(mods, name, name) | ||
601 | 271 | |||
602 | 272 | |||
603 | 273 | def main_modules(action_name, args): | ||
604 | 274 | name = args.mode | ||
605 | 275 | # Cloud-init 'modules' stages are broken up into the following sub-stages | ||
606 | 276 | # 1. Ensure that the init object fetches its config without errors | ||
607 | 277 | # 2. Get the datasource from the init object, if it does | ||
608 | 278 | # not exist then that means the main_init stage never | ||
609 | 279 | # worked, and thus this stage can not run. | ||
610 | 280 | # 3. Construct the modules object | ||
611 | 281 | # 4. Adjust any subsequent logging/output redirections using | ||
612 | 282 | # the modules objects configuration | ||
613 | 283 | # 5. Run the modules for the given stage name | ||
614 | 284 | # 6. Done! | ||
615 | 285 | welcome("%s:%s" % (action_name, name)) | ||
616 | 286 | init = stages.Init(ds_deps=[]) | ||
617 | 287 | # Stage 1 | ||
618 | 288 | init.read_cfg(extract_fns(args)) | ||
619 | 289 | # Stage 2 | ||
620 | 290 | try: | ||
621 | 291 | init.fetch() | ||
622 | 292 | except sources.DataSourceNotFoundException: | ||
623 | 293 | # There was no datasource found, theres nothing to do | ||
624 | 294 | util.logexc(LOG, ('Can not apply stage %s, ' | ||
625 | 295 | 'no datasource found!' | ||
626 | 296 | " Likely bad things to come!"), name) | ||
627 | 297 | print_exc(('Can not apply stage %s, ' | ||
628 | 298 | 'no datasource found!' | ||
629 | 299 | " Likely bad things to come!") % (name)) | ||
630 | 300 | if not args.force: | ||
631 | 301 | return 1 | ||
632 | 302 | # Stage 3 | ||
633 | 303 | mods = stages.Modules(init, extract_fns(args)) | ||
634 | 304 | # Stage 4 | ||
635 | 305 | try: | ||
636 | 306 | LOG.debug("Closing stdin") | ||
637 | 307 | util.close_stdin() | ||
638 | 308 | util.fixup_output(mods.cfg, name) | ||
639 | 309 | except: | ||
640 | 310 | util.logexc(LOG, "Failed to setup output redirection!") | ||
641 | 311 | if args.debug: | ||
642 | 312 | # Reset so that all the debug handlers are closed out | ||
643 | 313 | LOG.debug(("Logging being reset, this logger may no" | ||
644 | 314 | " longer be active shortly")) | ||
645 | 315 | logging.resetLogging() | ||
646 | 316 | logging.setupLogging(mods.cfg) | ||
647 | 317 | # Stage 5 | ||
648 | 318 | return run_module_section(mods, name, name) | ||
649 | 319 | |||
650 | 320 | |||
651 | 321 | def main_query(name, _args): | ||
652 | 322 | raise NotImplementedError(("Action '%s' is not" | ||
653 | 323 | " currently implemented") % (name)) | ||
654 | 324 | |||
655 | 325 | |||
656 | 326 | def main_single(name, args): | ||
657 | 327 | # Cloud-init single stage is broken up into the following sub-stages | ||
658 | 328 | # 1. Ensure that the init object fetches its config without errors | ||
659 | 329 | # 2. Attempt to fetch the datasource (warn if it doesn't work) | ||
660 | 330 | # 3. Construct the modules object | ||
661 | 331 | # 4. Adjust any subsequent logging/output redirections using | ||
662 | 332 | # the modules objects configuration | ||
663 | 333 | # 5. Run the single module | ||
664 | 334 | # 6. Done! | ||
665 | 335 | mod_name = args.name | ||
666 | 336 | welcome("%s:%s" % (name, mod_name)) | ||
667 | 337 | init = stages.Init(ds_deps=[]) | ||
668 | 338 | # Stage 1 | ||
669 | 339 | init.read_cfg(extract_fns(args)) | ||
670 | 340 | # Stage 2 | ||
671 | 341 | try: | ||
672 | 342 | init.fetch() | ||
673 | 343 | except sources.DataSourceNotFoundException: | ||
674 | 344 | # There was no datasource found, | ||
675 | 345 | # that might be bad (or ok) depending on | ||
676 | 346 | # the module being ran (so continue on) | ||
677 | 347 | util.logexc(LOG, ("Failed to fetch your datasource," | ||
678 | 348 | " likely bad things to come!")) | ||
679 | 349 | print_exc(("Failed to fetch your datasource," | ||
680 | 350 | " likely bad things to come!")) | ||
681 | 351 | if not args.force: | ||
682 | 352 | return 1 | ||
683 | 353 | # Stage 3 | ||
684 | 354 | mods = stages.Modules(init, extract_fns(args)) | ||
685 | 355 | mod_args = args.module_args | ||
686 | 356 | if mod_args: | ||
687 | 357 | LOG.debug("Using passed in arguments %s", mod_args) | ||
688 | 358 | mod_freq = args.frequency | ||
689 | 359 | if mod_freq: | ||
690 | 360 | LOG.debug("Using passed in frequency %s", mod_freq) | ||
691 | 361 | mod_freq = FREQ_SHORT_NAMES.get(mod_freq) | ||
692 | 362 | # Stage 4 | ||
693 | 363 | try: | ||
694 | 364 | LOG.debug("Closing stdin") | ||
695 | 365 | util.close_stdin() | ||
696 | 366 | util.fixup_output(mods.cfg, None) | ||
697 | 367 | except: | ||
698 | 368 | util.logexc(LOG, "Failed to setup output redirection!") | ||
699 | 369 | if args.debug: | ||
700 | 370 | # Reset so that all the debug handlers are closed out | ||
701 | 371 | LOG.debug(("Logging being reset, this logger may no" | ||
702 | 372 | " longer be active shortly")) | ||
703 | 373 | logging.resetLogging() | ||
704 | 374 | logging.setupLogging(mods.cfg) | ||
705 | 375 | # Stage 5 | ||
706 | 376 | (which_ran, failures) = mods.run_single(mod_name, | ||
707 | 377 | mod_args, | ||
708 | 378 | mod_freq) | ||
709 | 379 | if failures: | ||
710 | 380 | LOG.warn("Ran %s but it failed!", mod_name) | ||
711 | 381 | return 1 | ||
712 | 382 | elif not which_ran: | ||
713 | 383 | LOG.warn("Did not run %s, does it exist?", mod_name) | ||
714 | 384 | return 1 | ||
715 | 385 | else: | ||
716 | 386 | # Guess it worked | ||
717 | 387 | return 0 | ||
718 | 388 | |||
719 | 389 | |||
720 | 390 | def main(): | ||
721 | 391 | parser = argparse.ArgumentParser() | ||
722 | 392 | |||
723 | 393 | # Top level args | ||
724 | 394 | parser.add_argument('--version', '-v', action='version', | ||
725 | 395 | version='%(prog)s ' + (version.version_string())) | ||
726 | 396 | parser.add_argument('--file', '-f', action='append', | ||
727 | 397 | dest='files', | ||
728 | 398 | help=('additional yaml configuration' | ||
729 | 399 | ' files to use'), | ||
730 | 400 | type=argparse.FileType('rb')) | ||
731 | 401 | parser.add_argument('--debug', '-d', action='store_true', | ||
732 | 402 | help=('show additional pre-action' | ||
733 | 403 | ' logging (default: %(default)s)'), | ||
734 | 404 | default=False) | ||
735 | 405 | parser.add_argument('--force', action='store_true', | ||
736 | 406 | help=('force running even if no datasource is' | ||
737 | 407 | ' found (use at your own risk)'), | ||
738 | 408 | dest='force', | ||
739 | 409 | default=False) | ||
740 | 410 | subparsers = parser.add_subparsers() | ||
741 | 411 | |||
742 | 412 | # Each action and its sub-options (if any) | ||
743 | 413 | parser_init = subparsers.add_parser('init', | ||
744 | 414 | help=('initializes cloud-init and' | ||
745 | 415 | ' performs initial modules')) | ||
746 | 416 | parser_init.add_argument("--local", '-l', action='store_true', | ||
747 | 417 | help="start in local mode (default: %(default)s)", | ||
748 | 418 | default=False) | ||
749 | 419 | # This is used so that we can know which action is selected + | ||
750 | 420 | # the functor to use to run this subcommand | ||
751 | 421 | parser_init.set_defaults(action=('init', main_init)) | ||
752 | 422 | |||
753 | 423 | # These settings are used for the 'config' and 'final' stages | ||
754 | 424 | parser_mod = subparsers.add_parser('modules', | ||
755 | 425 | help=('activates modules ' | ||
756 | 426 | 'using a given configuration key')) | ||
757 | 427 | parser_mod.add_argument("--mode", '-m', action='store', | ||
758 | 428 | help=("module configuration name " | ||
759 | 429 | "to use (default: %(default)s)"), | ||
760 | 430 | default='config', | ||
761 | 431 | choices=('init', 'config', 'final')) | ||
762 | 432 | parser_mod.set_defaults(action=('modules', main_modules)) | ||
763 | 433 | |||
764 | 434 | # These settings are used when you want to query information | ||
765 | 435 | # stored in the cloud-init data objects/directories/files | ||
766 | 436 | parser_query = subparsers.add_parser('query', | ||
767 | 437 | help=('query information stored ' | ||
768 | 438 | 'in cloud-init')) | ||
769 | 439 | parser_query.add_argument("--name", '-n', action="store", | ||
770 | 440 | help="item name to query on", | ||
771 | 441 | required=True, | ||
772 | 442 | choices=QUERY_DATA_TYPES) | ||
773 | 443 | parser_query.set_defaults(action=('query', main_query)) | ||
774 | 444 | |||
775 | 445 | # This subcommand allows you to run a single module | ||
776 | 446 | parser_single = subparsers.add_parser('single', | ||
777 | 447 | help=('run a single module ')) | ||
778 | 448 | parser_single.set_defaults(action=('single', main_single)) | ||
779 | 449 | parser_single.add_argument("--name", '-n', action="store", | ||
780 | 450 | help="module name to run", | ||
781 | 451 | required=True) | ||
782 | 452 | parser_single.add_argument("--frequency", action="store", | ||
783 | 453 | help=("frequency of the module"), | ||
784 | 454 | required=False, | ||
785 | 455 | choices=list(FREQ_SHORT_NAMES.keys())) | ||
786 | 456 | parser_single.add_argument("module_args", nargs="*", | ||
787 | 457 | metavar='argument', | ||
788 | 458 | help=('any additional arguments to' | ||
789 | 459 | ' pass to this module')) | ||
790 | 460 | parser_single.set_defaults(action=('single', main_single)) | ||
791 | 461 | |||
792 | 462 | args = parser.parse_args() | ||
793 | 463 | |||
794 | 464 | # Setup basic logging to start (until reinitialized) | ||
795 | 465 | # iff in debug mode... | ||
796 | 466 | if args.debug: | ||
797 | 467 | logging.setupBasicLogging() | ||
798 | 468 | |||
799 | 469 | (name, functor) = args.action | ||
800 | 470 | return functor(name, args) | ||
801 | 471 | |||
802 | 472 | |||
803 | 473 | if __name__ == '__main__': | ||
804 | 474 | sys.exit(main()) | ||
805 | 0 | 475 | ||
806 | === removed file 'cloud-init-cfg.py' | |||
807 | --- cloud-init-cfg.py 2012-01-18 14:07:33 +0000 | |||
808 | +++ cloud-init-cfg.py 1970-01-01 00:00:00 +0000 | |||
809 | @@ -1,115 +0,0 @@ | |||
810 | 1 | #!/usr/bin/python | ||
811 | 2 | # vi: ts=4 expandtab | ||
812 | 3 | # | ||
813 | 4 | # Copyright (C) 2009-2010 Canonical Ltd. | ||
814 | 5 | # Copyright (C) 2012 Hewlett-Packard Development Company, L.P. | ||
815 | 6 | # | ||
816 | 7 | # Author: Scott Moser <scott.moser@canonical.com> | ||
817 | 8 | # Author: Juerg Haefliger <juerg.haefliger@hp.com> | ||
818 | 9 | # | ||
819 | 10 | # This program is free software: you can redistribute it and/or modify | ||
820 | 11 | # it under the terms of the GNU General Public License version 3, as | ||
821 | 12 | # published by the Free Software Foundation. | ||
822 | 13 | # | ||
823 | 14 | # This program is distributed in the hope that it will be useful, | ||
824 | 15 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
825 | 16 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
826 | 17 | # GNU General Public License for more details. | ||
827 | 18 | # | ||
828 | 19 | # You should have received a copy of the GNU General Public License | ||
829 | 20 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
830 | 21 | |||
831 | 22 | import sys | ||
832 | 23 | import cloudinit | ||
833 | 24 | import cloudinit.util as util | ||
834 | 25 | import cloudinit.CloudConfig as CC | ||
835 | 26 | import logging | ||
836 | 27 | import os | ||
837 | 28 | |||
838 | 29 | |||
839 | 30 | def Usage(out=sys.stdout): | ||
840 | 31 | out.write("Usage: %s name\n" % sys.argv[0]) | ||
841 | 32 | |||
842 | 33 | |||
843 | 34 | def main(): | ||
844 | 35 | # expect to be called with | ||
845 | 36 | # name [ freq [ args ] | ||
846 | 37 | # run the cloud-config job 'name' at with given args | ||
847 | 38 | # or | ||
848 | 39 | # read cloud config jobs from config (builtin -> system) | ||
849 | 40 | # and run all in order | ||
850 | 41 | |||
851 | 42 | util.close_stdin() | ||
852 | 43 | |||
853 | 44 | modename = "config" | ||
854 | 45 | |||
855 | 46 | if len(sys.argv) < 2: | ||
856 | 47 | Usage(sys.stderr) | ||
857 | 48 | sys.exit(1) | ||
858 | 49 | if sys.argv[1] == "all": | ||
859 | 50 | name = "all" | ||
860 | 51 | if len(sys.argv) > 2: | ||
861 | 52 | modename = sys.argv[2] | ||
862 | 53 | else: | ||
863 | 54 | freq = None | ||
864 | 55 | run_args = [] | ||
865 | 56 | name = sys.argv[1] | ||
866 | 57 | if len(sys.argv) > 2: | ||
867 | 58 | freq = sys.argv[2] | ||
868 | 59 | if freq == "None": | ||
869 | 60 | freq = None | ||
870 | 61 | if len(sys.argv) > 3: | ||
871 | 62 | run_args = sys.argv[3:] | ||
872 | 63 | |||
873 | 64 | cfg_path = cloudinit.get_ipath_cur("cloud_config") | ||
874 | 65 | cfg_env_name = cloudinit.cfg_env_name | ||
875 | 66 | if cfg_env_name in os.environ: | ||
876 | 67 | cfg_path = os.environ[cfg_env_name] | ||
877 | 68 | |||
878 | 69 | cloud = cloudinit.CloudInit(ds_deps=[]) # ds_deps=[], get only cached | ||
879 | 70 | try: | ||
880 | 71 | cloud.get_data_source() | ||
881 | 72 | except cloudinit.DataSourceNotFoundException as e: | ||
882 | 73 | # there was no datasource found, theres nothing to do | ||
883 | 74 | sys.exit(0) | ||
884 | 75 | |||
885 | 76 | cc = CC.CloudConfig(cfg_path, cloud) | ||
886 | 77 | |||
887 | 78 | try: | ||
888 | 79 | (outfmt, errfmt) = CC.get_output_cfg(cc.cfg, modename) | ||
889 | 80 | CC.redirect_output(outfmt, errfmt) | ||
890 | 81 | except Exception as e: | ||
891 | 82 | err("Failed to get and set output config: %s\n" % e) | ||
892 | 83 | |||
893 | 84 | cloudinit.logging_set_from_cfg(cc.cfg) | ||
894 | 85 | log = logging.getLogger() | ||
895 | 86 | log.info("cloud-init-cfg %s" % sys.argv[1:]) | ||
896 | 87 | |||
897 | 88 | module_list = [] | ||
898 | 89 | if name == "all": | ||
899 | 90 | modlist_cfg_name = "cloud_%s_modules" % modename | ||
900 | 91 | module_list = CC.read_cc_modules(cc.cfg, modlist_cfg_name) | ||
901 | 92 | if not len(module_list): | ||
902 | 93 | err("no modules to run in cloud_config [%s]" % modename, log) | ||
903 | 94 | sys.exit(0) | ||
904 | 95 | else: | ||
905 | 96 | module_list.append([name, freq] + run_args) | ||
906 | 97 | |||
907 | 98 | failures = CC.run_cc_modules(cc, module_list, log) | ||
908 | 99 | if len(failures): | ||
909 | 100 | err("errors running cloud_config [%s]: %s" % (modename, failures), log) | ||
910 | 101 | sys.exit(len(failures)) | ||
911 | 102 | |||
912 | 103 | |||
913 | 104 | def err(msg, log=None): | ||
914 | 105 | if log: | ||
915 | 106 | log.error(msg) | ||
916 | 107 | sys.stderr.write(msg + "\n") | ||
917 | 108 | |||
918 | 109 | |||
919 | 110 | def fail(msg, log=None): | ||
920 | 111 | err(msg, log) | ||
921 | 112 | sys.exit(1) | ||
922 | 113 | |||
923 | 114 | if __name__ == '__main__': | ||
924 | 115 | main() | ||
925 | 116 | 0 | ||
926 | === removed file 'cloud-init-query.py' | |||
927 | --- cloud-init-query.py 2012-01-18 14:07:33 +0000 | |||
928 | +++ cloud-init-query.py 1970-01-01 00:00:00 +0000 | |||
929 | @@ -1,56 +0,0 @@ | |||
930 | 1 | #!/usr/bin/python | ||
931 | 2 | # vi: ts=4 expandtab | ||
932 | 3 | # | ||
933 | 4 | # Copyright (C) 2009-2010 Canonical Ltd. | ||
934 | 5 | # Copyright (C) 2012 Hewlett-Packard Development Company, L.P. | ||
935 | 6 | # | ||
936 | 7 | # Author: Scott Moser <scott.moser@canonical.com> | ||
937 | 8 | # Author: Juerg Haefliger <juerg.haefliger@hp.com> | ||
938 | 9 | # | ||
939 | 10 | # This program is free software: you can redistribute it and/or modify | ||
940 | 11 | # it under the terms of the GNU General Public License version 3, as | ||
941 | 12 | # published by the Free Software Foundation. | ||
942 | 13 | # | ||
943 | 14 | # This program is distributed in the hope that it will be useful, | ||
944 | 15 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
945 | 16 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
946 | 17 | # GNU General Public License for more details. | ||
947 | 18 | # | ||
948 | 19 | # You should have received a copy of the GNU General Public License | ||
949 | 20 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
950 | 21 | |||
951 | 22 | import sys | ||
952 | 23 | import cloudinit | ||
953 | 24 | import cloudinit.CloudConfig | ||
954 | 25 | |||
955 | 26 | |||
956 | 27 | def Usage(out=sys.stdout): | ||
957 | 28 | out.write("Usage: %s name\n" % sys.argv[0]) | ||
958 | 29 | |||
959 | 30 | |||
960 | 31 | def main(): | ||
961 | 32 | # expect to be called with name of item to fetch | ||
962 | 33 | if len(sys.argv) != 2: | ||
963 | 34 | Usage(sys.stderr) | ||
964 | 35 | sys.exit(1) | ||
965 | 36 | |||
966 | 37 | cfg_path = cloudinit.get_ipath_cur("cloud_config") | ||
967 | 38 | cc = cloudinit.CloudConfig.CloudConfig(cfg_path) | ||
968 | 39 | data = { | ||
969 | 40 | 'user_data': cc.cloud.get_userdata(), | ||
970 | 41 | 'user_data_raw': cc.cloud.get_userdata_raw(), | ||
971 | 42 | 'instance_id': cc.cloud.get_instance_id(), | ||
972 | 43 | } | ||
973 | 44 | |||
974 | 45 | name = sys.argv[1].replace('-', '_') | ||
975 | 46 | |||
976 | 47 | if name not in data: | ||
977 | 48 | sys.stderr.write("unknown name '%s'. Known values are:\n %s\n" % | ||
978 | 49 | (sys.argv[1], ' '.join(data.keys()))) | ||
979 | 50 | sys.exit(1) | ||
980 | 51 | |||
981 | 52 | print data[name] | ||
982 | 53 | sys.exit(0) | ||
983 | 54 | |||
984 | 55 | if __name__ == '__main__': | ||
985 | 56 | main() | ||
986 | 57 | 0 | ||
987 | === removed file 'cloud-init.py' | |||
988 | --- cloud-init.py 2012-04-10 20:08:25 +0000 | |||
989 | +++ cloud-init.py 1970-01-01 00:00:00 +0000 | |||
990 | @@ -1,229 +0,0 @@ | |||
991 | 1 | #!/usr/bin/python | ||
992 | 2 | # vi: ts=4 expandtab | ||
993 | 3 | # | ||
994 | 4 | # Copyright (C) 2009-2010 Canonical Ltd. | ||
995 | 5 | # Copyright (C) 2012 Hewlett-Packard Development Company, L.P. | ||
996 | 6 | # | ||
997 | 7 | # Author: Scott Moser <scott.moser@canonical.com> | ||
998 | 8 | # Author: Juerg Haefliger <juerg.haefliger@hp.com> | ||
999 | 9 | # | ||
1000 | 10 | # This program is free software: you can redistribute it and/or modify | ||
1001 | 11 | # it under the terms of the GNU General Public License version 3, as | ||
1002 | 12 | # published by the Free Software Foundation. | ||
1003 | 13 | # | ||
1004 | 14 | # This program is distributed in the hope that it will be useful, | ||
1005 | 15 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
1006 | 16 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
1007 | 17 | # GNU General Public License for more details. | ||
1008 | 18 | # | ||
1009 | 19 | # You should have received a copy of the GNU General Public License | ||
1010 | 20 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
1011 | 21 | |||
1012 | 22 | import subprocess | ||
1013 | 23 | import sys | ||
1014 | 24 | |||
1015 | 25 | import cloudinit | ||
1016 | 26 | import cloudinit.util as util | ||
1017 | 27 | import cloudinit.CloudConfig as CC | ||
1018 | 28 | import cloudinit.DataSource as ds | ||
1019 | 29 | import cloudinit.netinfo as netinfo | ||
1020 | 30 | import time | ||
1021 | 31 | import traceback | ||
1022 | 32 | import logging | ||
1023 | 33 | import errno | ||
1024 | 34 | import os | ||
1025 | 35 | |||
1026 | 36 | |||
1027 | 37 | def warn(wstr): | ||
1028 | 38 | sys.stderr.write("WARN:%s" % wstr) | ||
1029 | 39 | |||
1030 | 40 | |||
1031 | 41 | def main(): | ||
1032 | 42 | util.close_stdin() | ||
1033 | 43 | |||
1034 | 44 | cmds = ("start", "start-local") | ||
1035 | 45 | deps = {"start": (ds.DEP_FILESYSTEM, ds.DEP_NETWORK), | ||
1036 | 46 | "start-local": (ds.DEP_FILESYSTEM, )} | ||
1037 | 47 | |||
1038 | 48 | cmd = "" | ||
1039 | 49 | if len(sys.argv) > 1: | ||
1040 | 50 | cmd = sys.argv[1] | ||
1041 | 51 | |||
1042 | 52 | cfg_path = None | ||
1043 | 53 | if len(sys.argv) > 2: | ||
1044 | 54 | # this is really for debugging only | ||
1045 | 55 | # but you can invoke on development system with ./config/cloud.cfg | ||
1046 | 56 | cfg_path = sys.argv[2] | ||
1047 | 57 | |||
1048 | 58 | if not cmd in cmds: | ||
1049 | 59 | sys.stderr.write("bad command %s. use one of %s\n" % (cmd, cmds)) | ||
1050 | 60 | sys.exit(1) | ||
1051 | 61 | |||
1052 | 62 | now = time.strftime("%a, %d %b %Y %H:%M:%S %z", time.gmtime()) | ||
1053 | 63 | try: | ||
1054 | 64 | uptimef = open("/proc/uptime") | ||
1055 | 65 | uptime = uptimef.read().split(" ")[0] | ||
1056 | 66 | uptimef.close() | ||
1057 | 67 | except IOError as e: | ||
1058 | 68 | warn("unable to open /proc/uptime\n") | ||
1059 | 69 | uptime = "na" | ||
1060 | 70 | |||
1061 | 71 | cmdline_msg = None | ||
1062 | 72 | cmdline_exc = None | ||
1063 | 73 | if cmd == "start": | ||
1064 | 74 | target = "%s.d/%s" % (cloudinit.system_config, | ||
1065 | 75 | "91_kernel_cmdline_url.cfg") | ||
1066 | 76 | if os.path.exists(target): | ||
1067 | 77 | cmdline_msg = "cmdline: %s existed" % target | ||
1068 | 78 | else: | ||
1069 | 79 | cmdline = util.get_cmdline() | ||
1070 | 80 | try: | ||
1071 | 81 | (key, url, content) = cloudinit.get_cmdline_url( | ||
1072 | 82 | cmdline=cmdline) | ||
1073 | 83 | if key and content: | ||
1074 | 84 | util.write_file(target, content, mode=0600) | ||
1075 | 85 | cmdline_msg = ("cmdline: wrote %s from %s, %s" % | ||
1076 | 86 | (target, key, url)) | ||
1077 | 87 | elif key: | ||
1078 | 88 | cmdline_msg = ("cmdline: %s, %s had no cloud-config" % | ||
1079 | 89 | (key, url)) | ||
1080 | 90 | except Exception: | ||
1081 | 91 | cmdline_exc = ("cmdline: '%s' raised exception\n%s" % | ||
1082 | 92 | (cmdline, traceback.format_exc())) | ||
1083 | 93 | warn(cmdline_exc) | ||
1084 | 94 | |||
1085 | 95 | try: | ||
1086 | 96 | cfg = cloudinit.get_base_cfg(cfg_path) | ||
1087 | 97 | except Exception as e: | ||
1088 | 98 | warn("Failed to get base config. falling back to builtin: %s\n" % e) | ||
1089 | 99 | try: | ||
1090 | 100 | cfg = cloudinit.get_builtin_cfg() | ||
1091 | 101 | except Exception as e: | ||
1092 | 102 | warn("Unable to load builtin config\n") | ||
1093 | 103 | raise | ||
1094 | 104 | |||
1095 | 105 | try: | ||
1096 | 106 | (outfmt, errfmt) = CC.get_output_cfg(cfg, "init") | ||
1097 | 107 | CC.redirect_output(outfmt, errfmt) | ||
1098 | 108 | except Exception as e: | ||
1099 | 109 | warn("Failed to get and set output config: %s\n" % e) | ||
1100 | 110 | |||
1101 | 111 | cloudinit.logging_set_from_cfg(cfg) | ||
1102 | 112 | log = logging.getLogger() | ||
1103 | 113 | |||
1104 | 114 | if cmdline_exc: | ||
1105 | 115 | log.debug(cmdline_exc) | ||
1106 | 116 | elif cmdline_msg: | ||
1107 | 117 | log.debug(cmdline_msg) | ||
1108 | 118 | |||
1109 | 119 | try: | ||
1110 | 120 | cloudinit.initfs() | ||
1111 | 121 | except Exception as e: | ||
1112 | 122 | warn("failed to initfs, likely bad things to come: %s\n" % str(e)) | ||
1113 | 123 | |||
1114 | 124 | nonet_path = "%s/%s" % (cloudinit.get_cpath("data"), "no-net") | ||
1115 | 125 | |||
1116 | 126 | if cmd == "start": | ||
1117 | 127 | print netinfo.debug_info() | ||
1118 | 128 | |||
1119 | 129 | stop_files = (cloudinit.get_ipath_cur("obj_pkl"), nonet_path) | ||
1120 | 130 | # if starting as the network start, there are cases | ||
1121 | 131 | # where everything is already done for us, and it makes | ||
1122 | 132 | # most sense to exit early and silently | ||
1123 | 133 | for f in stop_files: | ||
1124 | 134 | try: | ||
1125 | 135 | fp = open(f, "r") | ||
1126 | 136 | fp.close() | ||
1127 | 137 | except: | ||
1128 | 138 | continue | ||
1129 | 139 | |||
1130 | 140 | log.debug("no need for cloud-init start to run (%s)\n", f) | ||
1131 | 141 | sys.exit(0) | ||
1132 | 142 | elif cmd == "start-local": | ||
1133 | 143 | # cache is not instance specific, so it has to be purged | ||
1134 | 144 | # but we want 'start' to benefit from a cache if | ||
1135 | 145 | # a previous start-local populated one | ||
1136 | 146 | manclean = util.get_cfg_option_bool(cfg, 'manual_cache_clean', False) | ||
1137 | 147 | if manclean: | ||
1138 | 148 | log.debug("not purging cache, manual_cache_clean = True") | ||
1139 | 149 | cloudinit.purge_cache(not manclean) | ||
1140 | 150 | |||
1141 | 151 | try: | ||
1142 | 152 | os.unlink(nonet_path) | ||
1143 | 153 | except OSError as e: | ||
1144 | 154 | if e.errno != errno.ENOENT: | ||
1145 | 155 | raise | ||
1146 | 156 | |||
1147 | 157 | msg = "cloud-init %s running: %s. up %s seconds" % (cmd, now, uptime) | ||
1148 | 158 | sys.stderr.write(msg + "\n") | ||
1149 | 159 | sys.stderr.flush() | ||
1150 | 160 | |||
1151 | 161 | log.info(msg) | ||
1152 | 162 | |||
1153 | 163 | cloud = cloudinit.CloudInit(ds_deps=deps[cmd]) | ||
1154 | 164 | |||
1155 | 165 | try: | ||
1156 | 166 | cloud.get_data_source() | ||
1157 | 167 | except cloudinit.DataSourceNotFoundException as e: | ||
1158 | 168 | sys.stderr.write("no instance data found in %s\n" % cmd) | ||
1159 | 169 | sys.exit(0) | ||
1160 | 170 | |||
1161 | 171 | # set this as the current instance | ||
1162 | 172 | cloud.set_cur_instance() | ||
1163 | 173 | |||
1164 | 174 | # store the metadata | ||
1165 | 175 | cloud.update_cache() | ||
1166 | 176 | |||
1167 | 177 | msg = "found data source: %s" % cloud.datasource | ||
1168 | 178 | sys.stderr.write(msg + "\n") | ||
1169 | 179 | log.debug(msg) | ||
1170 | 180 | |||
1171 | 181 | # parse the user data (ec2-run-userdata.py) | ||
1172 | 182 | try: | ||
1173 | 183 | ran = cloud.sem_and_run("consume_userdata", cloudinit.per_instance, | ||
1174 | 184 | cloud.consume_userdata, [cloudinit.per_instance], False) | ||
1175 | 185 | if not ran: | ||
1176 | 186 | cloud.consume_userdata(cloudinit.per_always) | ||
1177 | 187 | except: | ||
1178 | 188 | warn("consuming user data failed!\n") | ||
1179 | 189 | raise | ||
1180 | 190 | |||
1181 | 191 | cfg_path = cloudinit.get_ipath_cur("cloud_config") | ||
1182 | 192 | cc = CC.CloudConfig(cfg_path, cloud) | ||
1183 | 193 | |||
1184 | 194 | # if the output config changed, update output and err | ||
1185 | 195 | try: | ||
1186 | 196 | outfmt_orig = outfmt | ||
1187 | 197 | errfmt_orig = errfmt | ||
1188 | 198 | (outfmt, errfmt) = CC.get_output_cfg(cc.cfg, "init") | ||
1189 | 199 | if outfmt_orig != outfmt or errfmt_orig != errfmt: | ||
1190 | 200 | warn("stdout, stderr changing to (%s,%s)" % (outfmt, errfmt)) | ||
1191 | 201 | CC.redirect_output(outfmt, errfmt) | ||
1192 | 202 | except Exception as e: | ||
1193 | 203 | warn("Failed to get and set output config: %s\n" % e) | ||
1194 | 204 | |||
1195 | 205 | # send the cloud-config ready event | ||
1196 | 206 | cc_path = cloudinit.get_ipath_cur('cloud_config') | ||
1197 | 207 | cc_ready = cc.cfg.get("cc_ready_cmd", | ||
1198 | 208 | ['initctl', 'emit', 'cloud-config', | ||
1199 | 209 | '%s=%s' % (cloudinit.cfg_env_name, cc_path)]) | ||
1200 | 210 | if cc_ready: | ||
1201 | 211 | if isinstance(cc_ready, str): | ||
1202 | 212 | cc_ready = ['sh', '-c', cc_ready] | ||
1203 | 213 | subprocess.Popen(cc_ready).communicate() | ||
1204 | 214 | |||
1205 | 215 | module_list = CC.read_cc_modules(cc.cfg, "cloud_init_modules") | ||
1206 | 216 | |||
1207 | 217 | failures = [] | ||
1208 | 218 | if len(module_list): | ||
1209 | 219 | failures = CC.run_cc_modules(cc, module_list, log) | ||
1210 | 220 | else: | ||
1211 | 221 | msg = "no cloud_init_modules to run" | ||
1212 | 222 | sys.stderr.write(msg + "\n") | ||
1213 | 223 | log.debug(msg) | ||
1214 | 224 | sys.exit(0) | ||
1215 | 225 | |||
1216 | 226 | sys.exit(len(failures)) | ||
1217 | 227 | |||
1218 | 228 | if __name__ == '__main__': | ||
1219 | 229 | main() | ||
1220 | 230 | 0 | ||
1221 | === removed file 'cloudinit/DataSource.py' | |||
1222 | --- cloudinit/DataSource.py 2012-03-19 17:33:39 +0000 | |||
1223 | +++ cloudinit/DataSource.py 1970-01-01 00:00:00 +0000 | |||
1224 | @@ -1,214 +0,0 @@ | |||
1225 | 1 | # vi: ts=4 expandtab | ||
1226 | 2 | # | ||
1227 | 3 | # Copyright (C) 2009-2010 Canonical Ltd. | ||
1228 | 4 | # Copyright (C) 2012 Hewlett-Packard Development Company, L.P. | ||
1229 | 5 | # | ||
1230 | 6 | # Author: Scott Moser <scott.moser@canonical.com> | ||
1231 | 7 | # Author: Juerg Hafliger <juerg.haefliger@hp.com> | ||
1232 | 8 | # | ||
1233 | 9 | # This program is free software: you can redistribute it and/or modify | ||
1234 | 10 | # it under the terms of the GNU General Public License version 3, as | ||
1235 | 11 | # published by the Free Software Foundation. | ||
1236 | 12 | # | ||
1237 | 13 | # This program is distributed in the hope that it will be useful, | ||
1238 | 14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
1239 | 15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
1240 | 16 | # GNU General Public License for more details. | ||
1241 | 17 | # | ||
1242 | 18 | # You should have received a copy of the GNU General Public License | ||
1243 | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
1244 | 20 | |||
1245 | 21 | |||
1246 | 22 | DEP_FILESYSTEM = "FILESYSTEM" | ||
1247 | 23 | DEP_NETWORK = "NETWORK" | ||
1248 | 24 | |||
1249 | 25 | import cloudinit.UserDataHandler as ud | ||
1250 | 26 | import cloudinit.util as util | ||
1251 | 27 | import socket | ||
1252 | 28 | |||
1253 | 29 | |||
1254 | 30 | class DataSource: | ||
1255 | 31 | userdata = None | ||
1256 | 32 | metadata = None | ||
1257 | 33 | userdata_raw = None | ||
1258 | 34 | cfgname = "" | ||
1259 | 35 | # system config (passed in from cloudinit, | ||
1260 | 36 | # cloud-config before input from the DataSource) | ||
1261 | 37 | sys_cfg = {} | ||
1262 | 38 | # datasource config, the cloud-config['datasource']['__name__'] | ||
1263 | 39 | ds_cfg = {} # datasource config | ||
1264 | 40 | |||
1265 | 41 | def __init__(self, sys_cfg=None): | ||
1266 | 42 | if not self.cfgname: | ||
1267 | 43 | name = str(self.__class__).split(".")[-1] | ||
1268 | 44 | if name.startswith("DataSource"): | ||
1269 | 45 | name = name[len("DataSource"):] | ||
1270 | 46 | self.cfgname = name | ||
1271 | 47 | if sys_cfg: | ||
1272 | 48 | self.sys_cfg = sys_cfg | ||
1273 | 49 | |||
1274 | 50 | self.ds_cfg = util.get_cfg_by_path(self.sys_cfg, | ||
1275 | 51 | ("datasource", self.cfgname), self.ds_cfg) | ||
1276 | 52 | |||
1277 | 53 | def get_userdata(self): | ||
1278 | 54 | if self.userdata == None: | ||
1279 | 55 | self.userdata = ud.preprocess_userdata(self.userdata_raw) | ||
1280 | 56 | return self.userdata | ||
1281 | 57 | |||
1282 | 58 | def get_userdata_raw(self): | ||
1283 | 59 | return(self.userdata_raw) | ||
1284 | 60 | |||
1285 | 61 | # the data sources' config_obj is a cloud-config formated | ||
1286 | 62 | # object that came to it from ways other than cloud-config | ||
1287 | 63 | # because cloud-config content would be handled elsewhere | ||
1288 | 64 | def get_config_obj(self): | ||
1289 | 65 | return({}) | ||
1290 | 66 | |||
1291 | 67 | def get_public_ssh_keys(self): | ||
1292 | 68 | keys = [] | ||
1293 | 69 | if 'public-keys' not in self.metadata: | ||
1294 | 70 | return([]) | ||
1295 | 71 | |||
1296 | 72 | if isinstance(self.metadata['public-keys'], str): | ||
1297 | 73 | return(str(self.metadata['public-keys']).splitlines()) | ||
1298 | 74 | |||
1299 | 75 | if isinstance(self.metadata['public-keys'], list): | ||
1300 | 76 | return(self.metadata['public-keys']) | ||
1301 | 77 | |||
1302 | 78 | for _keyname, klist in self.metadata['public-keys'].items(): | ||
1303 | 79 | # lp:506332 uec metadata service responds with | ||
1304 | 80 | # data that makes boto populate a string for 'klist' rather | ||
1305 | 81 | # than a list. | ||
1306 | 82 | if isinstance(klist, str): | ||
1307 | 83 | klist = [klist] | ||
1308 | 84 | for pkey in klist: | ||
1309 | 85 | # there is an empty string at the end of the keylist, trim it | ||
1310 | 86 | if pkey: | ||
1311 | 87 | keys.append(pkey) | ||
1312 | 88 | |||
1313 | 89 | return(keys) | ||
1314 | 90 | |||
1315 | 91 | def device_name_to_device(self, _name): | ||
1316 | 92 | # translate a 'name' to a device | ||
1317 | 93 | # the primary function at this point is on ec2 | ||
1318 | 94 | # to consult metadata service, that has | ||
1319 | 95 | # ephemeral0: sdb | ||
1320 | 96 | # and return 'sdb' for input 'ephemeral0' | ||
1321 | 97 | return(None) | ||
1322 | 98 | |||
1323 | 99 | def get_locale(self): | ||
1324 | 100 | return('en_US.UTF-8') | ||
1325 | 101 | |||
1326 | 102 | def get_local_mirror(self): | ||
1327 | 103 | return None | ||
1328 | 104 | |||
1329 | 105 | def get_instance_id(self): | ||
1330 | 106 | if 'instance-id' not in self.metadata: | ||
1331 | 107 | return "iid-datasource" | ||
1332 | 108 | return(self.metadata['instance-id']) | ||
1333 | 109 | |||
1334 | 110 | def get_hostname(self, fqdn=False): | ||
1335 | 111 | defdomain = "localdomain" | ||
1336 | 112 | defhost = "localhost" | ||
1337 | 113 | |||
1338 | 114 | domain = defdomain | ||
1339 | 115 | if not 'local-hostname' in self.metadata: | ||
1340 | 116 | |||
1341 | 117 | # this is somewhat questionable really. | ||
1342 | 118 | # the cloud datasource was asked for a hostname | ||
1343 | 119 | # and didn't have one. raising error might be more appropriate | ||
1344 | 120 | # but instead, basically look up the existing hostname | ||
1345 | 121 | toks = [] | ||
1346 | 122 | |||
1347 | 123 | hostname = socket.gethostname() | ||
1348 | 124 | |||
1349 | 125 | fqdn = util.get_fqdn_from_hosts(hostname) | ||
1350 | 126 | |||
1351 | 127 | if fqdn and fqdn.find(".") > 0: | ||
1352 | 128 | toks = str(fqdn).split(".") | ||
1353 | 129 | elif hostname: | ||
1354 | 130 | toks = [hostname, defdomain] | ||
1355 | 131 | else: | ||
1356 | 132 | toks = [defhost, defdomain] | ||
1357 | 133 | |||
1358 | 134 | else: | ||
1359 | 135 | # if there is an ipv4 address in 'local-hostname', then | ||
1360 | 136 | # make up a hostname (LP: #475354) in format ip-xx.xx.xx.xx | ||
1361 | 137 | lhost = self.metadata['local-hostname'] | ||
1362 | 138 | if is_ipv4(lhost): | ||
1363 | 139 | toks = "ip-%s" % lhost.replace(".", "-") | ||
1364 | 140 | else: | ||
1365 | 141 | toks = lhost.split(".") | ||
1366 | 142 | |||
1367 | 143 | if len(toks) > 1: | ||
1368 | 144 | hostname = toks[0] | ||
1369 | 145 | domain = '.'.join(toks[1:]) | ||
1370 | 146 | else: | ||
1371 | 147 | hostname = toks[0] | ||
1372 | 148 | |||
1373 | 149 | if fqdn: | ||
1374 | 150 | return "%s.%s" % (hostname, domain) | ||
1375 | 151 | else: | ||
1376 | 152 | return hostname | ||
1377 | 153 | |||
1378 | 154 | |||
1379 | 155 | # return a list of classes that have the same depends as 'depends' | ||
1380 | 156 | # iterate through cfg_list, loading "DataSourceCollections" modules | ||
1381 | 157 | # and calling their "get_datasource_list". | ||
1382 | 158 | # return an ordered list of classes that match | ||
1383 | 159 | # | ||
1384 | 160 | # - modules must be named "DataSource<item>", where 'item' is an entry | ||
1385 | 161 | # in cfg_list | ||
1386 | 162 | # - if pkglist is given, it will iterate try loading from that package | ||
1387 | 163 | # ie, pkglist=[ "foo", "" ] | ||
1388 | 164 | # will first try to load foo.DataSource<item> | ||
1389 | 165 | # then DataSource<item> | ||
1390 | 166 | def list_sources(cfg_list, depends, pkglist=None): | ||
1391 | 167 | if pkglist is None: | ||
1392 | 168 | pkglist = [] | ||
1393 | 169 | retlist = [] | ||
1394 | 170 | for ds_coll in cfg_list: | ||
1395 | 171 | for pkg in pkglist: | ||
1396 | 172 | if pkg: | ||
1397 | 173 | pkg = "%s." % pkg | ||
1398 | 174 | try: | ||
1399 | 175 | mod = __import__("%sDataSource%s" % (pkg, ds_coll)) | ||
1400 | 176 | if pkg: | ||
1401 | 177 | mod = getattr(mod, "DataSource%s" % ds_coll) | ||
1402 | 178 | lister = getattr(mod, "get_datasource_list") | ||
1403 | 179 | retlist.extend(lister(depends)) | ||
1404 | 180 | break | ||
1405 | 181 | except: | ||
1406 | 182 | raise | ||
1407 | 183 | return(retlist) | ||
1408 | 184 | |||
1409 | 185 | |||
1410 | 186 | # depends is a list of dependencies (DEP_FILESYSTEM) | ||
1411 | 187 | # dslist is a list of 2 item lists | ||
1412 | 188 | # dslist = [ | ||
1413 | 189 | # ( class, ( depends-that-this-class-needs ) ) | ||
1414 | 190 | # } | ||
1415 | 191 | # it returns a list of 'class' that matched these deps exactly | ||
1416 | 192 | # it is a helper function for DataSourceCollections | ||
1417 | 193 | def list_from_depends(depends, dslist): | ||
1418 | 194 | retlist = [] | ||
1419 | 195 | depset = set(depends) | ||
1420 | 196 | for elem in dslist: | ||
1421 | 197 | (cls, deps) = elem | ||
1422 | 198 | if depset == set(deps): | ||
1423 | 199 | retlist.append(cls) | ||
1424 | 200 | return(retlist) | ||
1425 | 201 | |||
1426 | 202 | |||
1427 | 203 | def is_ipv4(instr): | ||
1428 | 204 | """ determine if input string is a ipv4 address. return boolean""" | ||
1429 | 205 | toks = instr.split('.') | ||
1430 | 206 | if len(toks) != 4: | ||
1431 | 207 | return False | ||
1432 | 208 | |||
1433 | 209 | try: | ||
1434 | 210 | toks = [x for x in toks if (int(x) < 256 and int(x) > 0)] | ||
1435 | 211 | except: | ||
1436 | 212 | return False | ||
1437 | 213 | |||
1438 | 214 | return (len(toks) == 4) | ||
1439 | 215 | 0 | ||
1440 | === removed file 'cloudinit/UserDataHandler.py' | |||
1441 | --- cloudinit/UserDataHandler.py 2012-06-21 15:37:22 +0000 | |||
1442 | +++ cloudinit/UserDataHandler.py 1970-01-01 00:00:00 +0000 | |||
1443 | @@ -1,262 +0,0 @@ | |||
1444 | 1 | # vi: ts=4 expandtab | ||
1445 | 2 | # | ||
1446 | 3 | # Copyright (C) 2009-2010 Canonical Ltd. | ||
1447 | 4 | # Copyright (C) 2012 Hewlett-Packard Development Company, L.P. | ||
1448 | 5 | # | ||
1449 | 6 | # Author: Scott Moser <scott.moser@canonical.com> | ||
1450 | 7 | # Author: Juerg Hafliger <juerg.haefliger@hp.com> | ||
1451 | 8 | # | ||
1452 | 9 | # This program is free software: you can redistribute it and/or modify | ||
1453 | 10 | # it under the terms of the GNU General Public License version 3, as | ||
1454 | 11 | # published by the Free Software Foundation. | ||
1455 | 12 | # | ||
1456 | 13 | # This program is distributed in the hope that it will be useful, | ||
1457 | 14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
1458 | 15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
1459 | 16 | # GNU General Public License for more details. | ||
1460 | 17 | # | ||
1461 | 18 | # You should have received a copy of the GNU General Public License | ||
1462 | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
1463 | 20 | |||
1464 | 21 | import email | ||
1465 | 22 | |||
1466 | 23 | from email.mime.multipart import MIMEMultipart | ||
1467 | 24 | from email.mime.text import MIMEText | ||
1468 | 25 | from email.mime.base import MIMEBase | ||
1469 | 26 | import yaml | ||
1470 | 27 | import cloudinit | ||
1471 | 28 | import cloudinit.util as util | ||
1472 | 29 | import hashlib | ||
1473 | 30 | import urllib | ||
1474 | 31 | |||
1475 | 32 | |||
1476 | 33 | starts_with_mappings = { | ||
1477 | 34 | '#include': 'text/x-include-url', | ||
1478 | 35 | '#include-once': 'text/x-include-once-url', | ||
1479 | 36 | '#!': 'text/x-shellscript', | ||
1480 | 37 | '#cloud-config': 'text/cloud-config', | ||
1481 | 38 | '#upstart-job': 'text/upstart-job', | ||
1482 | 39 | '#part-handler': 'text/part-handler', | ||
1483 | 40 | '#cloud-boothook': 'text/cloud-boothook', | ||
1484 | 41 | '#cloud-config-archive': 'text/cloud-config-archive', | ||
1485 | 42 | } | ||
1486 | 43 | |||
1487 | 44 | |||
1488 | 45 | # if 'string' is compressed return decompressed otherwise return it | ||
1489 | 46 | def decomp_str(string): | ||
1490 | 47 | import StringIO | ||
1491 | 48 | import gzip | ||
1492 | 49 | try: | ||
1493 | 50 | uncomp = gzip.GzipFile(None, "rb", 1, StringIO.StringIO(string)).read() | ||
1494 | 51 | return(uncomp) | ||
1495 | 52 | except: | ||
1496 | 53 | return(string) | ||
1497 | 54 | |||
1498 | 55 | |||
1499 | 56 | def do_include(content, appendmsg): | ||
1500 | 57 | import os | ||
1501 | 58 | # is just a list of urls, one per line | ||
1502 | 59 | # also support '#include <url here>' | ||
1503 | 60 | includeonce = False | ||
1504 | 61 | for line in content.splitlines(): | ||
1505 | 62 | if line == "#include": | ||
1506 | 63 | continue | ||
1507 | 64 | if line == "#include-once": | ||
1508 | 65 | includeonce = True | ||
1509 | 66 | continue | ||
1510 | 67 | if line.startswith("#include-once"): | ||
1511 | 68 | line = line[len("#include-once"):].lstrip() | ||
1512 | 69 | includeonce = True | ||
1513 | 70 | elif line.startswith("#include"): | ||
1514 | 71 | line = line[len("#include"):].lstrip() | ||
1515 | 72 | if line.startswith("#"): | ||
1516 | 73 | continue | ||
1517 | 74 | if line.strip() == "": | ||
1518 | 75 | continue | ||
1519 | 76 | |||
1520 | 77 | # urls cannot not have leading or trailing white space | ||
1521 | 78 | msum = hashlib.md5() # pylint: disable=E1101 | ||
1522 | 79 | msum.update(line.strip()) | ||
1523 | 80 | includeonce_filename = "%s/urlcache/%s" % ( | ||
1524 | 81 | cloudinit.get_ipath_cur("data"), msum.hexdigest()) | ||
1525 | 82 | try: | ||
1526 | 83 | if includeonce and os.path.isfile(includeonce_filename): | ||
1527 | 84 | with open(includeonce_filename, "r") as fp: | ||
1528 | 85 | content = fp.read() | ||
1529 | 86 | else: | ||
1530 | 87 | content = urllib.urlopen(line).read() | ||
1531 | 88 | if includeonce: | ||
1532 | 89 | util.write_file(includeonce_filename, content, mode=0600) | ||
1533 | 90 | except Exception: | ||
1534 | 91 | raise | ||
1535 | 92 | |||
1536 | 93 | process_includes(message_from_string(decomp_str(content)), appendmsg) | ||
1537 | 94 | |||
1538 | 95 | |||
1539 | 96 | def explode_cc_archive(archive, appendmsg): | ||
1540 | 97 | for ent in yaml.safe_load(archive): | ||
1541 | 98 | # ent can be one of: | ||
1542 | 99 | # dict { 'filename' : 'value', 'content' : 'value', 'type' : 'value' } | ||
1543 | 100 | # filename and type not be present | ||
1544 | 101 | # or | ||
1545 | 102 | # scalar(payload) | ||
1546 | 103 | |||
1547 | 104 | def_type = "text/cloud-config" | ||
1548 | 105 | if isinstance(ent, str): | ||
1549 | 106 | ent = {'content': ent} | ||
1550 | 107 | |||
1551 | 108 | content = ent.get('content', '') | ||
1552 | 109 | mtype = ent.get('type', None) | ||
1553 | 110 | if mtype == None: | ||
1554 | 111 | mtype = type_from_startswith(content, def_type) | ||
1555 | 112 | |||
1556 | 113 | maintype, subtype = mtype.split('/', 1) | ||
1557 | 114 | if maintype == "text": | ||
1558 | 115 | msg = MIMEText(content, _subtype=subtype) | ||
1559 | 116 | else: | ||
1560 | 117 | msg = MIMEBase(maintype, subtype) | ||
1561 | 118 | msg.set_payload(content) | ||
1562 | 119 | |||
1563 | 120 | if 'filename' in ent: | ||
1564 | 121 | msg.add_header('Content-Disposition', 'attachment', | ||
1565 | 122 | filename=ent['filename']) | ||
1566 | 123 | |||
1567 | 124 | for header in ent.keys(): | ||
1568 | 125 | if header in ('content', 'filename', 'type'): | ||
1569 | 126 | continue | ||
1570 | 127 | msg.add_header(header, ent['header']) | ||
1571 | 128 | |||
1572 | 129 | _attach_part(appendmsg, msg) | ||
1573 | 130 | |||
1574 | 131 | |||
1575 | 132 | def multi_part_count(outermsg, newcount=None): | ||
1576 | 133 | """ | ||
1577 | 134 | Return the number of attachments to this MIMEMultipart by looking | ||
1578 | 135 | at its 'Number-Attachments' header. | ||
1579 | 136 | """ | ||
1580 | 137 | nfield = 'Number-Attachments' | ||
1581 | 138 | if nfield not in outermsg: | ||
1582 | 139 | outermsg[nfield] = "0" | ||
1583 | 140 | |||
1584 | 141 | if newcount != None: | ||
1585 | 142 | outermsg.replace_header(nfield, str(newcount)) | ||
1586 | 143 | |||
1587 | 144 | return(int(outermsg.get('Number-Attachments', 0))) | ||
1588 | 145 | |||
1589 | 146 | |||
1590 | 147 | def _attach_part(outermsg, part): | ||
1591 | 148 | """ | ||
1592 | 149 | Attach an part to an outer message. outermsg must be a MIMEMultipart. | ||
1593 | 150 | Modifies a header in outermsg to keep track of number of attachments. | ||
1594 | 151 | """ | ||
1595 | 152 | cur = multi_part_count(outermsg) | ||
1596 | 153 | if not part.get_filename(None): | ||
1597 | 154 | part.add_header('Content-Disposition', 'attachment', | ||
1598 | 155 | filename='part-%03d' % (cur + 1)) | ||
1599 | 156 | outermsg.attach(part) | ||
1600 | 157 | multi_part_count(outermsg, cur + 1) | ||
1601 | 158 | |||
1602 | 159 | |||
1603 | 160 | def type_from_startswith(payload, default=None): | ||
1604 | 161 | # slist is sorted longest first | ||
1605 | 162 | slist = sorted(starts_with_mappings.keys(), key=lambda e: 0 - len(e)) | ||
1606 | 163 | for sstr in slist: | ||
1607 | 164 | if payload.startswith(sstr): | ||
1608 | 165 | return(starts_with_mappings[sstr]) | ||
1609 | 166 | return default | ||
1610 | 167 | |||
1611 | 168 | |||
1612 | 169 | def process_includes(msg, appendmsg=None): | ||
1613 | 170 | if appendmsg == None: | ||
1614 | 171 | appendmsg = MIMEMultipart() | ||
1615 | 172 | |||
1616 | 173 | for part in msg.walk(): | ||
1617 | 174 | # multipart/* are just containers | ||
1618 | 175 | if part.get_content_maintype() == 'multipart': | ||
1619 | 176 | continue | ||
1620 | 177 | |||
1621 | 178 | ctype = None | ||
1622 | 179 | ctype_orig = part.get_content_type() | ||
1623 | 180 | |||
1624 | 181 | payload = part.get_payload(decode=True) | ||
1625 | 182 | |||
1626 | 183 | if ctype_orig in ("text/plain", "text/x-not-multipart"): | ||
1627 | 184 | ctype = type_from_startswith(payload) | ||
1628 | 185 | |||
1629 | 186 | if ctype is None: | ||
1630 | 187 | ctype = ctype_orig | ||
1631 | 188 | |||
1632 | 189 | if ctype in ('text/x-include-url', 'text/x-include-once-url'): | ||
1633 | 190 | do_include(payload, appendmsg) | ||
1634 | 191 | continue | ||
1635 | 192 | |||
1636 | 193 | if ctype == "text/cloud-config-archive": | ||
1637 | 194 | explode_cc_archive(payload, appendmsg) | ||
1638 | 195 | continue | ||
1639 | 196 | |||
1640 | 197 | if 'Content-Type' in msg: | ||
1641 | 198 | msg.replace_header('Content-Type', ctype) | ||
1642 | 199 | else: | ||
1643 | 200 | msg['Content-Type'] = ctype | ||
1644 | 201 | |||
1645 | 202 | _attach_part(appendmsg, part) | ||
1646 | 203 | |||
1647 | 204 | |||
1648 | 205 | def message_from_string(data, headers=None): | ||
1649 | 206 | if headers is None: | ||
1650 | 207 | headers = {} | ||
1651 | 208 | if "mime-version:" in data[0:4096].lower(): | ||
1652 | 209 | msg = email.message_from_string(data) | ||
1653 | 210 | for (key, val) in headers.items(): | ||
1654 | 211 | if key in msg: | ||
1655 | 212 | msg.replace_header(key, val) | ||
1656 | 213 | else: | ||
1657 | 214 | msg[key] = val | ||
1658 | 215 | else: | ||
1659 | 216 | mtype = headers.get("Content-Type", "text/x-not-multipart") | ||
1660 | 217 | maintype, subtype = mtype.split("/", 1) | ||
1661 | 218 | msg = MIMEBase(maintype, subtype, *headers) | ||
1662 | 219 | msg.set_payload(data) | ||
1663 | 220 | |||
1664 | 221 | return(msg) | ||
1665 | 222 | |||
1666 | 223 | |||
1667 | 224 | # this is heavily wasteful, reads through userdata string input | ||
1668 | 225 | def preprocess_userdata(data): | ||
1669 | 226 | newmsg = MIMEMultipart() | ||
1670 | 227 | process_includes(message_from_string(decomp_str(data)), newmsg) | ||
1671 | 228 | return(newmsg.as_string()) | ||
1672 | 229 | |||
1673 | 230 | |||
1674 | 231 | # callback is a function that will be called with (data, content_type, | ||
1675 | 232 | # filename, payload) | ||
1676 | 233 | def walk_userdata(istr, callback, data=None): | ||
1677 | 234 | partnum = 0 | ||
1678 | 235 | for part in message_from_string(istr).walk(): | ||
1679 | 236 | # multipart/* are just containers | ||
1680 | 237 | if part.get_content_maintype() == 'multipart': | ||
1681 | 238 | continue | ||
1682 | 239 | |||
1683 | 240 | ctype = part.get_content_type() | ||
1684 | 241 | if ctype is None: | ||
1685 | 242 | ctype = 'application/octet-stream' | ||
1686 | 243 | |||
1687 | 244 | filename = part.get_filename() | ||
1688 | 245 | if not filename: | ||
1689 | 246 | filename = 'part-%03d' % partnum | ||
1690 | 247 | |||
1691 | 248 | callback(data, ctype, filename, part.get_payload(decode=True)) | ||
1692 | 249 | |||
1693 | 250 | partnum = partnum + 1 | ||
1694 | 251 | |||
1695 | 252 | |||
1696 | 253 | if __name__ == "__main__": | ||
1697 | 254 | def main(): | ||
1698 | 255 | import sys | ||
1699 | 256 | data = decomp_str(file(sys.argv[1]).read()) | ||
1700 | 257 | newmsg = MIMEMultipart() | ||
1701 | 258 | process_includes(message_from_string(data), newmsg) | ||
1702 | 259 | print newmsg | ||
1703 | 260 | print "#found %s parts" % multi_part_count(newmsg) | ||
1704 | 261 | |||
1705 | 262 | main() | ||
1706 | 263 | 0 | ||
1707 | === modified file 'cloudinit/__init__.py' | |||
1708 | --- cloudinit/__init__.py 2012-06-28 17:10:56 +0000 | |||
1709 | +++ cloudinit/__init__.py 2012-07-06 21:16:18 +0000 | |||
1710 | @@ -1,11 +1,12 @@ | |||
1711 | 1 | # vi: ts=4 expandtab | 1 | # vi: ts=4 expandtab |
1712 | 2 | # | 2 | # |
1715 | 3 | # Common code for the EC2 initialisation scripts in Ubuntu | 3 | # Copyright (C) 2012 Canonical Ltd. |
1714 | 4 | # Copyright (C) 2008-2009 Canonical Ltd | ||
1716 | 5 | # Copyright (C) 2012 Hewlett-Packard Development Company, L.P. | 4 | # Copyright (C) 2012 Hewlett-Packard Development Company, L.P. |
1717 | 5 | # Copyright (C) 2012 Yahoo! Inc. | ||
1718 | 6 | # | 6 | # |
1720 | 7 | # Author: Soren Hansen <soren@canonical.com> | 7 | # Author: Scott Moser <scott.moser@canonical.com> |
1721 | 8 | # Author: Juerg Haefliger <juerg.haefliger@hp.com> | 8 | # Author: Juerg Haefliger <juerg.haefliger@hp.com> |
1722 | 9 | # Author: Joshua Harlow <harlowja@yahoo-inc.com> | ||
1723 | 9 | # | 10 | # |
1724 | 10 | # This program is free software: you can redistribute it and/or modify | 11 | # This program is free software: you can redistribute it and/or modify |
1725 | 11 | # it under the terms of the GNU General Public License version 3, as | 12 | # it under the terms of the GNU General Public License version 3, as |
1726 | @@ -18,650 +19,3 @@ | |||
1727 | 18 | # | 19 | # |
1728 | 19 | # You should have received a copy of the GNU General Public License | 20 | # You should have received a copy of the GNU General Public License |
1729 | 20 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | 21 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
1730 | 21 | # | ||
1731 | 22 | |||
1732 | 23 | varlibdir = '/var/lib/cloud' | ||
1733 | 24 | cur_instance_link = varlibdir + "/instance" | ||
1734 | 25 | boot_finished = cur_instance_link + "/boot-finished" | ||
1735 | 26 | system_config = '/etc/cloud/cloud.cfg' | ||
1736 | 27 | seeddir = varlibdir + "/seed" | ||
1737 | 28 | cfg_env_name = "CLOUD_CFG" | ||
1738 | 29 | |||
1739 | 30 | cfg_builtin = """ | ||
1740 | 31 | log_cfgs: [] | ||
1741 | 32 | datasource_list: ["NoCloud", "ConfigDrive", "OVF", "MAAS", "Ec2", "CloudStack"] | ||
1742 | 33 | def_log_file: /var/log/cloud-init.log | ||
1743 | 34 | syslog_fix_perms: syslog:adm | ||
1744 | 35 | """ | ||
1745 | 36 | logger_name = "cloudinit" | ||
1746 | 37 | |||
1747 | 38 | pathmap = { | ||
1748 | 39 | "handlers": "/handlers", | ||
1749 | 40 | "scripts": "/scripts", | ||
1750 | 41 | "sem": "/sem", | ||
1751 | 42 | "boothooks": "/boothooks", | ||
1752 | 43 | "userdata_raw": "/user-data.txt", | ||
1753 | 44 | "userdata": "/user-data.txt.i", | ||
1754 | 45 | "obj_pkl": "/obj.pkl", | ||
1755 | 46 | "cloud_config": "/cloud-config.txt", | ||
1756 | 47 | "data": "/data", | ||
1757 | 48 | None: "", | ||
1758 | 49 | } | ||
1759 | 50 | |||
1760 | 51 | per_instance = "once-per-instance" | ||
1761 | 52 | per_always = "always" | ||
1762 | 53 | per_once = "once" | ||
1763 | 54 | |||
1764 | 55 | parsed_cfgs = {} | ||
1765 | 56 | |||
1766 | 57 | import os | ||
1767 | 58 | |||
1768 | 59 | import cPickle | ||
1769 | 60 | import sys | ||
1770 | 61 | import os.path | ||
1771 | 62 | import errno | ||
1772 | 63 | import subprocess | ||
1773 | 64 | import yaml | ||
1774 | 65 | import logging | ||
1775 | 66 | import logging.config | ||
1776 | 67 | import StringIO | ||
1777 | 68 | import glob | ||
1778 | 69 | import traceback | ||
1779 | 70 | |||
1780 | 71 | import cloudinit.util as util | ||
1781 | 72 | |||
1782 | 73 | |||
1783 | 74 | class NullHandler(logging.Handler): | ||
1784 | 75 | def emit(self, record): | ||
1785 | 76 | pass | ||
1786 | 77 | |||
1787 | 78 | |||
1788 | 79 | log = logging.getLogger(logger_name) | ||
1789 | 80 | log.addHandler(NullHandler()) | ||
1790 | 81 | |||
1791 | 82 | |||
1792 | 83 | def logging_set_from_cfg_file(cfg_file=system_config): | ||
1793 | 84 | logging_set_from_cfg(util.get_base_cfg(cfg_file, cfg_builtin, parsed_cfgs)) | ||
1794 | 85 | |||
1795 | 86 | |||
1796 | 87 | def logging_set_from_cfg(cfg): | ||
1797 | 88 | log_cfgs = [] | ||
1798 | 89 | logcfg = util.get_cfg_option_str(cfg, "log_cfg", False) | ||
1799 | 90 | if logcfg: | ||
1800 | 91 | # if there is a 'logcfg' entry in the config, respect | ||
1801 | 92 | # it, it is the old keyname | ||
1802 | 93 | log_cfgs = [logcfg] | ||
1803 | 94 | elif "log_cfgs" in cfg: | ||
1804 | 95 | for cfg in cfg['log_cfgs']: | ||
1805 | 96 | if isinstance(cfg, list): | ||
1806 | 97 | log_cfgs.append('\n'.join(cfg)) | ||
1807 | 98 | else: | ||
1808 | 99 | log_cfgs.append() | ||
1809 | 100 | |||
1810 | 101 | if not len(log_cfgs): | ||
1811 | 102 | sys.stderr.write("Warning, no logging configured\n") | ||
1812 | 103 | return | ||
1813 | 104 | |||
1814 | 105 | for logcfg in log_cfgs: | ||
1815 | 106 | try: | ||
1816 | 107 | logging.config.fileConfig(StringIO.StringIO(logcfg)) | ||
1817 | 108 | return | ||
1818 | 109 | except: | ||
1819 | 110 | pass | ||
1820 | 111 | |||
1821 | 112 | raise Exception("no valid logging found\n") | ||
1822 | 113 | |||
1823 | 114 | |||
1824 | 115 | import cloudinit.DataSource as DataSource | ||
1825 | 116 | import cloudinit.UserDataHandler as UserDataHandler | ||
1826 | 117 | |||
1827 | 118 | |||
1828 | 119 | class CloudInit: | ||
1829 | 120 | cfg = None | ||
1830 | 121 | part_handlers = {} | ||
1831 | 122 | old_conffile = '/etc/ec2-init/ec2-config.cfg' | ||
1832 | 123 | ds_deps = [DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK] | ||
1833 | 124 | datasource = None | ||
1834 | 125 | cloud_config_str = '' | ||
1835 | 126 | datasource_name = '' | ||
1836 | 127 | |||
1837 | 128 | builtin_handlers = [] | ||
1838 | 129 | |||
1839 | 130 | def __init__(self, ds_deps=None, sysconfig=system_config): | ||
1840 | 131 | self.builtin_handlers = [ | ||
1841 | 132 | ['text/x-shellscript', self.handle_user_script, per_always], | ||
1842 | 133 | ['text/cloud-config', self.handle_cloud_config, per_always], | ||
1843 | 134 | ['text/upstart-job', self.handle_upstart_job, per_instance], | ||
1844 | 135 | ['text/cloud-boothook', self.handle_cloud_boothook, per_always], | ||
1845 | 136 | ] | ||
1846 | 137 | |||
1847 | 138 | if ds_deps != None: | ||
1848 | 139 | self.ds_deps = ds_deps | ||
1849 | 140 | |||
1850 | 141 | self.sysconfig = sysconfig | ||
1851 | 142 | |||
1852 | 143 | self.cfg = self.read_cfg() | ||
1853 | 144 | |||
1854 | 145 | def read_cfg(self): | ||
1855 | 146 | if self.cfg: | ||
1856 | 147 | return(self.cfg) | ||
1857 | 148 | |||
1858 | 149 | try: | ||
1859 | 150 | conf = util.get_base_cfg(self.sysconfig, cfg_builtin, parsed_cfgs) | ||
1860 | 151 | except Exception: | ||
1861 | 152 | conf = get_builtin_cfg() | ||
1862 | 153 | |||
1863 | 154 | # support reading the old ConfigObj format file and merging | ||
1864 | 155 | # it into the yaml dictionary | ||
1865 | 156 | try: | ||
1866 | 157 | from configobj import ConfigObj | ||
1867 | 158 | oldcfg = ConfigObj(self.old_conffile) | ||
1868 | 159 | if oldcfg is None: | ||
1869 | 160 | oldcfg = {} | ||
1870 | 161 | conf = util.mergedict(conf, oldcfg) | ||
1871 | 162 | except: | ||
1872 | 163 | pass | ||
1873 | 164 | |||
1874 | 165 | return(conf) | ||
1875 | 166 | |||
1876 | 167 | def restore_from_cache(self): | ||
1877 | 168 | try: | ||
1878 | 169 | # we try to restore from a current link and static path | ||
1879 | 170 | # by using the instance link, if purge_cache was called | ||
1880 | 171 | # the file wont exist | ||
1881 | 172 | cache = get_ipath_cur('obj_pkl') | ||
1882 | 173 | f = open(cache, "rb") | ||
1883 | 174 | data = cPickle.load(f) | ||
1884 | 175 | f.close() | ||
1885 | 176 | self.datasource = data | ||
1886 | 177 | return True | ||
1887 | 178 | except: | ||
1888 | 179 | return False | ||
1889 | 180 | |||
1890 | 181 | def write_to_cache(self): | ||
1891 | 182 | cache = self.get_ipath("obj_pkl") | ||
1892 | 183 | try: | ||
1893 | 184 | os.makedirs(os.path.dirname(cache)) | ||
1894 | 185 | except OSError as e: | ||
1895 | 186 | if e.errno != errno.EEXIST: | ||
1896 | 187 | return False | ||
1897 | 188 | |||
1898 | 189 | try: | ||
1899 | 190 | f = open(cache, "wb") | ||
1900 | 191 | cPickle.dump(self.datasource, f) | ||
1901 | 192 | f.close() | ||
1902 | 193 | os.chmod(cache, 0400) | ||
1903 | 194 | except: | ||
1904 | 195 | raise | ||
1905 | 196 | |||
1906 | 197 | def get_data_source(self): | ||
1907 | 198 | if self.datasource is not None: | ||
1908 | 199 | return True | ||
1909 | 200 | |||
1910 | 201 | if self.restore_from_cache(): | ||
1911 | 202 | log.debug("restored from cache type %s" % self.datasource) | ||
1912 | 203 | return True | ||
1913 | 204 | |||
1914 | 205 | cfglist = self.cfg['datasource_list'] | ||
1915 | 206 | dslist = list_sources(cfglist, self.ds_deps) | ||
1916 | 207 | dsnames = [f.__name__ for f in dslist] | ||
1917 | 208 | |||
1918 | 209 | log.debug("searching for data source in %s" % dsnames) | ||
1919 | 210 | for cls in dslist: | ||
1920 | 211 | ds = cls.__name__ | ||
1921 | 212 | try: | ||
1922 | 213 | s = cls(sys_cfg=self.cfg) | ||
1923 | 214 | if s.get_data(): | ||
1924 | 215 | self.datasource = s | ||
1925 | 216 | self.datasource_name = ds | ||
1926 | 217 | log.debug("found data source %s" % ds) | ||
1927 | 218 | return True | ||
1928 | 219 | except Exception as e: | ||
1929 | 220 | log.warn("get_data of %s raised %s" % (ds, e)) | ||
1930 | 221 | util.logexc(log) | ||
1931 | 222 | msg = "Did not find data source. searched classes: %s" % dsnames | ||
1932 | 223 | log.debug(msg) | ||
1933 | 224 | raise DataSourceNotFoundException(msg) | ||
1934 | 225 | |||
1935 | 226 | def set_cur_instance(self): | ||
1936 | 227 | try: | ||
1937 | 228 | os.unlink(cur_instance_link) | ||
1938 | 229 | except OSError as e: | ||
1939 | 230 | if e.errno != errno.ENOENT: | ||
1940 | 231 | raise | ||
1941 | 232 | |||
1942 | 233 | iid = self.get_instance_id() | ||
1943 | 234 | os.symlink("./instances/%s" % iid, cur_instance_link) | ||
1944 | 235 | idir = self.get_ipath() | ||
1945 | 236 | dlist = [] | ||
1946 | 237 | for d in ["handlers", "scripts", "sem"]: | ||
1947 | 238 | dlist.append("%s/%s" % (idir, d)) | ||
1948 | 239 | |||
1949 | 240 | util.ensure_dirs(dlist) | ||
1950 | 241 | |||
1951 | 242 | ds = "%s: %s\n" % (self.datasource.__class__, str(self.datasource)) | ||
1952 | 243 | dp = self.get_cpath('data') | ||
1953 | 244 | util.write_file("%s/%s" % (idir, 'datasource'), ds) | ||
1954 | 245 | util.write_file("%s/%s" % (dp, 'previous-datasource'), ds) | ||
1955 | 246 | util.write_file("%s/%s" % (dp, 'previous-instance-id'), "%s\n" % iid) | ||
1956 | 247 | |||
1957 | 248 | def get_userdata(self): | ||
1958 | 249 | return(self.datasource.get_userdata()) | ||
1959 | 250 | |||
1960 | 251 | def get_userdata_raw(self): | ||
1961 | 252 | return(self.datasource.get_userdata_raw()) | ||
1962 | 253 | |||
1963 | 254 | def get_instance_id(self): | ||
1964 | 255 | return(self.datasource.get_instance_id()) | ||
1965 | 256 | |||
1966 | 257 | def update_cache(self): | ||
1967 | 258 | self.write_to_cache() | ||
1968 | 259 | self.store_userdata() | ||
1969 | 260 | |||
1970 | 261 | def store_userdata(self): | ||
1971 | 262 | util.write_file(self.get_ipath('userdata_raw'), | ||
1972 | 263 | self.datasource.get_userdata_raw(), 0600) | ||
1973 | 264 | util.write_file(self.get_ipath('userdata'), | ||
1974 | 265 | self.datasource.get_userdata(), 0600) | ||
1975 | 266 | |||
1976 | 267 | def sem_getpath(self, name, freq): | ||
1977 | 268 | if freq == 'once-per-instance': | ||
1978 | 269 | return("%s/%s" % (self.get_ipath("sem"), name)) | ||
1979 | 270 | return("%s/%s.%s" % (get_cpath("sem"), name, freq)) | ||
1980 | 271 | |||
1981 | 272 | def sem_has_run(self, name, freq): | ||
1982 | 273 | if freq == per_always: | ||
1983 | 274 | return False | ||
1984 | 275 | semfile = self.sem_getpath(name, freq) | ||
1985 | 276 | if os.path.exists(semfile): | ||
1986 | 277 | return True | ||
1987 | 278 | return False | ||
1988 | 279 | |||
1989 | 280 | def sem_acquire(self, name, freq): | ||
1990 | 281 | from time import time | ||
1991 | 282 | semfile = self.sem_getpath(name, freq) | ||
1992 | 283 | |||
1993 | 284 | try: | ||
1994 | 285 | os.makedirs(os.path.dirname(semfile)) | ||
1995 | 286 | except OSError as e: | ||
1996 | 287 | if e.errno != errno.EEXIST: | ||
1997 | 288 | raise e | ||
1998 | 289 | |||
1999 | 290 | if os.path.exists(semfile) and freq != per_always: | ||
2000 | 291 | return False | ||
2001 | 292 | |||
2002 | 293 | # race condition | ||
2003 | 294 | try: | ||
2004 | 295 | f = open(semfile, "w") | ||
2005 | 296 | f.write("%s\n" % str(time())) | ||
2006 | 297 | f.close() | ||
2007 | 298 | except: | ||
2008 | 299 | return(False) | ||
2009 | 300 | return(True) | ||
2010 | 301 | |||
2011 | 302 | def sem_clear(self, name, freq): | ||
2012 | 303 | semfile = self.sem_getpath(name, freq) | ||
2013 | 304 | try: | ||
2014 | 305 | os.unlink(semfile) | ||
2015 | 306 | except OSError as e: | ||
2016 | 307 | if e.errno != errno.ENOENT: | ||
2017 | 308 | return False | ||
2018 | 309 | |||
2019 | 310 | return True | ||
2020 | 311 | |||
2021 | 312 | # acquire lock on 'name' for given 'freq' | ||
2022 | 313 | # if that does not exist, then call 'func' with given 'args' | ||
2023 | 314 | # if 'clear_on_fail' is True and func throws an exception | ||
2024 | 315 | # then remove the lock (so it would run again) | ||
2025 | 316 | def sem_and_run(self, semname, freq, func, args=None, clear_on_fail=False): | ||
2026 | 317 | if args is None: | ||
2027 | 318 | args = [] | ||
2028 | 319 | if self.sem_has_run(semname, freq): | ||
2029 | 320 | log.debug("%s already ran %s", semname, freq) | ||
2030 | 321 | return False | ||
2031 | 322 | try: | ||
2032 | 323 | if not self.sem_acquire(semname, freq): | ||
2033 | 324 | raise Exception("Failed to acquire lock on %s" % semname) | ||
2034 | 325 | |||
2035 | 326 | func(*args) | ||
2036 | 327 | except: | ||
2037 | 328 | if clear_on_fail: | ||
2038 | 329 | self.sem_clear(semname, freq) | ||
2039 | 330 | raise | ||
2040 | 331 | |||
2041 | 332 | return True | ||
2042 | 333 | |||
2043 | 334 | # get_ipath : get the instance path for a name in pathmap | ||
2044 | 335 | # (/var/lib/cloud/instances/<instance>/name)<name>) | ||
2045 | 336 | def get_ipath(self, name=None): | ||
2046 | 337 | return("%s/instances/%s%s" | ||
2047 | 338 | % (varlibdir, self.get_instance_id(), pathmap[name])) | ||
2048 | 339 | |||
2049 | 340 | def consume_userdata(self, frequency=per_instance): | ||
2050 | 341 | self.get_userdata() | ||
2051 | 342 | data = self | ||
2052 | 343 | |||
2053 | 344 | cdir = get_cpath("handlers") | ||
2054 | 345 | idir = self.get_ipath("handlers") | ||
2055 | 346 | |||
2056 | 347 | # add the path to the plugins dir to the top of our list for import | ||
2057 | 348 | # instance dir should be read before cloud-dir | ||
2058 | 349 | sys.path.insert(0, cdir) | ||
2059 | 350 | sys.path.insert(0, idir) | ||
2060 | 351 | |||
2061 | 352 | part_handlers = {} | ||
2062 | 353 | # add handlers in cdir | ||
2063 | 354 | for fname in glob.glob("%s/*.py" % cdir): | ||
2064 | 355 | if not os.path.isfile(fname): | ||
2065 | 356 | continue | ||
2066 | 357 | modname = os.path.basename(fname)[0:-3] | ||
2067 | 358 | try: | ||
2068 | 359 | mod = __import__(modname) | ||
2069 | 360 | handler_register(mod, part_handlers, data, frequency) | ||
2070 | 361 | log.debug("added handler for [%s] from %s" % (mod.list_types(), | ||
2071 | 362 | fname)) | ||
2072 | 363 | except: | ||
2073 | 364 | log.warn("failed to initialize handler in %s" % fname) | ||
2074 | 365 | util.logexc(log) | ||
2075 | 366 | |||
2076 | 367 | # add the internal handers if their type hasn't been already claimed | ||
2077 | 368 | for (btype, bhand, bfreq) in self.builtin_handlers: | ||
2078 | 369 | if btype in part_handlers: | ||
2079 | 370 | continue | ||
2080 | 371 | handler_register(InternalPartHandler(bhand, [btype], bfreq), | ||
2081 | 372 | part_handlers, data, frequency) | ||
2082 | 373 | |||
2083 | 374 | # walk the data | ||
2084 | 375 | pdata = {'handlers': part_handlers, 'handlerdir': idir, | ||
2085 | 376 | 'data': data, 'frequency': frequency} | ||
2086 | 377 | UserDataHandler.walk_userdata(self.get_userdata(), | ||
2087 | 378 | partwalker_callback, data=pdata) | ||
2088 | 379 | |||
2089 | 380 | # give callbacks opportunity to finalize | ||
2090 | 381 | called = [] | ||
2091 | 382 | for (_mtype, mod) in part_handlers.iteritems(): | ||
2092 | 383 | if mod in called: | ||
2093 | 384 | continue | ||
2094 | 385 | handler_call_end(mod, data, frequency) | ||
2095 | 386 | |||
2096 | 387 | def handle_user_script(self, _data, ctype, filename, payload, _frequency): | ||
2097 | 388 | if ctype == "__end__": | ||
2098 | 389 | return | ||
2099 | 390 | if ctype == "__begin__": | ||
2100 | 391 | # maybe delete existing things here | ||
2101 | 392 | return | ||
2102 | 393 | |||
2103 | 394 | filename = filename.replace(os.sep, '_') | ||
2104 | 395 | scriptsdir = get_ipath_cur('scripts') | ||
2105 | 396 | util.write_file("%s/%s" % | ||
2106 | 397 | (scriptsdir, filename), util.dos2unix(payload), 0700) | ||
2107 | 398 | |||
2108 | 399 | def handle_upstart_job(self, _data, ctype, filename, payload, frequency): | ||
2109 | 400 | # upstart jobs are only written on the first boot | ||
2110 | 401 | if frequency != per_instance: | ||
2111 | 402 | return | ||
2112 | 403 | |||
2113 | 404 | if ctype == "__end__" or ctype == "__begin__": | ||
2114 | 405 | return | ||
2115 | 406 | if not filename.endswith(".conf"): | ||
2116 | 407 | filename = filename + ".conf" | ||
2117 | 408 | |||
2118 | 409 | util.write_file("%s/%s" % ("/etc/init", filename), | ||
2119 | 410 | util.dos2unix(payload), 0644) | ||
2120 | 411 | |||
2121 | 412 | def handle_cloud_config(self, _data, ctype, filename, payload, _frequency): | ||
2122 | 413 | if ctype == "__begin__": | ||
2123 | 414 | self.cloud_config_str = "" | ||
2124 | 415 | return | ||
2125 | 416 | if ctype == "__end__": | ||
2126 | 417 | cloud_config = self.get_ipath("cloud_config") | ||
2127 | 418 | util.write_file(cloud_config, self.cloud_config_str, 0600) | ||
2128 | 419 | |||
2129 | 420 | ## this could merge the cloud config with the system config | ||
2130 | 421 | ## for now, not doing this as it seems somewhat circular | ||
2131 | 422 | ## as CloudConfig does that also, merging it with this cfg | ||
2132 | 423 | ## | ||
2133 | 424 | # ccfg = yaml.safe_load(self.cloud_config_str) | ||
2134 | 425 | # if ccfg is None: ccfg = {} | ||
2135 | 426 | # self.cfg = util.mergedict(ccfg, self.cfg) | ||
2136 | 427 | |||
2137 | 428 | return | ||
2138 | 429 | |||
2139 | 430 | self.cloud_config_str += "\n#%s\n%s" % (filename, payload) | ||
2140 | 431 | |||
2141 | 432 | def handle_cloud_boothook(self, _data, ctype, filename, payload, | ||
2142 | 433 | _frequency): | ||
2143 | 434 | if ctype == "__end__": | ||
2144 | 435 | return | ||
2145 | 436 | if ctype == "__begin__": | ||
2146 | 437 | return | ||
2147 | 438 | |||
2148 | 439 | filename = filename.replace(os.sep, '_') | ||
2149 | 440 | payload = util.dos2unix(payload) | ||
2150 | 441 | prefix = "#cloud-boothook" | ||
2151 | 442 | start = 0 | ||
2152 | 443 | if payload.startswith(prefix): | ||
2153 | 444 | start = len(prefix) + 1 | ||
2154 | 445 | |||
2155 | 446 | boothooks_dir = self.get_ipath("boothooks") | ||
2156 | 447 | filepath = "%s/%s" % (boothooks_dir, filename) | ||
2157 | 448 | util.write_file(filepath, payload[start:], 0700) | ||
2158 | 449 | try: | ||
2159 | 450 | env = os.environ.copy() | ||
2160 | 451 | env['INSTANCE_ID'] = self.datasource.get_instance_id() | ||
2161 | 452 | subprocess.check_call([filepath], env=env) | ||
2162 | 453 | except subprocess.CalledProcessError as e: | ||
2163 | 454 | log.error("boothooks script %s returned %i" % | ||
2164 | 455 | (filepath, e.returncode)) | ||
2165 | 456 | except Exception as e: | ||
2166 | 457 | log.error("boothooks unknown exception %s when running %s" % | ||
2167 | 458 | (e, filepath)) | ||
2168 | 459 | |||
2169 | 460 | def get_public_ssh_keys(self): | ||
2170 | 461 | return(self.datasource.get_public_ssh_keys()) | ||
2171 | 462 | |||
2172 | 463 | def get_locale(self): | ||
2173 | 464 | return(self.datasource.get_locale()) | ||
2174 | 465 | |||
2175 | 466 | def get_mirror(self): | ||
2176 | 467 | return(self.datasource.get_local_mirror()) | ||
2177 | 468 | |||
2178 | 469 | def get_hostname(self, fqdn=False): | ||
2179 | 470 | return(self.datasource.get_hostname(fqdn=fqdn)) | ||
2180 | 471 | |||
2181 | 472 | def device_name_to_device(self, name): | ||
2182 | 473 | return(self.datasource.device_name_to_device(name)) | ||
2183 | 474 | |||
2184 | 475 | # I really don't know if this should be here or not, but | ||
2185 | 476 | # I needed it in cc_update_hostname, where that code had a valid 'cloud' | ||
2186 | 477 | # reference, but did not have a cloudinit handle | ||
2187 | 478 | # (ie, no cloudinit.get_cpath()) | ||
2188 | 479 | def get_cpath(self, name=None): | ||
2189 | 480 | return(get_cpath(name)) | ||
2190 | 481 | |||
2191 | 482 | |||
def initfs():
    """Create the /var/lib/cloud skeleton and prepare the log file.

    Ensures the standard sub-directories exist, touches the configured
    def_log_file and, when syslog_fix_perms ("user:group") is set,
    chowns the log so the logging daemon can write to it.
    """
    subds = ['scripts/per-instance', 'scripts/per-once', 'scripts/per-boot',
             'seed', 'instances', 'handlers', 'sem', 'data']
    util.ensure_dirs(["%s/%s" % (varlibdir, subd) for subd in subds])

    cfg = util.get_base_cfg(system_config, cfg_builtin, parsed_cfgs)
    log_file = util.get_cfg_option_str(cfg, 'def_log_file', None)
    perms = util.get_cfg_option_str(cfg, 'syslog_fix_perms', None)
    if log_file:
        # Touch (create if needed) without truncating existing content.
        fp = open(log_file, "ab")
        fp.close()
        if perms:
            (u, g) = perms.split(':', 1)
            # "-1" / "None" mean: leave that part of the ownership alone.
            if u in ("-1", "None"):
                u = None
            if g in ("-1", "None"):
                g = None
            util.chownbyname(log_file, u, g)
2213 | 504 | |||
2214 | 505 | |||
def purge_cache(rmcur=True):
    """Remove the boot-finished marker (and, when rmcur, the
    current-instance link).

    Returns False on the first removal that fails for a reason other
    than the file already being absent.
    """
    rmlist = [boot_finished]
    if rmcur:
        rmlist.append(cur_instance_link)
    for f in rmlist:
        try:
            os.unlink(f)
        except OSError as e:
            # Already gone counts as success for a purge.
            if e.errno == errno.ENOENT:
                continue
            return False
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt and
            # SystemExit still propagate.
            return False
    return True
2229 | 520 | |||
2230 | 521 | |||
# get_ipath_cur: get the current instance path for an item
# (resolves through the /var/lib/cloud/instance symlink)
def get_ipath_cur(name=None):
    return "%s/%s%s" % (varlibdir, "instance", pathmap[name])


# get_cpath: get the "clouddir" (/var/lib/cloud/<name>)
# for a name in dirmap
def get_cpath(name=None):
    return "%s%s" % (varlibdir, pathmap[name])
2240 | 531 | |||
2241 | 532 | |||
def get_base_cfg(cfg_path=None):
    """Parsed system configuration (defaults to the system_config path)."""
    if cfg_path is None:
        cfg_path = system_config
    return util.get_base_cfg(cfg_path, cfg_builtin, parsed_cfgs)


def get_builtin_cfg():
    """The builtin (compiled-in) configuration as a dictionary."""
    return yaml.safe_load(cfg_builtin)
2250 | 541 | |||
2251 | 542 | |||
class DataSourceNotFoundException(Exception):
    """Raised when no configured datasource claims this instance."""
    pass
2254 | 545 | |||
2255 | 546 | |||
def list_sources(cfg_list, depends):
    """Datasource classes from cfg_list whose dependencies are satisfied."""
    return DataSource.list_sources(cfg_list, depends, ["cloudinit", ""])
2258 | 549 | |||
2259 | 550 | |||
def handler_register(mod, part_handlers, data, frequency=per_instance):
    """Claim mod's mime types in part_handlers and send it __begin__."""
    # Handlers without an explicit version use the v1 calling convention.
    if not hasattr(mod, "handler_version"):
        setattr(mod, "handler_version", 1)

    for mtype in mod.list_types():
        part_handlers[mtype] = mod

    handler_call_begin(mod, data, frequency)
    return mod
2269 | 560 | |||
2270 | 561 | |||
def handler_call_begin(mod, data, frequency):
    """Send the synthetic __begin__ part to a handler."""
    handler_handle_part(mod, data, "__begin__", None, None, frequency)


def handler_call_end(mod, data, frequency):
    """Send the synthetic __end__ part to a handler."""
    handler_handle_part(mod, data, "__end__", None, None, frequency)
2277 | 568 | |||
2278 | 569 | |||
def handler_handle_part(mod, data, ctype, filename, payload, frequency):
    """Invoke mod.handle_part when its frequency allows it to run now.

    v1 handlers do not receive the frequency argument. Exceptions are
    logged (and printed to stderr) but never propagated to the walker.
    """
    modfreq = getattr(mod, "frequency", per_instance)
    run_now = (modfreq == per_always or
               (frequency == per_instance and modfreq == per_instance))
    if not run_now:
        return
    try:
        if mod.handler_version == 1:
            mod.handle_part(data, ctype, filename, payload)
        else:
            mod.handle_part(data, ctype, filename, payload, frequency)
    except:
        util.logexc(log)
        traceback.print_exc(file=sys.stderr)
2293 | 584 | |||
2294 | 585 | |||
2295 | 586 | def partwalker_handle_handler(pdata, _ctype, _filename, payload): | ||
2296 | 587 | curcount = pdata['handlercount'] | ||
2297 | 588 | modname = 'part-handler-%03d' % curcount | ||
2298 | 589 | frequency = pdata['frequency'] | ||
2299 | 590 | |||
2300 | 591 | modfname = modname + ".py" | ||
2301 | 592 | util.write_file("%s/%s" % (pdata['handlerdir'], modfname), payload, 0600) | ||
2302 | 593 | |||
2303 | 594 | try: | ||
2304 | 595 | mod = __import__(modname) | ||
2305 | 596 | handler_register(mod, pdata['handlers'], pdata['data'], frequency) | ||
2306 | 597 | pdata['handlercount'] = curcount + 1 | ||
2307 | 598 | except: | ||
2308 | 599 | util.logexc(log) | ||
2309 | 600 | traceback.print_exc(file=sys.stderr) | ||
2310 | 601 | |||
2311 | 602 | |||
def partwalker_callback(pdata, ctype, filename, payload):
    """Dispatch one user-data part to its registered handler.

    text/part-handler parts register new handlers on the fly; parts with
    no registered handler are ignored (with a warning for non-multipart
    user-data that nothing claimed).
    """
    if ctype == "text/part-handler":
        if 'handlercount' not in pdata:
            pdata['handlercount'] = 0
        partwalker_handle_handler(pdata, ctype, filename, payload)
        return
    if ctype not in pdata['handlers'] and payload:
        if ctype == "text/x-not-multipart":
            # Extract the first line or 24 bytes for displaying in the log
            start = payload.split("\n", 1)[0][:24]
            # 'start' is always a prefix of 'payload', so this explicit
            # length test is equivalent to the old lexicographic
            # "start < payload" but states the intent (truncation) clearly.
            if len(start) < len(payload):
                details = "starting '%s...'" % start.encode("string-escape")
            else:
                details = repr(payload)
            log.warning("Unhandled non-multipart userdata %s", details)
        return
    handler_handle_part(pdata['handlers'][ctype], pdata['data'],
                        ctype, filename, payload, pdata['frequency'])
2331 | 622 | |||
2332 | 623 | |||
class InternalPartHandler:
    """Adapter that exposes a bound method as a part-handler module."""
    freq = per_instance
    mtypes = []
    handler_version = 1
    handler = None

    def __init__(self, handler, mtypes, frequency, version=2):
        self.handler = handler
        self.mtypes = mtypes
        self.frequency = frequency
        self.handler_version = version

    def __repr__(self):
        return "InternalPartHandler: [%s]" % self.mtypes

    def list_types(self):
        """Mime types this handler claims."""
        return self.mtypes

    def handle_part(self, data, ctype, filename, payload, frequency):
        """Delegate straight to the wrapped callable."""
        return self.handler(data, ctype, filename, payload, frequency)
2353 | 644 | |||
2354 | 645 | |||
def get_cmdline_url(names=('cloud-config-url', 'url'),
                    starts="#cloud-config", cmdline=None):
    """Look for a cloud-config URL on the kernel command line.

    Returns (key, url, contents). contents is None when the fetched data
    does not begin with 'starts'; all three are None when no key from
    'names' is present on the command line.
    """
    if cmdline is None:
        cmdline = util.get_cmdline()

    data = util.keyval_str_to_dict(cmdline)
    url = None
    key = None
    # First matching key wins.
    for key in names:
        if key in data:
            url = data[key]
            break
    # Identity tests (were '== None') per standard Python practice.
    if url is None:
        return (None, None, None)

    contents = util.readurl(url)
    if contents.startswith(starts):
        return (key, url, contents)
    return (key, url, None)
2377 | 668 | 22 | ||
2378 | === added file 'cloudinit/cloud.py' | |||
2379 | --- cloudinit/cloud.py 1970-01-01 00:00:00 +0000 | |||
2380 | +++ cloudinit/cloud.py 2012-07-06 21:16:18 +0000 | |||
2381 | @@ -0,0 +1,101 @@ | |||
2382 | 1 | # vi: ts=4 expandtab | ||
2383 | 2 | # | ||
2384 | 3 | # Copyright (C) 2012 Canonical Ltd. | ||
2385 | 4 | # Copyright (C) 2012 Hewlett-Packard Development Company, L.P. | ||
2386 | 5 | # Copyright (C) 2012 Yahoo! Inc. | ||
2387 | 6 | # | ||
2388 | 7 | # Author: Scott Moser <scott.moser@canonical.com> | ||
2389 | 8 | # Author: Juerg Haefliger <juerg.haefliger@hp.com> | ||
2390 | 9 | # Author: Joshua Harlow <harlowja@yahoo-inc.com> | ||
2391 | 10 | # | ||
2392 | 11 | # This program is free software: you can redistribute it and/or modify | ||
2393 | 12 | # it under the terms of the GNU General Public License version 3, as | ||
2394 | 13 | # published by the Free Software Foundation. | ||
2395 | 14 | # | ||
2396 | 15 | # This program is distributed in the hope that it will be useful, | ||
2397 | 16 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
2398 | 17 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
2399 | 18 | # GNU General Public License for more details. | ||
2400 | 19 | # | ||
2401 | 20 | # You should have received a copy of the GNU General Public License | ||
2402 | 21 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
2403 | 22 | |||
2404 | 23 | import copy | ||
2405 | 24 | import os | ||
2406 | 25 | |||
2407 | 26 | from cloudinit import log as logging | ||
2408 | 27 | |||
2409 | 28 | LOG = logging.getLogger(__name__) | ||
2410 | 29 | |||
2411 | 30 | # This class is the high level wrapper that provides | ||
2412 | 31 | # access to cloud-init objects without exposing the stage objects | ||
2413 | 32 | # to handler and or module manipulation. It allows for cloud | ||
2414 | 33 | # init to restrict what those types of user facing code may see | ||
2415 | 34 | # and or adjust (which helps avoid code messing with each other) | ||
2416 | 35 | # | ||
2417 | 36 | # It also provides util functions that avoid having to know | ||
2418 | 37 | # how to get a certain member from this submembers as well | ||
2419 | 38 | # as providing a backwards compatible object that can be maintained | ||
2420 | 39 | # while the stages/other objects can be worked on independently... | ||
2421 | 40 | |||
2422 | 41 | |||
class Cloud(object):
    """High level wrapper handed to config modules and part handlers.

    Hides the stage objects from user-facing code so that cloud-init
    controls what such code may see and adjust, while providing
    convenience accessors onto the datasource and paths members.
    """

    def __init__(self, datasource, paths, cfg, distro, runners):
        self.datasource = datasource
        self.paths = paths
        self.distro = distro
        self._cfg = cfg
        self._runners = runners

    def cycle_logging(self):
        """Re-setup logging; useful after a module reconfigures it."""
        logging.resetLogging()
        logging.setupLogging(self.cfg)

    @property
    def cfg(self):
        # Hand out a copy so callers cannot indirectly mutate our config.
        return copy.deepcopy(self._cfg)

    def run(self, name, functor, args, freq=None, clear_on_fail=False):
        """Run functor under the named per-frequency semaphore."""
        return self._runners.run(name, functor, args, freq, clear_on_fail)

    def get_template_filename(self, name):
        """Full path of the named template, or None (with a warning)."""
        fn = self.paths.template_tpl % (name)
        if not os.path.isfile(fn):
            LOG.warn("No template found at %s for template named %s",
                     fn, name)
            return None
        return fn

    # The rest of these are just useful proxies.
    def get_userdata(self):
        return self.datasource.get_userdata()

    def get_instance_id(self):
        return self.datasource.get_instance_id()

    def get_public_ssh_keys(self):
        return self.datasource.get_public_ssh_keys()

    def get_locale(self):
        return self.datasource.get_locale()

    def get_local_mirror(self):
        return self.datasource.get_local_mirror()

    def get_hostname(self, fqdn=False):
        return self.datasource.get_hostname(fqdn=fqdn)

    def device_name_to_device(self, name):
        return self.datasource.device_name_to_device(name)

    def get_ipath_cur(self, name=None):
        return self.paths.get_ipath_cur(name)

    def get_cpath(self, name=None):
        return self.paths.get_cpath(name)

    def get_ipath(self, name=None):
        return self.paths.get_ipath(name)
2483 | 0 | 102 | ||
2484 | === renamed directory 'cloudinit/CloudConfig' => 'cloudinit/config' | |||
2485 | === modified file 'cloudinit/config/__init__.py' | |||
2486 | --- cloudinit/CloudConfig/__init__.py 2012-06-13 13:11:27 +0000 | |||
2487 | +++ cloudinit/config/__init__.py 2012-07-06 21:16:18 +0000 | |||
2488 | @@ -19,256 +19,38 @@ | |||
2489 | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
2490 | 20 | # | 20 | # |
2491 | 21 | 21 | ||
2615 | 22 | import yaml | 22 | from cloudinit.settings import (PER_INSTANCE, FREQUENCIES) |
2616 | 23 | import cloudinit | 23 | |
2617 | 24 | import cloudinit.util as util | 24 | from cloudinit import log as logging |
2618 | 25 | import sys | 25 | |
2619 | 26 | import traceback | 26 | LOG = logging.getLogger(__name__) |
2620 | 27 | import os | 27 | |
2621 | 28 | import subprocess | 28 | # This prefix is used to make it less |
2622 | 29 | import time | 29 | # of a chance that when importing |
2623 | 30 | 30 | # we will not find something else with the same | |
2624 | 31 | per_instance = cloudinit.per_instance | 31 | # name in the lookup path... |
2625 | 32 | per_always = cloudinit.per_always | 32 | MOD_PREFIX = "cc_" |
2626 | 33 | per_once = cloudinit.per_once | 33 | |
2627 | 34 | 34 | ||
2628 | 35 | 35 | def form_module_name(name): | |
2629 | 36 | class CloudConfig(): | 36 | canon_name = name.replace("-", "_") |
2630 | 37 | cfgfile = None | 37 | if canon_name.lower().endswith(".py"): |
2631 | 38 | cfg = None | 38 | canon_name = canon_name[0:(len(canon_name) - 3)] |
2632 | 39 | 39 | canon_name = canon_name.strip() | |
2633 | 40 | def __init__(self, cfgfile, cloud=None, ds_deps=None): | 40 | if not canon_name: |
2634 | 41 | if cloud == None: | 41 | return None |
2635 | 42 | self.cloud = cloudinit.CloudInit(ds_deps) | 42 | if not canon_name.startswith(MOD_PREFIX): |
2636 | 43 | self.cloud.get_data_source() | 43 | canon_name = '%s%s' % (MOD_PREFIX, canon_name) |
2637 | 44 | else: | 44 | return canon_name |
2638 | 45 | self.cloud = cloud | 45 | |
2639 | 46 | self.cfg = self.get_config_obj(cfgfile) | 46 | |
2640 | 47 | 47 | def fixup_module(mod, def_freq=PER_INSTANCE): | |
2641 | 48 | def get_config_obj(self, cfgfile): | 48 | if not hasattr(mod, 'frequency'): |
2642 | 49 | try: | 49 | setattr(mod, 'frequency', def_freq) |
2520 | 50 | cfg = util.read_conf(cfgfile) | ||
2521 | 51 | except: | ||
2522 | 52 | # TODO: this 'log' could/should be passed in | ||
2523 | 53 | cloudinit.log.critical("Failed loading of cloud config '%s'. " | ||
2524 | 54 | "Continuing with empty config\n" % cfgfile) | ||
2525 | 55 | cloudinit.log.debug(traceback.format_exc() + "\n") | ||
2526 | 56 | cfg = None | ||
2527 | 57 | if cfg is None: | ||
2528 | 58 | cfg = {} | ||
2529 | 59 | |||
2530 | 60 | try: | ||
2531 | 61 | ds_cfg = self.cloud.datasource.get_config_obj() | ||
2532 | 62 | except: | ||
2533 | 63 | ds_cfg = {} | ||
2534 | 64 | |||
2535 | 65 | cfg = util.mergedict(cfg, ds_cfg) | ||
2536 | 66 | return(util.mergedict(cfg, self.cloud.cfg)) | ||
2537 | 67 | |||
2538 | 68 | def handle(self, name, args, freq=None): | ||
2539 | 69 | try: | ||
2540 | 70 | mod = __import__("cc_" + name.replace("-", "_"), globals()) | ||
2541 | 71 | def_freq = getattr(mod, "frequency", per_instance) | ||
2542 | 72 | handler = getattr(mod, "handle") | ||
2543 | 73 | |||
2544 | 74 | if not freq: | ||
2545 | 75 | freq = def_freq | ||
2546 | 76 | |||
2547 | 77 | self.cloud.sem_and_run("config-" + name, freq, handler, | ||
2548 | 78 | [name, self.cfg, self.cloud, cloudinit.log, args]) | ||
2549 | 79 | except: | ||
2550 | 80 | raise | ||
2551 | 81 | |||
2552 | 82 | |||
2553 | 83 | # reads a cloudconfig module list, returns | ||
2554 | 84 | # a 2 dimensional array suitable to pass to run_cc_modules | ||
# Reads a cloudconfig module list from cfg[name]; returns a list of
# tuples/lists suitable to pass to run_cc_modules where
#   item[0]  = module name
#   item[1]  = frequency (optional)
#   item[2:] = arguments (optional)
def read_cc_modules(cfg, name):
    if name not in cfg:
        return []
    module_list = []
    for item in cfg[name]:
        if isinstance(item, str):
            module_list.append((item,))
        elif isinstance(item, list):
            module_list.append(item)
        else:
            # BUGFIX: the original never applied an argument to the
            # format string, so the literal "%s" showed in the message.
            raise TypeError("failed to read '%s' item in config" % name)
    return module_list
2571 | 101 | |||
2572 | 102 | |||
def run_cc_modules(cc, module_list, log):
    """Run each (name[, freq[, args...]]) entry through cc.handle.

    Failures are logged and collected instead of aborting the run;
    returns the list of module names that failed.
    """
    failures = []
    for cfg_mod in module_list:
        name = cfg_mod[0]
        freq = None
        run_args = []
        if len(cfg_mod) > 1:
            freq = cfg_mod[1]
        if len(cfg_mod) > 2:
            run_args = cfg_mod[2:]

        try:
            log.debug("handling %s with freq=%s and args=%s" %
                      (name, freq, run_args))
            cc.handle(name, run_args, freq=freq)
        except Exception:
            # Narrowed from a bare 'except:' so SystemExit and
            # KeyboardInterrupt still propagate; everything else is
            # recorded as a per-module failure.
            log.warn(traceback.format_exc())
            log.error("config handling of %s, %s, %s failed\n" %
                      (name, freq, run_args))
            failures.append(name)

    return failures
2595 | 125 | |||
2596 | 126 | |||
2597 | 127 | # always returns well formated values | ||
2598 | 128 | # cfg is expected to have an entry 'output' in it, which is a dictionary | ||
2599 | 129 | # that includes entries for 'init', 'config', 'final' or 'all' | ||
2600 | 130 | # init: /var/log/cloud.out | ||
2601 | 131 | # config: [ ">> /var/log/cloud-config.out", /var/log/cloud-config.err ] | ||
2602 | 132 | # final: | ||
2603 | 133 | # output: "| logger -p" | ||
2604 | 134 | # error: "> /dev/null" | ||
2605 | 135 | # this returns the specific 'mode' entry, cleanly formatted, with value | ||
2606 | 136 | # None if if none is given | ||
2607 | 137 | def get_output_cfg(cfg, mode="init"): | ||
2608 | 138 | ret = [None, None] | ||
2609 | 139 | if not 'output' in cfg: | ||
2610 | 140 | return ret | ||
2611 | 141 | |||
2612 | 142 | outcfg = cfg['output'] | ||
2613 | 143 | if mode in outcfg: | ||
2614 | 144 | modecfg = outcfg[mode] | ||
2643 | 145 | else: | 50 | else: |
2773 | 146 | if 'all' not in outcfg: | 51 | freq = mod.frequency |
2774 | 147 | return ret | 52 | if freq and freq not in FREQUENCIES: |
2775 | 148 | # if there is a 'all' item in the output list | 53 | LOG.warn("Module %s has an unknown frequency %s", mod, freq) |
2776 | 149 | # then it applies to all users of this (init, config, final) | 54 | if not hasattr(mod, 'distros'): |
2777 | 150 | modecfg = outcfg['all'] | 55 | setattr(mod, 'distros', None) |
2778 | 151 | 56 | return mod | |
2650 | 152 | # if value is a string, it specifies stdout and stderr | ||
2651 | 153 | if isinstance(modecfg, str): | ||
2652 | 154 | ret = [modecfg, modecfg] | ||
2653 | 155 | |||
2654 | 156 | # if its a list, then we expect (stdout, stderr) | ||
2655 | 157 | if isinstance(modecfg, list): | ||
2656 | 158 | if len(modecfg) > 0: | ||
2657 | 159 | ret[0] = modecfg[0] | ||
2658 | 160 | if len(modecfg) > 1: | ||
2659 | 161 | ret[1] = modecfg[1] | ||
2660 | 162 | |||
2661 | 163 | # if it is a dictionary, expect 'out' and 'error' | ||
2662 | 164 | # items, which indicate out and error | ||
2663 | 165 | if isinstance(modecfg, dict): | ||
2664 | 166 | if 'output' in modecfg: | ||
2665 | 167 | ret[0] = modecfg['output'] | ||
2666 | 168 | if 'error' in modecfg: | ||
2667 | 169 | ret[1] = modecfg['error'] | ||
2668 | 170 | |||
2669 | 171 | # if err's entry == "&1", then make it same as stdout | ||
2670 | 172 | # as in shell syntax of "echo foo >/dev/null 2>&1" | ||
2671 | 173 | if ret[1] == "&1": | ||
2672 | 174 | ret[1] = ret[0] | ||
2673 | 175 | |||
2674 | 176 | swlist = [">>", ">", "|"] | ||
2675 | 177 | for i in range(len(ret)): | ||
2676 | 178 | if not ret[i]: | ||
2677 | 179 | continue | ||
2678 | 180 | val = ret[i].lstrip() | ||
2679 | 181 | found = False | ||
2680 | 182 | for s in swlist: | ||
2681 | 183 | if val.startswith(s): | ||
2682 | 184 | val = "%s %s" % (s, val[len(s):].strip()) | ||
2683 | 185 | found = True | ||
2684 | 186 | break | ||
2685 | 187 | if not found: | ||
2686 | 188 | # default behavior is append | ||
2687 | 189 | val = "%s %s" % (">>", val.strip()) | ||
2688 | 190 | ret[i] = val | ||
2689 | 191 | |||
2690 | 192 | return(ret) | ||
2691 | 193 | |||
2692 | 194 | |||
2693 | 195 | # redirect_output(outfmt, errfmt, orig_out, orig_err) | ||
2694 | 196 | # replace orig_out and orig_err with filehandles specified in outfmt or errfmt | ||
2695 | 197 | # fmt can be: | ||
2696 | 198 | # > FILEPATH | ||
2697 | 199 | # >> FILEPATH | ||
2698 | 200 | # | program [ arg1 [ arg2 [ ... ] ] ] | ||
2699 | 201 | # | ||
2700 | 202 | # with a '|', arguments are passed to shell, so one level of | ||
2701 | 203 | # shell escape is required. | ||
2702 | 204 | def redirect_output(outfmt, errfmt, o_out=sys.stdout, o_err=sys.stderr): | ||
2703 | 205 | if outfmt: | ||
2704 | 206 | (mode, arg) = outfmt.split(" ", 1) | ||
2705 | 207 | if mode == ">" or mode == ">>": | ||
2706 | 208 | owith = "ab" | ||
2707 | 209 | if mode == ">": | ||
2708 | 210 | owith = "wb" | ||
2709 | 211 | new_fp = open(arg, owith) | ||
2710 | 212 | elif mode == "|": | ||
2711 | 213 | proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE) | ||
2712 | 214 | new_fp = proc.stdin | ||
2713 | 215 | else: | ||
2714 | 216 | raise TypeError("invalid type for outfmt: %s" % outfmt) | ||
2715 | 217 | |||
2716 | 218 | if o_out: | ||
2717 | 219 | os.dup2(new_fp.fileno(), o_out.fileno()) | ||
2718 | 220 | if errfmt == outfmt: | ||
2719 | 221 | os.dup2(new_fp.fileno(), o_err.fileno()) | ||
2720 | 222 | return | ||
2721 | 223 | |||
2722 | 224 | if errfmt: | ||
2723 | 225 | (mode, arg) = errfmt.split(" ", 1) | ||
2724 | 226 | if mode == ">" or mode == ">>": | ||
2725 | 227 | owith = "ab" | ||
2726 | 228 | if mode == ">": | ||
2727 | 229 | owith = "wb" | ||
2728 | 230 | new_fp = open(arg, owith) | ||
2729 | 231 | elif mode == "|": | ||
2730 | 232 | proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE) | ||
2731 | 233 | new_fp = proc.stdin | ||
2732 | 234 | else: | ||
2733 | 235 | raise TypeError("invalid type for outfmt: %s" % outfmt) | ||
2734 | 236 | |||
2735 | 237 | if o_err: | ||
2736 | 238 | os.dup2(new_fp.fileno(), o_err.fileno()) | ||
2737 | 239 | return | ||
2738 | 240 | |||
2739 | 241 | |||
2740 | 242 | def run_per_instance(name, func, args, clear_on_fail=False): | ||
2741 | 243 | semfile = "%s/%s" % (cloudinit.get_ipath_cur("data"), name) | ||
2742 | 244 | if os.path.exists(semfile): | ||
2743 | 245 | return | ||
2744 | 246 | |||
2745 | 247 | util.write_file(semfile, str(time.time())) | ||
2746 | 248 | try: | ||
2747 | 249 | func(*args) | ||
2748 | 250 | except: | ||
2749 | 251 | if clear_on_fail: | ||
2750 | 252 | os.unlink(semfile) | ||
2751 | 253 | raise | ||
2752 | 254 | |||
2753 | 255 | |||
2754 | 256 | # apt_get top level command (install, update...), and args to pass it | ||
2755 | 257 | def apt_get(tlc, args=None): | ||
2756 | 258 | if args is None: | ||
2757 | 259 | args = [] | ||
2758 | 260 | e = os.environ.copy() | ||
2759 | 261 | e['DEBIAN_FRONTEND'] = 'noninteractive' | ||
2760 | 262 | cmd = ['apt-get', '--option', 'Dpkg::Options::=--force-confold', | ||
2761 | 263 | '--assume-yes', '--quiet', tlc] | ||
2762 | 264 | cmd.extend(args) | ||
2763 | 265 | subprocess.check_call(cmd, env=e) | ||
2764 | 266 | |||
2765 | 267 | |||
2766 | 268 | def update_package_sources(): | ||
2767 | 269 | run_per_instance("update-sources", apt_get, ("update",)) | ||
2768 | 270 | |||
2769 | 271 | |||
2770 | 272 | def install_packages(pkglist): | ||
2771 | 273 | update_package_sources() | ||
2772 | 274 | apt_get("install", pkglist) | ||
2779 | 275 | 57 | ||
2780 | === modified file 'cloudinit/config/cc_apt_pipelining.py' | |||
2781 | --- cloudinit/CloudConfig/cc_apt_pipelining.py 2012-03-09 15:26:09 +0000 | |||
2782 | +++ cloudinit/config/cc_apt_pipelining.py 2012-07-06 21:16:18 +0000 | |||
2783 | @@ -16,38 +16,44 @@ | |||
2784 | 16 | # You should have received a copy of the GNU General Public License | 16 | # You should have received a copy of the GNU General Public License |
2785 | 17 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | 17 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
2786 | 18 | 18 | ||
2795 | 19 | import cloudinit.util as util | 19 | from cloudinit import util |
2796 | 20 | from cloudinit.CloudConfig import per_instance | 20 | from cloudinit.settings import PER_INSTANCE |
2797 | 21 | 21 | ||
2798 | 22 | frequency = per_instance | 22 | frequency = PER_INSTANCE |
2799 | 23 | default_file = "/etc/apt/apt.conf.d/90cloud-init-pipelining" | 23 | |
2800 | 24 | 24 | distros = ['ubuntu', 'debian'] | |
2801 | 25 | 25 | ||
2802 | 26 | def handle(_name, cfg, _cloud, log, _args): | 26 | DEFAULT_FILE = "/etc/apt/apt.conf.d/90cloud-init-pipelining" |
2803 | 27 | |||
2804 | 28 | APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n" | ||
2805 | 29 | 'Acquire::http::Pipeline-Depth "%s";\n') | ||
2806 | 30 | |||
2807 | 31 | # Acquire::http::Pipeline-Depth can be a value | ||
2808 | 32 | # from 0 to 5 indicating how many outstanding requests APT should send. | ||
2809 | 33 | # A value of zero MUST be specified if the remote host does not properly linger | ||
2810 | 34 | # on TCP connections - otherwise data corruption will occur. | ||
2811 | 35 | |||
2812 | 36 | |||
2813 | 37 | def handle(_name, cfg, cloud, log, _args): | ||
2814 | 27 | 38 | ||
2815 | 28 | apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", False) | 39 | apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", False) |
2822 | 29 | apt_pipe_value = str(apt_pipe_value).lower() | 40 | apt_pipe_value_s = str(apt_pipe_value).lower().strip() |
2823 | 30 | 41 | ||
2824 | 31 | if apt_pipe_value == "false": | 42 | if apt_pipe_value_s == "false": |
2825 | 32 | write_apt_snippet("0", log) | 43 | write_apt_snippet(cloud, "0", log, DEFAULT_FILE) |
2826 | 33 | 44 | elif apt_pipe_value_s in ("none", "unchanged", "os"): | |
2821 | 34 | elif apt_pipe_value in ("none", "unchanged", "os"): | ||
2827 | 35 | return | 45 | return |
2832 | 36 | 46 | elif apt_pipe_value_s in [str(b) for b in xrange(0, 6)]: | |
2833 | 37 | elif apt_pipe_value in str(range(0, 6)): | 47 | write_apt_snippet(cloud, apt_pipe_value_s, log, DEFAULT_FILE) |
2830 | 38 | write_apt_snippet(apt_pipe_value, log) | ||
2831 | 39 | |||
2834 | 40 | else: | 48 | else: |
2839 | 41 | log.warn("Invalid option for apt_pipeling: %s" % apt_pipe_value) | 49 | log.warn("Invalid option for apt_pipeling: %s", apt_pipe_value) |
2840 | 42 | 50 | ||
2841 | 43 | 51 | ||
2842 | 44 | def write_apt_snippet(setting, log, f_name=default_file): | 52 | def write_apt_snippet(cloud, setting, log, f_name): |
2843 | 45 | """ Writes f_name with apt pipeline depth 'setting' """ | 53 | """ Writes f_name with apt pipeline depth 'setting' """ |
2844 | 46 | 54 | ||
2852 | 47 | acquire_pipeline_depth = 'Acquire::http::Pipeline-Depth "%s";\n' | 55 | file_contents = APT_PIPE_TPL % (setting) |
2853 | 48 | file_contents = ("//Written by cloud-init per 'apt_pipelining'\n" | 56 | |
2854 | 49 | + (acquire_pipeline_depth % setting)) | 57 | util.write_file(cloud.paths.join(False, f_name), file_contents) |
2855 | 50 | 58 | ||
2856 | 51 | util.write_file(f_name, file_contents) | 59 | log.debug("Wrote %s with apt pipeline depth setting %s", f_name, setting) |
2850 | 52 | |||
2851 | 53 | log.debug("Wrote %s with APT pipeline setting" % f_name) | ||
2857 | 54 | 60 | ||
2858 | === modified file 'cloudinit/config/cc_apt_update_upgrade.py' | |||
2859 | --- cloudinit/CloudConfig/cc_apt_update_upgrade.py 2012-01-18 14:07:33 +0000 | |||
2860 | +++ cloudinit/config/cc_apt_update_upgrade.py 2012-07-06 21:16:18 +0000 | |||
2861 | @@ -18,50 +18,73 @@ | |||
2862 | 18 | # You should have received a copy of the GNU General Public License | 18 | # You should have received a copy of the GNU General Public License |
2863 | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
2864 | 20 | 20 | ||
2868 | 21 | import cloudinit.util as util | 21 | import glob |
2866 | 22 | import subprocess | ||
2867 | 23 | import traceback | ||
2869 | 24 | import os | 22 | import os |
2875 | 25 | import glob | 23 | |
2876 | 26 | import cloudinit.CloudConfig as cc | 24 | from cloudinit import templater |
2877 | 27 | 25 | from cloudinit import util | |
2878 | 28 | 26 | ||
2879 | 29 | def handle(_name, cfg, cloud, log, _args): | 27 | distros = ['ubuntu', 'debian'] |
2880 | 28 | |||
2881 | 29 | PROXY_TPL = "Acquire::HTTP::Proxy \"%s\";\n" | ||
2882 | 30 | PROXY_FN = "/etc/apt/apt.conf.d/95cloud-init-proxy" | ||
2883 | 31 | |||
2884 | 32 | # A temporary shell program to get a given gpg key | ||
2885 | 33 | # from a given keyserver | ||
2886 | 34 | EXPORT_GPG_KEYID = """ | ||
2887 | 35 | k=${1} ks=${2}; | ||
2888 | 36 | exec 2>/dev/null | ||
2889 | 37 | [ -n "$k" ] || exit 1; | ||
2890 | 38 | armour=$(gpg --list-keys --armour "${k}") | ||
2891 | 39 | if [ -z "${armour}" ]; then | ||
2892 | 40 | gpg --keyserver ${ks} --recv $k >/dev/null && | ||
2893 | 41 | armour=$(gpg --export --armour "${k}") && | ||
2894 | 42 | gpg --batch --yes --delete-keys "${k}" | ||
2895 | 43 | fi | ||
2896 | 44 | [ -n "${armour}" ] && echo "${armour}" | ||
2897 | 45 | """ | ||
2898 | 46 | |||
2899 | 47 | |||
2900 | 48 | def handle(name, cfg, cloud, log, _args): | ||
2901 | 30 | update = util.get_cfg_option_bool(cfg, 'apt_update', False) | 49 | update = util.get_cfg_option_bool(cfg, 'apt_update', False) |
2902 | 31 | upgrade = util.get_cfg_option_bool(cfg, 'apt_upgrade', False) | 50 | upgrade = util.get_cfg_option_bool(cfg, 'apt_upgrade', False) |
2903 | 32 | 51 | ||
2904 | 33 | release = get_release() | 52 | release = get_release() |
2905 | 34 | |||
2906 | 35 | mirror = find_apt_mirror(cloud, cfg) | 53 | mirror = find_apt_mirror(cloud, cfg) |
2915 | 36 | 54 | if not mirror: | |
2916 | 37 | log.debug("selected mirror at: %s" % mirror) | 55 | log.debug(("Skipping module named %s," |
2917 | 38 | 56 | " no package 'mirror' located"), name) | |
2918 | 39 | if not util.get_cfg_option_bool(cfg, \ | 57 | return |
2919 | 40 | 'apt_preserve_sources_list', False): | 58 | |
2920 | 41 | generate_sources_list(release, mirror) | 59 | log.debug("Selected mirror at: %s" % mirror) |
2921 | 42 | old_mir = util.get_cfg_option_str(cfg, 'apt_old_mirror', \ | 60 | |
2922 | 43 | "archive.ubuntu.com/ubuntu") | 61 | if not util.get_cfg_option_bool(cfg, |
2923 | 62 | 'apt_preserve_sources_list', False): | ||
2924 | 63 | generate_sources_list(release, mirror, cloud, log) | ||
2925 | 64 | old_mir = util.get_cfg_option_str(cfg, 'apt_old_mirror', | ||
2926 | 65 | "archive.ubuntu.com/ubuntu") | ||
2927 | 44 | rename_apt_lists(old_mir, mirror) | 66 | rename_apt_lists(old_mir, mirror) |
2928 | 45 | 67 | ||
2930 | 46 | # set up proxy | 68 | # Set up any apt proxy |
2931 | 47 | proxy = cfg.get("apt_proxy", None) | 69 | proxy = cfg.get("apt_proxy", None) |
2933 | 48 | proxy_filename = "/etc/apt/apt.conf.d/95cloud-init-proxy" | 70 | proxy_filename = PROXY_FN |
2934 | 49 | if proxy: | 71 | if proxy: |
2935 | 50 | try: | 72 | try: |
2939 | 51 | contents = "Acquire::HTTP::Proxy \"%s\";\n" | 73 | # See man 'apt.conf' |
2940 | 52 | with open(proxy_filename, "w") as fp: | 74 | contents = PROXY_TPL % (proxy) |
2941 | 53 | fp.write(contents % proxy) | 75 | util.write_file(cloud.paths.join(False, proxy_filename), |
2942 | 76 | contents) | ||
2943 | 54 | except Exception as e: | 77 | except Exception as e: |
2945 | 55 | log.warn("Failed to write proxy to %s" % proxy_filename) | 78 | util.logexc(log, "Failed to write proxy to %s", proxy_filename) |
2946 | 56 | elif os.path.isfile(proxy_filename): | 79 | elif os.path.isfile(proxy_filename): |
2948 | 57 | os.unlink(proxy_filename) | 80 | util.del_file(proxy_filename) |
2949 | 58 | 81 | ||
2951 | 59 | # process 'apt_sources' | 82 | # Process 'apt_sources' |
2952 | 60 | if 'apt_sources' in cfg: | 83 | if 'apt_sources' in cfg: |
2954 | 61 | errors = add_sources(cfg['apt_sources'], | 84 | errors = add_sources(cloud, cfg['apt_sources'], |
2955 | 62 | {'MIRROR': mirror, 'RELEASE': release}) | 85 | {'MIRROR': mirror, 'RELEASE': release}) |
2956 | 63 | for e in errors: | 86 | for e in errors: |
2958 | 64 | log.warn("Source Error: %s\n" % ':'.join(e)) | 87 | log.warn("Source Error: %s", ':'.join(e)) |
2959 | 65 | 88 | ||
2960 | 66 | dconf_sel = util.get_cfg_option_str(cfg, 'debconf_selections', False) | 89 | dconf_sel = util.get_cfg_option_str(cfg, 'debconf_selections', False) |
2961 | 67 | if dconf_sel: | 90 | if dconf_sel: |
2962 | @@ -69,45 +92,51 @@ | |||
2963 | 69 | try: | 92 | try: |
2964 | 70 | util.subp(('debconf-set-selections', '-'), dconf_sel) | 93 | util.subp(('debconf-set-selections', '-'), dconf_sel) |
2965 | 71 | except: | 94 | except: |
2968 | 72 | log.error("Failed to run debconf-set-selections") | 95 | util.logexc(log, "Failed to run debconf-set-selections") |
2967 | 73 | log.debug(traceback.format_exc()) | ||
2969 | 74 | 96 | ||
2971 | 75 | pkglist = util.get_cfg_option_list_or_str(cfg, 'packages', []) | 97 | pkglist = util.get_cfg_option_list(cfg, 'packages', []) |
2972 | 76 | 98 | ||
2973 | 77 | errors = [] | 99 | errors = [] |
2974 | 78 | if update or len(pkglist) or upgrade: | 100 | if update or len(pkglist) or upgrade: |
2975 | 79 | try: | 101 | try: |
2980 | 80 | cc.update_package_sources() | 102 | cloud.distro.update_package_sources() |
2981 | 81 | except subprocess.CalledProcessError as e: | 103 | except Exception as e: |
2982 | 82 | log.warn("apt-get update failed") | 104 | util.logexc(log, "Package update failed") |
2979 | 83 | log.debug(traceback.format_exc()) | ||
2983 | 84 | errors.append(e) | 105 | errors.append(e) |
2984 | 85 | 106 | ||
2985 | 86 | if upgrade: | 107 | if upgrade: |
2986 | 87 | try: | 108 | try: |
2991 | 88 | cc.apt_get("upgrade") | 109 | cloud.distro.package_command("upgrade") |
2992 | 89 | except subprocess.CalledProcessError as e: | 110 | except Exception as e: |
2993 | 90 | log.warn("apt upgrade failed") | 111 | util.logexc(log, "Package upgrade failed") |
2990 | 91 | log.debug(traceback.format_exc()) | ||
2994 | 92 | errors.append(e) | 112 | errors.append(e) |
2995 | 93 | 113 | ||
2996 | 94 | if len(pkglist): | 114 | if len(pkglist): |
2997 | 95 | try: | 115 | try: |
3002 | 96 | cc.install_packages(pkglist) | 116 | cloud.distro.install_packages(pkglist) |
3003 | 97 | except subprocess.CalledProcessError as e: | 117 | except Exception as e: |
3004 | 98 | log.warn("Failed to install packages: %s " % pkglist) | 118 | util.logexc(log, "Failed to install packages: %s ", pkglist) |
3001 | 99 | log.debug(traceback.format_exc()) | ||
3005 | 100 | errors.append(e) | 119 | errors.append(e) |
3006 | 101 | 120 | ||
3007 | 102 | if len(errors): | 121 | if len(errors): |
3011 | 103 | raise errors[0] | 122 | log.warn("%s failed with exceptions, re-raising the last one", |
3012 | 104 | 123 | len(errors)) | |
3013 | 105 | return(True) | 124 | raise errors[-1] |
3014 | 125 | |||
3015 | 126 | |||
3016 | 127 | # get gpg keyid from keyserver | ||
3017 | 128 | def getkeybyid(keyid, keyserver): | ||
3018 | 129 | with util.ExtendedTemporaryFile(suffix='.sh') as fh: | ||
3019 | 130 | fh.write(EXPORT_GPG_KEYID) | ||
3020 | 131 | fh.flush() | ||
3021 | 132 | cmd = ['/bin/sh', fh.name, keyid, keyserver] | ||
3022 | 133 | (stdout, _stderr) = util.subp(cmd) | ||
3023 | 134 | return stdout.strip() | ||
3024 | 106 | 135 | ||
3025 | 107 | 136 | ||
3026 | 108 | def mirror2lists_fileprefix(mirror): | 137 | def mirror2lists_fileprefix(mirror): |
3027 | 109 | string = mirror | 138 | string = mirror |
3029 | 110 | # take of http:// or ftp:// | 139 | # take off http:// or ftp:// |
3030 | 111 | if string.endswith("/"): | 140 | if string.endswith("/"): |
3031 | 112 | string = string[0:-1] | 141 | string = string[0:-1] |
3032 | 113 | pos = string.find("://") | 142 | pos = string.find("://") |
3033 | @@ -118,39 +147,44 @@ | |||
3034 | 118 | 147 | ||
3035 | 119 | 148 | ||
3036 | 120 | def rename_apt_lists(omirror, new_mirror, lists_d="/var/lib/apt/lists"): | 149 | def rename_apt_lists(omirror, new_mirror, lists_d="/var/lib/apt/lists"): |
3040 | 121 | oprefix = "%s/%s" % (lists_d, mirror2lists_fileprefix(omirror)) | 150 | oprefix = os.path.join(lists_d, mirror2lists_fileprefix(omirror)) |
3041 | 122 | nprefix = "%s/%s" % (lists_d, mirror2lists_fileprefix(new_mirror)) | 151 | nprefix = os.path.join(lists_d, mirror2lists_fileprefix(new_mirror)) |
3042 | 123 | if(oprefix == nprefix): | 152 | if oprefix == nprefix: |
3043 | 124 | return | 153 | return |
3044 | 125 | olen = len(oprefix) | 154 | olen = len(oprefix) |
3045 | 126 | for filename in glob.glob("%s_*" % oprefix): | 155 | for filename in glob.glob("%s_*" % oprefix): |
3047 | 127 | os.rename(filename, "%s%s" % (nprefix, filename[olen:])) | 156 | # TODO use the cloud.paths.join... |
3048 | 157 | util.rename(filename, "%s%s" % (nprefix, filename[olen:])) | ||
3049 | 128 | 158 | ||
3050 | 129 | 159 | ||
3051 | 130 | def get_release(): | 160 | def get_release(): |
3063 | 131 | stdout, _stderr = subprocess.Popen(['lsb_release', '-cs'], | 161 | (stdout, _stderr) = util.subp(['lsb_release', '-cs']) |
3064 | 132 | stdout=subprocess.PIPE).communicate() | 162 | return stdout.strip() |
3065 | 133 | return(str(stdout).strip()) | 163 | |
3066 | 134 | 164 | ||
3067 | 135 | 165 | def generate_sources_list(codename, mirror, cloud, log): | |
3068 | 136 | def generate_sources_list(codename, mirror): | 166 | template_fn = cloud.get_template_filename('sources.list') |
3069 | 137 | util.render_to_file('sources.list', '/etc/apt/sources.list', \ | 167 | if template_fn: |
3070 | 138 | {'mirror': mirror, 'codename': codename}) | 168 | params = {'mirror': mirror, 'codename': codename} |
3071 | 139 | 169 | out_fn = cloud.paths.join(False, '/etc/apt/sources.list') | |
3072 | 140 | 170 | templater.render_to_file(template_fn, out_fn, params) | |
3073 | 141 | def add_sources(srclist, searchList=None): | 171 | else: |
3074 | 172 | log.warn("No template found, not rendering /etc/apt/sources.list") | ||
3075 | 173 | |||
3076 | 174 | |||
3077 | 175 | def add_sources(cloud, srclist, template_params=None): | ||
3078 | 142 | """ | 176 | """ |
3079 | 143 | add entries in /etc/apt/sources.list.d for each abbreviated | 177 | add entries in /etc/apt/sources.list.d for each abbreviated |
3080 | 144 | sources.list entry in 'srclist'. When rendering template, also | 178 | sources.list entry in 'srclist'. When rendering template, also |
3081 | 145 | include the values in dictionary searchList | 179 | include the values in dictionary searchList |
3082 | 146 | """ | 180 | """ |
3086 | 147 | if searchList is None: | 181 | if template_params is None: |
3087 | 148 | searchList = {} | 182 | template_params = {} |
3085 | 149 | elst = [] | ||
3088 | 150 | 183 | ||
3089 | 184 | errorlist = [] | ||
3090 | 151 | for ent in srclist: | 185 | for ent in srclist: |
3091 | 152 | if 'source' not in ent: | 186 | if 'source' not in ent: |
3093 | 153 | elst.append(["", "missing source"]) | 187 | errorlist.append(["", "missing source"]) |
3094 | 154 | continue | 188 | continue |
3095 | 155 | 189 | ||
3096 | 156 | source = ent['source'] | 190 | source = ent['source'] |
3097 | @@ -158,51 +192,48 @@ | |||
3098 | 158 | try: | 192 | try: |
3099 | 159 | util.subp(["add-apt-repository", source]) | 193 | util.subp(["add-apt-repository", source]) |
3100 | 160 | except: | 194 | except: |
3102 | 161 | elst.append([source, "add-apt-repository failed"]) | 195 | errorlist.append([source, "add-apt-repository failed"]) |
3103 | 162 | continue | 196 | continue |
3104 | 163 | 197 | ||
3106 | 164 | source = util.render_string(source, searchList) | 198 | source = templater.render_string(source, template_params) |
3107 | 165 | 199 | ||
3108 | 166 | if 'filename' not in ent: | 200 | if 'filename' not in ent: |
3109 | 167 | ent['filename'] = 'cloud_config_sources.list' | 201 | ent['filename'] = 'cloud_config_sources.list' |
3110 | 168 | 202 | ||
3111 | 169 | if not ent['filename'].startswith("/"): | 203 | if not ent['filename'].startswith("/"): |
3114 | 170 | ent['filename'] = "%s/%s" % \ | 204 | ent['filename'] = os.path.join("/etc/apt/sources.list.d/", |
3115 | 171 | ("/etc/apt/sources.list.d/", ent['filename']) | 205 | ent['filename']) |
3116 | 172 | 206 | ||
3117 | 173 | if ('keyid' in ent and 'key' not in ent): | 207 | if ('keyid' in ent and 'key' not in ent): |
3118 | 174 | ks = "keyserver.ubuntu.com" | 208 | ks = "keyserver.ubuntu.com" |
3119 | 175 | if 'keyserver' in ent: | 209 | if 'keyserver' in ent: |
3120 | 176 | ks = ent['keyserver'] | 210 | ks = ent['keyserver'] |
3121 | 177 | try: | 211 | try: |
3123 | 178 | ent['key'] = util.getkeybyid(ent['keyid'], ks) | 212 | ent['key'] = getkeybyid(ent['keyid'], ks) |
3124 | 179 | except: | 213 | except: |
3126 | 180 | elst.append([source, "failed to get key from %s" % ks]) | 214 | errorlist.append([source, "failed to get key from %s" % ks]) |
3127 | 181 | continue | 215 | continue |
3128 | 182 | 216 | ||
3129 | 183 | if 'key' in ent: | 217 | if 'key' in ent: |
3130 | 184 | try: | 218 | try: |
3131 | 185 | util.subp(('apt-key', 'add', '-'), ent['key']) | 219 | util.subp(('apt-key', 'add', '-'), ent['key']) |
3132 | 186 | except: | 220 | except: |
3134 | 187 | elst.append([source, "failed add key"]) | 221 | errorlist.append([source, "failed add key"]) |
3135 | 188 | 222 | ||
3136 | 189 | try: | 223 | try: |
3138 | 190 | util.write_file(ent['filename'], source + "\n", omode="ab") | 224 | contents = "%s\n" % (source) |
3139 | 225 | util.write_file(cloud.paths.join(False, ent['filename']), | ||
3140 | 226 | contents, omode="ab") | ||
3141 | 191 | except: | 227 | except: |
3143 | 192 | elst.append([source, "failed write to file %s" % ent['filename']]) | 228 | errorlist.append([source, |
3144 | 229 | "failed write to file %s" % ent['filename']]) | ||
3145 | 193 | 230 | ||
3147 | 194 | return(elst) | 231 | return errorlist |
3148 | 195 | 232 | ||
3149 | 196 | 233 | ||
3150 | 197 | def find_apt_mirror(cloud, cfg): | 234 | def find_apt_mirror(cloud, cfg): |
3151 | 198 | """ find an apt_mirror given the cloud and cfg provided """ | 235 | """ find an apt_mirror given the cloud and cfg provided """ |
3152 | 199 | 236 | ||
3153 | 200 | # TODO: distro and defaults should be configurable | ||
3154 | 201 | distro = "ubuntu" | ||
3155 | 202 | defaults = { | ||
3156 | 203 | 'ubuntu': "http://archive.ubuntu.com/ubuntu", | ||
3157 | 204 | 'debian': "http://archive.debian.org/debian", | ||
3158 | 205 | } | ||
3159 | 206 | mirror = None | 237 | mirror = None |
3160 | 207 | 238 | ||
3161 | 208 | cfg_mirror = cfg.get("apt_mirror", None) | 239 | cfg_mirror = cfg.get("apt_mirror", None) |
3162 | @@ -211,14 +242,13 @@ | |||
3163 | 211 | elif "apt_mirror_search" in cfg: | 242 | elif "apt_mirror_search" in cfg: |
3164 | 212 | mirror = util.search_for_mirror(cfg['apt_mirror_search']) | 243 | mirror = util.search_for_mirror(cfg['apt_mirror_search']) |
3165 | 213 | else: | 244 | else: |
3168 | 214 | if cloud: | 245 | mirror = cloud.get_local_mirror() |
3167 | 215 | mirror = cloud.get_mirror() | ||
3169 | 216 | 246 | ||
3170 | 217 | mydom = "" | 247 | mydom = "" |
3171 | 218 | 248 | ||
3172 | 219 | doms = [] | 249 | doms = [] |
3173 | 220 | 250 | ||
3175 | 221 | if not mirror and cloud: | 251 | if not mirror: |
3176 | 222 | # if we have a fqdn, then search its domain portion first | 252 | # if we have a fqdn, then search its domain portion first |
3177 | 223 | (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) | 253 | (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) |
3178 | 224 | mydom = ".".join(fqdn.split(".")[1:]) | 254 | mydom = ".".join(fqdn.split(".")[1:]) |
3179 | @@ -229,13 +259,14 @@ | |||
3180 | 229 | doms.extend((".localdomain", "",)) | 259 | doms.extend((".localdomain", "",)) |
3181 | 230 | 260 | ||
3182 | 231 | mirror_list = [] | 261 | mirror_list = [] |
3183 | 262 | distro = cloud.distro.name | ||
3184 | 232 | mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro) | 263 | mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro) |
3185 | 233 | for post in doms: | 264 | for post in doms: |
3187 | 234 | mirror_list.append(mirrorfmt % post) | 265 | mirror_list.append(mirrorfmt % (post)) |
3188 | 235 | 266 | ||
3189 | 236 | mirror = util.search_for_mirror(mirror_list) | 267 | mirror = util.search_for_mirror(mirror_list) |
3190 | 237 | 268 | ||
3191 | 238 | if not mirror: | 269 | if not mirror: |
3193 | 239 | mirror = defaults[distro] | 270 | mirror = cloud.distro.get_package_mirror() |
3194 | 240 | 271 | ||
3195 | 241 | return mirror | 272 | return mirror |
3196 | 242 | 273 | ||
3197 | === modified file 'cloudinit/config/cc_bootcmd.py' | |||
3198 | --- cloudinit/CloudConfig/cc_bootcmd.py 2012-01-18 14:07:33 +0000 | |||
3199 | +++ cloudinit/config/cc_bootcmd.py 2012-07-06 21:16:18 +0000 | |||
3200 | @@ -17,32 +17,39 @@ | |||
3201 | 17 | # | 17 | # |
3202 | 18 | # You should have received a copy of the GNU General Public License | 18 | # You should have received a copy of the GNU General Public License |
3203 | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
3207 | 20 | import cloudinit.util as util | 20 | |
3205 | 21 | import subprocess | ||
3206 | 22 | import tempfile | ||
3208 | 23 | import os | 21 | import os |
3214 | 24 | from cloudinit.CloudConfig import per_always | 22 | |
3215 | 25 | frequency = per_always | 23 | from cloudinit import util |
3216 | 26 | 24 | from cloudinit.settings import PER_ALWAYS | |
3217 | 27 | 25 | ||
3218 | 28 | def handle(_name, cfg, cloud, log, _args): | 26 | frequency = PER_ALWAYS |
3219 | 27 | |||
3220 | 28 | |||
3221 | 29 | def handle(name, cfg, cloud, log, _args): | ||
3222 | 30 | |||
3223 | 29 | if "bootcmd" not in cfg: | 31 | if "bootcmd" not in cfg: |
3224 | 32 | log.debug(("Skipping module named %s," | ||
3225 | 33 | " no 'bootcmd' key in configuration"), name) | ||
3226 | 30 | return | 34 | return |
3227 | 31 | 35 | ||
3236 | 32 | try: | 36 | with util.ExtendedTemporaryFile(suffix=".sh") as tmpf: |
3237 | 33 | content = util.shellify(cfg["bootcmd"]) | 37 | try: |
3238 | 34 | tmpf = tempfile.TemporaryFile() | 38 | content = util.shellify(cfg["bootcmd"]) |
3239 | 35 | tmpf.write(content) | 39 | tmpf.write(content) |
3240 | 36 | tmpf.seek(0) | 40 | tmpf.flush() |
3241 | 37 | except: | 41 | except: |
3242 | 38 | log.warn("failed to shellify bootcmd") | 42 | util.logexc(log, "Failed to shellify bootcmd") |
3243 | 39 | raise | 43 | raise |
3244 | 40 | 44 | ||
3253 | 41 | try: | 45 | try: |
3254 | 42 | env = os.environ.copy() | 46 | env = os.environ.copy() |
3255 | 43 | env['INSTANCE_ID'] = cloud.get_instance_id() | 47 | iid = cloud.get_instance_id() |
3256 | 44 | subprocess.check_call(['/bin/sh'], env=env, stdin=tmpf) | 48 | if iid: |
3257 | 45 | tmpf.close() | 49 | env['INSTANCE_ID'] = str(iid) |
3258 | 46 | except: | 50 | cmd = ['/bin/sh', tmpf.name] |
3259 | 47 | log.warn("failed to run commands from bootcmd") | 51 | util.subp(cmd, env=env, capture=False) |
3260 | 48 | raise | 52 | except: |
3261 | 53 | util.logexc(log, | ||
3262 | 54 | ("Failed to run bootcmd module %s"), name) | ||
3263 | 55 | raise | ||
3264 | 49 | 56 | ||
3265 | === modified file 'cloudinit/config/cc_byobu.py' | |||
3266 | --- cloudinit/CloudConfig/cc_byobu.py 2012-01-18 14:07:33 +0000 | |||
3267 | +++ cloudinit/config/cc_byobu.py 2012-07-06 21:16:18 +0000 | |||
3268 | @@ -18,18 +18,19 @@ | |||
3269 | 18 | # You should have received a copy of the GNU General Public License | 18 | # You should have received a copy of the GNU General Public License |
3270 | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
3271 | 20 | 20 | ||
3278 | 21 | import cloudinit.util as util | 21 | from cloudinit import util |
3279 | 22 | import subprocess | 22 | |
3280 | 23 | import traceback | 23 | distros = ['ubuntu', 'debian'] |
3281 | 24 | 24 | ||
3282 | 25 | 25 | ||
3283 | 26 | def handle(_name, cfg, _cloud, log, args): | 26 | def handle(name, cfg, _cloud, log, args): |
3284 | 27 | if len(args) != 0: | 27 | if len(args) != 0: |
3285 | 28 | value = args[0] | 28 | value = args[0] |
3286 | 29 | else: | 29 | else: |
3287 | 30 | value = util.get_cfg_option_str(cfg, "byobu_by_default", "") | 30 | value = util.get_cfg_option_str(cfg, "byobu_by_default", "") |
3288 | 31 | 31 | ||
3289 | 32 | if not value: | 32 | if not value: |
3290 | 33 | log.debug("Skipping module named %s, no 'byobu' values found", name) | ||
3291 | 33 | return | 34 | return |
3292 | 34 | 35 | ||
3293 | 35 | if value == "user" or value == "system": | 36 | if value == "user" or value == "system": |
3294 | @@ -38,7 +39,7 @@ | |||
3295 | 38 | valid = ("enable-user", "enable-system", "enable", | 39 | valid = ("enable-user", "enable-system", "enable", |
3296 | 39 | "disable-user", "disable-system", "disable") | 40 | "disable-user", "disable-system", "disable") |
3297 | 40 | if not value in valid: | 41 | if not value in valid: |
3299 | 41 | log.warn("Unknown value %s for byobu_by_default" % value) | 42 | log.warn("Unknown value %s for byobu_by_default", value) |
3300 | 42 | 43 | ||
3301 | 43 | mod_user = value.endswith("-user") | 44 | mod_user = value.endswith("-user") |
3302 | 44 | mod_sys = value.endswith("-system") | 45 | mod_sys = value.endswith("-system") |
3303 | @@ -65,13 +66,6 @@ | |||
3304 | 65 | 66 | ||
3305 | 66 | cmd = ["/bin/sh", "-c", "%s %s %s" % ("X=0;", shcmd, "exit $X")] | 67 | cmd = ["/bin/sh", "-c", "%s %s %s" % ("X=0;", shcmd, "exit $X")] |
3306 | 67 | 68 | ||
3308 | 68 | log.debug("setting byobu to %s" % value) | 69 | log.debug("Setting byobu to %s", value) |
3309 | 69 | 70 | ||
3318 | 70 | try: | 71 | util.subp(cmd, capture=False) |
3311 | 71 | subprocess.check_call(cmd) | ||
3312 | 72 | except subprocess.CalledProcessError as e: | ||
3313 | 73 | log.debug(traceback.format_exc(e)) | ||
3314 | 74 | raise Exception("Cmd returned %s: %s" % (e.returncode, cmd)) | ||
3315 | 75 | except OSError as e: | ||
3316 | 76 | log.debug(traceback.format_exc(e)) | ||
3317 | 77 | raise Exception("Cmd failed to execute: %s" % (cmd)) | ||
3319 | 78 | 72 | ||
3320 | === modified file 'cloudinit/config/cc_ca_certs.py' | |||
3321 | --- cloudinit/CloudConfig/cc_ca_certs.py 2012-03-08 12:45:43 +0000 | |||
3322 | +++ cloudinit/config/cc_ca_certs.py 2012-07-06 21:16:18 +0000 | |||
3323 | @@ -13,25 +13,27 @@ | |||
3324 | 13 | # | 13 | # |
3325 | 14 | # You should have received a copy of the GNU General Public License | 14 | # You should have received a copy of the GNU General Public License |
3326 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
3327 | 16 | |||
3328 | 16 | import os | 17 | import os |
3332 | 17 | from subprocess import check_call | 18 | |
3333 | 18 | from cloudinit.util import (write_file, get_cfg_option_list_or_str, | 19 | from cloudinit import util |
3331 | 19 | delete_dir_contents, subp) | ||
3334 | 20 | 20 | ||
3335 | 21 | CA_CERT_PATH = "/usr/share/ca-certificates/" | 21 | CA_CERT_PATH = "/usr/share/ca-certificates/" |
3336 | 22 | CA_CERT_FILENAME = "cloud-init-ca-certs.crt" | 22 | CA_CERT_FILENAME = "cloud-init-ca-certs.crt" |
3337 | 23 | CA_CERT_CONFIG = "/etc/ca-certificates.conf" | 23 | CA_CERT_CONFIG = "/etc/ca-certificates.conf" |
3338 | 24 | CA_CERT_SYSTEM_PATH = "/etc/ssl/certs/" | 24 | CA_CERT_SYSTEM_PATH = "/etc/ssl/certs/" |
3339 | 25 | 25 | ||
3340 | 26 | distros = ['ubuntu', 'debian'] | ||
3341 | 27 | |||
3342 | 26 | 28 | ||
3343 | 27 | def update_ca_certs(): | 29 | def update_ca_certs(): |
3344 | 28 | """ | 30 | """ |
3345 | 29 | Updates the CA certificate cache on the current machine. | 31 | Updates the CA certificate cache on the current machine. |
3346 | 30 | """ | 32 | """ |
3351 | 31 | check_call(["update-ca-certificates"]) | 33 | util.subp(["update-ca-certificates"], capture=False) |
3352 | 32 | 34 | ||
3353 | 33 | 35 | ||
3354 | 34 | def add_ca_certs(certs): | 36 | def add_ca_certs(paths, certs): |
3355 | 35 | """ | 37 | """ |
3356 | 36 | Adds certificates to the system. To actually apply the new certificates | 38 | Adds certificates to the system. To actually apply the new certificates |
3357 | 37 | you must also call L{update_ca_certs}. | 39 | you must also call L{update_ca_certs}. |
3358 | @@ -39,26 +41,29 @@ | |||
3359 | 39 | @param certs: A list of certificate strings. | 41 | @param certs: A list of certificate strings. |
3360 | 40 | """ | 42 | """ |
3361 | 41 | if certs: | 43 | if certs: |
3363 | 42 | cert_file_contents = "\n".join(certs) | 44 | # First ensure they are strings... |
3364 | 45 | cert_file_contents = "\n".join([str(c) for c in certs]) | ||
3365 | 43 | cert_file_fullpath = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME) | 46 | cert_file_fullpath = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME) |
3367 | 44 | write_file(cert_file_fullpath, cert_file_contents, mode=0644) | 47 | cert_file_fullpath = paths.join(False, cert_file_fullpath) |
3368 | 48 | util.write_file(cert_file_fullpath, cert_file_contents, mode=0644) | ||
3369 | 45 | # Append cert filename to CA_CERT_CONFIG file. | 49 | # Append cert filename to CA_CERT_CONFIG file. |
3374 | 46 | write_file(CA_CERT_CONFIG, "\n%s" % CA_CERT_FILENAME, omode="a") | 50 | util.write_file(paths.join(False, CA_CERT_CONFIG), |
3375 | 47 | 51 | "\n%s" % CA_CERT_FILENAME, omode="ab") | |
3376 | 48 | 52 | ||
3377 | 49 | def remove_default_ca_certs(): | 53 | |
3378 | 54 | def remove_default_ca_certs(paths): | ||
3379 | 50 | """ | 55 | """ |
3380 | 51 | Removes all default trusted CA certificates from the system. To actually | 56 | Removes all default trusted CA certificates from the system. To actually |
3381 | 52 | apply the change you must also call L{update_ca_certs}. | 57 | apply the change you must also call L{update_ca_certs}. |
3382 | 53 | """ | 58 | """ |
3386 | 54 | delete_dir_contents(CA_CERT_PATH) | 59 | util.delete_dir_contents(paths.join(False, CA_CERT_PATH)) |
3387 | 55 | delete_dir_contents(CA_CERT_SYSTEM_PATH) | 60 | util.delete_dir_contents(paths.join(False, CA_CERT_SYSTEM_PATH)) |
3388 | 56 | write_file(CA_CERT_CONFIG, "", mode=0644) | 61 | util.write_file(paths.join(False, CA_CERT_CONFIG), "", mode=0644) |
3389 | 57 | debconf_sel = "ca-certificates ca-certificates/trust_new_crts select no" | 62 | debconf_sel = "ca-certificates ca-certificates/trust_new_crts select no" |
3394 | 58 | subp(('debconf-set-selections', '-'), debconf_sel) | 63 | util.subp(('debconf-set-selections', '-'), debconf_sel) |
3395 | 59 | 64 | ||
3396 | 60 | 65 | ||
3397 | 61 | def handle(_name, cfg, _cloud, log, _args): | 66 | def handle(name, cfg, cloud, log, _args): |
3398 | 62 | """ | 67 | """ |
3399 | 63 | Call to handle ca-cert sections in cloud-config file. | 68 | Call to handle ca-cert sections in cloud-config file. |
3400 | 64 | 69 | ||
3401 | @@ -70,21 +75,25 @@ | |||
3402 | 70 | """ | 75 | """ |
3403 | 71 | # If there isn't a ca-certs section in the configuration don't do anything | 76 | # If there isn't a ca-certs section in the configuration don't do anything |
3404 | 72 | if "ca-certs" not in cfg: | 77 | if "ca-certs" not in cfg: |
3405 | 78 | log.debug(("Skipping module named %s," | ||
3406 | 79 | " no 'ca-certs' key in configuration"), name) | ||
3407 | 73 | return | 80 | return |
3408 | 81 | |||
3409 | 74 | ca_cert_cfg = cfg['ca-certs'] | 82 | ca_cert_cfg = cfg['ca-certs'] |
3410 | 75 | 83 | ||
3411 | 76 | # If there is a remove-defaults option set to true, remove the system | 84 | # If there is a remove-defaults option set to true, remove the system |
3412 | 77 | # default trusted CA certs first. | 85 | # default trusted CA certs first. |
3413 | 78 | if ca_cert_cfg.get("remove-defaults", False): | 86 | if ca_cert_cfg.get("remove-defaults", False): |
3416 | 79 | log.debug("removing default certificates") | 87 | log.debug("Removing default certificates") |
3417 | 80 | remove_default_ca_certs() | 88 | remove_default_ca_certs(cloud.paths) |
3418 | 81 | 89 | ||
3419 | 82 | # If we are given any new trusted CA certs to add, add them. | 90 | # If we are given any new trusted CA certs to add, add them. |
3420 | 83 | if "trusted" in ca_cert_cfg: | 91 | if "trusted" in ca_cert_cfg: |
3422 | 84 | trusted_certs = get_cfg_option_list_or_str(ca_cert_cfg, "trusted") | 92 | trusted_certs = util.get_cfg_option_list(ca_cert_cfg, "trusted") |
3423 | 85 | if trusted_certs: | 93 | if trusted_certs: |
3426 | 86 | log.debug("adding %d certificates" % len(trusted_certs)) | 94 | log.debug("Adding %d certificates" % len(trusted_certs)) |
3427 | 87 | add_ca_certs(trusted_certs) | 95 | add_ca_certs(cloud.paths, trusted_certs) |
3428 | 88 | 96 | ||
3429 | 89 | # Update the system with the new cert configuration. | 97 | # Update the system with the new cert configuration. |
3430 | 98 | log.debug("Updating certificates") | ||
3431 | 90 | update_ca_certs() | 99 | update_ca_certs() |
3432 | 91 | 100 | ||
3433 | === modified file 'cloudinit/config/cc_chef.py' | |||
3434 | --- cloudinit/CloudConfig/cc_chef.py 2012-03-26 17:49:06 +0000 | |||
3435 | +++ cloudinit/config/cc_chef.py 2012-07-06 21:16:18 +0000 | |||
3436 | @@ -18,53 +18,71 @@ | |||
3437 | 18 | # You should have received a copy of the GNU General Public License | 18 | # You should have received a copy of the GNU General Public License |
3438 | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
3439 | 20 | 20 | ||
3440 | 21 | import json | ||
3441 | 21 | import os | 22 | import os |
3451 | 22 | import subprocess | 23 | |
3452 | 23 | import json | 24 | from cloudinit import templater |
3453 | 24 | import cloudinit.CloudConfig as cc | 25 | from cloudinit import util |
3454 | 25 | import cloudinit.util as util | 26 | |
3455 | 26 | 27 | RUBY_VERSION_DEFAULT = "1.8" | |
3456 | 27 | ruby_version_default = "1.8" | 28 | |
3457 | 28 | 29 | ||
3458 | 29 | 30 | def handle(name, cfg, cloud, log, _args): | |
3459 | 30 | def handle(_name, cfg, cloud, log, _args): | 31 | |
3460 | 31 | # If there isn't a chef key in the configuration don't do anything | 32 | # If there isn't a chef key in the configuration don't do anything |
3461 | 32 | if 'chef' not in cfg: | 33 | if 'chef' not in cfg: |
3462 | 34 | log.debug(("Skipping module named %s," | ||
3463 | 35 | " no 'chef' key in configuration"), name) | ||
3464 | 33 | return | 36 | return |
3465 | 34 | chef_cfg = cfg['chef'] | 37 | chef_cfg = cfg['chef'] |
3466 | 35 | 38 | ||
3470 | 36 | # ensure the chef directories we use exist | 39 | # Ensure the chef directories we use exist |
3471 | 37 | mkdirs(['/etc/chef', '/var/log/chef', '/var/lib/chef', | 40 | c_dirs = [ |
3472 | 38 | '/var/cache/chef', '/var/backups/chef', '/var/run/chef']) | 41 | '/etc/chef', |
3473 | 42 | '/var/log/chef', | ||
3474 | 43 | '/var/lib/chef', | ||
3475 | 44 | '/var/cache/chef', | ||
3476 | 45 | '/var/backups/chef', | ||
3477 | 46 | '/var/run/chef', | ||
3478 | 47 | ] | ||
3479 | 48 | for d in c_dirs: | ||
3480 | 49 | util.ensure_dir(cloud.paths.join(False, d)) | ||
3481 | 39 | 50 | ||
3483 | 40 | # set the validation key based on the presence of either 'validation_key' | 51 | # Set the validation key based on the presence of either 'validation_key' |
3484 | 41 | # or 'validation_cert'. In the case where both exist, 'validation_key' | 52 | # or 'validation_cert'. In the case where both exist, 'validation_key' |
3485 | 42 | # takes precedence | 53 | # takes precedence |
3486 | 43 | for key in ('validation_key', 'validation_cert'): | 54 | for key in ('validation_key', 'validation_cert'): |
3487 | 44 | if key in chef_cfg and chef_cfg[key]: | 55 | if key in chef_cfg and chef_cfg[key]: |
3490 | 45 | with open('/etc/chef/validation.pem', 'w') as validation_key_fh: | 56 | v_fn = cloud.paths.join(False, '/etc/chef/validation.pem') |
3491 | 46 | validation_key_fh.write(chef_cfg[key]) | 57 | util.write_file(v_fn, chef_cfg[key]) |
3492 | 47 | break | 58 | break |
3493 | 48 | 59 | ||
3502 | 49 | # create the chef config from template | 60 | # Create the chef config from template |
3503 | 50 | util.render_to_file('chef_client.rb', '/etc/chef/client.rb', | 61 | template_fn = cloud.get_template_filename('chef_client.rb') |
3504 | 51 | {'server_url': chef_cfg['server_url'], | 62 | if template_fn: |
3505 | 52 | 'node_name': util.get_cfg_option_str(chef_cfg, 'node_name', | 63 | iid = str(cloud.datasource.get_instance_id()) |
3506 | 53 | cloud.datasource.get_instance_id()), | 64 | params = { |
3507 | 54 | 'environment': util.get_cfg_option_str(chef_cfg, 'environment', | 65 | 'server_url': chef_cfg['server_url'], |
3508 | 55 | '_default'), | 66 | 'node_name': util.get_cfg_option_str(chef_cfg, 'node_name', iid), |
3509 | 56 | 'validation_name': chef_cfg['validation_name']}) | 67 | 'environment': util.get_cfg_option_str(chef_cfg, 'environment', |
3510 | 68 | '_default'), | ||
3511 | 69 | 'validation_name': chef_cfg['validation_name'] | ||
3512 | 70 | } | ||
3513 | 71 | out_fn = cloud.paths.join(False, '/etc/chef/client.rb') | ||
3514 | 72 | templater.render_to_file(template_fn, out_fn, params) | ||
3515 | 73 | else: | ||
3516 | 74 | log.warn("No template found, not rendering to /etc/chef/client.rb") | ||
3517 | 57 | 75 | ||
3518 | 58 | # set the firstboot json | 76 | # set the firstboot json |
3528 | 59 | with open('/etc/chef/firstboot.json', 'w') as firstboot_json_fh: | 77 | initial_json = {} |
3529 | 60 | initial_json = {} | 78 | if 'run_list' in chef_cfg: |
3530 | 61 | if 'run_list' in chef_cfg: | 79 | initial_json['run_list'] = chef_cfg['run_list'] |
3531 | 62 | initial_json['run_list'] = chef_cfg['run_list'] | 80 | if 'initial_attributes' in chef_cfg: |
3532 | 63 | if 'initial_attributes' in chef_cfg: | 81 | initial_attributes = chef_cfg['initial_attributes'] |
3533 | 64 | initial_attributes = chef_cfg['initial_attributes'] | 82 | for k in list(initial_attributes.keys()): |
3534 | 65 | for k in initial_attributes.keys(): | 83 | initial_json[k] = initial_attributes[k] |
3535 | 66 | initial_json[k] = initial_attributes[k] | 84 | firstboot_fn = cloud.paths.join(False, '/etc/chef/firstboot.json') |
3536 | 67 | firstboot_json_fh.write(json.dumps(initial_json)) | 85 | util.write_file(firstboot_fn, json.dumps(initial_json)) |
3537 | 68 | 86 | ||
3538 | 69 | # If chef is not installed, we install chef based on 'install_type' | 87 | # If chef is not installed, we install chef based on 'install_type' |
3539 | 70 | if not os.path.isfile('/usr/bin/chef-client'): | 88 | if not os.path.isfile('/usr/bin/chef-client'): |
3540 | @@ -74,15 +92,17 @@ | |||
3541 | 74 | # this will install and run the chef-client from gems | 92 | # this will install and run the chef-client from gems |
3542 | 75 | chef_version = util.get_cfg_option_str(chef_cfg, 'version', None) | 93 | chef_version = util.get_cfg_option_str(chef_cfg, 'version', None) |
3543 | 76 | ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version', | 94 | ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version', |
3546 | 77 | ruby_version_default) | 95 | RUBY_VERSION_DEFAULT) |
3547 | 78 | install_chef_from_gems(ruby_version, chef_version) | 96 | install_chef_from_gems(cloud.distro, ruby_version, chef_version) |
3548 | 79 | # and finally, run chef-client | 97 | # and finally, run chef-client |
3552 | 80 | log.debug('running chef-client') | 98 | log.debug('Running chef-client') |
3553 | 81 | subprocess.check_call(['/usr/bin/chef-client', '-d', '-i', '1800', | 99 | util.subp(['/usr/bin/chef-client', |
3554 | 82 | '-s', '20']) | 100 | '-d', '-i', '1800', '-s', '20'], capture=False) |
3555 | 101 | elif install_type == 'packages': | ||
3556 | 102 | # this will install and run the chef-client from packages | ||
3557 | 103 | cloud.distro.install_packages(('chef',)) | ||
3558 | 83 | else: | 104 | else: |
3561 | 84 | # this will install and run the chef-client from packages | 105 | log.warn("Unknown chef install type %s", install_type) |
3560 | 85 | cc.install_packages(('chef',)) | ||
3562 | 86 | 106 | ||
3563 | 87 | 107 | ||
3564 | 88 | def get_ruby_packages(version): | 108 | def get_ruby_packages(version): |
3565 | @@ -90,30 +110,20 @@ | |||
3566 | 90 | pkgs = ['ruby%s' % version, 'ruby%s-dev' % version] | 110 | pkgs = ['ruby%s' % version, 'ruby%s-dev' % version] |
3567 | 91 | if version == "1.8": | 111 | if version == "1.8": |
3568 | 92 | pkgs.extend(('libopenssl-ruby1.8', 'rubygems1.8')) | 112 | pkgs.extend(('libopenssl-ruby1.8', 'rubygems1.8')) |
3574 | 93 | return(pkgs) | 113 | return pkgs |
3575 | 94 | 114 | ||
3576 | 95 | 115 | ||
3577 | 96 | def install_chef_from_gems(ruby_version, chef_version=None): | 116 | def install_chef_from_gems(ruby_version, chef_version, distro): |
3578 | 97 | cc.install_packages(get_ruby_packages(ruby_version)) | 117 | distro.install_packages(get_ruby_packages(ruby_version)) |
3579 | 98 | if not os.path.exists('/usr/bin/gem'): | 118 | if not os.path.exists('/usr/bin/gem'): |
3581 | 99 | os.symlink('/usr/bin/gem%s' % ruby_version, '/usr/bin/gem') | 119 | util.sym_link('/usr/bin/gem%s' % ruby_version, '/usr/bin/gem') |
3582 | 100 | if not os.path.exists('/usr/bin/ruby'): | 120 | if not os.path.exists('/usr/bin/ruby'): |
3584 | 101 | os.symlink('/usr/bin/ruby%s' % ruby_version, '/usr/bin/ruby') | 121 | util.sym_link('/usr/bin/ruby%s' % ruby_version, '/usr/bin/ruby') |
3585 | 102 | if chef_version: | 122 | if chef_version: |
3589 | 103 | subprocess.check_call(['/usr/bin/gem', 'install', 'chef', | 123 | util.subp(['/usr/bin/gem', 'install', 'chef', |
3590 | 104 | '-v %s' % chef_version, '--no-ri', | 124 | '-v %s' % chef_version, '--no-ri', |
3591 | 105 | '--no-rdoc', '--bindir', '/usr/bin', '-q']) | 125 | '--no-rdoc', '--bindir', '/usr/bin', '-q'], capture=False) |
3592 | 106 | else: | 126 | else: |
3606 | 107 | subprocess.check_call(['/usr/bin/gem', 'install', 'chef', | 127 | util.subp(['/usr/bin/gem', 'install', 'chef', |
3607 | 108 | '--no-ri', '--no-rdoc', '--bindir', | 128 | '--no-ri', '--no-rdoc', '--bindir', |
3608 | 109 | '/usr/bin', '-q']) | 129 | '/usr/bin', '-q'], capture=False) |
3596 | 110 | |||
3597 | 111 | |||
3598 | 112 | def ensure_dir(d): | ||
3599 | 113 | if not os.path.exists(d): | ||
3600 | 114 | os.makedirs(d) | ||
3601 | 115 | |||
3602 | 116 | |||
3603 | 117 | def mkdirs(dirs): | ||
3604 | 118 | for d in dirs: | ||
3605 | 119 | ensure_dir(d) | ||
3609 | 120 | 130 | ||
3610 | === modified file 'cloudinit/config/cc_disable_ec2_metadata.py' | |||
3611 | --- cloudinit/CloudConfig/cc_disable_ec2_metadata.py 2012-01-18 14:07:33 +0000 | |||
3612 | +++ cloudinit/config/cc_disable_ec2_metadata.py 2012-07-06 21:16:18 +0000 | |||
3613 | @@ -17,14 +17,20 @@ | |||
3614 | 17 | # | 17 | # |
3615 | 18 | # You should have received a copy of the GNU General Public License | 18 | # You should have received a copy of the GNU General Public License |
3616 | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
3628 | 20 | import cloudinit.util as util | 20 | |
3629 | 21 | import subprocess | 21 | from cloudinit import util |
3630 | 22 | from cloudinit.CloudConfig import per_always | 22 | |
3631 | 23 | 23 | from cloudinit.settings import PER_ALWAYS | |
3632 | 24 | frequency = per_always | 24 | |
3633 | 25 | 25 | frequency = PER_ALWAYS | |
3634 | 26 | 26 | ||
3635 | 27 | def handle(_name, cfg, _cloud, _log, _args): | 27 | REJECT_CMD = ['route', 'add', '-host', '169.254.169.254', 'reject'] |
3636 | 28 | if util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False): | 28 | |
3637 | 29 | fwall = "route add -host 169.254.169.254 reject" | 29 | |
3638 | 30 | subprocess.call(fwall.split(' ')) | 30 | def handle(name, cfg, _cloud, log, _args): |
3639 | 31 | disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False) | ||
3640 | 32 | if disabled: | ||
3641 | 33 | util.subp(REJECT_CMD, capture=False) | ||
3642 | 34 | else: | ||
3643 | 35 | log.debug(("Skipping module named %s," | ||
3644 | 36 | " disabling the ec2 route not enabled"), name) | ||
3645 | 31 | 37 | ||
3646 | === modified file 'cloudinit/config/cc_final_message.py' | |||
3647 | --- cloudinit/CloudConfig/cc_final_message.py 2012-01-18 14:07:33 +0000 | |||
3648 | +++ cloudinit/config/cc_final_message.py 2012-07-06 21:16:18 +0000 | |||
3649 | @@ -18,41 +18,51 @@ | |||
3650 | 18 | # You should have received a copy of the GNU General Public License | 18 | # You should have received a copy of the GNU General Public License |
3651 | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
3652 | 20 | 20 | ||
3664 | 21 | from cloudinit.CloudConfig import per_always | 21 | from cloudinit import templater |
3665 | 22 | import sys | 22 | from cloudinit import util |
3666 | 23 | from cloudinit import util, boot_finished | 23 | from cloudinit import version |
3667 | 24 | import time | 24 | |
3668 | 25 | 25 | from cloudinit.settings import PER_ALWAYS | |
3669 | 26 | frequency = per_always | 26 | |
3670 | 27 | 27 | frequency = PER_ALWAYS | |
3671 | 28 | final_message = "cloud-init boot finished at $TIMESTAMP. Up $UPTIME seconds" | 28 | |
3672 | 29 | 29 | FINAL_MESSAGE_DEF = ("Cloud-init v. {{version}} finished at {{timestamp}}." | |
3673 | 30 | 30 | " Up {{uptime}} seconds.") | |
3674 | 31 | def handle(_name, cfg, _cloud, log, args): | 31 | |
3675 | 32 | |||
3676 | 33 | def handle(_name, cfg, cloud, log, args): | ||
3677 | 34 | |||
3678 | 35 | msg_in = None | ||
3679 | 32 | if len(args) != 0: | 36 | if len(args) != 0: |
3680 | 33 | msg_in = args[0] | 37 | msg_in = args[0] |
3681 | 34 | else: | 38 | else: |
3694 | 35 | msg_in = util.get_cfg_option_str(cfg, "final_message", final_message) | 39 | msg_in = util.get_cfg_option_str(cfg, "final_message") |
3695 | 36 | 40 | ||
3696 | 37 | try: | 41 | if not msg_in: |
3697 | 38 | uptimef = open("/proc/uptime") | 42 | template_fn = cloud.get_template_filename('final_message') |
3698 | 39 | uptime = uptimef.read().split(" ")[0] | 43 | if template_fn: |
3699 | 40 | uptimef.close() | 44 | msg_in = util.load_file(template_fn) |
3700 | 41 | except IOError as e: | 45 | |
3701 | 42 | log.warn("unable to open /proc/uptime\n") | 46 | if not msg_in: |
3702 | 43 | uptime = "na" | 47 | msg_in = FINAL_MESSAGE_DEF |
3703 | 44 | 48 | ||
3704 | 45 | try: | 49 | uptime = util.uptime() |
3705 | 46 | ts = time.strftime("%a, %d %b %Y %H:%M:%S %z", time.gmtime()) | 50 | ts = util.time_rfc2822() |
3706 | 51 | cver = version.version_string() | ||
3707 | 52 | try: | ||
3708 | 53 | subs = { | ||
3709 | 54 | 'uptime': uptime, | ||
3710 | 55 | 'timestamp': ts, | ||
3711 | 56 | 'version': cver, | ||
3712 | 57 | } | ||
3713 | 58 | util.multi_log("%s\n" % (templater.render_string(msg_in, subs)), | ||
3714 | 59 | console=False, stderr=True) | ||
3715 | 60 | except Exception: | ||
3716 | 61 | util.logexc(log, "Failed to render final message template") | ||
3717 | 62 | |||
3718 | 63 | boot_fin_fn = cloud.paths.boot_finished | ||
3719 | 64 | try: | ||
3720 | 65 | contents = "%s - %s - v. %s\n" % (uptime, ts, cver) | ||
3721 | 66 | util.write_file(boot_fin_fn, contents) | ||
3722 | 47 | except: | 67 | except: |
3734 | 48 | ts = "na" | 68 | util.logexc(log, "Failed to write boot finished file %s", boot_fin_fn) |
3724 | 49 | |||
3725 | 50 | try: | ||
3726 | 51 | subs = {'UPTIME': uptime, 'TIMESTAMP': ts} | ||
3727 | 52 | sys.stdout.write("%s\n" % util.render_string(msg_in, subs)) | ||
3728 | 53 | except Exception as e: | ||
3729 | 54 | log.warn("failed to render string to stdout: %s" % e) | ||
3730 | 55 | |||
3731 | 56 | fp = open(boot_finished, "wb") | ||
3732 | 57 | fp.write(uptime + "\n") | ||
3733 | 58 | fp.close() | ||
3735 | 59 | 69 | ||
3736 | === modified file 'cloudinit/config/cc_foo.py' | |||
3737 | --- cloudinit/CloudConfig/cc_foo.py 2012-01-18 14:07:33 +0000 | |||
3738 | +++ cloudinit/config/cc_foo.py 2012-07-06 21:16:18 +0000 | |||
3739 | @@ -18,12 +18,35 @@ | |||
3740 | 18 | # You should have received a copy of the GNU General Public License | 18 | # You should have received a copy of the GNU General Public License |
3741 | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
3742 | 20 | 20 | ||
3752 | 21 | #import cloudinit | 21 | from cloudinit.settings import PER_INSTANCE |
3753 | 22 | #import cloudinit.util as util | 22 | |
3754 | 23 | from cloudinit.CloudConfig import per_instance | 23 | # Modules are expected to have the following attributes. |
3755 | 24 | 24 | # 1. A required 'handle' method which takes the following params. | |
3756 | 25 | frequency = per_instance | 25 | # a) The name will not be this file's name, but instead |
3757 | 26 | 26 | # the name specified in configuration (which is the name | |
3758 | 27 | 27 | # which will be used to find this module). | |
3759 | 28 | def handle(_name, _cfg, _cloud, _log, _args): | 28 | # b) A configuration object that is the result of the merging |
3760 | 29 | print "hi" | 29 | # of cloud configs configuration with legacy configuration |
3761 | 30 | # as well as any datasource provided configuration | ||
3762 | 31 | # c) A cloud object that can be used to access various | ||
3763 | 32 | # datasource and paths for the given distro and data provided | ||
3764 | 33 | # by the various datasource instance types. | ||
3765 | 34 | # d) An argument list that may or may not be empty to this module. | ||
3766 | 35 | # Typically those are from module configuration where the module | ||
3767 | 36 | # is defined with some extra configuration that will eventually | ||
3768 | 37 | # be translated from yaml into arguments to this module. | ||
3769 | 38 | # 2. An optional 'frequency' that defines how often this module should be run. | ||
3770 | 39 | # Typically one of PER_INSTANCE, PER_ALWAYS, PER_ONCE. If not | ||
3771 | 40 | # provided PER_INSTANCE will be assumed. | ||
3772 | 41 | # See settings.py for these constants. | ||
3773 | 42 | # 3. An optional 'distros' array/set/tuple that defines the known distros | ||
3774 | 43 | # this module will work with (if not all of them). This is used to write | ||
3775 | 44 | # a warning out if a module is being run on an untested distribution for | ||
3776 | 45 | # informational purposes. If nonexistent, all distros are assumed and | ||
3777 | 46 | # no warning occurs. | ||
3778 | 47 | |||
3779 | 48 | frequency = PER_INSTANCE | ||
3780 | 49 | |||
3781 | 50 | |||
3782 | 51 | def handle(name, _cfg, _cloud, log, _args): | ||
3783 | 52 | log.debug("Hi from module %s", name) | ||
3784 | 30 | 53 | ||
3785 | === modified file 'cloudinit/config/cc_grub_dpkg.py' | |||
3786 | --- cloudinit/CloudConfig/cc_grub_dpkg.py 2012-01-18 14:07:33 +0000 | |||
3787 | +++ cloudinit/config/cc_grub_dpkg.py 2012-07-06 21:16:18 +0000 | |||
3788 | @@ -18,10 +18,12 @@ | |||
3789 | 18 | # You should have received a copy of the GNU General Public License | 18 | # You should have received a copy of the GNU General Public License |
3790 | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
3791 | 20 | 20 | ||
3792 | 21 | import cloudinit.util as util | ||
3793 | 22 | import traceback | ||
3794 | 23 | import os | 21 | import os |
3795 | 24 | 22 | ||
3796 | 23 | from cloudinit import util | ||
3797 | 24 | |||
3798 | 25 | distros = ['ubuntu', 'debian'] | ||
3799 | 26 | |||
3800 | 25 | 27 | ||
3801 | 26 | def handle(_name, cfg, _cloud, log, _args): | 28 | def handle(_name, cfg, _cloud, log, _args): |
3802 | 27 | idevs = None | 29 | idevs = None |
3803 | @@ -35,14 +37,14 @@ | |||
3804 | 35 | 37 | ||
3805 | 36 | if ((os.path.exists("/dev/sda1") and not os.path.exists("/dev/sda")) or | 38 | if ((os.path.exists("/dev/sda1") and not os.path.exists("/dev/sda")) or |
3806 | 37 | (os.path.exists("/dev/xvda1") and not os.path.exists("/dev/xvda"))): | 39 | (os.path.exists("/dev/xvda1") and not os.path.exists("/dev/xvda"))): |
3808 | 38 | if idevs == None: | 40 | if idevs is None: |
3809 | 39 | idevs = "" | 41 | idevs = "" |
3811 | 40 | if idevs_empty == None: | 42 | if idevs_empty is None: |
3812 | 41 | idevs_empty = "true" | 43 | idevs_empty = "true" |
3813 | 42 | else: | 44 | else: |
3815 | 43 | if idevs_empty == None: | 45 | if idevs_empty is None: |
3816 | 44 | idevs_empty = "false" | 46 | idevs_empty = "false" |
3818 | 45 | if idevs == None: | 47 | if idevs is None: |
3819 | 46 | idevs = "/dev/sda" | 48 | idevs = "/dev/sda" |
3820 | 47 | for dev in ("/dev/sda", "/dev/vda", "/dev/sda1", "/dev/vda1"): | 49 | for dev in ("/dev/sda", "/dev/vda", "/dev/sda1", "/dev/vda1"): |
3821 | 48 | if os.path.exists(dev): | 50 | if os.path.exists(dev): |
3822 | @@ -52,13 +54,14 @@ | |||
3823 | 52 | # now idevs and idevs_empty are set to determined values | 54 | # now idevs and idevs_empty are set to determined values |
3824 | 53 | # or, those set by user | 55 | # or, those set by user |
3825 | 54 | 56 | ||
3829 | 55 | dconf_sel = "grub-pc grub-pc/install_devices string %s\n" % idevs + \ | 57 | dconf_sel = (("grub-pc grub-pc/install_devices string %s\n" |
3830 | 56 | "grub-pc grub-pc/install_devices_empty boolean %s\n" % idevs_empty | 58 | "grub-pc grub-pc/install_devices_empty boolean %s\n") % |
3831 | 57 | log.debug("setting grub debconf-set-selections with '%s','%s'" % | 59 | (idevs, idevs_empty)) |
3832 | 60 | |||
3833 | 61 | log.debug("Setting grub debconf-set-selections with '%s','%s'" % | ||
3834 | 58 | (idevs, idevs_empty)) | 62 | (idevs, idevs_empty)) |
3835 | 59 | 63 | ||
3836 | 60 | try: | 64 | try: |
3838 | 61 | util.subp(('debconf-set-selections'), dconf_sel) | 65 | util.subp(['debconf-set-selections'], dconf_sel) |
3839 | 62 | except: | 66 | except: |
3842 | 63 | log.error("Failed to run debconf-set-selections for grub-dpkg") | 67 | util.logexc(log, "Failed to run debconf-set-selections for grub-dpkg") |
3841 | 64 | log.debug(traceback.format_exc()) | ||
3843 | 65 | 68 | ||
3844 | === modified file 'cloudinit/config/cc_keys_to_console.py' | |||
3845 | --- cloudinit/CloudConfig/cc_keys_to_console.py 2012-01-18 14:07:33 +0000 | |||
3846 | +++ cloudinit/config/cc_keys_to_console.py 2012-07-06 21:16:18 +0000 | |||
3847 | @@ -18,25 +18,36 @@ | |||
3848 | 18 | # You should have received a copy of the GNU General Public License | 18 | # You should have received a copy of the GNU General Public License |
3849 | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
3850 | 20 | 20 | ||
3864 | 21 | from cloudinit.CloudConfig import per_instance | 21 | import os |
3865 | 22 | import cloudinit.util as util | 22 | |
3866 | 23 | import subprocess | 23 | from cloudinit.settings import PER_INSTANCE |
3867 | 24 | 24 | from cloudinit import util | |
3868 | 25 | frequency = per_instance | 25 | |
3869 | 26 | 26 | frequency = PER_INSTANCE | |
3870 | 27 | 27 | ||
3871 | 28 | def handle(_name, cfg, _cloud, log, _args): | 28 | # This is a tool that cloud init provides |
3872 | 29 | cmd = ['/usr/lib/cloud-init/write-ssh-key-fingerprints'] | 29 | HELPER_TOOL = '/usr/lib/cloud-init/write-ssh-key-fingerprints' |
3873 | 30 | fp_blacklist = util.get_cfg_option_list_or_str(cfg, | 30 | |
3874 | 31 | "ssh_fp_console_blacklist", []) | 31 | |
3875 | 32 | key_blacklist = util.get_cfg_option_list_or_str(cfg, | 32 | def handle(name, cfg, _cloud, log, _args): |
3876 | 33 | "ssh_key_console_blacklist", ["ssh-dss"]) | 33 | if not os.path.exists(HELPER_TOOL): |
3877 | 34 | log.warn(("Unable to activate module %s," | ||
3878 | 35 | " helper tool not found at %s"), name, HELPER_TOOL) | ||
3879 | 36 | return | ||
3880 | 37 | |||
3881 | 38 | fp_blacklist = util.get_cfg_option_list(cfg, | ||
3882 | 39 | "ssh_fp_console_blacklist", []) | ||
3883 | 40 | key_blacklist = util.get_cfg_option_list(cfg, | ||
3884 | 41 | "ssh_key_console_blacklist", | ||
3885 | 42 | ["ssh-dss"]) | ||
3886 | 43 | |||
3887 | 34 | try: | 44 | try: |
3889 | 35 | confp = open('/dev/console', "wb") | 45 | cmd = [HELPER_TOOL] |
3890 | 36 | cmd.append(','.join(fp_blacklist)) | 46 | cmd.append(','.join(fp_blacklist)) |
3891 | 37 | cmd.append(','.join(key_blacklist)) | 47 | cmd.append(','.join(key_blacklist)) |
3894 | 38 | subprocess.call(cmd, stdout=confp) | 48 | (stdout, _stderr) = util.subp(cmd) |
3895 | 39 | confp.close() | 49 | util.multi_log("%s\n" % (stdout.strip()), |
3896 | 50 | stderr=False, console=True) | ||
3897 | 40 | except: | 51 | except: |
3899 | 41 | log.warn("writing keys to console value") | 52 | log.warn("Writing keys to the system console failed!") |
3900 | 42 | raise | 53 | raise |
3901 | 43 | 54 | ||
3902 | === modified file 'cloudinit/config/cc_landscape.py' | |||
3903 | --- cloudinit/CloudConfig/cc_landscape.py 2012-04-10 20:22:47 +0000 | |||
3904 | +++ cloudinit/config/cc_landscape.py 2012-07-06 21:16:18 +0000 | |||
3905 | @@ -19,16 +19,23 @@ | |||
3906 | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
3907 | 20 | 20 | ||
3908 | 21 | import os | 21 | import os |
3911 | 22 | import os.path | 22 | |
3912 | 23 | from cloudinit.CloudConfig import per_instance | 23 | from StringIO import StringIO |
3913 | 24 | |||
3914 | 24 | from configobj import ConfigObj | 25 | from configobj import ConfigObj |
3915 | 25 | 26 | ||
3919 | 26 | frequency = per_instance | 27 | from cloudinit import util |
3920 | 27 | 28 | ||
3921 | 28 | lsc_client_cfg_file = "/etc/landscape/client.conf" | 29 | from cloudinit.settings import PER_INSTANCE |
3922 | 30 | |||
3923 | 31 | frequency = PER_INSTANCE | ||
3924 | 32 | |||
3925 | 33 | LSC_CLIENT_CFG_FILE = "/etc/landscape/client.conf" | ||
3926 | 34 | |||
3927 | 35 | distros = ['ubuntu'] | ||
3928 | 29 | 36 | ||
3929 | 30 | # defaults taken from stock client.conf in landscape-client 11.07.1.1-0ubuntu2 | 37 | # defaults taken from stock client.conf in landscape-client 11.07.1.1-0ubuntu2 |
3931 | 31 | lsc_builtincfg = { | 38 | LSC_BUILTIN_CFG = { |
3932 | 32 | 'client': { | 39 | 'client': { |
3933 | 33 | 'log_level': "info", | 40 | 'log_level': "info", |
3934 | 34 | 'url': "https://landscape.canonical.com/message-system", | 41 | 'url': "https://landscape.canonical.com/message-system", |
3935 | @@ -38,7 +45,7 @@ | |||
3936 | 38 | } | 45 | } |
3937 | 39 | 46 | ||
3938 | 40 | 47 | ||
3940 | 41 | def handle(_name, cfg, _cloud, log, _args): | 48 | def handle(_name, cfg, cloud, log, _args): |
3941 | 42 | """ | 49 | """ |
3942 | 43 | Basically turn a top level 'landscape' entry with a 'client' dict | 50 | Basically turn a top level 'landscape' entry with a 'client' dict |
3943 | 44 | and render it to ConfigObj format under '[client]' section in | 51 | and render it to ConfigObj format under '[client]' section in |
3944 | @@ -47,27 +54,40 @@ | |||
3945 | 47 | 54 | ||
3946 | 48 | ls_cloudcfg = cfg.get("landscape", {}) | 55 | ls_cloudcfg = cfg.get("landscape", {}) |
3947 | 49 | 56 | ||
3963 | 50 | if not isinstance(ls_cloudcfg, dict): | 57 | if not isinstance(ls_cloudcfg, (dict)): |
3964 | 51 | raise(Exception("'landscape' existed in config, but not a dict")) | 58 | raise RuntimeError(("'landscape' key existed in config," |
3965 | 52 | 59 | " but not a dictionary type," | |
3966 | 53 | merged = mergeTogether([lsc_builtincfg, lsc_client_cfg_file, ls_cloudcfg]) | 60 | " is a %s instead"), util.obj_name(ls_cloudcfg)) |
3967 | 54 | 61 | ||
3968 | 55 | if not os.path.isdir(os.path.dirname(lsc_client_cfg_file)): | 62 | merge_data = [ |
3969 | 56 | os.makedirs(os.path.dirname(lsc_client_cfg_file)) | 63 | LSC_BUILTIN_CFG, |
3970 | 57 | 64 | cloud.paths.join(True, LSC_CLIENT_CFG_FILE), | |
3971 | 58 | with open(lsc_client_cfg_file, "w") as fp: | 65 | ls_cloudcfg, |
3972 | 59 | merged.write(fp) | 66 | ] |
3973 | 60 | 67 | merged = merge_together(merge_data) | |
3974 | 61 | log.debug("updated %s" % lsc_client_cfg_file) | 68 | |
3975 | 62 | 69 | lsc_client_fn = cloud.paths.join(False, LSC_CLIENT_CFG_FILE) | |
3976 | 63 | 70 | lsc_dir = cloud.paths.join(False, os.path.dirname(lsc_client_fn)) | |
3977 | 64 | def mergeTogether(objs): | 71 | if not os.path.isdir(lsc_dir): |
3978 | 72 | util.ensure_dir(lsc_dir) | ||
3979 | 73 | |||
3980 | 74 | contents = StringIO() | ||
3981 | 75 | merged.write(contents) | ||
3982 | 76 | contents.flush() | ||
3983 | 77 | |||
3984 | 78 | util.write_file(lsc_client_fn, contents.getvalue()) | ||
3985 | 79 | log.debug("Wrote landscape config file to %s", lsc_client_fn) | ||
3986 | 80 | |||
3987 | 81 | |||
3988 | 82 | def merge_together(objs): | ||
3989 | 65 | """ | 83 | """ |
3990 | 66 | merge together ConfigObj objects or things that ConfigObj() will take in | 84 | merge together ConfigObj objects or things that ConfigObj() will take in |
3991 | 67 | later entries override earlier | 85 | later entries override earlier |
3992 | 68 | """ | 86 | """ |
3993 | 69 | cfg = ConfigObj({}) | 87 | cfg = ConfigObj({}) |
3994 | 70 | for obj in objs: | 88 | for obj in objs: |
3995 | 89 | if not obj: | ||
3996 | 90 | continue | ||
3997 | 71 | if isinstance(obj, ConfigObj): | 91 | if isinstance(obj, ConfigObj): |
3998 | 72 | cfg.merge(obj) | 92 | cfg.merge(obj) |
3999 | 73 | else: | 93 | else: |
4000 | 74 | 94 | ||
4001 | === modified file 'cloudinit/config/cc_locale.py' | |||
4002 | --- cloudinit/CloudConfig/cc_locale.py 2012-01-18 14:07:33 +0000 | |||
4003 | +++ cloudinit/config/cc_locale.py 2012-07-06 21:16:18 +0000 | |||
4004 | @@ -18,37 +18,20 @@ | |||
4005 | 18 | # You should have received a copy of the GNU General Public License | 18 | # You should have received a copy of the GNU General Public License |
4006 | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
4007 | 20 | 20 | ||
4024 | 21 | import cloudinit.util as util | 21 | from cloudinit import util |
4025 | 22 | import os.path | 22 | |
4026 | 23 | import subprocess | 23 | |
4027 | 24 | import traceback | 24 | def handle(name, cfg, cloud, log, args): |
4012 | 25 | |||
4013 | 26 | |||
4014 | 27 | def apply_locale(locale, cfgfile): | ||
4015 | 28 | if os.path.exists('/usr/sbin/locale-gen'): | ||
4016 | 29 | subprocess.Popen(['locale-gen', locale]).communicate() | ||
4017 | 30 | if os.path.exists('/usr/sbin/update-locale'): | ||
4018 | 31 | subprocess.Popen(['update-locale', locale]).communicate() | ||
4019 | 32 | |||
4020 | 33 | util.render_to_file('default-locale', cfgfile, {'locale': locale}) | ||
4021 | 34 | |||
4022 | 35 | |||
4023 | 36 | def handle(_name, cfg, cloud, log, args): | ||
4028 | 37 | if len(args) != 0: | 25 | if len(args) != 0: |
4029 | 38 | locale = args[0] | 26 | locale = args[0] |
4030 | 39 | else: | 27 | else: |
4031 | 40 | locale = util.get_cfg_option_str(cfg, "locale", cloud.get_locale()) | 28 | locale = util.get_cfg_option_str(cfg, "locale", cloud.get_locale()) |
4032 | 41 | 29 | ||
4033 | 42 | locale_cfgfile = util.get_cfg_option_str(cfg, "locale_configfile", | ||
4034 | 43 | "/etc/default/locale") | ||
4035 | 44 | |||
4036 | 45 | if not locale: | 30 | if not locale: |
4037 | 31 | log.debug(("Skipping module named %s, " | ||
4038 | 32 | "no 'locale' configuration found"), name) | ||
4039 | 46 | return | 33 | return |
4040 | 47 | 34 | ||
4048 | 48 | log.debug("setting locale to %s" % locale) | 35 | log.debug("Setting locale to %s", locale) |
4049 | 49 | 36 | locale_cfgfile = util.get_cfg_option_str(cfg, "locale_configfile") | |
4050 | 50 | try: | 37 | cloud.distro.apply_locale(locale, locale_cfgfile) |
4044 | 51 | apply_locale(locale, locale_cfgfile) | ||
4045 | 52 | except Exception as e: | ||
4046 | 53 | log.debug(traceback.format_exc(e)) | ||
4047 | 54 | raise Exception("failed to apply locale %s" % locale) | ||
4051 | 55 | 38 | ||
4052 | === modified file 'cloudinit/config/cc_mcollective.py' | |||
4053 | --- cloudinit/CloudConfig/cc_mcollective.py 2012-01-18 14:07:33 +0000 | |||
4054 | +++ cloudinit/config/cc_mcollective.py 2012-07-06 21:16:18 +0000 | |||
4055 | @@ -19,81 +19,73 @@ | |||
4056 | 19 | # You should have received a copy of the GNU General Public License | 19 | # You should have received a copy of the GNU General Public License |
4057 | 20 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | 20 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
4058 | 21 | 21 | ||
4087 | 22 | import os | 22 | from StringIO import StringIO |
4088 | 23 | import subprocess | 23 | |
4089 | 24 | import StringIO | 24 | # Used since this can maintain comments |
4090 | 25 | import ConfigParser | 25 | # and doesn't need a top level section |
4091 | 26 | import cloudinit.CloudConfig as cc | 26 | from configobj import ConfigObj |
4092 | 27 | import cloudinit.util as util | 27 | |
4093 | 28 | 28 | from cloudinit import util | |
4094 | 29 | pubcert_file = "/etc/mcollective/ssl/server-public.pem" | 29 | |
4095 | 30 | pricert_file = "/etc/mcollective/ssl/server-private.pem" | 30 | PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem" |
4096 | 31 | 31 | PRICERT_FILE = "/etc/mcollective/ssl/server-private.pem" | |
4097 | 32 | 32 | ||
4098 | 33 | # Our fake header section | 33 | |
4099 | 34 | class FakeSecHead(object): | 34 | def handle(name, cfg, cloud, log, _args): |
4100 | 35 | def __init__(self, fp): | 35 | |
4073 | 36 | self.fp = fp | ||
4074 | 37 | self.sechead = '[nullsection]\n' | ||
4075 | 38 | |||
4076 | 39 | def readline(self): | ||
4077 | 40 | if self.sechead: | ||
4078 | 41 | try: | ||
4079 | 42 | return self.sechead | ||
4080 | 43 | finally: | ||
4081 | 44 | self.sechead = None | ||
4082 | 45 | else: | ||
4083 | 46 | return self.fp.readline() | ||
4084 | 47 | |||
4085 | 48 | |||
4086 | 49 | def handle(_name, cfg, _cloud, _log, _args): | ||
4101 | 50 | # If there isn't a mcollective key in the configuration don't do anything | 36 | # If there isn't a mcollective key in the configuration don't do anything |
4102 | 51 | if 'mcollective' not in cfg: | 37 | if 'mcollective' not in cfg: |
4103 | 38 | log.debug(("Skipping module named %s, " | ||
4104 | 39 | "no 'mcollective' key in configuration"), name) | ||
4105 | 52 | return | 40 | return |
4106 | 41 | |||
4107 | 53 | mcollective_cfg = cfg['mcollective'] | 42 | mcollective_cfg = cfg['mcollective'] |
4108 | 43 | |||
4109 | 54 | # Start by installing the mcollective package ... | 44 | # Start by installing the mcollective package ... |
4111 | 55 | cc.install_packages(("mcollective",)) | 45 | cloud.distro.install_packages(("mcollective",)) |
4112 | 56 | 46 | ||
4113 | 57 | # ... and then update the mcollective configuration | 47 | # ... and then update the mcollective configuration |
4114 | 58 | if 'conf' in mcollective_cfg: | 48 | if 'conf' in mcollective_cfg: |
4122 | 59 | # Create object for reading server.cfg values | 49 | # Read server.cfg values from the |
4123 | 60 | mcollective_config = ConfigParser.ConfigParser() | 50 | # original file in order to be able to mix the rest up |
4124 | 61 | # Read server.cfg values from original file in order to be able to mix | 51 | server_cfg_fn = cloud.paths.join(True, '/etc/mcollective/server.cfg') |
4125 | 62 | # the rest up | 52 | mcollective_config = ConfigObj(server_cfg_fn) |
4126 | 63 | mcollective_config.readfp(FakeSecHead(open('/etc/mcollective/' | 53 | # See: http://tiny.cc/jh9agw |
4127 | 64 | 'server.cfg'))) | 54 | for (cfg_name, cfg) in mcollective_cfg['conf'].iteritems(): |
4121 | 65 | for cfg_name, cfg in mcollective_cfg['conf'].iteritems(): | ||
4128 | 66 | if cfg_name == 'public-cert': | 55 | if cfg_name == 'public-cert': |
4133 | 67 | util.write_file(pubcert_file, cfg, mode=0644) | 56 | pubcert_fn = cloud.paths.join(True, PUBCERT_FILE) |
4134 | 68 | mcollective_config.set(cfg_name, | 57 | util.write_file(pubcert_fn, cfg, mode=0644) |
4135 | 69 | 'plugin.ssl_server_public', pubcert_file) | 58 | mcollective_config['plugin.ssl_server_public'] = pubcert_fn |
4136 | 70 | mcollective_config.set(cfg_name, 'securityprovider', 'ssl') | 59 | mcollective_config['securityprovider'] = 'ssl' |
4137 | 71 | elif cfg_name == 'private-cert': | 60 | elif cfg_name == 'private-cert': |
4142 | 72 | util.write_file(pricert_file, cfg, mode=0600) | 61 | pricert_fn = cloud.paths.join(True, PRICERT_FILE) |
4143 | 73 | mcollective_config.set(cfg_name, | 62 | util.write_file(pricert_fn, cfg, mode=0600) |
4144 | 74 | 'plugin.ssl_server_private', pricert_file) | 63 | mcollective_config['plugin.ssl_server_private'] = pricert_fn |
4145 | 75 | mcollective_config.set(cfg_name, 'securityprovider', 'ssl') | 64 | mcollective_config['securityprovider'] = 'ssl' |
4146 | 76 | else: | 65 | else: |
4151 | 77 | # Iterate throug the config items, we'll use ConfigParser.set | 66 | if isinstance(cfg, (basestring, str)): |
4152 | 78 | # to overwrite or create new items as needed | 67 | # Just set it in the 'main' section |
4153 | 79 | for o, v in cfg.iteritems(): | 68 | mcollective_config[cfg_name] = cfg |
4154 | 80 | mcollective_config.set(cfg_name, o, v) | 69 | elif isinstance(cfg, (dict)): |
4155 | 70 | # Iterate throug the config items, create a section | ||
4156 | 71 | # if it is needed and then add/or create items as needed | ||
4157 | 72 | if cfg_name not in mcollective_config.sections: | ||
4158 | 73 | mcollective_config[cfg_name] = {} | ||
4159 | 74 | for (o, v) in cfg.iteritems(): | ||
4160 | 75 | mcollective_config[cfg_name][o] = v | ||
4161 | 76 | else: | ||
4162 | 77 | # Otherwise just try to convert it to a string | ||
4163 | 78 | mcollective_config[cfg_name] = str(cfg) | ||
4164 | 81 | # We got all our config as wanted we'll rename | 79 | # We got all our config as wanted we'll rename |
4165 | 82 | # the previous server.cfg and create our new one | 80 | # the previous server.cfg and create our new one |
4180 | 83 | os.rename('/etc/mcollective/server.cfg', | 81 | old_fn = cloud.paths.join(False, '/etc/mcollective/server.cfg.old') |
4181 | 84 | '/etc/mcollective/server.cfg.old') | 82 | util.rename(server_cfg_fn, old_fn) |
4182 | 85 | outputfile = StringIO.StringIO() | 83 | # Now we got the whole file, write to disk... |
4183 | 86 | mcollective_config.write(outputfile) | 84 | contents = StringIO() |
4184 | 87 | # Now we got the whole file, write to disk except first line | 85 | mcollective_config.write(contents) |
4185 | 88 | # Note below, that we've just used ConfigParser because it generally | 86 | contents = contents.getvalue() |
4186 | 89 | # works. Below, we remove the initial 'nullsection' header | 87 | server_cfg_rw = cloud.paths.join(False, '/etc/mcollective/server.cfg') |
4187 | 90 | # and then change 'key = value' to 'key: value'. The global | 88 | util.write_file(server_cfg_rw, contents, mode=0644) |
4174 | 91 | # search and replace of '=' with ':' could be problematic though. | ||
4175 | 92 | # this most likely needs fixing. | ||
4176 | 93 | util.write_file('/etc/mcollective/server.cfg', | ||
4177 | 94 | outputfile.getvalue().replace('[nullsection]\n', '').replace(' =', | ||
4178 | 95 | ':'), | ||
4179 | 96 | mode=0644) | ||
4188 | 97 | 89 | ||
4189 | 98 | # Start mcollective | 90 | # Start mcollective |
4191 | 99 | subprocess.check_call(['service', 'mcollective', 'start']) | 91 | util.subp(['service', 'mcollective', 'start'], capture=False) |
4192 | 100 | 92 | ||
4193 | === modified file 'cloudinit/config/cc_mounts.py' | |||
4194 | --- cloudinit/CloudConfig/cc_mounts.py 2012-01-18 14:07:33 +0000 | |||
4195 | +++ cloudinit/config/cc_mounts.py 2012-07-06 21:16:18 +0000 | |||
4196 | @@ -18,10 +18,16 @@ | |||
4197 | 18 | # You should have received a copy of the GNU General Public License | 18 | # You should have received a copy of the GNU General Public License |
4198 | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
4199 | 20 | 20 | ||
4202 | 21 | import cloudinit.util as util | 21 | from string import whitespace # pylint: disable=W0402 |
4203 | 22 | import os | 22 | |
4204 | 23 | import re | 23 | import re |
4206 | 24 | from string import whitespace # pylint: disable=W0402 | 24 | |
4207 | 25 | from cloudinit import util | ||
4208 | 26 | |||
4209 | 27 | # Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1 | ||
4210 | 28 | SHORTNAME_FILTER = r"^[x]{0,1}[shv]d[a-z][0-9]*$" | ||
4211 | 29 | SHORTNAME = re.compile(SHORTNAME_FILTER) | ||
4212 | 30 | WS = re.compile("[%s]+" % (whitespace)) | ||
4213 | 25 | 31 | ||
4214 | 26 | 32 | ||
4215 | 27 | def is_mdname(name): | 33 | def is_mdname(name): |
4216 | @@ -49,38 +55,46 @@ | |||
4217 | 49 | if "mounts" in cfg: | 55 | if "mounts" in cfg: |
4218 | 50 | cfgmnt = cfg["mounts"] | 56 | cfgmnt = cfg["mounts"] |
4219 | 51 | 57 | ||
4220 | 52 | # shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1 | ||
4221 | 53 | shortname_filter = r"^[x]{0,1}[shv]d[a-z][0-9]*$" | ||
4222 | 54 | shortname = re.compile(shortname_filter) | ||
4223 | 55 | |||
4224 | 56 | for i in range(len(cfgmnt)): | 58 | for i in range(len(cfgmnt)): |
4225 | 57 | # skip something that wasn't a list | 59 | # skip something that wasn't a list |
4226 | 58 | if not isinstance(cfgmnt[i], list): | 60 | if not isinstance(cfgmnt[i], list): |
4227 | 61 | log.warn("Mount option %s not a list, got a %s instead", | ||
4228 | 62 | (i + 1), util.obj_name(cfgmnt[i])) | ||
4229 | 59 | continue | 63 | continue |
4230 | 60 | 64 | ||
4231 | 65 | startname = str(cfgmnt[i][0]) | ||
4232 | 66 | log.debug("Attempting to determine the real name of %s", startname) | ||
4233 | 67 | |||
4234 | 61 | # workaround, allow user to specify 'ephemeral' | 68 | # workaround, allow user to specify 'ephemeral' |
4235 | 62 | # rather than more ec2 correct 'ephemeral0' | 69 | # rather than more ec2 correct 'ephemeral0' |
4237 | 63 | if cfgmnt[i][0] == "ephemeral": | 70 | if startname == "ephemeral": |
4238 | 64 | cfgmnt[i][0] = "ephemeral0" | 71 | cfgmnt[i][0] = "ephemeral0" |
4239 | 72 | log.debug(("Adjusted mount option %s " | ||
4240 | 73 | "name from ephemeral to ephemeral0"), (i + 1)) | ||
4241 | 65 | 74 | ||
4244 | 66 | if is_mdname(cfgmnt[i][0]): | 75 | if is_mdname(startname): |
4245 | 67 | newname = cloud.device_name_to_device(cfgmnt[i][0]) | 76 | newname = cloud.device_name_to_device(startname) |
4246 | 68 | if not newname: | 77 | if not newname: |
4248 | 69 | log.debug("ignoring nonexistant named mount %s" % cfgmnt[i][0]) | 78 | log.debug("Ignoring nonexistant named mount %s", startname) |
4249 | 70 | cfgmnt[i][1] = None | 79 | cfgmnt[i][1] = None |
4250 | 71 | else: | 80 | else: |
4255 | 72 | if newname.startswith("/"): | 81 | renamed = newname |
4256 | 73 | cfgmnt[i][0] = newname | 82 | if not newname.startswith("/"): |
4257 | 74 | else: | 83 | renamed = "/dev/%s" % newname |
4258 | 75 | cfgmnt[i][0] = "/dev/%s" % newname | 84 | cfgmnt[i][0] = renamed |
4259 | 85 | log.debug("Mapped metadata name %s to %s", startname, renamed) | ||
4260 | 76 | else: | 86 | else: |
4263 | 77 | if shortname.match(cfgmnt[i][0]): | 87 | if SHORTNAME.match(startname): |
4264 | 78 | cfgmnt[i][0] = "/dev/%s" % cfgmnt[i][0] | 88 | renamed = "/dev/%s" % startname |
4265 | 89 | log.debug("Mapped shortname name %s to %s", startname, renamed) | ||
4266 | 90 | cfgmnt[i][0] = renamed | ||
4267 | 79 | 91 | ||
4268 | 80 | # in case the user did not quote a field (likely fs-freq, fs_passno) | 92 | # in case the user did not quote a field (likely fs-freq, fs_passno) |
4269 | 81 | # but do not convert None to 'None' (LP: #898365) | 93 | # but do not convert None to 'None' (LP: #898365) |
4270 | 82 | for j in range(len(cfgmnt[i])): | 94 | for j in range(len(cfgmnt[i])): |
4272 | 83 | if isinstance(cfgmnt[i][j], int): | 95 | if j is None: |
4273 | 96 | continue | ||
4274 | 97 | else: | ||
4275 | 84 | cfgmnt[i][j] = str(cfgmnt[i][j]) | 98 | cfgmnt[i][j] = str(cfgmnt[i][j]) |
4276 | 85 | 99 | ||
4277 | 86 | for i in range(len(cfgmnt)): | 100 | for i in range(len(cfgmnt)): |
4278 | @@ -102,14 +116,18 @@ | |||
4279 | 102 | # for each of the "default" mounts, add them only if no other | 116 | # for each of the "default" mounts, add them only if no other |
4280 | 103 | # entry has the same device name | 117 | # entry has the same device name |
4281 | 104 | for defmnt in defmnts: | 118 | for defmnt in defmnts: |
4283 | 105 | devname = cloud.device_name_to_device(defmnt[0]) | 119 | startname = defmnt[0] |
4284 | 120 | devname = cloud.device_name_to_device(startname) | ||
4285 | 106 | if devname is None: | 121 | if devname is None: |
4286 | 122 | log.debug("Ignoring nonexistant named default mount %s", startname) | ||
4287 | 107 | continue | 123 | continue |
4288 | 108 | if devname.startswith("/"): | 124 | if devname.startswith("/"): |
4289 | 109 | defmnt[0] = devname | 125 | defmnt[0] = devname |
4290 | 110 | else: | 126 | else: |
4291 | 111 | defmnt[0] = "/dev/%s" % devname | 127 | defmnt[0] = "/dev/%s" % devname |
4292 | 112 | 128 | ||
4293 | 129 | log.debug("Mapped default device %s to %s", startname, defmnt[0]) | ||
4294 | 130 | |||
4295 | 113 | cfgmnt_has = False | 131 | cfgmnt_has = False |
4296 | 114 | for cfgm in cfgmnt: | 132 | for cfgm in cfgmnt: |
4297 | 115 | if cfgm[0] == defmnt[0]: | 133 | if cfgm[0] == defmnt[0]: |
4298 | @@ -117,14 +135,22 @@ | |||
4299 | 117 | break | 135 | break |
4300 | 118 | 136 | ||
4301 | 119 | if cfgmnt_has: | 137 | if cfgmnt_has: |
4302 | 138 | log.debug(("Not including %s, already" | ||
4303 | 139 | " previously included"), startname) | ||
4304 | 120 | continue | 140 | continue |
4305 | 121 | cfgmnt.append(defmnt) | 141 | cfgmnt.append(defmnt) |
4306 | 122 | 142 | ||
4307 | 123 | # now, each entry in the cfgmnt list has all fstab values | 143 | # now, each entry in the cfgmnt list has all fstab values |
4308 | 124 | # if the second field is None (not the string, the value) we skip it | 144 | # if the second field is None (not the string, the value) we skip it |
4310 | 125 | actlist = [x for x in cfgmnt if x[1] is not None] | 145 | actlist = [] |
4311 | 146 | for x in cfgmnt: | ||
4312 | 147 | if x[1] is None: | ||
4313 | 148 | log.debug("Skipping non-existent device named %s", x[0]) | ||
4314 | 149 | else: | ||
4315 | 150 | actlist.append(x) | ||
4316 | 126 | 151 | ||
4317 | 127 | if len(actlist) == 0: | 152 | if len(actlist) == 0: |
4318 | 153 | log.debug("No modifications to fstab needed.") | ||
4319 | 128 | return | 154 | return |
4320 | 129 | 155 | ||
4321 | 130 | comment = "comment=cloudconfig" | 156 | comment = "comment=cloudconfig" |
4322 | @@ -133,7 +159,7 @@ | |||
4323 | 133 | dirs = [] | 159 | dirs = [] |
4324 | 134 | for line in actlist: | 160 | for line in actlist: |
4325 | 135 | # write 'comment' in the fs_mntops, entry, claiming this | 161 | # write 'comment' in the fs_mntops, entry, claiming this |
4327 | 136 | line[3] = "%s,comment=cloudconfig" % line[3] | 162 | line[3] = "%s,%s" % (line[3], comment) |
4328 | 137 | if line[2] == "swap": | 163 | if line[2] == "swap": |
4329 | 138 | needswap = True | 164 | needswap = True |
4330 | 139 | if line[1].startswith("/"): | 165 | if line[1].startswith("/"): |
4331 | @@ -141,11 +167,10 @@ | |||
4332 | 141 | cc_lines.append('\t'.join(line)) | 167 | cc_lines.append('\t'.join(line)) |
4333 | 142 | 168 | ||
4334 | 143 | fstab_lines = [] | 169 | fstab_lines = [] |
4338 | 144 | fstab = open("/etc/fstab", "r+") | 170 | fstab = util.load_file(cloud.paths.join(True, "/etc/fstab")) |
4339 | 145 | ws = re.compile("[%s]+" % whitespace) | 171 | for line in fstab.splitlines(): |
4337 | 146 | for line in fstab.read().splitlines(): | ||
4340 | 147 | try: | 172 | try: |
4342 | 148 | toks = ws.split(line) | 173 | toks = WS.split(line) |
4343 | 149 | if toks[3].find(comment) != -1: | 174 | if toks[3].find(comment) != -1: |
4344 | 150 | continue | 175 | continue |
4345 | 151 | except: | 176 | except: |
4346 | @@ -153,27 +178,23 @@ | |||
4347 | 153 | fstab_lines.append(line) | 178 | fstab_lines.append(line) |
4348 | 154 | 179 | ||
4349 | 155 | fstab_lines.extend(cc_lines) | 180 | fstab_lines.extend(cc_lines) |
4355 | 156 | 181 | contents = "%s\n" % ('\n'.join(fstab_lines)) | |
4356 | 157 | fstab.seek(0) | 182 | util.write_file(cloud.paths.join(False, "/etc/fstab"), contents) |
4352 | 158 | fstab.write("%s\n" % '\n'.join(fstab_lines)) | ||
4353 | 159 | fstab.truncate() | ||
4354 | 160 | fstab.close() | ||
4357 | 161 | 183 | ||
4358 | 162 | if needswap: | 184 | if needswap: |
4359 | 163 | try: | 185 | try: |
4360 | 164 | util.subp(("swapon", "-a")) | 186 | util.subp(("swapon", "-a")) |
4361 | 165 | except: | 187 | except: |
4363 | 166 | log.warn("Failed to enable swap") | 188 | util.logexc(log, "Activating swap via 'swapon -a' failed") |
4364 | 167 | 189 | ||
4365 | 168 | for d in dirs: | 190 | for d in dirs: |
4368 | 169 | if os.path.exists(d): | 191 | real_dir = cloud.paths.join(False, d) |
4367 | 170 | continue | ||
4369 | 171 | try: | 192 | try: |
4371 | 172 | os.makedirs(d) | 193 | util.ensure_dir(real_dir) |
4372 | 173 | except: | 194 | except: |
4374 | 174 | log.warn("Failed to make '%s' config-mount\n", d) | 195 | util.logexc(log, "Failed to make '%s' config-mount", d) |
4375 | 175 | 196 | ||
4376 | 176 | try: | 197 | try: |
4377 | 177 | util.subp(("mount", "-a")) | 198 | util.subp(("mount", "-a")) |
4378 | 178 | except: | 199 | except: |
4380 | 179 | log.warn("'mount -a' failed") | 200 | util.logexc(log, "Activating mounts via 'mount -a' failed") |
4381 | 180 | 201 | ||
4382 | === modified file 'cloudinit/config/cc_phone_home.py' | |||
4383 | --- cloudinit/CloudConfig/cc_phone_home.py 2012-01-18 14:07:33 +0000 | |||
4384 | +++ cloudinit/config/cc_phone_home.py 2012-07-06 21:16:18 +0000 | |||
4385 | @@ -17,13 +17,22 @@ | |||
4386 | 17 | # | 17 | # |
4387 | 18 | # You should have received a copy of the GNU General Public License | 18 | # You should have received a copy of the GNU General Public License |
4388 | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
4396 | 20 | from cloudinit.CloudConfig import per_instance | 20 | |
4397 | 21 | import cloudinit.util as util | 21 | from cloudinit import templater |
4398 | 22 | from time import sleep | 22 | from cloudinit import url_helper as uhelp |
4399 | 23 | 23 | from cloudinit import util | |
4400 | 24 | frequency = per_instance | 24 | |
4401 | 25 | post_list_all = ['pub_key_dsa', 'pub_key_rsa', 'pub_key_ecdsa', 'instance_id', | 25 | from cloudinit.settings import PER_INSTANCE |
4402 | 26 | 'hostname'] | 26 | |
4403 | 27 | frequency = PER_INSTANCE | ||
4404 | 28 | |||
4405 | 29 | POST_LIST_ALL = [ | ||
4406 | 30 | 'pub_key_dsa', | ||
4407 | 31 | 'pub_key_rsa', | ||
4408 | 32 | 'pub_key_ecdsa', | ||
4409 | 33 | 'instance_id', | ||
4410 | 34 | 'hostname' | ||
4411 | 35 | ] | ||
4412 | 27 | 36 | ||
4413 | 28 | 37 | ||
4414 | 29 | # phone_home: | 38 | # phone_home: |
4415 | @@ -35,29 +44,33 @@ | |||
4416 | 35 | # url: http://my.foo.bar/$INSTANCE_ID/ | 44 | # url: http://my.foo.bar/$INSTANCE_ID/ |
4417 | 36 | # post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id | 45 | # post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id |
4418 | 37 | # | 46 | # |
4420 | 38 | def handle(_name, cfg, cloud, log, args): | 47 | def handle(name, cfg, cloud, log, args): |
4421 | 39 | if len(args) != 0: | 48 | if len(args) != 0: |
4422 | 40 | ph_cfg = util.read_conf(args[0]) | 49 | ph_cfg = util.read_conf(args[0]) |
4423 | 41 | else: | 50 | else: |
4424 | 42 | if not 'phone_home' in cfg: | 51 | if not 'phone_home' in cfg: |
4425 | 52 | log.debug(("Skipping module named %s, " | ||
4426 | 53 | "no 'phone_home' configuration found"), name) | ||
4427 | 43 | return | 54 | return |
4428 | 44 | ph_cfg = cfg['phone_home'] | 55 | ph_cfg = cfg['phone_home'] |
4429 | 45 | 56 | ||
4430 | 46 | if 'url' not in ph_cfg: | 57 | if 'url' not in ph_cfg: |
4432 | 47 | log.warn("no 'url' token in phone_home") | 58 | log.warn(("Skipping module named %s, " |
4433 | 59 | "no 'url' found in 'phone_home' configuration"), name) | ||
4434 | 48 | return | 60 | return |
4435 | 49 | 61 | ||
4436 | 50 | url = ph_cfg['url'] | 62 | url = ph_cfg['url'] |
4437 | 51 | post_list = ph_cfg.get('post', 'all') | 63 | post_list = ph_cfg.get('post', 'all') |
4439 | 52 | tries = ph_cfg.get('tries', 10) | 64 | tries = ph_cfg.get('tries') |
4440 | 53 | try: | 65 | try: |
4441 | 54 | tries = int(tries) | 66 | tries = int(tries) |
4442 | 55 | except: | 67 | except: |
4443 | 56 | log.warn("tries is not an integer. using 10") | ||
4444 | 57 | tries = 10 | 68 | tries = 10 |
4445 | 69 | util.logexc(log, ("Configuration entry 'tries'" | ||
4446 | 70 | " is not an integer, using %s instead"), tries) | ||
4447 | 58 | 71 | ||
4448 | 59 | if post_list == "all": | 72 | if post_list == "all": |
4450 | 60 | post_list = post_list_all | 73 | post_list = POST_LIST_ALL |
4451 | 61 | 74 | ||
4452 | 62 | all_keys = {} | 75 | all_keys = {} |
4453 | 63 | all_keys['instance_id'] = cloud.get_instance_id() | 76 | all_keys['instance_id'] = cloud.get_instance_id() |
4454 | @@ -69,38 +82,37 @@ | |||
4455 | 69 | 'pub_key_ecdsa': '/etc/ssh/ssh_host_ecdsa_key.pub', | 82 | 'pub_key_ecdsa': '/etc/ssh/ssh_host_ecdsa_key.pub', |
4456 | 70 | } | 83 | } |
4457 | 71 | 84 | ||
4459 | 72 | for n, path in pubkeys.iteritems(): | 85 | for (n, path) in pubkeys.iteritems(): |
4460 | 73 | try: | 86 | try: |
4464 | 74 | fp = open(path, "rb") | 87 | all_keys[n] = util.load_file(cloud.paths.join(True, path)) |
4462 | 75 | all_keys[n] = fp.read() | ||
4463 | 76 | fp.close() | ||
4465 | 77 | except: | 88 | except: |
4467 | 78 | log.warn("%s: failed to open in phone_home" % path) | 89 | util.logexc(log, ("%s: failed to open, can not" |
4468 | 90 | " phone home that data"), path) | ||
4469 | 79 | 91 | ||
4470 | 80 | submit_keys = {} | 92 | submit_keys = {} |
4471 | 81 | for k in post_list: | 93 | for k in post_list: |
4472 | 82 | if k in all_keys: | 94 | if k in all_keys: |
4473 | 83 | submit_keys[k] = all_keys[k] | 95 | submit_keys[k] = all_keys[k] |
4474 | 84 | else: | 96 | else: |
4497 | 85 | submit_keys[k] = "N/A" | 97 | submit_keys[k] = None |
4498 | 86 | log.warn("requested key %s from 'post' list not available") | 98 | log.warn(("Requested key %s from 'post'" |
4499 | 87 | 99 | " configuration list not available"), k) | |
4500 | 88 | url = util.render_string(url, {'INSTANCE_ID': all_keys['instance_id']}) | 100 | |
4501 | 89 | 101 | # Get them read to be posted | |
4502 | 90 | null_exc = object() | 102 | real_submit_keys = {} |
4503 | 91 | last_e = null_exc | 103 | for (k, v) in submit_keys.iteritems(): |
4504 | 92 | for i in range(0, tries): | 104 | if v is None: |
4505 | 93 | try: | 105 | real_submit_keys[k] = 'N/A' |
4506 | 94 | util.readurl(url, submit_keys) | 106 | else: |
4507 | 95 | log.debug("succeeded submit to %s on try %i" % (url, i + 1)) | 107 | real_submit_keys[k] = str(v) |
4508 | 96 | return | 108 | |
4509 | 97 | except Exception as e: | 109 | # Incase the url is parameterized |
4510 | 98 | log.debug("failed to post to %s on try %i" % (url, i + 1)) | 110 | url_params = { |
4511 | 99 | last_e = e | 111 | 'INSTANCE_ID': all_keys['instance_id'], |
4512 | 100 | sleep(3) | 112 | } |
4513 | 101 | 113 | url = templater.render_string(url, url_params) | |
4514 | 102 | log.warn("failed to post to %s in %i tries" % (url, tries)) | 114 | try: |
4515 | 103 | if last_e is not null_exc: | 115 | uhelp.readurl(url, data=real_submit_keys, retries=tries, sec_between=3) |
4516 | 104 | raise(last_e) | 116 | except: |
4517 | 105 | 117 | util.logexc(log, ("Failed to post phone home data to" | |
4518 | 106 | return | 118 | " %s in %s tries"), url, tries) |
4519 | 107 | 119 | ||
4520 | === modified file 'cloudinit/config/cc_puppet.py' | |||
4521 | --- cloudinit/CloudConfig/cc_puppet.py 2012-01-18 14:07:33 +0000 | |||
4522 | +++ cloudinit/config/cc_puppet.py 2012-07-06 21:16:18 +0000 | |||
4523 | @@ -18,91 +18,96 @@ | |||
4524 | 18 | # You should have received a copy of the GNU General Public License | 18 | # You should have received a copy of the GNU General Public License |
4525 | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
4526 | 20 | 20 | ||
4527 | 21 | from StringIO import StringIO | ||
4528 | 22 | |||
4529 | 21 | import os | 23 | import os |
4530 | 22 | import os.path | ||
4531 | 23 | import pwd | 24 | import pwd |
4532 | 24 | import socket | 25 | import socket |
4541 | 25 | import subprocess | 26 | |
4542 | 26 | import StringIO | 27 | from cloudinit import helpers |
4543 | 27 | import ConfigParser | 28 | from cloudinit import util |
4544 | 28 | import cloudinit.CloudConfig as cc | 29 | |
4545 | 29 | import cloudinit.util as util | 30 | |
4546 | 30 | 31 | def handle(name, cfg, cloud, log, _args): | |
4539 | 31 | |||
4540 | 32 | def handle(_name, cfg, cloud, log, _args): | ||
4547 | 33 | # If there isn't a puppet key in the configuration don't do anything | 32 | # If there isn't a puppet key in the configuration don't do anything |
4548 | 34 | if 'puppet' not in cfg: | 33 | if 'puppet' not in cfg: |
4549 | 34 | log.debug(("Skipping module named %s," | ||
4550 | 35 | " no 'puppet' configuration found"), name) | ||
4551 | 35 | return | 36 | return |
4552 | 37 | |||
4553 | 36 | puppet_cfg = cfg['puppet'] | 38 | puppet_cfg = cfg['puppet'] |
4554 | 39 | |||
4555 | 37 | # Start by installing the puppet package ... | 40 | # Start by installing the puppet package ... |
4557 | 38 | cc.install_packages(("puppet",)) | 41 | cloud.distro.install_packages(["puppet"]) |
4558 | 39 | 42 | ||
4559 | 40 | # ... and then update the puppet configuration | 43 | # ... and then update the puppet configuration |
4560 | 41 | if 'conf' in puppet_cfg: | 44 | if 'conf' in puppet_cfg: |
4561 | 42 | # Add all sections from the conf object to puppet.conf | 45 | # Add all sections from the conf object to puppet.conf |
4563 | 43 | puppet_conf_fh = open('/etc/puppet/puppet.conf', 'r') | 46 | puppet_conf_fn = cloud.paths.join(True, '/etc/puppet/puppet.conf') |
4564 | 47 | contents = util.load_file(puppet_conf_fn) | ||
4565 | 44 | # Create object for reading puppet.conf values | 48 | # Create object for reading puppet.conf values |
4567 | 45 | puppet_config = ConfigParser.ConfigParser() | 49 | puppet_config = helpers.DefaultingConfigParser() |
4568 | 46 | # Read puppet.conf values from original file in order to be able to | 50 | # Read puppet.conf values from original file in order to be able to |
4577 | 47 | # mix the rest up | 51 | # mix the rest up. First clean them up (TODO is this really needed??) |
4578 | 48 | puppet_config.readfp(StringIO.StringIO(''.join(i.lstrip() for i in | 52 | cleaned_lines = [i.lstrip() for i in contents.splitlines()] |
4579 | 49 | puppet_conf_fh.readlines()))) | 53 | cleaned_contents = '\n'.join(cleaned_lines) |
4580 | 50 | # Close original file, no longer needed | 54 | puppet_config.readfp(StringIO(cleaned_contents), |
4581 | 51 | puppet_conf_fh.close() | 55 | filename=puppet_conf_fn) |
4582 | 52 | for cfg_name, cfg in puppet_cfg['conf'].iteritems(): | 56 | for (cfg_name, cfg) in puppet_cfg['conf'].iteritems(): |
4583 | 53 | # ca_cert configuration is a special case | 57 | # Cert configuration is a special case |
4584 | 54 | # Dump the puppetmaster ca certificate in the correct place | 58 | # Dump the puppet master ca certificate in the correct place |
4585 | 55 | if cfg_name == 'ca_cert': | 59 | if cfg_name == 'ca_cert': |
4586 | 56 | # Puppet ssl sub-directory isn't created yet | 60 | # Puppet ssl sub-directory isn't created yet |
4587 | 57 | # Create it with the proper permissions and ownership | 61 | # Create it with the proper permissions and ownership |
4601 | 58 | os.makedirs('/var/lib/puppet/ssl') | 62 | pp_ssl_dir = cloud.paths.join(False, '/var/lib/puppet/ssl') |
4602 | 59 | os.chmod('/var/lib/puppet/ssl', 0771) | 63 | util.ensure_dir(pp_ssl_dir, 0771) |
4603 | 60 | os.chown('/var/lib/puppet/ssl', | 64 | util.chownbyid(pp_ssl_dir, |
4604 | 61 | pwd.getpwnam('puppet').pw_uid, 0) | 65 | pwd.getpwnam('puppet').pw_uid, 0) |
4605 | 62 | os.makedirs('/var/lib/puppet/ssl/certs/') | 66 | pp_ssl_certs = cloud.paths.join(False, |
4606 | 63 | os.chown('/var/lib/puppet/ssl/certs/', | 67 | '/var/lib/puppet/ssl/certs/') |
4607 | 64 | pwd.getpwnam('puppet').pw_uid, 0) | 68 | util.ensure_dir(pp_ssl_certs) |
4608 | 65 | ca_fh = open('/var/lib/puppet/ssl/certs/ca.pem', 'w') | 69 | util.chownbyid(pp_ssl_certs, |
4609 | 66 | ca_fh.write(cfg) | 70 | pwd.getpwnam('puppet').pw_uid, 0) |
4610 | 67 | ca_fh.close() | 71 | pp_ssl_ca_certs = cloud.paths.join(False, |
4611 | 68 | os.chown('/var/lib/puppet/ssl/certs/ca.pem', | 72 | ('/var/lib/puppet/' |
4612 | 69 | pwd.getpwnam('puppet').pw_uid, 0) | 73 | 'ssl/certs/ca.pem')) |
4613 | 70 | util.restorecon_if_possible('/var/lib/puppet', recursive=True) | 74 | util.write_file(pp_ssl_ca_certs, cfg) |
4614 | 75 | util.chownbyid(pp_ssl_ca_certs, | ||
4615 | 76 | pwd.getpwnam('puppet').pw_uid, 0) | ||
4616 | 71 | else: | 77 | else: |
4617 | 72 | #puppet_conf_fh.write("\n[%s]\n" % (cfg_name)) | ||
4618 | 73 | # If puppet.conf already has this section we don't want to | ||
4619 | 74 | # write it again | ||
4620 | 75 | if puppet_config.has_section(cfg_name) == False: | ||
4621 | 76 | puppet_config.add_section(cfg_name) | ||
4622 | 77 | # Iterate throug the config items, we'll use ConfigParser.set | 78 | # Iterate throug the config items, we'll use ConfigParser.set |
4623 | 78 | # to overwrite or create new items as needed | 79 | # to overwrite or create new items as needed |
4625 | 79 | for o, v in cfg.iteritems(): | 80 | for (o, v) in cfg.iteritems(): |
4626 | 80 | if o == 'certname': | 81 | if o == 'certname': |
4627 | 81 | # Expand %f as the fqdn | 82 | # Expand %f as the fqdn |
4628 | 83 | # TODO should this use the cloud fqdn?? | ||
4629 | 82 | v = v.replace("%f", socket.getfqdn()) | 84 | v = v.replace("%f", socket.getfqdn()) |
4630 | 83 | # Expand %i as the instance id | 85 | # Expand %i as the instance id |
4634 | 84 | v = v.replace("%i", | 86 | v = v.replace("%i", cloud.get_instance_id()) |
4635 | 85 | cloud.datasource.get_instance_id()) | 87 | # certname needs to be downcased |
4633 | 86 | # certname needs to be downcase | ||
4636 | 87 | v = v.lower() | 88 | v = v.lower() |
4637 | 88 | puppet_config.set(cfg_name, o, v) | 89 | puppet_config.set(cfg_name, o, v) |
4638 | 89 | #puppet_conf_fh.write("%s=%s\n" % (o, v)) | ||
4639 | 90 | # We got all our config as wanted we'll rename | 90 | # We got all our config as wanted we'll rename |
4640 | 91 | # the previous puppet.conf and create our new one | 91 | # the previous puppet.conf and create our new one |
4645 | 92 | os.rename('/etc/puppet/puppet.conf', '/etc/puppet/puppet.conf.old') | 92 | conf_old_fn = cloud.paths.join(False, |
4646 | 93 | with open('/etc/puppet/puppet.conf', 'wb') as configfile: | 93 | '/etc/puppet/puppet.conf.old') |
4647 | 94 | puppet_config.write(configfile) | 94 | util.rename(puppet_conf_fn, conf_old_fn) |
4648 | 95 | util.restorecon_if_possible('/etc/puppet/puppet.conf') | 95 | puppet_conf_rw = cloud.paths.join(False, '/etc/puppet/puppet.conf') |
4649 | 96 | util.write_file(puppet_conf_rw, puppet_config.stringify()) | ||
4650 | 97 | |||
4651 | 96 | # Set puppet to automatically start | 98 | # Set puppet to automatically start |
4652 | 97 | if os.path.exists('/etc/default/puppet'): | 99 | if os.path.exists('/etc/default/puppet'): |
4656 | 98 | subprocess.check_call(['sed', '-i', | 100 | util.subp(['sed', '-i', |
4657 | 99 | '-e', 's/^START=.*/START=yes/', | 101 | '-e', 's/^START=.*/START=yes/', |
4658 | 100 | '/etc/default/puppet']) | 102 | '/etc/default/puppet'], capture=False) |
4659 | 101 | elif os.path.exists('/bin/systemctl'): | 103 | elif os.path.exists('/bin/systemctl'): |
4661 | 102 | subprocess.check_call(['/bin/systemctl', 'enable', 'puppet.service']) | 104 | util.subp(['/bin/systemctl', 'enable', 'puppet.service'], |
4662 | 105 | capture=False) | ||
4663 | 103 | elif os.path.exists('/sbin/chkconfig'): | 106 | elif os.path.exists('/sbin/chkconfig'): |
4665 | 104 | subprocess.check_call(['/sbin/chkconfig', 'puppet', 'on']) | 107 | util.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False) |
4666 | 105 | else: | 108 | else: |
4668 | 106 | log.warn("Do not know how to enable puppet service on this system") | 109 | log.warn(("Sorry we do not know how to enable" |
4669 | 110 | " puppet services on this system")) | ||
4670 | 111 | |||
4671 | 107 | # Start puppetd | 112 | # Start puppetd |
4673 | 108 | subprocess.check_call(['service', 'puppet', 'start']) | 113 | util.subp(['service', 'puppet', 'start'], capture=False) |
4674 | 109 | 114 | ||
4675 | === modified file 'cloudinit/config/cc_resizefs.py' | |||
4676 | --- cloudinit/CloudConfig/cc_resizefs.py 2012-03-21 20:41:50 +0000 | |||
4677 | +++ cloudinit/config/cc_resizefs.py 2012-07-06 21:16:18 +0000 | |||
4678 | @@ -18,91 +18,123 @@ | |||
4679 | 18 | # You should have received a copy of the GNU General Public License | 18 | # You should have received a copy of the GNU General Public License |
4680 | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
4681 | 20 | 20 | ||
4682 | 21 | import cloudinit.util as util | ||
4683 | 22 | import subprocess | ||
4684 | 23 | import os | 21 | import os |
4685 | 24 | import stat | 22 | import stat |
4686 | 25 | import sys | ||
4687 | 26 | import time | 23 | import time |
4709 | 27 | import tempfile | 24 | |
4710 | 28 | from cloudinit.CloudConfig import per_always | 25 | from cloudinit import util |
4711 | 29 | 26 | from cloudinit.settings import PER_ALWAYS | |
4712 | 30 | frequency = per_always | 27 | |
4713 | 31 | 28 | frequency = PER_ALWAYS | |
4714 | 32 | 29 | ||
4715 | 33 | def handle(_name, cfg, _cloud, log, args): | 30 | RESIZE_FS_PREFIXES_CMDS = [ |
4716 | 34 | if len(args) != 0: | 31 | ('ext', 'resize2fs'), |
4717 | 35 | resize_root = False | 32 | ('xfs', 'xfs_growfs'), |
4718 | 36 | if str(args[0]).lower() in ['true', '1', 'on', 'yes']: | 33 | ] |
4719 | 37 | resize_root = True | 34 | |
4720 | 38 | else: | 35 | |
4721 | 39 | resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True) | 36 | def nodeify_path(devpth, where, log): |
4701 | 40 | |||
4702 | 41 | if str(resize_root).lower() in ['false', '0']: | ||
4703 | 42 | return | ||
4704 | 43 | |||
4705 | 44 | # we use mktemp rather than mkstemp because early in boot nothing | ||
4706 | 45 | # else should be able to race us for this, and we need to mknod. | ||
4707 | 46 | devpth = tempfile.mktemp(prefix="cloudinit.resizefs.", dir="/run") | ||
4708 | 47 | |||
4722 | 48 | try: | 37 | try: |
4724 | 49 | st_dev = os.stat("/").st_dev | 38 | st_dev = os.stat(where).st_dev |
4725 | 50 | dev = os.makedev(os.major(st_dev), os.minor(st_dev)) | 39 | dev = os.makedev(os.major(st_dev), os.minor(st_dev)) |
4726 | 51 | os.mknod(devpth, 0400 | stat.S_IFBLK, dev) | 40 | os.mknod(devpth, 0400 | stat.S_IFBLK, dev) |
4727 | 41 | return st_dev | ||
4728 | 52 | except: | 42 | except: |
4729 | 53 | if util.is_container(): | 43 | if util.is_container(): |
4731 | 54 | log.debug("inside container, ignoring mknod failure in resizefs") | 44 | log.debug("Inside container, ignoring mknod failure in resizefs") |
4732 | 55 | return | 45 | return |
4734 | 56 | log.warn("Failed to make device node to resize /") | 46 | log.warn("Failed to make device node to resize %s at %s", |
4735 | 47 | where, devpth) | ||
4736 | 57 | raise | 48 | raise |
4737 | 58 | 49 | ||
4739 | 59 | cmd = ['blkid', '-c', '/dev/null', '-sTYPE', '-ovalue', devpth] | 50 | |
4740 | 51 | def get_fs_type(st_dev, path, log): | ||
4741 | 60 | try: | 52 | try: |
4748 | 61 | (fstype, _err) = util.subp(cmd) | 53 | dev_entries = util.find_devs_with(tag='TYPE', oformat='value', |
4749 | 62 | except subprocess.CalledProcessError as e: | 54 | no_cache=True, path=path) |
4750 | 63 | log.warn("Failed to get filesystem type of maj=%s, min=%s via: %s" % | 55 | if not dev_entries: |
4751 | 64 | (os.major(st_dev), os.minor(st_dev), cmd)) | 56 | return None |
4752 | 65 | log.warn("output=%s\nerror=%s\n", e.output[0], e.output[1]) | 57 | return dev_entries[0].strip() |
4753 | 66 | os.unlink(devpth) | 58 | except util.ProcessExecutionError: |
4754 | 59 | util.logexc(log, ("Failed to get filesystem type" | ||
4755 | 60 | " of maj=%s, min=%s for path %s"), | ||
4756 | 61 | os.major(st_dev), os.minor(st_dev), path) | ||
4757 | 67 | raise | 62 | raise |
4758 | 68 | 63 | ||
4763 | 69 | if str(fstype).startswith("ext"): | 64 | |
4764 | 70 | resize_cmd = ['resize2fs', devpth] | 65 | def handle(name, cfg, cloud, log, args): |
4765 | 71 | elif fstype == "xfs": | 66 | if len(args) != 0: |
4766 | 72 | resize_cmd = ['xfs_growfs', devpth] | 67 | resize_root = args[0] |
4767 | 73 | else: | 68 | else: |
4770 | 74 | os.unlink(devpth) | 69 | resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True) |
4771 | 75 | log.debug("not resizing unknown filesystem %s" % fstype) | 70 | |
4772 | 71 | if not util.translate_bool(resize_root): | ||
4773 | 72 | log.debug("Skipping module named %s, resizing disabled", name) | ||
4774 | 76 | return | 73 | return |
4775 | 77 | 74 | ||
4776 | 75 | # TODO is the directory ok to be used?? | ||
4777 | 76 | resize_root_d = util.get_cfg_option_str(cfg, "resize_rootfs_tmp", "/run") | ||
4778 | 77 | resize_root_d = cloud.paths.join(False, resize_root_d) | ||
4779 | 78 | util.ensure_dir(resize_root_d) | ||
4780 | 79 | |||
4781 | 80 | # TODO: allow what is to be resized to be configurable?? | ||
4782 | 81 | resize_what = cloud.paths.join(False, "/") | ||
4783 | 82 | with util.ExtendedTemporaryFile(prefix="cloudinit.resizefs.", | ||
4784 | 83 | dir=resize_root_d, delete=True) as tfh: | ||
4785 | 84 | devpth = tfh.name | ||
4786 | 85 | |||
4787 | 86 | # Delete the file so that mknod will work | ||
4788 | 87 | # but don't change the file handle to know that its | ||
4789 | 88 | # removed so that when a later call that recreates | ||
4790 | 89 | # occurs this temporary file will still benefit from | ||
4791 | 90 | # auto deletion | ||
4792 | 91 | tfh.unlink_now() | ||
4793 | 92 | |||
4794 | 93 | st_dev = nodeify_path(devpth, resize_what, log) | ||
4795 | 94 | fs_type = get_fs_type(st_dev, devpth, log) | ||
4796 | 95 | if not fs_type: | ||
4797 | 96 | log.warn("Could not determine filesystem type of %s", resize_what) | ||
4798 | 97 | return | ||
4799 | 98 | |||
4800 | 99 | resizer = None | ||
4801 | 100 | fstype_lc = fs_type.lower() | ||
4802 | 101 | for (pfix, root_cmd) in RESIZE_FS_PREFIXES_CMDS: | ||
4803 | 102 | if fstype_lc.startswith(pfix): | ||
4804 | 103 | resizer = root_cmd | ||
4805 | 104 | break | ||
4806 | 105 | |||
4807 | 106 | if not resizer: | ||
4808 | 107 | log.warn("Not resizing unknown filesystem type %s for %s", | ||
4809 | 108 | fs_type, resize_what) | ||
4810 | 109 | return | ||
4811 | 110 | |||
4812 | 111 | log.debug("Resizing %s (%s) using %s", resize_what, fs_type, resizer) | ||
4813 | 112 | resize_cmd = [resizer, devpth] | ||
4814 | 113 | |||
4815 | 114 | if resize_root == "noblock": | ||
4816 | 115 | # Fork to a child that will run | ||
4817 | 116 | # the resize command | ||
4818 | 117 | util.fork_cb(do_resize, resize_cmd, log) | ||
4819 | 118 | # Don't delete the file now in the parent | ||
4820 | 119 | tfh.delete = False | ||
4821 | 120 | else: | ||
4822 | 121 | do_resize(resize_cmd, log) | ||
4823 | 122 | |||
4824 | 123 | action = 'Resized' | ||
4825 | 78 | if resize_root == "noblock": | 124 | if resize_root == "noblock": |
4845 | 79 | fid = os.fork() | 125 | action = 'Resizing (via forking)' |
4846 | 80 | if fid == 0: | 126 | log.debug("%s root filesystem (type=%s, maj=%i, min=%i, val=%s)", |
4847 | 81 | try: | 127 | action, fs_type, os.major(st_dev), os.minor(st_dev), resize_root) |
4848 | 82 | do_resize(resize_cmd, devpth, log) | 128 | |
4849 | 83 | os._exit(0) # pylint: disable=W0212 | 129 | |
4850 | 84 | except Exception as exc: | 130 | def do_resize(resize_cmd, log): |
4851 | 85 | sys.stderr.write("Failed: %s" % exc) | 131 | start = time.time() |
4833 | 86 | os._exit(1) # pylint: disable=W0212 | ||
4834 | 87 | else: | ||
4835 | 88 | do_resize(resize_cmd, devpth, log) | ||
4836 | 89 | |||
4837 | 90 | log.debug("resizing root filesystem (type=%s, maj=%i, min=%i, val=%s)" % | ||
4838 | 91 | (str(fstype).rstrip("\n"), os.major(st_dev), os.minor(st_dev), | ||
4839 | 92 | resize_root)) | ||
4840 | 93 | |||
4841 | 94 | return | ||
4842 | 95 | |||
4843 | 96 | |||
4844 | 97 | def do_resize(resize_cmd, devpth, log): | ||
4852 | 98 | try: | 132 | try: |
4853 | 99 | start = time.time() | ||
4854 | 100 | util.subp(resize_cmd) | 133 | util.subp(resize_cmd) |
4859 | 101 | except subprocess.CalledProcessError as e: | 134 | except util.ProcessExecutionError: |
4860 | 102 | log.warn("Failed to resize filesystem (%s)" % resize_cmd) | 135 | util.logexc(log, "Failed to resize filesystem (cmd=%s)", resize_cmd) |
4857 | 103 | log.warn("output=%s\nerror=%s\n", e.output[0], e.output[1]) | ||
4858 | 104 | os.unlink(devpth) | ||
4861 | 105 | raise | 136 | raise |
4865 | 106 | 137 | tot_time = int(time.time() - start) | |
4866 | 107 | os.unlink(devpth) | 138 | log.debug("Resizing took %s seconds", tot_time) |
4867 | 108 | log.debug("resize took %s seconds" % (time.time() - start)) | 139 | # TODO: Should we add a fsck check after this to make |
4868 | 140 | # sure we didn't corrupt anything? | ||
4869 | 109 | 141 | ||
4870 | === modified file 'cloudinit/config/cc_rightscale_userdata.py' | |||
4871 | --- cloudinit/CloudConfig/cc_rightscale_userdata.py 2012-01-18 14:07:33 +0000 | |||
4872 | +++ cloudinit/config/cc_rightscale_userdata.py 2012-07-06 21:16:18 +0000 | |||
4873 | @@ -35,44 +35,68 @@ | |||
4874 | 35 | ## | 35 | ## |
4875 | 36 | ## | 36 | ## |
4876 | 37 | 37 | ||
4880 | 38 | import cloudinit.util as util | 38 | import os |
4881 | 39 | from cloudinit.CloudConfig import per_instance | 39 | |
4882 | 40 | from cloudinit import get_ipath_cur | 40 | from cloudinit import url_helper as uhelp |
4883 | 41 | from cloudinit import util | ||
4884 | 42 | from cloudinit.settings import PER_INSTANCE | ||
4885 | 43 | |||
4886 | 41 | from urlparse import parse_qs | 44 | from urlparse import parse_qs |
4887 | 42 | 45 | ||
4894 | 43 | frequency = per_instance | 46 | frequency = PER_INSTANCE |
4895 | 44 | my_name = "cc_rightscale_userdata" | 47 | |
4896 | 45 | my_hookname = 'CLOUD_INIT_REMOTE_HOOK' | 48 | MY_NAME = "cc_rightscale_userdata" |
4897 | 46 | 49 | MY_HOOKNAME = 'CLOUD_INIT_REMOTE_HOOK' | |
4898 | 47 | 50 | ||
4899 | 48 | def handle(_name, _cfg, cloud, log, _args): | 51 | |
4900 | 52 | def handle(name, _cfg, cloud, log, _args): | ||
4901 | 49 | try: | 53 | try: |
4902 | 50 | ud = cloud.get_userdata_raw() | 54 | ud = cloud.get_userdata_raw() |
4903 | 51 | except: | 55 | except: |
4905 | 52 | log.warn("failed to get raw userdata in %s" % my_name) | 56 | log.warn("Failed to get raw userdata in module %s", name) |
4906 | 53 | return | 57 | return |
4907 | 54 | 58 | ||
4908 | 55 | try: | 59 | try: |
4909 | 56 | mdict = parse_qs(ud) | 60 | mdict = parse_qs(ud) |
4911 | 57 | if not my_hookname in mdict: | 61 | if not mdict or not MY_HOOKNAME in mdict: |
4912 | 62 | log.debug(("Skipping module %s, " | ||
4913 | 63 | "did not find %s in parsed" | ||
4914 | 64 | " raw userdata"), name, MY_HOOKNAME) | ||
4915 | 58 | return | 65 | return |
4916 | 59 | except: | 66 | except: |
4918 | 60 | log.warn("failed to urlparse.parse_qa(userdata_raw())") | 67 | util.logexc(log, ("Failed to parse query string %s" |
4919 | 68 | " into a dictionary"), ud) | ||
4920 | 61 | raise | 69 | raise |
4921 | 62 | 70 | ||
4928 | 63 | scripts_d = get_ipath_cur('scripts') | 71 | wrote_fns = [] |
4929 | 64 | i = 0 | 72 | captured_excps = [] |
4930 | 65 | first_e = None | 73 | |
4931 | 66 | for url in mdict[my_hookname]: | 74 | # These will eventually be then ran by the cc_scripts_user |
4932 | 67 | fname = "%s/rightscale-%02i" % (scripts_d, i) | 75 | # TODO: maybe this should just be a new user data handler?? |
4933 | 68 | i = i + 1 | 76 | # Instead of a late module that acts like a user data handler? |
4934 | 77 | scripts_d = cloud.get_ipath_cur('scripts') | ||
4935 | 78 | urls = mdict[MY_HOOKNAME] | ||
4936 | 79 | for (i, url) in enumerate(urls): | ||
4937 | 80 | fname = os.path.join(scripts_d, "rightscale-%02i" % (i)) | ||
4938 | 69 | try: | 81 | try: |
4941 | 70 | content = util.readurl(url) | 82 | resp = uhelp.readurl(url) |
4942 | 71 | util.write_file(fname, content, mode=0700) | 83 | # Ensure its a valid http response (and something gotten) |
4943 | 84 | if resp.ok() and resp.contents: | ||
4944 | 85 | util.write_file(fname, str(resp), mode=0700) | ||
4945 | 86 | wrote_fns.append(fname) | ||
4946 | 72 | except Exception as e: | 87 | except Exception as e: |
4953 | 73 | if not first_e: | 88 | captured_excps.append(e) |
4954 | 74 | first_e = None | 89 | util.logexc(log, "%s failed to read %s and write %s", |
4955 | 75 | log.warn("%s failed to read %s: %s" % (my_name, url, e)) | 90 | MY_NAME, url, fname) |
4956 | 76 | 91 | ||
4957 | 77 | if first_e: | 92 | if wrote_fns: |
4958 | 78 | raise(e) | 93 | log.debug("Wrote out rightscale userdata to %s files", len(wrote_fns)) |
4959 | 94 | |||
4960 | 95 | if len(wrote_fns) != len(urls): | ||
4961 | 96 | skipped = len(urls) - len(wrote_fns) | ||
4962 | 97 | log.debug("%s urls were skipped or failed", skipped) | ||
4963 | 98 | |||
4964 | 99 | if captured_excps: | ||
4965 | 100 | log.warn("%s failed with exceptions, re-raising the last one", | ||
4966 | 101 | len(captured_excps)) | ||
4967 | 102 | raise captured_excps[-1] | ||
4968 | 79 | 103 | ||
4969 | === modified file 'cloudinit/config/cc_rsyslog.py' | |||
4970 | --- cloudinit/CloudConfig/cc_rsyslog.py 2012-01-18 14:07:33 +0000 | |||
4971 | +++ cloudinit/config/cc_rsyslog.py 2012-07-06 21:16:18 +0000 | |||
4972 | @@ -18,16 +18,15 @@ | |||
4973 | 18 | # You should have received a copy of the GNU General Public License | 18 | # You should have received a copy of the GNU General Public License |
4974 | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | 19 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
4975 | 20 | 20 | ||
4980 | 21 | import cloudinit | 21 | import os |
4981 | 22 | import logging | 22 | |
4982 | 23 | import cloudinit.util as util | 23 | from cloudinit import util |
4979 | 24 | import traceback | ||
4983 | 25 | 24 | ||
4984 | 26 | DEF_FILENAME = "20-cloud-config.conf" | 25 | DEF_FILENAME = "20-cloud-config.conf" |
4985 | 27 | DEF_DIR = "/etc/rsyslog.d" | 26 | DEF_DIR = "/etc/rsyslog.d" |
4986 | 28 | 27 | ||
4987 | 29 | 28 | ||
4989 | 30 | def handle(_name, cfg, _cloud, log, _args): | 29 | def handle(name, cfg, cloud, log, _args): |
4990 | 31 | # rsyslog: | 30 | # rsyslog: |
4991 | 32 | # - "*.* @@192.158.1.1" | 31 | # - "*.* @@192.158.1.1" |
4992 | 33 | # - content: "*.* @@192.0.2.1:10514" | 32 | # - content: "*.* @@192.0.2.1:10514" |
4993 | @@ -37,17 +36,18 @@ | |||
4994 | 37 | 36 | ||
4995 | 38 | # process 'rsyslog' | 37 | # process 'rsyslog' |
4996 | 39 | if not 'rsyslog' in cfg: | 38 | if not 'rsyslog' in cfg: |
4997 | 39 | log.debug(("Skipping module named %s," | ||
4998 | 40 | " no 'rsyslog' key in configuration"), name) | ||
4999 | 40 | return | 41 | return |
5000 | 41 | 42 |
The diff has been truncated for viewing.