Merge ~jacekn/squid-reverseproxy-charm:master into squid-reverseproxy-charm:master
- Git
- lp:~jacekn/squid-reverseproxy-charm
- master
- Merge into master
Proposed by
Jacek Nykis
Status: | Merged |
---|---|
Approved by: | David Lawson |
Approved revision: | fef2287b380a554e7dabad17283cf6bbf6f9ac9d |
Merged at revision: | f6087f897ba0f06a25b80391a92994c28db36457 |
Proposed branch: | ~jacekn/squid-reverseproxy-charm:master |
Merge into: | squid-reverseproxy-charm:master |
Diff against target: |
7572 lines (+6069/-434) 43 files modified
charm-helpers.yaml (+2/-1) config.yaml (+1/-1) hooks/charmhelpers/__init__.py (+97/-0) hooks/charmhelpers/contrib/__init__.py (+13/-0) hooks/charmhelpers/contrib/charmsupport/__init__.py (+13/-0) hooks/charmhelpers/contrib/charmsupport/nrpe.py (+253/-21) hooks/charmhelpers/contrib/charmsupport/volumes.py (+19/-2) hooks/charmhelpers/core/__init__.py (+13/-0) hooks/charmhelpers/core/decorators.py (+55/-0) hooks/charmhelpers/core/files.py (+43/-0) hooks/charmhelpers/core/fstab.py (+132/-0) hooks/charmhelpers/core/hookenv.py (+1045/-62) hooks/charmhelpers/core/host.py (+888/-85) hooks/charmhelpers/core/host_factory/__init__.py (+0/-0) hooks/charmhelpers/core/host_factory/centos.py (+72/-0) hooks/charmhelpers/core/host_factory/ubuntu.py (+90/-0) hooks/charmhelpers/core/hugepage.py (+69/-0) hooks/charmhelpers/core/kernel.py (+72/-0) hooks/charmhelpers/core/kernel_factory/__init__.py (+0/-0) hooks/charmhelpers/core/kernel_factory/centos.py (+17/-0) hooks/charmhelpers/core/kernel_factory/ubuntu.py (+13/-0) hooks/charmhelpers/core/services/__init__.py (+16/-0) hooks/charmhelpers/core/services/base.py (+362/-0) hooks/charmhelpers/core/services/helpers.py (+290/-0) hooks/charmhelpers/core/strutils.py (+129/-0) hooks/charmhelpers/core/sysctl.py (+58/-0) hooks/charmhelpers/core/templating.py (+93/-0) hooks/charmhelpers/core/unitdata.py (+525/-0) hooks/charmhelpers/fetch/__init__.py (+145/-149) hooks/charmhelpers/fetch/archiveurl.py (+126/-9) hooks/charmhelpers/fetch/bzrurl.py (+53/-21) hooks/charmhelpers/fetch/centos.py (+171/-0) hooks/charmhelpers/fetch/giturl.py (+69/-0) hooks/charmhelpers/fetch/snap.py (+150/-0) hooks/charmhelpers/fetch/ubuntu.py (+592/-0) hooks/charmhelpers/osplatform.py (+25/-0) hooks/hooks.py (+41/-20) hooks/install (+3/-0) hooks/tests/test_helpers.py (+32/-21) hooks/tests/test_nrpe_hooks.py (+20/-39) metadata.yaml (+1/-0) scripts/charm_helpers_sync.py (+258/-0) templates/main_config.template (+3/-3) |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Squid Reverse Proxy Charmers | Pending | ||
Review via email:
|
Commit message
Update charm to support bionic
Description of the change
To post a comment you must log in.
Revision history for this message
![](/+icing/build/overlay/assets/skins/sam/images/close.gif)
🤖 Canonical IS Merge Bot (canonical-is-mergebot) wrote : | # |
Revision history for this message
![](/+icing/build/overlay/assets/skins/sam/images/close.gif)
🤖 Canonical IS Merge Bot (canonical-is-mergebot) wrote : | # |
Change successfully merged at revision f6087f897ba0f06
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | diff --git a/charm-helpers.yaml b/charm-helpers.yaml | |||
2 | index ffd6ac8..622437b 100644 | |||
3 | --- a/charm-helpers.yaml | |||
4 | +++ b/charm-helpers.yaml | |||
5 | @@ -1,4 +1,5 @@ | |||
6 | 1 | include: | 1 | include: |
7 | 2 | - core | 2 | - core |
8 | 3 | - fetch | 3 | - fetch |
9 | 4 | - contrib.charmsupport | ||
10 | 5 | \ No newline at end of file | 4 | \ No newline at end of file |
11 | 5 | - osplatform | ||
12 | 6 | - contrib.charmsupport | ||
13 | diff --git a/config.yaml b/config.yaml | |||
14 | index fc56126..8f98d85 100644 | |||
15 | --- a/config.yaml | |||
16 | +++ b/config.yaml | |||
17 | @@ -59,7 +59,7 @@ options: | |||
18 | 59 | description: Maximum size of the on-disk object cache (MB). Set to zero to disable disk caching. | 59 | description: Maximum size of the on-disk object cache (MB). Set to zero to disable disk caching. |
19 | 60 | cache_dir: | 60 | cache_dir: |
20 | 61 | type: string | 61 | type: string |
22 | 62 | default: '/var/spool/squid3' | 62 | default: '' |
23 | 63 | description: The top-level directory where cache swap files will be stored. | 63 | description: The top-level directory where cache swap files will be stored. |
24 | 64 | target_objs_per_dir: | 64 | target_objs_per_dir: |
25 | 65 | type: int | 65 | type: int |
26 | diff --git a/hooks/charmhelpers/__init__.py b/hooks/charmhelpers/__init__.py | |||
27 | index e69de29..e7aa471 100644 | |||
28 | --- a/hooks/charmhelpers/__init__.py | |||
29 | +++ b/hooks/charmhelpers/__init__.py | |||
30 | @@ -0,0 +1,97 @@ | |||
31 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
32 | 2 | # | ||
33 | 3 | # Licensed under the Apache License, Version 2.0 (the "License"); | ||
34 | 4 | # you may not use this file except in compliance with the License. | ||
35 | 5 | # You may obtain a copy of the License at | ||
36 | 6 | # | ||
37 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 | ||
38 | 8 | # | ||
39 | 9 | # Unless required by applicable law or agreed to in writing, software | ||
40 | 10 | # distributed under the License is distributed on an "AS IS" BASIS, | ||
41 | 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
42 | 12 | # See the License for the specific language governing permissions and | ||
43 | 13 | # limitations under the License. | ||
44 | 14 | |||
45 | 15 | # Bootstrap charm-helpers, installing its dependencies if necessary using | ||
46 | 16 | # only standard libraries. | ||
47 | 17 | from __future__ import print_function | ||
48 | 18 | from __future__ import absolute_import | ||
49 | 19 | |||
50 | 20 | import functools | ||
51 | 21 | import inspect | ||
52 | 22 | import subprocess | ||
53 | 23 | import sys | ||
54 | 24 | |||
55 | 25 | try: | ||
56 | 26 | import six # flake8: noqa | ||
57 | 27 | except ImportError: | ||
58 | 28 | if sys.version_info.major == 2: | ||
59 | 29 | subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) | ||
60 | 30 | else: | ||
61 | 31 | subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) | ||
62 | 32 | import six # flake8: noqa | ||
63 | 33 | |||
64 | 34 | try: | ||
65 | 35 | import yaml # flake8: noqa | ||
66 | 36 | except ImportError: | ||
67 | 37 | if sys.version_info.major == 2: | ||
68 | 38 | subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) | ||
69 | 39 | else: | ||
70 | 40 | subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) | ||
71 | 41 | import yaml # flake8: noqa | ||
72 | 42 | |||
73 | 43 | |||
74 | 44 | # Holds a list of mapping of mangled function names that have been deprecated | ||
75 | 45 | # using the @deprecate decorator below. This is so that the warning is only | ||
76 | 46 | # printed once for each usage of the function. | ||
77 | 47 | __deprecated_functions = {} | ||
78 | 48 | |||
79 | 49 | |||
80 | 50 | def deprecate(warning, date=None, log=None): | ||
81 | 51 | """Add a deprecation warning the first time the function is used. | ||
82 | 52 | The date, which is a string in semi-ISO8601 format, indicates the year-month | ||
83 | 53 | that the function is officially going to be removed. | ||
84 | 54 | |||
85 | 55 | usage: | ||
86 | 56 | |||
87 | 57 | @deprecate('use core/fetch/add_source() instead', '2017-04') | ||
88 | 58 | def contributed_add_source_thing(...): | ||
89 | 59 | ... | ||
90 | 60 | |||
91 | 61 | And it then prints to the log ONCE that the function is deprecated. | ||
92 | 62 | The reason for passing the logging function (log) is so that hookenv.log | ||
93 | 63 | can be used for a charm if needed. | ||
94 | 64 | |||
95 | 65 | :param warning: String to indicate where it has moved to. | ||
96 | 66 | :param date: optional string, in YYYY-MM format to indicate when the | ||
97 | 67 | function will definitely (probably) be removed. | ||
98 | 68 | :param log: The log function to call to log. If not, logs to stdout | ||
99 | 69 | """ | ||
100 | 70 | def wrap(f): | ||
101 | 71 | |||
102 | 72 | @functools.wraps(f) | ||
103 | 73 | def wrapped_f(*args, **kwargs): | ||
104 | 74 | try: | ||
105 | 75 | module = inspect.getmodule(f) | ||
106 | 76 | file = inspect.getsourcefile(f) | ||
107 | 77 | lines = inspect.getsourcelines(f) | ||
108 | 78 | f_name = "{}-{}-{}..{}-{}".format( | ||
109 | 79 | module.__name__, file, lines[0], lines[-1], f.__name__) | ||
110 | 80 | except (IOError, TypeError): | ||
111 | 81 | # assume it was local, so just use the name of the function | ||
112 | 82 | f_name = f.__name__ | ||
113 | 83 | if f_name not in __deprecated_functions: | ||
114 | 84 | __deprecated_functions[f_name] = True | ||
115 | 85 | s = "DEPRECATION WARNING: Function {} is being removed".format( | ||
116 | 86 | f.__name__) | ||
117 | 87 | if date: | ||
118 | 88 | s = "{} on/around {}".format(s, date) | ||
119 | 89 | if warning: | ||
120 | 90 | s = "{} : {}".format(s, warning) | ||
121 | 91 | if log: | ||
122 | 92 | log(s) | ||
123 | 93 | else: | ||
124 | 94 | print(s) | ||
125 | 95 | return f(*args, **kwargs) | ||
126 | 96 | return wrapped_f | ||
127 | 97 | return wrap | ||
128 | diff --git a/hooks/charmhelpers/contrib/__init__.py b/hooks/charmhelpers/contrib/__init__.py | |||
129 | index e69de29..d7567b8 100644 | |||
130 | --- a/hooks/charmhelpers/contrib/__init__.py | |||
131 | +++ b/hooks/charmhelpers/contrib/__init__.py | |||
132 | @@ -0,0 +1,13 @@ | |||
133 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
134 | 2 | # | ||
135 | 3 | # Licensed under the Apache License, Version 2.0 (the "License"); | ||
136 | 4 | # you may not use this file except in compliance with the License. | ||
137 | 5 | # You may obtain a copy of the License at | ||
138 | 6 | # | ||
139 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 | ||
140 | 8 | # | ||
141 | 9 | # Unless required by applicable law or agreed to in writing, software | ||
142 | 10 | # distributed under the License is distributed on an "AS IS" BASIS, | ||
143 | 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
144 | 12 | # See the License for the specific language governing permissions and | ||
145 | 13 | # limitations under the License. | ||
146 | diff --git a/hooks/charmhelpers/contrib/charmsupport/__init__.py b/hooks/charmhelpers/contrib/charmsupport/__init__.py | |||
147 | index e69de29..d7567b8 100644 | |||
148 | --- a/hooks/charmhelpers/contrib/charmsupport/__init__.py | |||
149 | +++ b/hooks/charmhelpers/contrib/charmsupport/__init__.py | |||
150 | @@ -0,0 +1,13 @@ | |||
151 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
152 | 2 | # | ||
153 | 3 | # Licensed under the Apache License, Version 2.0 (the "License"); | ||
154 | 4 | # you may not use this file except in compliance with the License. | ||
155 | 5 | # You may obtain a copy of the License at | ||
156 | 6 | # | ||
157 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 | ||
158 | 8 | # | ||
159 | 9 | # Unless required by applicable law or agreed to in writing, software | ||
160 | 10 | # distributed under the License is distributed on an "AS IS" BASIS, | ||
161 | 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
162 | 12 | # See the License for the specific language governing permissions and | ||
163 | 13 | # limitations under the License. | ||
164 | diff --git a/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/hooks/charmhelpers/contrib/charmsupport/nrpe.py | |||
165 | index f3bfe3f..e3d10c1 100644 | |||
166 | --- a/hooks/charmhelpers/contrib/charmsupport/nrpe.py | |||
167 | +++ b/hooks/charmhelpers/contrib/charmsupport/nrpe.py | |||
168 | @@ -1,3 +1,17 @@ | |||
169 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
170 | 2 | # | ||
171 | 3 | # Licensed under the Apache License, Version 2.0 (the "License"); | ||
172 | 4 | # you may not use this file except in compliance with the License. | ||
173 | 5 | # You may obtain a copy of the License at | ||
174 | 6 | # | ||
175 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 | ||
176 | 8 | # | ||
177 | 9 | # Unless required by applicable law or agreed to in writing, software | ||
178 | 10 | # distributed under the License is distributed on an "AS IS" BASIS, | ||
179 | 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
180 | 12 | # See the License for the specific language governing permissions and | ||
181 | 13 | # limitations under the License. | ||
182 | 14 | |||
183 | 1 | """Compatibility with the nrpe-external-master charm""" | 15 | """Compatibility with the nrpe-external-master charm""" |
184 | 2 | # Copyright 2012 Canonical Ltd. | 16 | # Copyright 2012 Canonical Ltd. |
185 | 3 | # | 17 | # |
186 | @@ -8,19 +22,24 @@ import subprocess | |||
187 | 8 | import pwd | 22 | import pwd |
188 | 9 | import grp | 23 | import grp |
189 | 10 | import os | 24 | import os |
190 | 25 | import glob | ||
191 | 26 | import shutil | ||
192 | 11 | import re | 27 | import re |
193 | 12 | import shlex | 28 | import shlex |
194 | 13 | import yaml | 29 | import yaml |
195 | 14 | 30 | ||
196 | 15 | from charmhelpers.core.hookenv import ( | 31 | from charmhelpers.core.hookenv import ( |
197 | 16 | config, | 32 | config, |
198 | 33 | hook_name, | ||
199 | 17 | local_unit, | 34 | local_unit, |
200 | 18 | log, | 35 | log, |
201 | 19 | relation_ids, | 36 | relation_ids, |
202 | 20 | relation_set, | 37 | relation_set, |
203 | 38 | relations_of_type, | ||
204 | 21 | ) | 39 | ) |
205 | 22 | 40 | ||
206 | 23 | from charmhelpers.core.host import service | 41 | from charmhelpers.core.host import service |
207 | 42 | from charmhelpers.core import host | ||
208 | 24 | 43 | ||
209 | 25 | # This module adds compatibility with the nrpe-external-master and plain nrpe | 44 | # This module adds compatibility with the nrpe-external-master and plain nrpe |
210 | 26 | # subordinate charms. To use it in your charm: | 45 | # subordinate charms. To use it in your charm: |
211 | @@ -54,6 +73,12 @@ from charmhelpers.core.host import service | |||
212 | 54 | # juju-myservice-0 | 73 | # juju-myservice-0 |
213 | 55 | # If you're running multiple environments with the same services in them | 74 | # If you're running multiple environments with the same services in them |
214 | 56 | # this allows you to differentiate between them. | 75 | # this allows you to differentiate between them. |
215 | 76 | # nagios_servicegroups: | ||
216 | 77 | # default: "" | ||
217 | 78 | # type: string | ||
218 | 79 | # description: | | ||
219 | 80 | # A comma-separated list of nagios servicegroups. | ||
220 | 81 | # If left empty, the nagios_context will be used as the servicegroup | ||
221 | 57 | # | 82 | # |
222 | 58 | # 3. Add custom checks (Nagios plugins) to files/nrpe-external-master | 83 | # 3. Add custom checks (Nagios plugins) to files/nrpe-external-master |
223 | 59 | # | 84 | # |
224 | @@ -85,6 +110,13 @@ from charmhelpers.core.host import service | |||
225 | 85 | # def local_monitors_relation_changed(): | 110 | # def local_monitors_relation_changed(): |
226 | 86 | # update_nrpe_config() | 111 | # update_nrpe_config() |
227 | 87 | # | 112 | # |
228 | 113 | # 4.a If your charm is a subordinate charm set primary=False | ||
229 | 114 | # | ||
230 | 115 | # from charmsupport.nrpe import NRPE | ||
231 | 116 | # (...) | ||
232 | 117 | # def update_nrpe_config(): | ||
233 | 118 | # nrpe_compat = NRPE(primary=False) | ||
234 | 119 | # | ||
235 | 88 | # 5. ln -s hooks.py nrpe-external-master-relation-changed | 120 | # 5. ln -s hooks.py nrpe-external-master-relation-changed |
236 | 89 | # ln -s hooks.py local-monitors-relation-changed | 121 | # ln -s hooks.py local-monitors-relation-changed |
237 | 90 | 122 | ||
238 | @@ -94,7 +126,7 @@ class CheckException(Exception): | |||
239 | 94 | 126 | ||
240 | 95 | 127 | ||
241 | 96 | class Check(object): | 128 | class Check(object): |
243 | 97 | shortname_re = '[A-Za-z0-9-_]+$' | 129 | shortname_re = '[A-Za-z0-9-_.]+$' |
244 | 98 | service_template = (""" | 130 | service_template = (""" |
245 | 99 | #--------------------------------------------------- | 131 | #--------------------------------------------------- |
246 | 100 | # This file is Juju managed | 132 | # This file is Juju managed |
247 | @@ -123,12 +155,17 @@ define service {{ | |||
248 | 123 | self.description = description | 155 | self.description = description |
249 | 124 | self.check_cmd = self._locate_cmd(check_cmd) | 156 | self.check_cmd = self._locate_cmd(check_cmd) |
250 | 125 | 157 | ||
251 | 158 | def _get_check_filename(self): | ||
252 | 159 | return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command)) | ||
253 | 160 | |||
254 | 161 | def _get_service_filename(self, hostname): | ||
255 | 162 | return os.path.join(NRPE.nagios_exportdir, | ||
256 | 163 | 'service__{}_{}.cfg'.format(hostname, self.command)) | ||
257 | 164 | |||
258 | 126 | def _locate_cmd(self, check_cmd): | 165 | def _locate_cmd(self, check_cmd): |
259 | 127 | search_path = ( | 166 | search_path = ( |
260 | 128 | '/', | ||
261 | 129 | os.path.join(os.environ['CHARM_DIR'], | ||
262 | 130 | 'files/nrpe-external-master'), | ||
263 | 131 | '/usr/lib/nagios/plugins', | 167 | '/usr/lib/nagios/plugins', |
264 | 168 | '/usr/local/lib/nagios/plugins', | ||
265 | 132 | ) | 169 | ) |
266 | 133 | parts = shlex.split(check_cmd) | 170 | parts = shlex.split(check_cmd) |
267 | 134 | for path in search_path: | 171 | for path in search_path: |
268 | @@ -140,11 +177,30 @@ define service {{ | |||
269 | 140 | log('Check command not found: {}'.format(parts[0])) | 177 | log('Check command not found: {}'.format(parts[0])) |
270 | 141 | return '' | 178 | return '' |
271 | 142 | 179 | ||
275 | 143 | def write(self, nagios_context, hostname): | 180 | def _remove_service_files(self): |
276 | 144 | nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format( | 181 | if not os.path.exists(NRPE.nagios_exportdir): |
277 | 145 | self.command) | 182 | return |
278 | 183 | for f in os.listdir(NRPE.nagios_exportdir): | ||
279 | 184 | if f.endswith('_{}.cfg'.format(self.command)): | ||
280 | 185 | os.remove(os.path.join(NRPE.nagios_exportdir, f)) | ||
281 | 186 | |||
282 | 187 | def remove(self, hostname): | ||
283 | 188 | nrpe_check_file = self._get_check_filename() | ||
284 | 189 | if os.path.exists(nrpe_check_file): | ||
285 | 190 | os.remove(nrpe_check_file) | ||
286 | 191 | self._remove_service_files() | ||
287 | 192 | |||
288 | 193 | def write(self, nagios_context, hostname, nagios_servicegroups): | ||
289 | 194 | nrpe_check_file = self._get_check_filename() | ||
290 | 146 | with open(nrpe_check_file, 'w') as nrpe_check_config: | 195 | with open(nrpe_check_file, 'w') as nrpe_check_config: |
291 | 147 | nrpe_check_config.write("# check {}\n".format(self.shortname)) | 196 | nrpe_check_config.write("# check {}\n".format(self.shortname)) |
292 | 197 | if nagios_servicegroups: | ||
293 | 198 | nrpe_check_config.write( | ||
294 | 199 | "# The following header was added automatically by juju\n") | ||
295 | 200 | nrpe_check_config.write( | ||
296 | 201 | "# Modifying it will affect nagios monitoring and alerting\n") | ||
297 | 202 | nrpe_check_config.write( | ||
298 | 203 | "# servicegroups: {}\n".format(nagios_servicegroups)) | ||
299 | 148 | nrpe_check_config.write("command[{}]={}\n".format( | 204 | nrpe_check_config.write("command[{}]={}\n".format( |
300 | 149 | self.command, self.check_cmd)) | 205 | self.command, self.check_cmd)) |
301 | 150 | 206 | ||
302 | @@ -152,23 +208,22 @@ define service {{ | |||
303 | 152 | log('Not writing service config as {} is not accessible'.format( | 208 | log('Not writing service config as {} is not accessible'.format( |
304 | 153 | NRPE.nagios_exportdir)) | 209 | NRPE.nagios_exportdir)) |
305 | 154 | else: | 210 | else: |
307 | 155 | self.write_service_config(nagios_context, hostname) | 211 | self.write_service_config(nagios_context, hostname, |
308 | 212 | nagios_servicegroups) | ||
309 | 156 | 213 | ||
314 | 157 | def write_service_config(self, nagios_context, hostname): | 214 | def write_service_config(self, nagios_context, hostname, |
315 | 158 | for f in os.listdir(NRPE.nagios_exportdir): | 215 | nagios_servicegroups): |
316 | 159 | if re.search('.*{}.cfg'.format(self.command), f): | 216 | self._remove_service_files() |
313 | 160 | os.remove(os.path.join(NRPE.nagios_exportdir, f)) | ||
317 | 161 | 217 | ||
318 | 162 | templ_vars = { | 218 | templ_vars = { |
319 | 163 | 'nagios_hostname': hostname, | 219 | 'nagios_hostname': hostname, |
321 | 164 | 'nagios_servicegroup': nagios_context, | 220 | 'nagios_servicegroup': nagios_servicegroups, |
322 | 165 | 'description': self.description, | 221 | 'description': self.description, |
323 | 166 | 'shortname': self.shortname, | 222 | 'shortname': self.shortname, |
324 | 167 | 'command': self.command, | 223 | 'command': self.command, |
325 | 168 | } | 224 | } |
326 | 169 | nrpe_service_text = Check.service_template.format(**templ_vars) | 225 | nrpe_service_text = Check.service_template.format(**templ_vars) |
329 | 170 | nrpe_service_file = '{}/service__{}_{}.cfg'.format( | 226 | nrpe_service_file = self._get_service_filename(hostname) |
328 | 171 | NRPE.nagios_exportdir, hostname, self.command) | ||
330 | 172 | with open(nrpe_service_file, 'w') as nrpe_service_config: | 227 | with open(nrpe_service_file, 'w') as nrpe_service_config: |
331 | 173 | nrpe_service_config.write(str(nrpe_service_text)) | 228 | nrpe_service_config.write(str(nrpe_service_text)) |
332 | 174 | 229 | ||
333 | @@ -180,23 +235,58 @@ class NRPE(object): | |||
334 | 180 | nagios_logdir = '/var/log/nagios' | 235 | nagios_logdir = '/var/log/nagios' |
335 | 181 | nagios_exportdir = '/var/lib/nagios/export' | 236 | nagios_exportdir = '/var/lib/nagios/export' |
336 | 182 | nrpe_confdir = '/etc/nagios/nrpe.d' | 237 | nrpe_confdir = '/etc/nagios/nrpe.d' |
337 | 238 | homedir = '/var/lib/nagios' # home dir provided by nagios-nrpe-server | ||
338 | 183 | 239 | ||
340 | 184 | def __init__(self): | 240 | def __init__(self, hostname=None, primary=True): |
341 | 185 | super(NRPE, self).__init__() | 241 | super(NRPE, self).__init__() |
342 | 186 | self.config = config() | 242 | self.config = config() |
343 | 243 | self.primary = primary | ||
344 | 187 | self.nagios_context = self.config['nagios_context'] | 244 | self.nagios_context = self.config['nagios_context'] |
345 | 245 | if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']: | ||
346 | 246 | self.nagios_servicegroups = self.config['nagios_servicegroups'] | ||
347 | 247 | else: | ||
348 | 248 | self.nagios_servicegroups = self.nagios_context | ||
349 | 188 | self.unit_name = local_unit().replace('/', '-') | 249 | self.unit_name = local_unit().replace('/', '-') |
351 | 189 | self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) | 250 | if hostname: |
352 | 251 | self.hostname = hostname | ||
353 | 252 | else: | ||
354 | 253 | nagios_hostname = get_nagios_hostname() | ||
355 | 254 | if nagios_hostname: | ||
356 | 255 | self.hostname = nagios_hostname | ||
357 | 256 | else: | ||
358 | 257 | self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) | ||
359 | 190 | self.checks = [] | 258 | self.checks = [] |
360 | 259 | # Iff in an nrpe-external-master relation hook, set primary status | ||
361 | 260 | relation = relation_ids('nrpe-external-master') | ||
362 | 261 | if relation: | ||
363 | 262 | log("Setting charm primary status {}".format(primary)) | ||
364 | 263 | for rid in relation_ids('nrpe-external-master'): | ||
365 | 264 | relation_set(relation_id=rid, relation_settings={'primary': self.primary}) | ||
366 | 191 | 265 | ||
367 | 192 | def add_check(self, *args, **kwargs): | 266 | def add_check(self, *args, **kwargs): |
368 | 193 | self.checks.append(Check(*args, **kwargs)) | 267 | self.checks.append(Check(*args, **kwargs)) |
369 | 194 | 268 | ||
370 | 269 | def remove_check(self, *args, **kwargs): | ||
371 | 270 | if kwargs.get('shortname') is None: | ||
372 | 271 | raise ValueError('shortname of check must be specified') | ||
373 | 272 | |||
374 | 273 | # Use sensible defaults if they're not specified - these are not | ||
375 | 274 | # actually used during removal, but they're required for constructing | ||
376 | 275 | # the Check object; check_disk is chosen because it's part of the | ||
377 | 276 | # nagios-plugins-basic package. | ||
378 | 277 | if kwargs.get('check_cmd') is None: | ||
379 | 278 | kwargs['check_cmd'] = 'check_disk' | ||
380 | 279 | if kwargs.get('description') is None: | ||
381 | 280 | kwargs['description'] = '' | ||
382 | 281 | |||
383 | 282 | check = Check(*args, **kwargs) | ||
384 | 283 | check.remove(self.hostname) | ||
385 | 284 | |||
386 | 195 | def write(self): | 285 | def write(self): |
387 | 196 | try: | 286 | try: |
388 | 197 | nagios_uid = pwd.getpwnam('nagios').pw_uid | 287 | nagios_uid = pwd.getpwnam('nagios').pw_uid |
389 | 198 | nagios_gid = grp.getgrnam('nagios').gr_gid | 288 | nagios_gid = grp.getgrnam('nagios').gr_gid |
391 | 199 | except: | 289 | except Exception: |
392 | 200 | log("Nagios user not set up, nrpe checks not updated") | 290 | log("Nagios user not set up, nrpe checks not updated") |
393 | 201 | return | 291 | return |
394 | 202 | 292 | ||
395 | @@ -207,12 +297,154 @@ class NRPE(object): | |||
396 | 207 | nrpe_monitors = {} | 297 | nrpe_monitors = {} |
397 | 208 | monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}} | 298 | monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}} |
398 | 209 | for nrpecheck in self.checks: | 299 | for nrpecheck in self.checks: |
400 | 210 | nrpecheck.write(self.nagios_context, self.hostname) | 300 | nrpecheck.write(self.nagios_context, self.hostname, |
401 | 301 | self.nagios_servicegroups) | ||
402 | 211 | nrpe_monitors[nrpecheck.shortname] = { | 302 | nrpe_monitors[nrpecheck.shortname] = { |
403 | 212 | "command": nrpecheck.command, | 303 | "command": nrpecheck.command, |
404 | 213 | } | 304 | } |
405 | 214 | 305 | ||
407 | 215 | service('restart', 'nagios-nrpe-server') | 306 | # update-status hooks are configured to fire every 5 minutes by |
408 | 307 | # default. When nagios-nrpe-server is restarted, the nagios server | ||
409 | 308 | # reports checks failing causing unnecessary alerts. Let's not restart | ||
410 | 309 | # on update-status hooks. | ||
411 | 310 | if not hook_name() == 'update-status': | ||
412 | 311 | service('restart', 'nagios-nrpe-server') | ||
413 | 216 | 312 | ||
415 | 217 | for rid in relation_ids("local-monitors"): | 313 | monitor_ids = relation_ids("local-monitors") + \ |
416 | 314 | relation_ids("nrpe-external-master") | ||
417 | 315 | for rid in monitor_ids: | ||
418 | 218 | relation_set(relation_id=rid, monitors=yaml.dump(monitors)) | 316 | relation_set(relation_id=rid, monitors=yaml.dump(monitors)) |
419 | 317 | |||
420 | 318 | |||
421 | 319 | def get_nagios_hostcontext(relation_name='nrpe-external-master'): | ||
422 | 320 | """ | ||
423 | 321 | Query relation with nrpe subordinate, return the nagios_host_context | ||
424 | 322 | |||
425 | 323 | :param str relation_name: Name of relation nrpe sub joined to | ||
426 | 324 | """ | ||
427 | 325 | for rel in relations_of_type(relation_name): | ||
428 | 326 | if 'nagios_host_context' in rel: | ||
429 | 327 | return rel['nagios_host_context'] | ||
430 | 328 | |||
431 | 329 | |||
432 | 330 | def get_nagios_hostname(relation_name='nrpe-external-master'): | ||
433 | 331 | """ | ||
434 | 332 | Query relation with nrpe subordinate, return the nagios_hostname | ||
435 | 333 | |||
436 | 334 | :param str relation_name: Name of relation nrpe sub joined to | ||
437 | 335 | """ | ||
438 | 336 | for rel in relations_of_type(relation_name): | ||
439 | 337 | if 'nagios_hostname' in rel: | ||
440 | 338 | return rel['nagios_hostname'] | ||
441 | 339 | |||
442 | 340 | |||
443 | 341 | def get_nagios_unit_name(relation_name='nrpe-external-master'): | ||
444 | 342 | """ | ||
445 | 343 | Return the nagios unit name prepended with host_context if needed | ||
446 | 344 | |||
447 | 345 | :param str relation_name: Name of relation nrpe sub joined to | ||
448 | 346 | """ | ||
449 | 347 | host_context = get_nagios_hostcontext(relation_name) | ||
450 | 348 | if host_context: | ||
451 | 349 | unit = "%s:%s" % (host_context, local_unit()) | ||
452 | 350 | else: | ||
453 | 351 | unit = local_unit() | ||
454 | 352 | return unit | ||
455 | 353 | |||
456 | 354 | |||
457 | 355 | def add_init_service_checks(nrpe, services, unit_name, immediate_check=True): | ||
458 | 356 | """ | ||
459 | 357 | Add checks for each service in list | ||
460 | 358 | |||
461 | 359 | :param NRPE nrpe: NRPE object to add check to | ||
462 | 360 | :param list services: List of services to check | ||
463 | 361 | :param str unit_name: Unit name to use in check description | ||
464 | 362 | :param bool immediate_check: For sysv init, run the service check immediately | ||
465 | 363 | """ | ||
466 | 364 | for svc in services: | ||
467 | 365 | # Don't add a check for these services from neutron-gateway | ||
468 | 366 | if svc in ['ext-port', 'os-charm-phy-nic-mtu']: | ||
469 | 367 | next | ||
470 | 368 | |||
471 | 369 | upstart_init = '/etc/init/%s.conf' % svc | ||
472 | 370 | sysv_init = '/etc/init.d/%s' % svc | ||
473 | 371 | |||
474 | 372 | if host.init_is_systemd(): | ||
475 | 373 | nrpe.add_check( | ||
476 | 374 | shortname=svc, | ||
477 | 375 | description='process check {%s}' % unit_name, | ||
478 | 376 | check_cmd='check_systemd.py %s' % svc | ||
479 | 377 | ) | ||
480 | 378 | elif os.path.exists(upstart_init): | ||
481 | 379 | nrpe.add_check( | ||
482 | 380 | shortname=svc, | ||
483 | 381 | description='process check {%s}' % unit_name, | ||
484 | 382 | check_cmd='check_upstart_job %s' % svc | ||
485 | 383 | ) | ||
486 | 384 | elif os.path.exists(sysv_init): | ||
487 | 385 | cronpath = '/etc/cron.d/nagios-service-check-%s' % svc | ||
488 | 386 | checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc) | ||
489 | 387 | croncmd = ( | ||
490 | 388 | '/usr/local/lib/nagios/plugins/check_exit_status.pl ' | ||
491 | 389 | '-e -s /etc/init.d/%s status' % svc | ||
492 | 390 | ) | ||
493 | 391 | cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath) | ||
494 | 392 | f = open(cronpath, 'w') | ||
495 | 393 | f.write(cron_file) | ||
496 | 394 | f.close() | ||
497 | 395 | nrpe.add_check( | ||
498 | 396 | shortname=svc, | ||
499 | 397 | description='service check {%s}' % unit_name, | ||
500 | 398 | check_cmd='check_status_file.py -f %s' % checkpath, | ||
501 | 399 | ) | ||
502 | 400 | # if /var/lib/nagios doesn't exist open(checkpath, 'w') will fail | ||
503 | 401 | # (LP: #1670223). | ||
504 | 402 | if immediate_check and os.path.isdir(nrpe.homedir): | ||
505 | 403 | f = open(checkpath, 'w') | ||
506 | 404 | subprocess.call( | ||
507 | 405 | croncmd.split(), | ||
508 | 406 | stdout=f, | ||
509 | 407 | stderr=subprocess.STDOUT | ||
510 | 408 | ) | ||
511 | 409 | f.close() | ||
512 | 410 | os.chmod(checkpath, 0o644) | ||
513 | 411 | |||
514 | 412 | |||
515 | 413 | def copy_nrpe_checks(nrpe_files_dir=None): | ||
516 | 414 | """ | ||
517 | 415 | Copy the nrpe checks into place | ||
518 | 416 | |||
519 | 417 | """ | ||
520 | 418 | NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' | ||
521 | 419 | default_nrpe_files_dir = os.path.join( | ||
522 | 420 | os.getenv('CHARM_DIR'), | ||
523 | 421 | 'hooks', | ||
524 | 422 | 'charmhelpers', | ||
525 | 423 | 'contrib', | ||
526 | 424 | 'openstack', | ||
527 | 425 | 'files') | ||
528 | 426 | if not nrpe_files_dir: | ||
529 | 427 | nrpe_files_dir = default_nrpe_files_dir | ||
530 | 428 | if not os.path.exists(NAGIOS_PLUGINS): | ||
531 | 429 | os.makedirs(NAGIOS_PLUGINS) | ||
532 | 430 | for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")): | ||
533 | 431 | if os.path.isfile(fname): | ||
534 | 432 | shutil.copy2(fname, | ||
535 | 433 | os.path.join(NAGIOS_PLUGINS, os.path.basename(fname))) | ||
536 | 434 | |||
537 | 435 | |||
def add_haproxy_checks(nrpe, unit_name):
    """
    Add the standard pair of HAProxy NRPE checks.

    :param NRPE nrpe: NRPE object to add check to
    :param str unit_name: Unit name to use in check description
    """
    # (shortname, description template, command) for each check.
    haproxy_checks = (
        ('haproxy_servers', 'Check HAProxy {%s}', 'check_haproxy.sh'),
        ('haproxy_queue', 'Check HAProxy queue depth {%s}',
         'check_haproxy_queue_depth.sh'),
    )
    for shortname, desc_fmt, cmd in haproxy_checks:
        nrpe.add_check(shortname=shortname,
                       description=desc_fmt % unit_name,
                       check_cmd=cmd)
553 | diff --git a/hooks/charmhelpers/contrib/charmsupport/volumes.py b/hooks/charmhelpers/contrib/charmsupport/volumes.py | |||
554 | index 0f905df..7ea43f0 100644 | |||
555 | --- a/hooks/charmhelpers/contrib/charmsupport/volumes.py | |||
556 | +++ b/hooks/charmhelpers/contrib/charmsupport/volumes.py | |||
557 | @@ -1,8 +1,23 @@ | |||
558 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
559 | 2 | # | ||
560 | 3 | # Licensed under the Apache License, Version 2.0 (the "License"); | ||
561 | 4 | # you may not use this file except in compliance with the License. | ||
562 | 5 | # You may obtain a copy of the License at | ||
563 | 6 | # | ||
564 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 | ||
565 | 8 | # | ||
566 | 9 | # Unless required by applicable law or agreed to in writing, software | ||
567 | 10 | # distributed under the License is distributed on an "AS IS" BASIS, | ||
568 | 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
569 | 12 | # See the License for the specific language governing permissions and | ||
570 | 13 | # limitations under the License. | ||
571 | 14 | |||
572 | 1 | ''' | 15 | ''' |
573 | 2 | Functions for managing volumes in juju units. One volume is supported per unit. | 16 | Functions for managing volumes in juju units. One volume is supported per unit. |
574 | 3 | Subordinates may have their own storage, provided it is on its own partition. | 17 | Subordinates may have their own storage, provided it is on its own partition. |
575 | 4 | 18 | ||
577 | 5 | Configuration stanzas: | 19 | Configuration stanzas:: |
578 | 20 | |||
579 | 6 | volume-ephemeral: | 21 | volume-ephemeral: |
580 | 7 | type: boolean | 22 | type: boolean |
581 | 8 | default: true | 23 | default: true |
582 | @@ -20,7 +35,8 @@ Configuration stanzas: | |||
583 | 20 | is 'true' and no volume-map value is set. Use 'juju set' to set a | 35 | is 'true' and no volume-map value is set. Use 'juju set' to set a |
584 | 21 | value and 'juju resolved' to complete configuration. | 36 | value and 'juju resolved' to complete configuration. |
585 | 22 | 37 | ||
587 | 23 | Usage: | 38 | Usage:: |
588 | 39 | |||
589 | 24 | from charmsupport.volumes import configure_volume, VolumeConfigurationError | 40 | from charmsupport.volumes import configure_volume, VolumeConfigurationError |
590 | 25 | from charmsupport.hookenv import log, ERROR | 41 | from charmsupport.hookenv import log, ERROR |
591 | 26 | def post_mount_hook(): | 42 | def post_mount_hook(): |
592 | @@ -34,6 +50,7 @@ Usage: | |||
593 | 34 | after_change=post_mount_hook) | 50 | after_change=post_mount_hook) |
594 | 35 | except VolumeConfigurationError: | 51 | except VolumeConfigurationError: |
595 | 36 | log('Storage could not be configured', ERROR) | 52 | log('Storage could not be configured', ERROR) |
596 | 53 | |||
597 | 37 | ''' | 54 | ''' |
598 | 38 | 55 | ||
599 | 39 | # XXX: Known limitations | 56 | # XXX: Known limitations |
600 | diff --git a/hooks/charmhelpers/core/__init__.py b/hooks/charmhelpers/core/__init__.py | |||
601 | index e69de29..d7567b8 100644 | |||
602 | --- a/hooks/charmhelpers/core/__init__.py | |||
603 | +++ b/hooks/charmhelpers/core/__init__.py | |||
604 | @@ -0,0 +1,13 @@ | |||
605 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
606 | 2 | # | ||
607 | 3 | # Licensed under the Apache License, Version 2.0 (the "License"); | ||
608 | 4 | # you may not use this file except in compliance with the License. | ||
609 | 5 | # You may obtain a copy of the License at | ||
610 | 6 | # | ||
611 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 | ||
612 | 8 | # | ||
613 | 9 | # Unless required by applicable law or agreed to in writing, software | ||
614 | 10 | # distributed under the License is distributed on an "AS IS" BASIS, | ||
615 | 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
616 | 12 | # See the License for the specific language governing permissions and | ||
617 | 13 | # limitations under the License. | ||
618 | diff --git a/hooks/charmhelpers/core/decorators.py b/hooks/charmhelpers/core/decorators.py | |||
619 | 0 | new file mode 100644 | 14 | new file mode 100644 |
620 | index 0000000..6ad41ee | |||
621 | --- /dev/null | |||
622 | +++ b/hooks/charmhelpers/core/decorators.py | |||
623 | @@ -0,0 +1,55 @@ | |||
624 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
625 | 2 | # | ||
626 | 3 | # Licensed under the Apache License, Version 2.0 (the "License"); | ||
627 | 4 | # you may not use this file except in compliance with the License. | ||
628 | 5 | # You may obtain a copy of the License at | ||
629 | 6 | # | ||
630 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 | ||
631 | 8 | # | ||
632 | 9 | # Unless required by applicable law or agreed to in writing, software | ||
633 | 10 | # distributed under the License is distributed on an "AS IS" BASIS, | ||
634 | 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
635 | 12 | # See the License for the specific language governing permissions and | ||
636 | 13 | # limitations under the License. | ||
637 | 14 | |||
638 | 15 | # | ||
639 | 16 | # Copyright 2014 Canonical Ltd. | ||
640 | 17 | # | ||
641 | 18 | # Authors: | ||
642 | 19 | # Edward Hope-Morley <opentastic@gmail.com> | ||
643 | 20 | # | ||
644 | 21 | |||
645 | 22 | import time | ||
646 | 23 | |||
647 | 24 | from charmhelpers.core.hookenv import ( | ||
648 | 25 | log, | ||
649 | 26 | INFO, | ||
650 | 27 | ) | ||
651 | 28 | |||
652 | 29 | |||
def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
    """Decorator: retry the wrapped callable when it raises ``exc_type``.

    The call is attempted up to ``num_retries`` extra times before the
    exception is allowed to propagate.  The sleep between attempts grows
    linearly: base_delay, 2*base_delay, 3*base_delay, ...
    """
    def _decorator(func):
        def _retrying(*args, **kwargs):
            remaining = num_retries
            attempt = 1
            while True:
                try:
                    return func(*args, **kwargs)
                except exc_type:
                    # Out of retries: let the exception propagate.
                    if not remaining:
                        raise
                    delay = base_delay * attempt
                    attempt += 1
                    log("Retrying '%s' %d more times (delay=%s)" %
                        (func.__name__, remaining, delay), level=INFO)
                    remaining -= 1
                    if delay:
                        time.sleep(delay)
        return _retrying
    return _decorator
679 | diff --git a/hooks/charmhelpers/core/files.py b/hooks/charmhelpers/core/files.py | |||
680 | 0 | new file mode 100644 | 56 | new file mode 100644 |
681 | index 0000000..fdd82b7 | |||
682 | --- /dev/null | |||
683 | +++ b/hooks/charmhelpers/core/files.py | |||
684 | @@ -0,0 +1,43 @@ | |||
685 | 1 | #!/usr/bin/env python | ||
686 | 2 | # -*- coding: utf-8 -*- | ||
687 | 3 | |||
688 | 4 | # Copyright 2014-2015 Canonical Limited. | ||
689 | 5 | # | ||
690 | 6 | # Licensed under the Apache License, Version 2.0 (the "License"); | ||
691 | 7 | # you may not use this file except in compliance with the License. | ||
692 | 8 | # You may obtain a copy of the License at | ||
693 | 9 | # | ||
694 | 10 | # http://www.apache.org/licenses/LICENSE-2.0 | ||
695 | 11 | # | ||
696 | 12 | # Unless required by applicable law or agreed to in writing, software | ||
697 | 13 | # distributed under the License is distributed on an "AS IS" BASIS, | ||
698 | 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
699 | 15 | # See the License for the specific language governing permissions and | ||
700 | 16 | # limitations under the License. | ||
701 | 17 | |||
702 | 18 | __author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>' | ||
703 | 19 | |||
704 | 20 | import os | ||
705 | 21 | import subprocess | ||
706 | 22 | |||
707 | 23 | |||
def sed(filename, before, after, flags='g'):
    """
    Search and replaces the given pattern on filename.

    :param filename: relative or absolute file path.
    :param before: expression to be replaced (see 'man sed')
    :param after: expression to replace with (see 'man sed')
    :param flags: sed-compatible regex flags in example, to make
        the search and replace case insensitive, specify ``flags="i"``.
        The ``g`` flag is always specified regardless, so you do not
        need to remember to include it when overriding this parameter.
    :returns: If the sed command exit code was zero then return,
        otherwise raise CalledProcessError.
    """
    target = os.path.expanduser(filename)
    expression = r's/{0}/{1}/{2}'.format(before, after, flags)
    # -i: edit the file in place; -r: extended regular expressions.
    command = ["sed", "-i", "-r", "-e", expression, target]
    return subprocess.check_call(command)
728 | diff --git a/hooks/charmhelpers/core/fstab.py b/hooks/charmhelpers/core/fstab.py | |||
729 | 0 | new file mode 100644 | 44 | new file mode 100644 |
730 | index 0000000..d9fa915 | |||
731 | --- /dev/null | |||
732 | +++ b/hooks/charmhelpers/core/fstab.py | |||
733 | @@ -0,0 +1,132 @@ | |||
734 | 1 | #!/usr/bin/env python | ||
735 | 2 | # -*- coding: utf-8 -*- | ||
736 | 3 | |||
737 | 4 | # Copyright 2014-2015 Canonical Limited. | ||
738 | 5 | # | ||
739 | 6 | # Licensed under the Apache License, Version 2.0 (the "License"); | ||
740 | 7 | # you may not use this file except in compliance with the License. | ||
741 | 8 | # You may obtain a copy of the License at | ||
742 | 9 | # | ||
743 | 10 | # http://www.apache.org/licenses/LICENSE-2.0 | ||
744 | 11 | # | ||
745 | 12 | # Unless required by applicable law or agreed to in writing, software | ||
746 | 13 | # distributed under the License is distributed on an "AS IS" BASIS, | ||
747 | 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
748 | 15 | # See the License for the specific language governing permissions and | ||
749 | 16 | # limitations under the License. | ||
750 | 17 | |||
751 | 18 | import io | ||
752 | 19 | import os | ||
753 | 20 | |||
754 | 21 | __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>' | ||
755 | 22 | |||
756 | 23 | |||
class Fstab(io.FileIO):
    """This class extends file in order to implement a file reader/writer
    for file `/etc/fstab`

    The underlying file is opened in 'rb+' mode, so the instance both
    reads and rewrites the fstab in place.
    """

    class Entry(object):
        """Entry class represents a non-comment line on the `/etc/fstab` file

        NOTE(review): defining __eq__ without __hash__ makes instances
        unhashable on Python 3 — confirm no caller uses entries as dict
        keys or set members.
        """
        def __init__(self, device, mountpoint, filesystem,
                     options, d=0, p=0):
            # d and p mirror the fstab "dump" and "pass" columns.
            self.device = device
            self.mountpoint = mountpoint
            self.filesystem = filesystem

            # An empty/None options field falls back to the fstab default.
            if not options:
                options = "defaults"

            self.options = options
            self.d = int(d)
            self.p = int(p)

        def __eq__(self, o):
            # Two entries are equal when their serialized forms match.
            return str(self) == str(o)

        def __str__(self):
            # Render in the standard fstab column order.
            return "{} {} {} {} {} {}".format(self.device,
                                              self.mountpoint,
                                              self.filesystem,
                                              self.options,
                                              self.d,
                                              self.p)

    DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')

    def __init__(self, path=None):
        # Allow an alternate path, mainly for testing.
        if path:
            self._path = path
        else:
            self._path = self.DEFAULT_PATH
        super(Fstab, self).__init__(self._path, 'rb+')

    def _hydrate_entry(self, line):
        """Parse one fstab line into an Entry."""
        # NOTE: use split with no arguments to split on any
        # whitespace including tabs
        return Fstab.Entry(*filter(
            lambda x: x not in ('', None),
            line.strip("\n").split()))

    @property
    def entries(self):
        """Yield an Entry for every parsable non-comment line."""
        self.seek(0)
        for line in self.readlines():
            line = line.decode('us-ascii')
            try:
                if line.strip() and not line.strip().startswith("#"):
                    yield self._hydrate_entry(line)
            except ValueError:
                # Malformed lines (wrong column count) are skipped silently.
                pass

    def get_entry_by_attr(self, attr, value):
        """Return the first entry whose ``attr`` equals ``value``, or None."""
        for entry in self.entries:
            e_attr = getattr(entry, attr)
            if e_attr == value:
                return entry
        return None

    def add_entry(self, entry):
        """Append ``entry``; return False if the device is already present.

        Iterating ``self.entries`` above leaves the file position at EOF,
        so the write below appends to the file.
        """
        if self.get_entry_by_attr('device', entry.device):
            return False

        self.write((str(entry) + '\n').encode('us-ascii'))
        self.truncate()
        return entry

    def remove_entry(self, entry):
        """Remove the line matching ``entry``; return True on success."""
        self.seek(0)

        lines = [l.decode('us-ascii') for l in self.readlines()]

        found = False
        for index, line in enumerate(lines):
            if line.strip() and not line.strip().startswith("#"):
                if self._hydrate_entry(line) == entry:
                    found = True
                    break

        if not found:
            return False

        lines.remove(line)

        # Rewrite the whole file without the removed line and drop any
        # trailing bytes from the previous, longer content.
        self.seek(0)
        self.write(''.join(lines).encode('us-ascii'))
        self.truncate()
        return True

    @classmethod
    def remove_by_mountpoint(cls, mountpoint, path=None):
        """Remove the entry mounted at ``mountpoint``; return True if removed."""
        fstab = cls(path=path)
        entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
        if entry:
            return fstab.remove_entry(entry)
        return False

    @classmethod
    def add(cls, device, mountpoint, filesystem, options=None, path=None):
        """Convenience constructor+add_entry in one call."""
        return cls(path=path).add_entry(Fstab.Entry(device,
                                                    mountpoint, filesystem,
                                                    options=options))
866 | diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py | |||
867 | index 2b06706..fc57505 100644 | |||
868 | --- a/hooks/charmhelpers/core/hookenv.py | |||
869 | +++ b/hooks/charmhelpers/core/hookenv.py | |||
870 | @@ -1,29 +1,61 @@ | |||
871 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
872 | 2 | # | ||
873 | 3 | # Licensed under the Apache License, Version 2.0 (the "License"); | ||
874 | 4 | # you may not use this file except in compliance with the License. | ||
875 | 5 | # You may obtain a copy of the License at | ||
876 | 6 | # | ||
877 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 | ||
878 | 8 | # | ||
879 | 9 | # Unless required by applicable law or agreed to in writing, software | ||
880 | 10 | # distributed under the License is distributed on an "AS IS" BASIS, | ||
881 | 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
882 | 12 | # See the License for the specific language governing permissions and | ||
883 | 13 | # limitations under the License. | ||
884 | 14 | |||
885 | 1 | "Interactions with the Juju environment" | 15 | "Interactions with the Juju environment" |
886 | 2 | # Copyright 2013 Canonical Ltd. | 16 | # Copyright 2013 Canonical Ltd. |
887 | 3 | # | 17 | # |
888 | 4 | # Authors: | 18 | # Authors: |
889 | 5 | # Charm Helpers Developers <juju@lists.ubuntu.com> | 19 | # Charm Helpers Developers <juju@lists.ubuntu.com> |
890 | 6 | 20 | ||
891 | 21 | from __future__ import print_function | ||
892 | 22 | import copy | ||
893 | 23 | from distutils.version import LooseVersion | ||
894 | 24 | from functools import wraps | ||
895 | 25 | from collections import namedtuple | ||
896 | 26 | import glob | ||
897 | 7 | import os | 27 | import os |
898 | 8 | import json | 28 | import json |
899 | 9 | import yaml | 29 | import yaml |
900 | 30 | import re | ||
901 | 10 | import subprocess | 31 | import subprocess |
903 | 11 | import UserDict | 32 | import sys |
904 | 33 | import errno | ||
905 | 34 | import tempfile | ||
906 | 35 | from subprocess import CalledProcessError | ||
907 | 36 | |||
908 | 37 | import six | ||
909 | 38 | if not six.PY3: | ||
910 | 39 | from UserDict import UserDict | ||
911 | 40 | else: | ||
912 | 41 | from collections import UserDict | ||
913 | 42 | |||
914 | 12 | 43 | ||
915 | 13 | CRITICAL = "CRITICAL" | 44 | CRITICAL = "CRITICAL" |
916 | 14 | ERROR = "ERROR" | 45 | ERROR = "ERROR" |
917 | 15 | WARNING = "WARNING" | 46 | WARNING = "WARNING" |
918 | 16 | INFO = "INFO" | 47 | INFO = "INFO" |
919 | 17 | DEBUG = "DEBUG" | 48 | DEBUG = "DEBUG" |
920 | 49 | TRACE = "TRACE" | ||
921 | 18 | MARKER = object() | 50 | MARKER = object() |
922 | 19 | 51 | ||
cache = {}


def cached(func):
    """Cache return values for multiple executions of func + args

    For example::

        @cached
        def unit_get(attribute):
            pass

        unit_get('test')

    will cache the result of unit_get + 'test' for future calls.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # default=str makes non-JSON-serializable values (like func
        # itself) usable as part of the cache key.
        key = json.dumps((func, args, kwargs), sort_keys=True, default=str)
        if key not in cache:
            cache[key] = func(*args, **kwargs)
        return cache[key]
    # Expose the undecorated function for callers that must bypass the cache.
    wrapper._wrapped = func
    return wrapper
958 | 46 | 81 | ||
959 | 47 | 82 | ||
960 | 48 | def flush(key): | 83 | def flush(key): |
963 | 49 | ''' Flushes any entries from function cache where the | 84 | """Flushes any entries from function cache where the |
964 | 50 | key is found in the function+args ''' | 85 | key is found in the function+args """ |
965 | 51 | flush_list = [] | 86 | flush_list = [] |
966 | 52 | for item in cache: | 87 | for item in cache: |
967 | 53 | if key in item: | 88 | if key in item: |
968 | @@ -57,20 +92,33 @@ def flush(key): | |||
969 | 57 | 92 | ||
970 | 58 | 93 | ||
def log(message, level=None):
    """Write a message to the juju log"""
    # juju-log only accepts strings; repr() anything else.
    if not isinstance(message, six.string_types):
        message = repr(message)
    command = ['juju-log']
    if level:
        command.extend(['-l', level])
    command.append(message)
    # Missing juju-log should not cause failures in unit tests
    # Send log output to stderr
    try:
        subprocess.call(command)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
        if level:
            message = "{}: {}".format(level, message)
        print("juju-log: {}".format(message), file=sys.stderr)
993 | 66 | 114 | ||
994 | 67 | 115 | ||
997 | 68 | class Serializable(UserDict.IterableUserDict): | 116 | class Serializable(UserDict): |
998 | 69 | "Wrapper, an object that can be serialized to yaml or json" | 117 | """Wrapper, an object that can be serialized to yaml or json""" |
999 | 70 | 118 | ||
1000 | 71 | def __init__(self, obj): | 119 | def __init__(self, obj): |
1001 | 72 | # wrap the object | 120 | # wrap the object |
1003 | 73 | UserDict.IterableUserDict.__init__(self) | 121 | UserDict.__init__(self) |
1004 | 74 | self.data = obj | 122 | self.data = obj |
1005 | 75 | 123 | ||
1006 | 76 | def __getattr__(self, attr): | 124 | def __getattr__(self, attr): |
1007 | @@ -96,11 +144,11 @@ class Serializable(UserDict.IterableUserDict): | |||
1008 | 96 | self.data = state | 144 | self.data = state |
1009 | 97 | 145 | ||
1010 | 98 | def json(self): | 146 | def json(self): |
1012 | 99 | "Serialize the object to json" | 147 | """Serialize the object to json""" |
1013 | 100 | return json.dumps(self.data) | 148 | return json.dumps(self.data) |
1014 | 101 | 149 | ||
1015 | 102 | def yaml(self): | 150 | def yaml(self): |
1017 | 103 | "Serialize the object to yaml" | 151 | """Serialize the object to yaml""" |
1018 | 104 | return yaml.dump(self.data) | 152 | return yaml.dump(self.data) |
1019 | 105 | 153 | ||
1020 | 106 | 154 | ||
1021 | @@ -119,50 +167,261 @@ def execution_environment(): | |||
1022 | 119 | 167 | ||
1023 | 120 | 168 | ||
def in_relation_hook():
    """Determine whether we're running in a relation hook."""
    # Juju exports JUJU_RELATION only while a relation hook executes.
    return os.environ.get('JUJU_RELATION') is not None
1028 | 124 | 172 | ||
1029 | 125 | 173 | ||
def relation_type():
    """The scope for the current relation hook (None outside one)."""
    return os.environ.get('JUJU_RELATION')
1034 | 129 | 177 | ||
1035 | 130 | 178 | ||
@cached
def relation_id(relation_name=None, service_or_unit=None):
    """The relation ID for the current or a specified relation.

    With no arguments, returns JUJU_RELATION_ID from the hook
    environment (None outside a relation hook).  With both
    ``relation_name`` and ``service_or_unit``, searches the named
    relation for the relation connected to that service and returns its
    id, or None when no relation to that service exists.

    :raises ValueError: if exactly one of the two arguments is given.
    """
    if not relation_name and not service_or_unit:
        return os.environ.get('JUJU_RELATION_ID', None)
    elif relation_name and service_or_unit:
        # NOTE: local name shadows the module-level service_name() function.
        service_name = service_or_unit.split('/')[0]
        for relid in relation_ids(relation_name):
            remote_service = remote_service_name(relid)
            if remote_service == service_name:
                return relid
        # Falls through to an implicit None when no relation matches.
    else:
        raise ValueError('Must specify neither or both of relation_name and service_or_unit')
1052 | 134 | 192 | ||
1053 | 135 | 193 | ||
def local_unit():
    """Local unit ID"""
    # JUJU_UNIT_NAME is always set in a hook environment; a missing
    # variable surfaces as KeyError rather than being masked.
    unit_name = os.environ['JUJU_UNIT_NAME']
    return unit_name
1058 | 139 | 197 | ||
1059 | 140 | 198 | ||
def remote_unit():
    """The remote unit for the current relation hook (None outside one)."""
    return os.environ.get('JUJU_REMOTE_UNIT')
1065 | 144 | 202 | ||
1066 | 145 | 203 | ||
def application_name():
    """
    The name of the deployed application this unit belongs to.
    """
    # Unit names have the form "<application>/<number>".
    unit = local_unit()
    return unit.split('/')[0]
1074 | 149 | 209 | ||
1075 | 150 | 210 | ||
def service_name():
    """
    .. deprecated:: 0.19.1
       Alias for :func:`application_name`.
    """
    # Kept for backward compatibility; new code should call
    # application_name() directly.
    return application_name()
1082 | 217 | |||
1083 | 218 | |||
def model_name():
    """Name of the Juju model that this unit is deployed in."""
    # Raises KeyError outside of a hook environment.
    env_key = 'JUJU_MODEL_NAME'
    return os.environ[env_key]
1089 | 224 | |||
1090 | 225 | |||
def model_uuid():
    """UUID of the Juju model that this unit is deployed in."""
    # Raises KeyError outside of a hook environment.
    env_key = 'JUJU_MODEL_UUID'
    return os.environ[env_key]
1096 | 231 | |||
1097 | 232 | |||
def principal_unit():
    """Returns the principal unit of this unit, otherwise None"""
    # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT
    principal_unit = os.environ.get('JUJU_PRINCIPAL_UNIT', None)
    # If it's empty, then this unit is the principal
    if principal_unit == '':
        return os.environ['JUJU_UNIT_NAME']
    elif principal_unit is not None:
        return principal_unit
    # For Juju 2.1 and below, try to work out the principal unit from
    # the various charms' metadata.yaml: the first related unit whose
    # metadata does not declare it a subordinate wins.
    for reltype in relation_types():
        for rid in relation_ids(reltype):
            for unit in related_units(rid):
                md = _metadata_unit(unit)
                if not md:
                    continue
                subordinate = md.pop('subordinate', None)
                if not subordinate:
                    return unit
    # No principal could be determined (e.g. no relations yet).
    return None
1119 | 254 | |||
1120 | 255 | |||
@cached
def remote_service_name(relid=None):
    """The remote service name for a given relation-id (or the current relation)

    :param relid: relation id to inspect; None means use the hook's
        current remote unit.
    :returns: service name string, or None when no remote unit exists.
    """
    if relid is None:
        unit = remote_unit()
    else:
        # Derive the service name from the first related unit on relid.
        units = related_units(relid)
        unit = units[0] if units else None
    return unit.split('/')[0] if unit else None
1130 | 265 | |||
1131 | 266 | |||
def hook_name():
    """The name of the currently executing hook"""
    # Juju sets JUJU_HOOK_NAME; when it is absent (e.g. the hook script
    # is run directly) fall back to the executable's basename.
    fallback = os.path.basename(sys.argv[0])
    return os.environ.get('JUJU_HOOK_NAME', fallback)
1135 | 270 | |||
1136 | 271 | |||
class Config(dict):
    """A dictionary representation of the charm's config.yaml, with some
    extra features:

    - See which values in the dictionary have changed since the previous hook.
    - For values that have changed, see what the previous value was.
    - Store arbitrary data for use in a later hook.

    NOTE: Do not instantiate this object directly - instead call
    ``hookenv.config()``, which will return an instance of :class:`Config`.

    Example usage::

        >>> # inside a hook
        >>> from charmhelpers.core import hookenv
        >>> config = hookenv.config()
        >>> config['foo']
        'bar'
        >>> # store a new key/value for later use
        >>> config['mykey'] = 'myval'


        >>> # user runs `juju set mycharm foo=baz`
        >>> # now we're inside subsequent config-changed hook
        >>> config = hookenv.config()
        >>> config['foo']
        'baz'
        >>> # test to see if this val has changed since last hook
        >>> config.changed('foo')
        True
        >>> # what was the previous value?
        >>> config.previous('foo')
        'bar'
        >>> # keys/values that we add are preserved across hooks
        >>> config['mykey']
        'myval'

    """
    # File (relative to the charm dir) used to persist config between hooks.
    CONFIG_FILE_NAME = '.juju-persistent-config'

    def __init__(self, *args, **kw):
        super(Config, self).__init__(*args, **kw)
        self.implicit_save = True
        self._prev_dict = None
        self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
        if os.path.exists(self.path) and os.stat(self.path).st_size:
            self.load_previous()
        # NOTE(review): `atexit` here appears to be hookenv's own
        # end-of-hook callback registry (the stdlib atexit module is not
        # callable) — confirm against the full module.
        atexit(self._implicit_save)

    def load_previous(self, path=None):
        """Load previous copy of config from disk.

        In normal usage you don't need to call this method directly - it
        is called automatically at object initialization.

        :param path:

            File path from which to load the previous config. If `None`,
            config is loaded from the default location. If `path` is
            specified, subsequent `save()` calls will write to the same
            path.

        """
        self.path = path or self.path
        with open(self.path) as f:
            try:
                self._prev_dict = json.load(f)
            except ValueError as e:
                log('Unable to parse previous config data - {}'.format(str(e)),
                    level=ERROR)
        # NOTE(review): if the file failed to parse, _prev_dict may still
        # be None here and the .items() call below will raise — confirm
        # intended behavior.
        for k, v in copy.deepcopy(self._prev_dict).items():
            if k not in self:
                self[k] = v

    def changed(self, key):
        """Return True if the current value for this key is different from
        the previous value.

        """
        # With no previous config at all, everything counts as changed.
        if self._prev_dict is None:
            return True
        return self.previous(key) != self.get(key)

    def previous(self, key):
        """Return previous value for this key, or None if there
        is no previous value.

        """
        if self._prev_dict:
            return self._prev_dict.get(key)
        return None

    def save(self):
        """Save this config to disk.

        If the charm is using the :mod:`Services Framework <services.base>`
        or :meth:'@hook <Hooks.hook>' decorator, this
        is called automatically at the end of successful hook execution.
        Otherwise, it should be called directly by user code.

        To disable automatic saves, set ``implicit_save=False`` on this
        instance.

        """
        with open(self.path, 'w') as f:
            # Restrict the persisted config to owner read/write only.
            os.fchmod(f.fileno(), 0o600)
            json.dump(self, f)

    def _implicit_save(self):
        # Honors the implicit_save flag set in __init__ / by user code.
        if self.implicit_save:
            self.save()
1248 | 383 | |||
1249 | 384 | |||
1250 | 385 | _cache_config = None | ||
1251 | 386 | |||
1252 | 387 | |||
1253 | 152 | def config(scope=None): | 388 | def config(scope=None): |
1259 | 153 | "Juju charm configuration" | 389 | """ |
1260 | 154 | config_cmd_line = ['config-get'] | 390 | Get the juju charm configuration (scope==None) or individual key, |
1261 | 155 | if scope is not None: | 391 | (scope=str). The returned value is a Python data structure loaded as |
1262 | 156 | config_cmd_line.append(scope) | 392 | JSON from the Juju config command. |
1263 | 157 | config_cmd_line.append('--format=json') | 393 | |
1264 | 394 | :param scope: If set, return the value for the specified key. | ||
1265 | 395 | :type scope: Optional[str] | ||
1266 | 396 | :returns: Either the whole config as a Config, or a key from it. | ||
1267 | 397 | :rtype: Any | ||
1268 | 398 | """ | ||
1269 | 399 | global _cache_config | ||
1270 | 400 | config_cmd_line = ['config-get', '--all', '--format=json'] | ||
1271 | 158 | try: | 401 | try: |
1274 | 159 | return json.loads(subprocess.check_output(config_cmd_line)) | 402 | # JSON Decode Exception for Python3.5+ |
1275 | 160 | except ValueError: | 403 | exc_json = json.decoder.JSONDecodeError |
1276 | 404 | except AttributeError: | ||
1277 | 405 | # JSON Decode Exception for Python2.7 through Python3.4 | ||
1278 | 406 | exc_json = ValueError | ||
1279 | 407 | try: | ||
1280 | 408 | if _cache_config is None: | ||
1281 | 409 | config_data = json.loads( | ||
1282 | 410 | subprocess.check_output(config_cmd_line).decode('UTF-8')) | ||
1283 | 411 | _cache_config = Config(config_data) | ||
1284 | 412 | if scope is not None: | ||
1285 | 413 | return _cache_config.get(scope) | ||
1286 | 414 | return _cache_config | ||
1287 | 415 | except (exc_json, UnicodeDecodeError) as e: | ||
1288 | 416 | log('Unable to parse output from config-get: config_cmd_line="{}" ' | ||
1289 | 417 | 'message="{}"' | ||
1290 | 418 | .format(config_cmd_line, str(e)), level=ERROR) | ||
1291 | 161 | return None | 419 | return None |
1292 | 162 | 420 | ||
1293 | 163 | 421 | ||
1294 | 164 | @cached | 422 | @cached |
1295 | 165 | def relation_get(attribute=None, unit=None, rid=None): | 423 | def relation_get(attribute=None, unit=None, rid=None): |
1296 | 424 | """Get relation information""" | ||
1297 | 166 | _args = ['relation-get', '--format=json'] | 425 | _args = ['relation-get', '--format=json'] |
1298 | 167 | if rid: | 426 | if rid: |
1299 | 168 | _args.append('-r') | 427 | _args.append('-r') |
1300 | @@ -171,49 +430,88 @@ def relation_get(attribute=None, unit=None, rid=None): | |||
1301 | 171 | if unit: | 430 | if unit: |
1302 | 172 | _args.append(unit) | 431 | _args.append(unit) |
1303 | 173 | try: | 432 | try: |
1305 | 174 | return json.loads(subprocess.check_output(_args)) | 433 | return json.loads(subprocess.check_output(_args).decode('UTF-8')) |
1306 | 175 | except ValueError: | 434 | except ValueError: |
1307 | 176 | return None | 435 | return None |
1308 | 436 | except CalledProcessError as e: | ||
1309 | 437 | if e.returncode == 2: | ||
1310 | 438 | return None | ||
1311 | 439 | raise | ||
1312 | 177 | 440 | ||
1313 | 178 | 441 | ||
1315 | 179 | def relation_set(relation_id=None, relation_settings={}, **kwargs): | 442 | def relation_set(relation_id=None, relation_settings=None, **kwargs): |
1316 | 443 | """Set relation information for the current unit""" | ||
1317 | 444 | relation_settings = relation_settings if relation_settings else {} | ||
1318 | 180 | relation_cmd_line = ['relation-set'] | 445 | relation_cmd_line = ['relation-set'] |
1319 | 446 | accepts_file = "--file" in subprocess.check_output( | ||
1320 | 447 | relation_cmd_line + ["--help"], universal_newlines=True) | ||
1321 | 181 | if relation_id is not None: | 448 | if relation_id is not None: |
1322 | 182 | relation_cmd_line.extend(('-r', relation_id)) | 449 | relation_cmd_line.extend(('-r', relation_id)) |
1329 | 183 | for k, v in (relation_settings.items() + kwargs.items()): | 450 | settings = relation_settings.copy() |
1330 | 184 | if v is None: | 451 | settings.update(kwargs) |
1331 | 185 | relation_cmd_line.append('{}='.format(k)) | 452 | for key, value in settings.items(): |
1332 | 186 | else: | 453 | # Force value to be a string: it always should, but some call |
1333 | 187 | relation_cmd_line.append('{}={}'.format(k, v)) | 454 | # sites pass in things like dicts or numbers. |
1334 | 188 | subprocess.check_call(relation_cmd_line) | 455 | if value is not None: |
1335 | 456 | settings[key] = "{}".format(value) | ||
1336 | 457 | if accepts_file: | ||
1337 | 458 | # --file was introduced in Juju 1.23.2. Use it by default if | ||
1338 | 459 | # available, since otherwise we'll break if the relation data is | ||
1339 | 460 | # too big. Ideally we should tell relation-set to read the data from | ||
1340 | 461 | # stdin, but that feature is broken in 1.23.2: Bug #1454678. | ||
1341 | 462 | with tempfile.NamedTemporaryFile(delete=False) as settings_file: | ||
1342 | 463 | settings_file.write(yaml.safe_dump(settings).encode("utf-8")) | ||
1343 | 464 | subprocess.check_call( | ||
1344 | 465 | relation_cmd_line + ["--file", settings_file.name]) | ||
1345 | 466 | os.remove(settings_file.name) | ||
1346 | 467 | else: | ||
1347 | 468 | for key, value in settings.items(): | ||
1348 | 469 | if value is None: | ||
1349 | 470 | relation_cmd_line.append('{}='.format(key)) | ||
1350 | 471 | else: | ||
1351 | 472 | relation_cmd_line.append('{}={}'.format(key, value)) | ||
1352 | 473 | subprocess.check_call(relation_cmd_line) | ||
1353 | 189 | # Flush cache of any relation-gets for local unit | 474 | # Flush cache of any relation-gets for local unit |
1354 | 190 | flush(local_unit()) | 475 | flush(local_unit()) |
1355 | 191 | 476 | ||
1356 | 192 | 477 | ||
1357 | 478 | def relation_clear(r_id=None): | ||
1358 | 479 | ''' Clears any relation data already set on relation r_id ''' | ||
1359 | 480 | settings = relation_get(rid=r_id, | ||
1360 | 481 | unit=local_unit()) | ||
1361 | 482 | for setting in settings: | ||
1362 | 483 | if setting not in ['public-address', 'private-address']: | ||
1363 | 484 | settings[setting] = None | ||
1364 | 485 | relation_set(relation_id=r_id, | ||
1365 | 486 | **settings) | ||
1366 | 487 | |||
1367 | 488 | |||
1368 | 193 | @cached | 489 | @cached |
1369 | 194 | def relation_ids(reltype=None): | 490 | def relation_ids(reltype=None): |
1371 | 195 | "A list of relation_ids" | 491 | """A list of relation_ids""" |
1372 | 196 | reltype = reltype or relation_type() | 492 | reltype = reltype or relation_type() |
1373 | 197 | relid_cmd_line = ['relation-ids', '--format=json'] | 493 | relid_cmd_line = ['relation-ids', '--format=json'] |
1374 | 198 | if reltype is not None: | 494 | if reltype is not None: |
1375 | 199 | relid_cmd_line.append(reltype) | 495 | relid_cmd_line.append(reltype) |
1377 | 200 | return json.loads(subprocess.check_output(relid_cmd_line)) or [] | 496 | return json.loads( |
1378 | 497 | subprocess.check_output(relid_cmd_line).decode('UTF-8')) or [] | ||
1379 | 201 | return [] | 498 | return [] |
1380 | 202 | 499 | ||
1381 | 203 | 500 | ||
1382 | 204 | @cached | 501 | @cached |
1383 | 205 | def related_units(relid=None): | 502 | def related_units(relid=None): |
1385 | 206 | "A list of related units" | 503 | """A list of related units""" |
1386 | 207 | relid = relid or relation_id() | 504 | relid = relid or relation_id() |
1387 | 208 | units_cmd_line = ['relation-list', '--format=json'] | 505 | units_cmd_line = ['relation-list', '--format=json'] |
1388 | 209 | if relid is not None: | 506 | if relid is not None: |
1389 | 210 | units_cmd_line.extend(('-r', relid)) | 507 | units_cmd_line.extend(('-r', relid)) |
1391 | 211 | return json.loads(subprocess.check_output(units_cmd_line)) or [] | 508 | return json.loads( |
1392 | 509 | subprocess.check_output(units_cmd_line).decode('UTF-8')) or [] | ||
1393 | 212 | 510 | ||
1394 | 213 | 511 | ||
1395 | 214 | @cached | 512 | @cached |
1396 | 215 | def relation_for_unit(unit=None, rid=None): | 513 | def relation_for_unit(unit=None, rid=None): |
1398 | 216 | "Get the json represenation of a unit's relation" | 514 | """Get the json represenation of a unit's relation""" |
1399 | 217 | unit = unit or remote_unit() | 515 | unit = unit or remote_unit() |
1400 | 218 | relation = relation_get(unit=unit, rid=rid) | 516 | relation = relation_get(unit=unit, rid=rid) |
1401 | 219 | for key in relation: | 517 | for key in relation: |
1402 | @@ -225,7 +523,7 @@ def relation_for_unit(unit=None, rid=None): | |||
1403 | 225 | 523 | ||
1404 | 226 | @cached | 524 | @cached |
1405 | 227 | def relations_for_id(relid=None): | 525 | def relations_for_id(relid=None): |
1407 | 228 | "Get relations of a specific relation ID" | 526 | """Get relations of a specific relation ID""" |
1408 | 229 | relation_data = [] | 527 | relation_data = [] |
1409 | 230 | relid = relid or relation_ids() | 528 | relid = relid or relation_ids() |
1410 | 231 | for unit in related_units(relid): | 529 | for unit in related_units(relid): |
1411 | @@ -237,7 +535,7 @@ def relations_for_id(relid=None): | |||
1412 | 237 | 535 | ||
1413 | 238 | @cached | 536 | @cached |
1414 | 239 | def relations_of_type(reltype=None): | 537 | def relations_of_type(reltype=None): |
1416 | 240 | "Get relations of a specific type" | 538 | """Get relations of a specific type""" |
1417 | 241 | relation_data = [] | 539 | relation_data = [] |
1418 | 242 | reltype = reltype or relation_type() | 540 | reltype = reltype or relation_type() |
1419 | 243 | for relid in relation_ids(reltype): | 541 | for relid in relation_ids(reltype): |
1420 | @@ -248,22 +546,121 @@ def relations_of_type(reltype=None): | |||
1421 | 248 | 546 | ||
1422 | 249 | 547 | ||
1423 | 250 | @cached | 548 | @cached |
1424 | 549 | def metadata(): | ||
1425 | 550 | """Get the current charm metadata.yaml contents as a python object""" | ||
1426 | 551 | with open(os.path.join(charm_dir(), 'metadata.yaml')) as md: | ||
1427 | 552 | return yaml.safe_load(md) | ||
1428 | 553 | |||
1429 | 554 | |||
1430 | 555 | def _metadata_unit(unit): | ||
1431 | 556 | """Given the name of a unit (e.g. apache2/0), get the unit charm's | ||
1432 | 557 | metadata.yaml. Very similar to metadata() but allows us to inspect | ||
1433 | 558 | other units. Unit needs to be co-located, such as a subordinate or | ||
1434 | 559 | principal/primary. | ||
1435 | 560 | |||
1436 | 561 | :returns: metadata.yaml as a python object. | ||
1437 | 562 | |||
1438 | 563 | """ | ||
1439 | 564 | basedir = os.sep.join(charm_dir().split(os.sep)[:-2]) | ||
1440 | 565 | unitdir = 'unit-{}'.format(unit.replace(os.sep, '-')) | ||
1441 | 566 | joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml') | ||
1442 | 567 | if not os.path.exists(joineddir): | ||
1443 | 568 | return None | ||
1444 | 569 | with open(joineddir) as md: | ||
1445 | 570 | return yaml.safe_load(md) | ||
1446 | 571 | |||
1447 | 572 | |||
1448 | 573 | @cached | ||
1449 | 251 | def relation_types(): | 574 | def relation_types(): |
1454 | 252 | "Get a list of relation types supported by this charm" | 575 | """Get a list of relation types supported by this charm""" |
1451 | 253 | charmdir = os.environ.get('CHARM_DIR', '') | ||
1452 | 254 | mdf = open(os.path.join(charmdir, 'metadata.yaml')) | ||
1453 | 255 | md = yaml.safe_load(mdf) | ||
1455 | 256 | rel_types = [] | 576 | rel_types = [] |
1456 | 577 | md = metadata() | ||
1457 | 257 | for key in ('provides', 'requires', 'peers'): | 578 | for key in ('provides', 'requires', 'peers'): |
1458 | 258 | section = md.get(key) | 579 | section = md.get(key) |
1459 | 259 | if section: | 580 | if section: |
1460 | 260 | rel_types.extend(section.keys()) | 581 | rel_types.extend(section.keys()) |
1461 | 261 | mdf.close() | ||
1462 | 262 | return rel_types | 582 | return rel_types |
1463 | 263 | 583 | ||
1464 | 264 | 584 | ||
1465 | 265 | @cached | 585 | @cached |
1466 | 586 | def peer_relation_id(): | ||
1467 | 587 | '''Get the peers relation id if a peers relation has been joined, else None.''' | ||
1468 | 588 | md = metadata() | ||
1469 | 589 | section = md.get('peers') | ||
1470 | 590 | if section: | ||
1471 | 591 | for key in section: | ||
1472 | 592 | relids = relation_ids(key) | ||
1473 | 593 | if relids: | ||
1474 | 594 | return relids[0] | ||
1475 | 595 | return None | ||
1476 | 596 | |||
1477 | 597 | |||
1478 | 598 | @cached | ||
1479 | 599 | def relation_to_interface(relation_name): | ||
1480 | 600 | """ | ||
1481 | 601 | Given the name of a relation, return the interface that relation uses. | ||
1482 | 602 | |||
1483 | 603 | :returns: The interface name, or ``None``. | ||
1484 | 604 | """ | ||
1485 | 605 | return relation_to_role_and_interface(relation_name)[1] | ||
1486 | 606 | |||
1487 | 607 | |||
1488 | 608 | @cached | ||
1489 | 609 | def relation_to_role_and_interface(relation_name): | ||
1490 | 610 | """ | ||
1491 | 611 | Given the name of a relation, return the role and the name of the interface | ||
1492 | 612 | that relation uses (where role is one of ``provides``, ``requires``, or ``peers``). | ||
1493 | 613 | |||
1494 | 614 | :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. | ||
1495 | 615 | """ | ||
1496 | 616 | _metadata = metadata() | ||
1497 | 617 | for role in ('provides', 'requires', 'peers'): | ||
1498 | 618 | interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') | ||
1499 | 619 | if interface: | ||
1500 | 620 | return role, interface | ||
1501 | 621 | return None, None | ||
1502 | 622 | |||
1503 | 623 | |||
1504 | 624 | @cached | ||
1505 | 625 | def role_and_interface_to_relations(role, interface_name): | ||
1506 | 626 | """ | ||
1507 | 627 | Given a role and interface name, return a list of relation names for the | ||
1508 | 628 | current charm that use that interface under that role (where role is one | ||
1509 | 629 | of ``provides``, ``requires``, or ``peers``). | ||
1510 | 630 | |||
1511 | 631 | :returns: A list of relation names. | ||
1512 | 632 | """ | ||
1513 | 633 | _metadata = metadata() | ||
1514 | 634 | results = [] | ||
1515 | 635 | for relation_name, relation in _metadata.get(role, {}).items(): | ||
1516 | 636 | if relation['interface'] == interface_name: | ||
1517 | 637 | results.append(relation_name) | ||
1518 | 638 | return results | ||
1519 | 639 | |||
1520 | 640 | |||
1521 | 641 | @cached | ||
1522 | 642 | def interface_to_relations(interface_name): | ||
1523 | 643 | """ | ||
1524 | 644 | Given an interface, return a list of relation names for the current | ||
1525 | 645 | charm that use that interface. | ||
1526 | 646 | |||
1527 | 647 | :returns: A list of relation names. | ||
1528 | 648 | """ | ||
1529 | 649 | results = [] | ||
1530 | 650 | for role in ('provides', 'requires', 'peers'): | ||
1531 | 651 | results.extend(role_and_interface_to_relations(role, interface_name)) | ||
1532 | 652 | return results | ||
1533 | 653 | |||
1534 | 654 | |||
1535 | 655 | @cached | ||
1536 | 656 | def charm_name(): | ||
1537 | 657 | """Get the name of the current charm as is specified on metadata.yaml""" | ||
1538 | 658 | return metadata().get('name') | ||
1539 | 659 | |||
1540 | 660 | |||
1541 | 661 | @cached | ||
1542 | 266 | def relations(): | 662 | def relations(): |
1543 | 663 | """Get a nested dictionary of relation data for all related units""" | ||
1544 | 267 | rels = {} | 664 | rels = {} |
1545 | 268 | for reltype in relation_types(): | 665 | for reltype in relation_types(): |
1546 | 269 | relids = {} | 666 | relids = {} |
1547 | @@ -277,53 +674,187 @@ def relations(): | |||
1548 | 277 | return rels | 674 | return rels |
1549 | 278 | 675 | ||
1550 | 279 | 676 | ||
1551 | 677 | @cached | ||
1552 | 678 | def is_relation_made(relation, keys='private-address'): | ||
1553 | 679 | ''' | ||
1554 | 680 | Determine whether a relation is established by checking for | ||
1555 | 681 | presence of key(s). If a list of keys is provided, they | ||
1556 | 682 | must all be present for the relation to be identified as made | ||
1557 | 683 | ''' | ||
1558 | 684 | if isinstance(keys, str): | ||
1559 | 685 | keys = [keys] | ||
1560 | 686 | for r_id in relation_ids(relation): | ||
1561 | 687 | for unit in related_units(r_id): | ||
1562 | 688 | context = {} | ||
1563 | 689 | for k in keys: | ||
1564 | 690 | context[k] = relation_get(k, rid=r_id, | ||
1565 | 691 | unit=unit) | ||
1566 | 692 | if None not in context.values(): | ||
1567 | 693 | return True | ||
1568 | 694 | return False | ||
1569 | 695 | |||
1570 | 696 | |||
1571 | 697 | def _port_op(op_name, port, protocol="TCP"): | ||
1572 | 698 | """Open or close a service network port""" | ||
1573 | 699 | _args = [op_name] | ||
1574 | 700 | icmp = protocol.upper() == "ICMP" | ||
1575 | 701 | if icmp: | ||
1576 | 702 | _args.append(protocol) | ||
1577 | 703 | else: | ||
1578 | 704 | _args.append('{}/{}'.format(port, protocol)) | ||
1579 | 705 | try: | ||
1580 | 706 | subprocess.check_call(_args) | ||
1581 | 707 | except subprocess.CalledProcessError: | ||
1582 | 708 | # Older Juju pre 2.3 doesn't support ICMP | ||
1583 | 709 | # so treat it as a no-op if it fails. | ||
1584 | 710 | if not icmp: | ||
1585 | 711 | raise | ||
1586 | 712 | |||
1587 | 713 | |||
1588 | 280 | def open_port(port, protocol="TCP"): | 714 | def open_port(port, protocol="TCP"): |
1590 | 281 | "Open a service network port" | 715 | """Open a service network port""" |
1591 | 716 | _port_op('open-port', port, protocol) | ||
1592 | 717 | |||
1593 | 718 | |||
1594 | 719 | def close_port(port, protocol="TCP"): | ||
1595 | 720 | """Close a service network port""" | ||
1596 | 721 | _port_op('close-port', port, protocol) | ||
1597 | 722 | |||
1598 | 723 | |||
1599 | 724 | def open_ports(start, end, protocol="TCP"): | ||
1600 | 725 | """Opens a range of service network ports""" | ||
1601 | 282 | _args = ['open-port'] | 726 | _args = ['open-port'] |
1603 | 283 | _args.append('{}/{}'.format(port, protocol)) | 727 | _args.append('{}-{}/{}'.format(start, end, protocol)) |
1604 | 284 | subprocess.check_call(_args) | 728 | subprocess.check_call(_args) |
1605 | 285 | 729 | ||
1606 | 286 | 730 | ||
1609 | 287 | def close_port(port, protocol="TCP"): | 731 | def close_ports(start, end, protocol="TCP"): |
1610 | 288 | "Close a service network port" | 732 | """Close a range of service network ports""" |
1611 | 289 | _args = ['close-port'] | 733 | _args = ['close-port'] |
1613 | 290 | _args.append('{}/{}'.format(port, protocol)) | 734 | _args.append('{}-{}/{}'.format(start, end, protocol)) |
1614 | 291 | subprocess.check_call(_args) | 735 | subprocess.check_call(_args) |
1615 | 292 | 736 | ||
1616 | 293 | 737 | ||
1617 | 738 | def opened_ports(): | ||
1618 | 739 | """Get the opened ports | ||
1619 | 740 | |||
1620 | 741 | *Note that this will only show ports opened in a previous hook* | ||
1621 | 742 | |||
1622 | 743 | :returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']`` | ||
1623 | 744 | """ | ||
1624 | 745 | _args = ['opened-ports', '--format=json'] | ||
1625 | 746 | return json.loads(subprocess.check_output(_args).decode('UTF-8')) | ||
1626 | 747 | |||
1627 | 748 | |||
1628 | 294 | @cached | 749 | @cached |
1629 | 295 | def unit_get(attribute): | 750 | def unit_get(attribute): |
1630 | 751 | """Get the unit ID for the remote unit""" | ||
1631 | 296 | _args = ['unit-get', '--format=json', attribute] | 752 | _args = ['unit-get', '--format=json', attribute] |
1632 | 297 | try: | 753 | try: |
1634 | 298 | return json.loads(subprocess.check_output(_args)) | 754 | return json.loads(subprocess.check_output(_args).decode('UTF-8')) |
1635 | 299 | except ValueError: | 755 | except ValueError: |
1636 | 300 | return None | 756 | return None |
1637 | 301 | 757 | ||
1638 | 302 | 758 | ||
1639 | 759 | def unit_public_ip(): | ||
1640 | 760 | """Get this unit's public IP address""" | ||
1641 | 761 | return unit_get('public-address') | ||
1642 | 762 | |||
1643 | 763 | |||
1644 | 303 | def unit_private_ip(): | 764 | def unit_private_ip(): |
1645 | 765 | """Get this unit's private IP address""" | ||
1646 | 304 | return unit_get('private-address') | 766 | return unit_get('private-address') |
1647 | 305 | 767 | ||
1648 | 306 | 768 | ||
1649 | 769 | @cached | ||
1650 | 770 | def storage_get(attribute=None, storage_id=None): | ||
1651 | 771 | """Get storage attributes""" | ||
1652 | 772 | _args = ['storage-get', '--format=json'] | ||
1653 | 773 | if storage_id: | ||
1654 | 774 | _args.extend(('-s', storage_id)) | ||
1655 | 775 | if attribute: | ||
1656 | 776 | _args.append(attribute) | ||
1657 | 777 | try: | ||
1658 | 778 | return json.loads(subprocess.check_output(_args).decode('UTF-8')) | ||
1659 | 779 | except ValueError: | ||
1660 | 780 | return None | ||
1661 | 781 | |||
1662 | 782 | |||
1663 | 783 | @cached | ||
1664 | 784 | def storage_list(storage_name=None): | ||
1665 | 785 | """List the storage IDs for the unit""" | ||
1666 | 786 | _args = ['storage-list', '--format=json'] | ||
1667 | 787 | if storage_name: | ||
1668 | 788 | _args.append(storage_name) | ||
1669 | 789 | try: | ||
1670 | 790 | return json.loads(subprocess.check_output(_args).decode('UTF-8')) | ||
1671 | 791 | except ValueError: | ||
1672 | 792 | return None | ||
1673 | 793 | except OSError as e: | ||
1674 | 794 | import errno | ||
1675 | 795 | if e.errno == errno.ENOENT: | ||
1676 | 796 | # storage-list does not exist | ||
1677 | 797 | return [] | ||
1678 | 798 | raise | ||
1679 | 799 | |||
1680 | 800 | |||
1681 | 307 | class UnregisteredHookError(Exception): | 801 | class UnregisteredHookError(Exception): |
1682 | 802 | """Raised when an undefined hook is called""" | ||
1683 | 308 | pass | 803 | pass |
1684 | 309 | 804 | ||
1685 | 310 | 805 | ||
1686 | 311 | class Hooks(object): | 806 | class Hooks(object): |
1688 | 312 | def __init__(self): | 807 | """A convenient handler for hook functions. |
1689 | 808 | |||
1690 | 809 | Example:: | ||
1691 | 810 | |||
1692 | 811 | hooks = Hooks() | ||
1693 | 812 | |||
1694 | 813 | # register a hook, taking its name from the function name | ||
1695 | 814 | @hooks.hook() | ||
1696 | 815 | def install(): | ||
1697 | 816 | pass # your code here | ||
1698 | 817 | |||
1699 | 818 | # register a hook, providing a custom hook name | ||
1700 | 819 | @hooks.hook("config-changed") | ||
1701 | 820 | def config_changed(): | ||
1702 | 821 | pass # your code here | ||
1703 | 822 | |||
1704 | 823 | if __name__ == "__main__": | ||
1705 | 824 | # execute a hook based on the name the program is called by | ||
1706 | 825 | hooks.execute(sys.argv) | ||
1707 | 826 | """ | ||
1708 | 827 | |||
1709 | 828 | def __init__(self, config_save=None): | ||
1710 | 313 | super(Hooks, self).__init__() | 829 | super(Hooks, self).__init__() |
1711 | 314 | self._hooks = {} | 830 | self._hooks = {} |
1712 | 315 | 831 | ||
1713 | 832 | # For unknown reasons, we allow the Hooks constructor to override | ||
1714 | 833 | # config().implicit_save. | ||
1715 | 834 | if config_save is not None: | ||
1716 | 835 | config().implicit_save = config_save | ||
1717 | 836 | |||
1718 | 316 | def register(self, name, function): | 837 | def register(self, name, function): |
1719 | 838 | """Register a hook""" | ||
1720 | 317 | self._hooks[name] = function | 839 | self._hooks[name] = function |
1721 | 318 | 840 | ||
1722 | 319 | def execute(self, args): | 841 | def execute(self, args): |
1723 | 842 | """Execute a registered hook based on args[0]""" | ||
1724 | 843 | _run_atstart() | ||
1725 | 320 | hook_name = os.path.basename(args[0]) | 844 | hook_name = os.path.basename(args[0]) |
1726 | 321 | if hook_name in self._hooks: | 845 | if hook_name in self._hooks: |
1728 | 322 | self._hooks[hook_name]() | 846 | try: |
1729 | 847 | self._hooks[hook_name]() | ||
1730 | 848 | except SystemExit as x: | ||
1731 | 849 | if x.code is None or x.code == 0: | ||
1732 | 850 | _run_atexit() | ||
1733 | 851 | raise | ||
1734 | 852 | _run_atexit() | ||
1735 | 323 | else: | 853 | else: |
1736 | 324 | raise UnregisteredHookError(hook_name) | 854 | raise UnregisteredHookError(hook_name) |
1737 | 325 | 855 | ||
1738 | 326 | def hook(self, *hook_names): | 856 | def hook(self, *hook_names): |
1739 | 857 | """Decorator, registering them as hooks""" | ||
1740 | 327 | def wrapper(decorated): | 858 | def wrapper(decorated): |
1741 | 328 | for hook_name in hook_names: | 859 | for hook_name in hook_names: |
1742 | 329 | self.register(hook_name, decorated) | 860 | self.register(hook_name, decorated) |
1743 | @@ -336,5 +867,457 @@ class Hooks(object): | |||
1744 | 336 | return wrapper | 867 | return wrapper |
1745 | 337 | 868 | ||
1746 | 338 | 869 | ||
1747 | 870 | class NoNetworkBinding(Exception): | ||
1748 | 871 | pass | ||
1749 | 872 | |||
1750 | 873 | |||
1751 | 339 | def charm_dir(): | 874 | def charm_dir(): |
1752 | 875 | """Return the root directory of the current charm""" | ||
1753 | 876 | d = os.environ.get('JUJU_CHARM_DIR') | ||
1754 | 877 | if d is not None: | ||
1755 | 878 | return d | ||
1756 | 340 | return os.environ.get('CHARM_DIR') | 879 | return os.environ.get('CHARM_DIR') |
1757 | 880 | |||
1758 | 881 | |||
1759 | 882 | @cached | ||
1760 | 883 | def action_get(key=None): | ||
1761 | 884 | """Gets the value of an action parameter, or all key/value param pairs""" | ||
1762 | 885 | cmd = ['action-get'] | ||
1763 | 886 | if key is not None: | ||
1764 | 887 | cmd.append(key) | ||
1765 | 888 | cmd.append('--format=json') | ||
1766 | 889 | action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8')) | ||
1767 | 890 | return action_data | ||
1768 | 891 | |||
1769 | 892 | |||
1770 | 893 | def action_set(values): | ||
1771 | 894 | """Sets the values to be returned after the action finishes""" | ||
1772 | 895 | cmd = ['action-set'] | ||
1773 | 896 | for k, v in list(values.items()): | ||
1774 | 897 | cmd.append('{}={}'.format(k, v)) | ||
1775 | 898 | subprocess.check_call(cmd) | ||
1776 | 899 | |||
1777 | 900 | |||
1778 | 901 | def action_fail(message): | ||
1779 | 902 | """Sets the action status to failed and sets the error message. | ||
1780 | 903 | |||
1781 | 904 | The results set by action_set are preserved.""" | ||
1782 | 905 | subprocess.check_call(['action-fail', message]) | ||
1783 | 906 | |||
1784 | 907 | |||
1785 | 908 | def action_name(): | ||
1786 | 909 | """Get the name of the currently executing action.""" | ||
1787 | 910 | return os.environ.get('JUJU_ACTION_NAME') | ||
1788 | 911 | |||
1789 | 912 | |||
1790 | 913 | def action_uuid(): | ||
1791 | 914 | """Get the UUID of the currently executing action.""" | ||
1792 | 915 | return os.environ.get('JUJU_ACTION_UUID') | ||
1793 | 916 | |||
1794 | 917 | |||
1795 | 918 | def action_tag(): | ||
1796 | 919 | """Get the tag for the currently executing action.""" | ||
1797 | 920 | return os.environ.get('JUJU_ACTION_TAG') | ||
1798 | 921 | |||
1799 | 922 | |||
1800 | 923 | def status_set(workload_state, message): | ||
1801 | 924 | """Set the workload state with a message | ||
1802 | 925 | |||
1803 | 926 | Use status-set to set the workload state with a message which is visible | ||
1804 | 927 | to the user via juju status. If the status-set command is not found then | ||
1805 | 928 | assume this is juju < 1.23 and juju-log the message unstead. | ||
1806 | 929 | |||
1807 | 930 | workload_state -- valid juju workload state. | ||
1808 | 931 | message -- status update message | ||
1809 | 932 | """ | ||
1810 | 933 | valid_states = ['maintenance', 'blocked', 'waiting', 'active'] | ||
1811 | 934 | if workload_state not in valid_states: | ||
1812 | 935 | raise ValueError( | ||
1813 | 936 | '{!r} is not a valid workload state'.format(workload_state) | ||
1814 | 937 | ) | ||
1815 | 938 | cmd = ['status-set', workload_state, message] | ||
1816 | 939 | try: | ||
1817 | 940 | ret = subprocess.call(cmd) | ||
1818 | 941 | if ret == 0: | ||
1819 | 942 | return | ||
1820 | 943 | except OSError as e: | ||
1821 | 944 | if e.errno != errno.ENOENT: | ||
1822 | 945 | raise | ||
1823 | 946 | log_message = 'status-set failed: {} {}'.format(workload_state, | ||
1824 | 947 | message) | ||
1825 | 948 | log(log_message, level='INFO') | ||
1826 | 949 | |||
1827 | 950 | |||
1828 | 951 | def status_get(): | ||
1829 | 952 | """Retrieve the previously set juju workload state and message | ||
1830 | 953 | |||
1831 | 954 | If the status-get command is not found then assume this is juju < 1.23 and | ||
1832 | 955 | return 'unknown', "" | ||
1833 | 956 | |||
1834 | 957 | """ | ||
1835 | 958 | cmd = ['status-get', "--format=json", "--include-data"] | ||
1836 | 959 | try: | ||
1837 | 960 | raw_status = subprocess.check_output(cmd) | ||
1838 | 961 | except OSError as e: | ||
1839 | 962 | if e.errno == errno.ENOENT: | ||
1840 | 963 | return ('unknown', "") | ||
1841 | 964 | else: | ||
1842 | 965 | raise | ||
1843 | 966 | else: | ||
1844 | 967 | status = json.loads(raw_status.decode("UTF-8")) | ||
1845 | 968 | return (status["status"], status["message"]) | ||
1846 | 969 | |||
1847 | 970 | |||
1848 | 971 | def translate_exc(from_exc, to_exc): | ||
1849 | 972 | def inner_translate_exc1(f): | ||
1850 | 973 | @wraps(f) | ||
1851 | 974 | def inner_translate_exc2(*args, **kwargs): | ||
1852 | 975 | try: | ||
1853 | 976 | return f(*args, **kwargs) | ||
1854 | 977 | except from_exc: | ||
1855 | 978 | raise to_exc | ||
1856 | 979 | |||
1857 | 980 | return inner_translate_exc2 | ||
1858 | 981 | |||
1859 | 982 | return inner_translate_exc1 | ||
1860 | 983 | |||
1861 | 984 | |||
1862 | 985 | def application_version_set(version): | ||
1863 | 986 | """Charm authors may trigger this command from any hook to output what | ||
1864 | 987 | version of the application is running. This could be a package version, | ||
1865 | 988 | for instance postgres version 9.5. It could also be a build number or | ||
1866 | 989 | version control revision identifier, for instance git sha 6fb7ba68. """ | ||
1867 | 990 | |||
1868 | 991 | cmd = ['application-version-set'] | ||
1869 | 992 | cmd.append(version) | ||
1870 | 993 | try: | ||
1871 | 994 | subprocess.check_call(cmd) | ||
1872 | 995 | except OSError: | ||
1873 | 996 | log("Application Version: {}".format(version)) | ||
1874 | 997 | |||
1875 | 998 | |||
1876 | 999 | @translate_exc(from_exc=OSError, to_exc=NotImplementedError) | ||
1877 | 1000 | def goal_state(): | ||
1878 | 1001 | """Juju goal state values""" | ||
1879 | 1002 | cmd = ['goal-state', '--format=json'] | ||
1880 | 1003 | return json.loads(subprocess.check_output(cmd).decode('UTF-8')) | ||
1881 | 1004 | |||
1882 | 1005 | |||
1883 | 1006 | @translate_exc(from_exc=OSError, to_exc=NotImplementedError) | ||
1884 | 1007 | def is_leader(): | ||
1885 | 1008 | """Does the current unit hold the juju leadership | ||
1886 | 1009 | |||
1887 | 1010 | Uses juju to determine whether the current unit is the leader of its peers | ||
1888 | 1011 | """ | ||
1889 | 1012 | cmd = ['is-leader', '--format=json'] | ||
1890 | 1013 | return json.loads(subprocess.check_output(cmd).decode('UTF-8')) | ||
1891 | 1014 | |||
1892 | 1015 | |||
1893 | 1016 | @translate_exc(from_exc=OSError, to_exc=NotImplementedError) | ||
1894 | 1017 | def leader_get(attribute=None): | ||
1895 | 1018 | """Juju leader get value(s)""" | ||
1896 | 1019 | cmd = ['leader-get', '--format=json'] + [attribute or '-'] | ||
1897 | 1020 | return json.loads(subprocess.check_output(cmd).decode('UTF-8')) | ||
1898 | 1021 | |||
1899 | 1022 | |||
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def leader_set(settings=None, **kwargs):
    """Write Juju leadership settings.

    :param settings: optional mapping of key/value pairs.  A value of
        None clears the key.  The caller's mapping is never modified.
    :param **kwargs: additional settings, taking precedence over
        `settings` on key clashes.
    :raises NotImplementedError: when the leader-set tool is unavailable.
    """
    # Don't log secrets.
    # log("Juju leader-set '%s'" % (settings), level=DEBUG)
    # Merge into a fresh dict so we never mutate the caller's argument
    # (the previous implementation updated `settings` in place).
    merged = dict(settings or {})
    merged.update(kwargs)
    cmd = ['leader-set']
    for k, v in merged.items():
        if v is None:
            cmd.append('{}='.format(k))  # empty value removes the key
        else:
            cmd.append('{}={}'.format(k, v))
    subprocess.check_call(cmd)
1914 | 1037 | |||
1915 | 1038 | |||
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def payload_register(ptype, klass, pid):
    """Notify Juju, from within a hook, that a payload has been started."""
    subprocess.check_call(['payload-register', ptype, klass, pid])
1924 | 1047 | |||
1925 | 1048 | |||
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def payload_unregister(klass, pid):
    """Notify Juju, from within a hook, that a payload was manually stopped.

    The <class> and <id> provided must match a payload previously
    registered with juju using payload-register.
    """
    subprocess.check_call(['payload-unregister', klass, pid])
1936 | 1059 | |||
1937 | 1060 | |||
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def payload_status_set(klass, pid, status):
    """Update the current status of a registered payload.

    The <class> and <id> provided must match a payload previously
    registered with juju using payload-register.  The <status> must be
    one of: starting, started, stopping, stopped.
    """
    subprocess.check_call(['payload-status-set', klass, pid, status])
1948 | 1071 | |||
1949 | 1072 | |||
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def resource_get(name):
    """Fetch the local path of a named resource.

    <name> must match a resource defined in metadata.yaml.

    :return: the path as a string, or False when the name is empty or
        the resource is not available.
    """
    if not name:
        return False
    try:
        return subprocess.check_output(['resource-get', name]).decode('UTF-8')
    except subprocess.CalledProcessError:
        return False
1966 | 1089 | |||
1967 | 1090 | |||
@cached
def juju_version():
    """Full version string (eg. '1.23.3.1-trusty-amd64')"""
    # Ask the machine agent binary directly.
    # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
    machine_jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
    return subprocess.check_output(
        [machine_jujud, 'version'], universal_newlines=True).strip()
1975 | 1098 | |||
1976 | 1099 | |||
def has_juju_version(minimum_version):
    """Return True if the running Juju is at least `minimum_version`."""
    current = LooseVersion(juju_version())
    return current >= LooseVersion(minimum_version)
1980 | 1103 | |||
1981 | 1104 | |||
# Registries of (callback, args, kwargs) tuples scheduled around the main
# hook body: _atstart entries run in order before the hook (see atstart()),
# _atexit entries run in reverse order after it succeeds (see atexit()).
_atexit = []
_atstart = []
1984 | 1107 | |||
1985 | 1108 | |||
def atstart(callback, *args, **kwargs):
    '''Register a callback to be invoked before the main hook runs.

    Callbacks fire in the order they were registered.

    This lets modules and classes perform initialization and inject
    behavior.  In particular:

    - Run common code before all of your hooks, such as logging
      the hook name or interesting relation data.
    - Defer object or module initialization that requires a hook
      context until we know there actually is a hook context,
      making testing easier.
    - Rather than requiring charm authors to include boilerplate to
      invoke your helper's behavior, have it run automatically if
      your object is instantiated or module imported.

    Has no effect once the hook framework has already been launched.
    '''
    _atstart.append((callback, args, kwargs))
2007 | 1130 | |||
2008 | 1131 | |||
def atexit(callback, *args, **kwargs):
    '''Register a callback to run after the hook completes successfully.

    Callbacks fire in the reverse order they were registered.'''
    _atexit.append((callback, args, kwargs))
2014 | 1137 | |||
2015 | 1138 | |||
def _run_atstart():
    '''Hook frameworks must invoke this before running the main hook body.'''
    # Iterate the registry directly, then empty it in place.
    for cb, cb_args, cb_kwargs in _atstart:
        cb(*cb_args, **cb_kwargs)
    del _atstart[:]
2022 | 1145 | |||
2023 | 1146 | |||
def _run_atexit():
    '''Hook frameworks must invoke this after the main hook body has
    successfully completed.  Do not invoke it if the hook fails.'''
    # Reverse order: last-registered cleanup runs first.
    for cb, cb_args, cb_kwargs in reversed(_atexit):
        cb(*cb_args, **cb_kwargs)
    del _atexit[:]
2031 | 1154 | |||
2032 | 1155 | |||
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def network_get_primary_address(binding):
    '''
    Deprecated since Juju 2.3; use network_get()

    Look up the primary network address for a named binding.

    :param binding: string. The name of a relation or extra-binding
    :return: string. The primary IP address for the named binding
    :raise: NotImplementedError if run on Juju < 2.0
    '''
    try:
        response = subprocess.check_output(
            ['network-get', '--primary-address', binding],
            stderr=subprocess.STDOUT).decode('UTF-8').strip()
    except CalledProcessError as e:
        # Juju reports a missing binding on stderr; surface that case as
        # a dedicated exception type, re-raise everything else.
        if 'no network config found for binding' in e.output.decode('UTF-8'):
            raise NoNetworkBinding("No network binding for {}"
                                   .format(binding))
        raise
    return response
2056 | 1179 | |||
2057 | 1180 | |||
def network_get(endpoint, relation_id=None):
    """
    Retrieve the network details for a relation endpoint

    :param endpoint: string. The name of a relation endpoint
    :param relation_id: int. The ID of the relation for the current context.
    :return: dict. The loaded YAML output of the network-get query.
    :raise: NotImplementedError if request not supported by the Juju version.
    """
    if not has_juju_version('2.2'):
        raise NotImplementedError(juju_version())  # earlier versions require --primary-address
    if relation_id is not None and not has_juju_version('2.3'):
        raise NotImplementedError  # 2.3 added the -r option

    cmd = ['network-get', endpoint, '--format', 'yaml']
    if relation_id is not None:
        # Stringify so an integer relation ID (as documented) does not
        # raise TypeError inside subprocess; `is not None` also keeps
        # relation ID 0 from being silently dropped.
        cmd.extend(['-r', str(relation_id)])
    response = subprocess.check_output(
        cmd,
        stderr=subprocess.STDOUT).decode('UTF-8').strip()
    return yaml.safe_load(response)
2080 | 1203 | |||
2081 | 1204 | |||
def add_metric(*args, **kwargs):
    """Add metric values.  May only be called from the collect-metrics hook.

    Values may be expressed with keyword arguments; metric names
    containing dashes may be expressed as 'key=value' positional
    arguments instead.
    """
    pairs = list(args)
    pairs.extend('{}={}'.format(k, v) for k, v in kwargs.items())
    try:
        subprocess.check_call(['add-metric'] + sorted(pairs))
        return
    except EnvironmentError as e:
        # Only swallow "tool not found"; anything else is a real error.
        if e.errno != errno.ENOENT:
            raise
    log('add-metric failed: {}'.format(' '.join(pairs)), level='INFO')
2100 | 1223 | |||
2101 | 1224 | |||
def meter_status():
    """Return the meter status, or None outside the meter-status-changed hook."""
    # Juju exposes the status via the hook environment.
    return os.environ.get('JUJU_METER_STATUS')
2105 | 1228 | |||
2106 | 1229 | |||
def meter_info():
    """Return meter status information, or None outside the
    meter-status-changed hook."""
    # Juju exposes the info via the hook environment.
    return os.environ.get('JUJU_METER_INFO')
2111 | 1234 | |||
2112 | 1235 | |||
def iter_units_for_relation_name(relation_name):
    """Yield every unit participating in relations of the given name.

    Generator producing a named tuple with ``rid`` and ``unit`` fields
    for each unit in each relation of that name.

    Usage:
       data = [(u.rid, u.unit)
               for u in iter_units_for_relation_name(relation_name)]

    :param relation_name: string relation name
    :yield: Named Tuple with rid and unit field names
    """
    RelatedUnit = namedtuple('RelatedUnit', 'rid, unit')
    for relation_id in relation_ids(relation_name):
        for member in related_units(relation_id):
            yield RelatedUnit(relation_id, member)
2130 | 1253 | |||
2131 | 1254 | |||
def ingress_address(rid=None, unit=None):
    """
    Return the ingress-address from a relation when available,
    otherwise the private-address.

    On the consuming side of the relation (unit is a remote unit) the
    ingress-address is the IP this unit must use to reach the provided
    service on the remote unit.  On the providing side
    (unit == local_unit()) it is the IP advertised to remote units on
    this relation, which they must use to reach this unit's service.

    Note that charms may document some other method to use in
    preference to ingress_address(), such as an address provided on a
    different relation attribute or a service discovery mechanism,
    allowing them to redirect inbound connections to peers or to other
    applications such as load balancers.

    Usage:
        addresses = [ingress_address(rid=u.rid, unit=u.unit)
                     for u in iter_units_for_relation_name(relation_name)]

    :param rid: string relation id
    :param unit: string unit name
    :side effect: calls relation_get
    :return: string IP address
    """
    data = relation_get(rid=rid, unit=unit)
    return data.get('ingress-address') or data.get('private-address')
2164 | 1287 | |||
2165 | 1288 | |||
def egress_subnets(rid=None, unit=None):
    """
    Retrieve the egress-subnets from a relation.

    For use on the providing side of a relation: gives the address
    ranges client connections may come from.  The result is
    uninteresting on the consuming side (unit == local_unit()).

    Returns a stable list of subnets in CIDR format,
    eg. ['192.168.1.0/24', '2001::F00F/128'].

    Falls back to the published ingress-address, then private-address,
    when egress-subnets is not available.

    :param rid: string relation id
    :param unit: string unit name
    :side effect: calls relation_get
    :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128']
    """
    def _as_cidr(addr):
        # Bare IPv4 -> /32, bare IPv6 -> /128; CIDR strings pass through.
        if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
            return addr + '/32'
        if ':' in addr and '/' not in addr:  # IPv6
            return addr + '/128'
        return addr

    data = relation_get(rid=rid, unit=unit)
    if 'egress-subnets' in data:
        return [s.strip() for s in data['egress-subnets'].split(',') if s.strip()]
    for key in ('ingress-address', 'private-address'):
        if key in data:
            return [_as_cidr(data[key])]
    return []  # Should never happen
2201 | diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py | |||
2202 | index ae36574..e9fd38a 100644 | |||
2203 | --- a/hooks/charmhelpers/core/host.py | |||
2204 | +++ b/hooks/charmhelpers/core/host.py | |||
2205 | @@ -1,3 +1,17 @@ | |||
2206 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
2207 | 2 | # | ||
2208 | 3 | # Licensed under the Apache License, Version 2.0 (the "License"); | ||
2209 | 4 | # you may not use this file except in compliance with the License. | ||
2210 | 5 | # You may obtain a copy of the License at | ||
2211 | 6 | # | ||
2212 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 | ||
2213 | 8 | # | ||
2214 | 9 | # Unless required by applicable law or agreed to in writing, software | ||
2215 | 10 | # distributed under the License is distributed on an "AS IS" BASIS, | ||
2216 | 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
2217 | 12 | # See the License for the specific language governing permissions and | ||
2218 | 13 | # limitations under the License. | ||
2219 | 14 | |||
2220 | 1 | """Tools for working with the host system""" | 15 | """Tools for working with the host system""" |
2221 | 2 | # Copyright 2012 Canonical Ltd. | 16 | # Copyright 2012 Canonical Ltd. |
2222 | 3 | # | 17 | # |
2223 | @@ -6,60 +20,332 @@ | |||
2224 | 6 | # Matthew Wedgwood <matthew.wedgwood@canonical.com> | 20 | # Matthew Wedgwood <matthew.wedgwood@canonical.com> |
2225 | 7 | 21 | ||
2226 | 8 | import os | 22 | import os |
2227 | 23 | import re | ||
2228 | 9 | import pwd | 24 | import pwd |
2229 | 25 | import glob | ||
2230 | 10 | import grp | 26 | import grp |
2231 | 11 | import random | 27 | import random |
2232 | 12 | import string | 28 | import string |
2233 | 13 | import subprocess | 29 | import subprocess |
2234 | 14 | import hashlib | 30 | import hashlib |
2235 | 31 | import functools | ||
2236 | 32 | import itertools | ||
2237 | 33 | import six | ||
2238 | 15 | 34 | ||
2239 | 35 | from contextlib import contextmanager | ||
2240 | 16 | from collections import OrderedDict | 36 | from collections import OrderedDict |
2241 | 37 | from .hookenv import log, DEBUG, local_unit | ||
2242 | 38 | from .fstab import Fstab | ||
2243 | 39 | from charmhelpers.osplatform import get_platform | ||
2244 | 40 | |||
2245 | 41 | __platform__ = get_platform() | ||
2246 | 42 | if __platform__ == "ubuntu": | ||
2247 | 43 | from charmhelpers.core.host_factory.ubuntu import ( | ||
2248 | 44 | service_available, | ||
2249 | 45 | add_new_group, | ||
2250 | 46 | lsb_release, | ||
2251 | 47 | cmp_pkgrevno, | ||
2252 | 48 | CompareHostReleases, | ||
2253 | 49 | ) # flake8: noqa -- ignore F401 for this import | ||
2254 | 50 | elif __platform__ == "centos": | ||
2255 | 51 | from charmhelpers.core.host_factory.centos import ( | ||
2256 | 52 | service_available, | ||
2257 | 53 | add_new_group, | ||
2258 | 54 | lsb_release, | ||
2259 | 55 | cmp_pkgrevno, | ||
2260 | 56 | CompareHostReleases, | ||
2261 | 57 | ) # flake8: noqa -- ignore F401 for this import | ||
2262 | 58 | |||
# Location of the updatedb(8) configuration file.
UPDATEDB_PATH = '/etc/updatedb.conf'
2264 | 60 | |||
def service_start(service_name, **kwargs):
    """Start a system service.

    The specified service name is managed via the system level init system.
    Some init systems (e.g. upstart) require that additional arguments be
    provided in order to directly control service instances whereas other init
    systems allow for addressing instances of a service directly by name (e.g.
    systemd).

    The kwargs allow for the additional parameters to be passed to underlying
    init systems for those systems which require/allow for them. For example,
    the ceph-osd upstart script requires the id parameter to be passed along
    in order to identify which running daemon should be started. The follow-
    ing example starts the ceph-osd service for instance id=4:

        service_start('ceph-osd', id=4)

    :param service_name: the name of the service to start
    :param **kwargs: additional parameters to pass to the init system when
                     managing services. These will be passed as key=value
                     parameters to the init system's commandline. kwargs
                     are ignored for systemd enabled systems.
    """
    # Docstring fixed: it previously described *stopping* the service and
    # showed a service_stop() example, copied from service_stop().
    return service('start', service_name, **kwargs)
2289 | 85 | |||
2290 | 86 | |||
def service_stop(service_name, **kwargs):
    """Stop a system service.

    The service is managed through the host's init system.  Some init
    systems (e.g. upstart) require extra arguments to address a specific
    instance of a service, while others (e.g. systemd) address instances
    directly by name.

    The kwargs are passed through to the underlying init system as
    key=value command-line parameters for those systems which
    require/allow them.  For example, the ceph-osd upstart script needs
    the id parameter to identify which running daemon to act on; the
    following stops the ceph-osd service for instance id=4:

        service_stop('ceph-osd', id=4)

    :param service_name: the name of the service to stop
    :param **kwargs: additional parameters to pass to the init system when
                     managing services. These will be passed as key=value
                     parameters to the init system's commandline. kwargs
                     are ignored for systemd enabled systems.
    """
    return service('stop', service_name, **kwargs)
2315 | 111 | |||
2316 | 112 | |||
def service_restart(service_name, **kwargs):
    """Restart a system service.

    The specified service name is managed via the system level init system.
    Some init systems (e.g. upstart) require that additional arguments be
    provided in order to directly control service instances whereas other init
    systems allow for addressing instances of a service directly by name (e.g.
    systemd).

    The kwargs allow for the additional parameters to be passed to underlying
    init systems for those systems which require/allow for them. For example,
    the ceph-osd upstart script requires the id parameter to be passed along
    in order to identify which running daemon should be restarted. The follow-
    ing example restarts the ceph-osd service for instance id=4:

        service_restart('ceph-osd', id=4)

    :param service_name: the name of the service to restart
    :param **kwargs: additional parameters to pass to the init system when
                     managing services. These will be passed as key=value
                     parameters to the init system's commandline. kwargs
                     are ignored for init systems not allowing additional
                     parameters via the commandline (systemd).
    """
    # Bug fix: forward kwargs so instance-addressed restarts work as
    # documented; previously they were silently dropped.
    return service('restart', service_name, **kwargs)
2343 | 19 | 138 | ||
2344 | 20 | 139 | ||
def service_reload(service_name, restart_on_failure=False, **kwargs):
    """Reload a system service, optionally restarting it when the reload fails.

    The service is managed through the host's init system.  Some init
    systems (e.g. upstart) require extra arguments to address a specific
    instance of a service, while others (e.g. systemd) address instances
    directly by name.

    The kwargs are passed through to the underlying init system as
    key=value command-line parameters for those systems which
    require/allow them.  For example, the ceph-osd upstart script needs
    the id parameter to identify which running daemon to reload:

        service_reload('ceph-osd', id=4)

    :param service_name: the name of the service to reload
    :param restart_on_failure: boolean indicating whether to fall back
                               to a restart if the reload fails.
    :param **kwargs: additional parameters to pass to the init system when
                     managing services. These will be passed as key=value
                     parameters to the init system's commandline. kwargs
                     are ignored for init systems not allowing additional
                     parameters via the commandline (systemd).
    """
    succeeded = service('reload', service_name, **kwargs)
    if restart_on_failure and not succeeded:
        succeeded = service('restart', service_name, **kwargs)
    return succeeded
2382 | 31 | 171 | ||
2383 | 32 | 172 | ||
def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d",
                  **kwargs):
    """Pause a system service: stop it and prevent it starting again at boot.

    :param service_name: the name of the service to pause
    :param init_dir: path to the upstart init directory
    :param initd_dir: path to the sysv init directory
    :param **kwargs: additional parameters to pass to the init system when
                     managing services. These will be passed as key=value
                     parameters to the init system's commandline. kwargs
                     are ignored for init systems which do not support
                     key=value arguments via the commandline.
    :raises ValueError: when the service cannot be identified as systemd,
                        upstart or SysV managed.
    """
    conf_upstart = os.path.join(init_dir, "{}.conf".format(service_name))
    conf_sysv = os.path.join(initd_dir, service_name)
    stopped = True
    if service_running(service_name, **kwargs):
        stopped = service_stop(service_name, **kwargs)
    if init_is_systemd():
        service('disable', service_name)
        service('mask', service_name)
    elif os.path.exists(conf_upstart):
        # Upstart honours a "manual" stanza in an override file.
        override = os.path.join(init_dir, '{}.override'.format(service_name))
        with open(override, 'w') as fh:
            fh.write("manual\n")
    elif os.path.exists(conf_sysv):
        subprocess.check_call(["update-rc.d", service_name, "disable"])
    else:
        raise ValueError(
            "Unable to detect {0} as SystemD, Upstart {1} or"
            " SysV {2}".format(
                service_name, conf_upstart, conf_sysv))
    return stopped
2425 | 209 | |||
2426 | 210 | |||
def service_resume(service_name, init_dir="/etc/init",
                   initd_dir="/etc/init.d", **kwargs):
    """Resume a system service: re-enable starting at boot, then start it.

    :param service_name: the name of the service to resume
    :param init_dir: the path to the init dir
    :param initd dir: the path to the initd dir
    :param **kwargs: additional parameters to pass to the init system when
                     managing services. These will be passed as key=value
                     parameters to the init system's commandline. kwargs
                     are ignored for systemd enabled systems.
    :raises ValueError: when the service cannot be identified as systemd,
                        upstart or SysV managed.
    """
    conf_upstart = os.path.join(init_dir, "{}.conf".format(service_name))
    conf_sysv = os.path.join(initd_dir, service_name)
    if init_is_systemd():
        service('unmask', service_name)
        service('enable', service_name)
    elif os.path.exists(conf_upstart):
        # Undo a pause: remove the "manual" override if present.
        override = os.path.join(init_dir, '{}.override'.format(service_name))
        if os.path.exists(override):
            os.unlink(override)
    elif os.path.exists(conf_sysv):
        subprocess.check_call(["update-rc.d", service_name, "enable"])
    else:
        raise ValueError(
            "Unable to detect {0} as SystemD, Upstart {1} or"
            " SysV {2}".format(
                service_name, conf_upstart, conf_sysv))
    started = service_running(service_name, **kwargs)
    if not started:
        started = service_start(service_name, **kwargs)
    return started
2463 | 247 | |||
2464 | 248 | |||
def service(action, service_name, **kwargs):
    """Control a system service.

    :param action: the action to take on the service
    :param service_name: the name of the service to perform the action on
    :param **kwargs: additional params to be passed to the service command
                     in the form of key=value.
    :return: True when the underlying command exits 0.
    """
    if init_is_systemd():
        cmd = ['systemctl', action, service_name]
    else:
        cmd = ['service', service_name, action]
    cmd.extend('%s=%s' % (key, value)
               for key, value in six.iteritems(kwargs))
    return subprocess.call(cmd) == 0
2481 | 41 | 265 | ||
2482 | 42 | 266 | ||
# Per-init-system locations of a service's configuration file.
_UPSTART_CONF = "/etc/init/{}.conf"
_INIT_D_CONF = "/etc/init.d/{}"


def service_running(service_name, **kwargs):
    """Determine whether a system service is running.

    :param service_name: the name of the service
    :param **kwargs: additional args to pass to the service command. This is
                     used to pass additional key=value arguments to the
                     service command line for managing specific instance
                     units (e.g. service ceph-osd status id=2). The kwargs
                     are ignored in systemd services.
    """
    if init_is_systemd():
        return service('is-active', service_name)
    if os.path.exists(_UPSTART_CONF.format(service_name)):
        cmd = ['status', service_name]
        cmd.extend('%s=%s' % (key, value)
                   for key, value in six.iteritems(kwargs))
        try:
            output = subprocess.check_output(
                cmd, stderr=subprocess.STDOUT).decode('UTF-8')
        except subprocess.CalledProcessError:
            return False
        # Upstart reports a consistent string for running jobs,
        # e.g. 'start/running'.
        if ("start/running" in output or
                "is running" in output or
                "up and running" in output):
            return True
    elif os.path.exists(_INIT_D_CONF.format(service_name)):
        # System V init scripts signal state via their exit code.
        return service('status', service_name)
    return False
2531 | 306 | |||
2532 | 307 | |||
2533 | 308 | SYSTEMD_SYSTEM = '/run/systemd/system' | ||
2534 | 53 | 309 | ||
2535 | 54 | 310 | ||
2538 | 55 | def adduser(username, password=None, shell='/bin/bash', system_user=False): | 311 | def init_is_systemd(): |
2539 | 56 | """Add a user""" | 312 | """Return True if the host system uses systemd, False otherwise.""" |
2540 | 313 | if lsb_release()['DISTRIB_CODENAME'] == 'trusty': | ||
2541 | 314 | return False | ||
2542 | 315 | return os.path.isdir(SYSTEMD_SYSTEM) | ||
2543 | 316 | |||
2544 | 317 | |||
2545 | 318 | def adduser(username, password=None, shell='/bin/bash', | ||
2546 | 319 | system_user=False, primary_group=None, | ||
2547 | 320 | secondary_groups=None, uid=None, home_dir=None): | ||
2548 | 321 | """Add a user to the system. | ||
2549 | 322 | |||
2550 | 323 | Will log but otherwise succeed if the user already exists. | ||
2551 | 324 | |||
2552 | 325 | :param str username: Username to create | ||
2553 | 326 | :param str password: Password for user; if ``None``, create a system user | ||
2554 | 327 | :param str shell: The default shell for the user | ||
2555 | 328 | :param bool system_user: Whether to create a login or system user | ||
2556 | 329 | :param str primary_group: Primary group for user; defaults to username | ||
2557 | 330 | :param list secondary_groups: Optional list of additional groups | ||
2558 | 331 | :param int uid: UID for user being created | ||
2559 | 332 | :param str home_dir: Home directory for user | ||
2560 | 333 | |||
2561 | 334 | :returns: The password database entry struct, as returned by `pwd.getpwnam` | ||
2562 | 335 | """ | ||
2563 | 57 | try: | 336 | try: |
2564 | 58 | user_info = pwd.getpwnam(username) | 337 | user_info = pwd.getpwnam(username) |
2565 | 59 | log('user {0} already exists!'.format(username)) | 338 | log('user {0} already exists!'.format(username)) |
2566 | 339 | if uid: | ||
2567 | 340 | user_info = pwd.getpwuid(int(uid)) | ||
2568 | 341 | log('user with uid {0} already exists!'.format(uid)) | ||
2569 | 60 | except KeyError: | 342 | except KeyError: |
2570 | 61 | log('creating user {0}'.format(username)) | 343 | log('creating user {0}'.format(username)) |
2571 | 62 | cmd = ['useradd'] | 344 | cmd = ['useradd'] |
2572 | 345 | if uid: | ||
2573 | 346 | cmd.extend(['--uid', str(uid)]) | ||
2574 | 347 | if home_dir: | ||
2575 | 348 | cmd.extend(['--home', str(home_dir)]) | ||
2576 | 63 | if system_user or password is None: | 349 | if system_user or password is None: |
2577 | 64 | cmd.append('--system') | 350 | cmd.append('--system') |
2578 | 65 | else: | 351 | else: |
2579 | @@ -68,32 +354,147 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False): | |||
2580 | 68 | '--shell', shell, | 354 | '--shell', shell, |
2581 | 69 | '--password', password, | 355 | '--password', password, |
2582 | 70 | ]) | 356 | ]) |
2583 | 357 | if not primary_group: | ||
2584 | 358 | try: | ||
2585 | 359 | grp.getgrnam(username) | ||
2586 | 360 | primary_group = username # avoid "group exists" error | ||
2587 | 361 | except KeyError: | ||
2588 | 362 | pass | ||
2589 | 363 | if primary_group: | ||
2590 | 364 | cmd.extend(['-g', primary_group]) | ||
2591 | 365 | if secondary_groups: | ||
2592 | 366 | cmd.extend(['-G', ','.join(secondary_groups)]) | ||
2593 | 71 | cmd.append(username) | 367 | cmd.append(username) |
2594 | 72 | subprocess.check_call(cmd) | 368 | subprocess.check_call(cmd) |
2595 | 73 | user_info = pwd.getpwnam(username) | 369 | user_info = pwd.getpwnam(username) |
2596 | 74 | return user_info | 370 | return user_info |
2597 | 75 | 371 | ||
2598 | 76 | 372 | ||
2599 | 373 | def user_exists(username): | ||
2600 | 374 | """Check if a user exists""" | ||
2601 | 375 | try: | ||
2602 | 376 | pwd.getpwnam(username) | ||
2603 | 377 | user_exists = True | ||
2604 | 378 | except KeyError: | ||
2605 | 379 | user_exists = False | ||
2606 | 380 | return user_exists | ||
2607 | 381 | |||
2608 | 382 | |||
2609 | 383 | def uid_exists(uid): | ||
2610 | 384 | """Check if a uid exists""" | ||
2611 | 385 | try: | ||
2612 | 386 | pwd.getpwuid(uid) | ||
2613 | 387 | uid_exists = True | ||
2614 | 388 | except KeyError: | ||
2615 | 389 | uid_exists = False | ||
2616 | 390 | return uid_exists | ||
2617 | 391 | |||
2618 | 392 | |||
2619 | 393 | def group_exists(groupname): | ||
2620 | 394 | """Check if a group exists""" | ||
2621 | 395 | try: | ||
2622 | 396 | grp.getgrnam(groupname) | ||
2623 | 397 | group_exists = True | ||
2624 | 398 | except KeyError: | ||
2625 | 399 | group_exists = False | ||
2626 | 400 | return group_exists | ||
2627 | 401 | |||
2628 | 402 | |||
2629 | 403 | def gid_exists(gid): | ||
2630 | 404 | """Check if a gid exists""" | ||
2631 | 405 | try: | ||
2632 | 406 | grp.getgrgid(gid) | ||
2633 | 407 | gid_exists = True | ||
2634 | 408 | except KeyError: | ||
2635 | 409 | gid_exists = False | ||
2636 | 410 | return gid_exists | ||
2637 | 411 | |||
2638 | 412 | |||
2639 | 413 | def add_group(group_name, system_group=False, gid=None): | ||
2640 | 414 | """Add a group to the system | ||
2641 | 415 | |||
2642 | 416 | Will log but otherwise succeed if the group already exists. | ||
2643 | 417 | |||
2644 | 418 | :param str group_name: group to create | ||
2645 | 419 | :param bool system_group: Create system group | ||
2646 | 420 | :param int gid: GID for user being created | ||
2647 | 421 | |||
2648 | 422 | :returns: The password database entry struct, as returned by `grp.getgrnam` | ||
2649 | 423 | """ | ||
2650 | 424 | try: | ||
2651 | 425 | group_info = grp.getgrnam(group_name) | ||
2652 | 426 | log('group {0} already exists!'.format(group_name)) | ||
2653 | 427 | if gid: | ||
2654 | 428 | group_info = grp.getgrgid(gid) | ||
2655 | 429 | log('group with gid {0} already exists!'.format(gid)) | ||
2656 | 430 | except KeyError: | ||
2657 | 431 | log('creating group {0}'.format(group_name)) | ||
2658 | 432 | add_new_group(group_name, system_group, gid) | ||
2659 | 433 | group_info = grp.getgrnam(group_name) | ||
2660 | 434 | return group_info | ||
2661 | 435 | |||
2662 | 436 | |||
2663 | 77 | def add_user_to_group(username, group): | 437 | def add_user_to_group(username, group): |
2664 | 78 | """Add a user to a group""" | 438 | """Add a user to a group""" |
2670 | 79 | cmd = [ | 439 | cmd = ['gpasswd', '-a', username, group] |
2666 | 80 | 'gpasswd', '-a', | ||
2667 | 81 | username, | ||
2668 | 82 | group | ||
2669 | 83 | ] | ||
2671 | 84 | log("Adding user {} to group {}".format(username, group)) | 440 | log("Adding user {} to group {}".format(username, group)) |
2672 | 85 | subprocess.check_call(cmd) | 441 | subprocess.check_call(cmd) |
2673 | 86 | 442 | ||
2674 | 87 | 443 | ||
2676 | 88 | def rsync(from_path, to_path, flags='-r', options=None): | 444 | def chage(username, lastday=None, expiredate=None, inactive=None, |
2677 | 445 | mindays=None, maxdays=None, root=None, warndays=None): | ||
2678 | 446 | """Change user password expiry information | ||
2679 | 447 | |||
2680 | 448 | :param str username: User to update | ||
2681 | 449 | :param str lastday: Set when password was changed in YYYY-MM-DD format | ||
2682 | 450 | :param str expiredate: Set when user's account will no longer be | ||
2683 | 451 | accessible in YYYY-MM-DD format. | ||
2684 | 452 | -1 will remove an account expiration date. | ||
2685 | 453 | :param str inactive: Set the number of days of inactivity after a password | ||
2686 | 454 | has expired before the account is locked. | ||
2687 | 455 | -1 will remove an account's inactivity. | ||
2688 | 456 | :param str mindays: Set the minimum number of days between password | ||
2689 | 457 | changes to MIN_DAYS. | ||
2690 | 458 | 0 indicates the password can be changed anytime. | ||
2691 | 459 | :param str maxdays: Set the maximum number of days during which a | ||
2692 | 460 | password is valid. | ||
2693 | 461 | -1 as MAX_DAYS will remove checking maxdays | ||
2694 | 462 | :param str root: Apply changes in the CHROOT_DIR directory | ||
2695 | 463 | :param str warndays: Set the number of days of warning before a password | ||
2696 | 464 | change is required | ||
2697 | 465 | :raises subprocess.CalledProcessError: if call to chage fails | ||
2698 | 466 | """ | ||
2699 | 467 | cmd = ['chage'] | ||
2700 | 468 | if root: | ||
2701 | 469 | cmd.extend(['--root', root]) | ||
2702 | 470 | if lastday: | ||
2703 | 471 | cmd.extend(['--lastday', lastday]) | ||
2704 | 472 | if expiredate: | ||
2705 | 473 | cmd.extend(['--expiredate', expiredate]) | ||
2706 | 474 | if inactive: | ||
2707 | 475 | cmd.extend(['--inactive', inactive]) | ||
2708 | 476 | if mindays: | ||
2709 | 477 | cmd.extend(['--mindays', mindays]) | ||
2710 | 478 | if maxdays: | ||
2711 | 479 | cmd.extend(['--maxdays', maxdays]) | ||
2712 | 480 | if warndays: | ||
2713 | 481 | cmd.extend(['--warndays', warndays]) | ||
2714 | 482 | cmd.append(username) | ||
2715 | 483 | subprocess.check_call(cmd) | ||
2716 | 484 | |||
2717 | 485 | remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1') | ||
2718 | 486 | |||
2719 | 487 | def rsync(from_path, to_path, flags='-r', options=None, timeout=None): | ||
2720 | 89 | """Replicate the contents of a path""" | 488 | """Replicate the contents of a path""" |
2721 | 90 | options = options or ['--delete', '--executability'] | 489 | options = options or ['--delete', '--executability'] |
2722 | 91 | cmd = ['/usr/bin/rsync', flags] | 490 | cmd = ['/usr/bin/rsync', flags] |
2723 | 491 | if timeout: | ||
2724 | 492 | cmd = ['timeout', str(timeout)] + cmd | ||
2725 | 92 | cmd.extend(options) | 493 | cmd.extend(options) |
2726 | 93 | cmd.append(from_path) | 494 | cmd.append(from_path) |
2727 | 94 | cmd.append(to_path) | 495 | cmd.append(to_path) |
2728 | 95 | log(" ".join(cmd)) | 496 | log(" ".join(cmd)) |
2730 | 96 | return subprocess.check_output(cmd).strip() | 497 | return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip() |
2731 | 97 | 498 | ||
2732 | 98 | 499 | ||
2733 | 99 | def symlink(source, destination): | 500 | def symlink(source, destination): |
2734 | @@ -108,66 +509,105 @@ def symlink(source, destination): | |||
2735 | 108 | subprocess.check_call(cmd) | 509 | subprocess.check_call(cmd) |
2736 | 109 | 510 | ||
2737 | 110 | 511 | ||
2739 | 111 | def mkdir(path, owner='root', group='root', perms=0555, force=False): | 512 | def mkdir(path, owner='root', group='root', perms=0o555, force=False): |
2740 | 112 | """Create a directory""" | 513 | """Create a directory""" |
2741 | 113 | log("Making dir {} {}:{} {:o}".format(path, owner, group, | 514 | log("Making dir {} {}:{} {:o}".format(path, owner, group, |
2742 | 114 | perms)) | 515 | perms)) |
2743 | 115 | uid = pwd.getpwnam(owner).pw_uid | 516 | uid = pwd.getpwnam(owner).pw_uid |
2744 | 116 | gid = grp.getgrnam(group).gr_gid | 517 | gid = grp.getgrnam(group).gr_gid |
2745 | 117 | realpath = os.path.abspath(path) | 518 | realpath = os.path.abspath(path) |
2748 | 118 | if os.path.exists(realpath): | 519 | path_exists = os.path.exists(realpath) |
2749 | 119 | if force and not os.path.isdir(realpath): | 520 | if path_exists and force: |
2750 | 521 | if not os.path.isdir(realpath): | ||
2751 | 120 | log("Removing non-directory file {} prior to mkdir()".format(path)) | 522 | log("Removing non-directory file {} prior to mkdir()".format(path)) |
2752 | 121 | os.unlink(realpath) | 523 | os.unlink(realpath) |
2754 | 122 | else: | 524 | os.makedirs(realpath, perms) |
2755 | 525 | elif not path_exists: | ||
2756 | 123 | os.makedirs(realpath, perms) | 526 | os.makedirs(realpath, perms) |
2757 | 124 | os.chown(realpath, uid, gid) | 527 | os.chown(realpath, uid, gid) |
2758 | 528 | os.chmod(realpath, perms) | ||
2759 | 125 | 529 | ||
2760 | 126 | 530 | ||
2764 | 127 | def write_file(path, content, owner='root', group='root', perms=0444): | 531 | def write_file(path, content, owner='root', group='root', perms=0o444): |
2765 | 128 | """Create or overwrite a file with the contents of a string""" | 532 | """Create or overwrite a file with the contents of a byte string.""" |
2763 | 129 | log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) | ||
2766 | 130 | uid = pwd.getpwnam(owner).pw_uid | 533 | uid = pwd.getpwnam(owner).pw_uid |
2767 | 131 | gid = grp.getgrnam(group).gr_gid | 534 | gid = grp.getgrnam(group).gr_gid |
2772 | 132 | with open(path, 'w') as target: | 535 | # lets see if we can grab the file and compare the context, to avoid doing |
2773 | 133 | os.fchown(target.fileno(), uid, gid) | 536 | # a write. |
2774 | 134 | os.fchmod(target.fileno(), perms) | 537 | existing_content = None |
2775 | 135 | target.write(content) | 538 | existing_uid, existing_gid = None, None |
2776 | 539 | try: | ||
2777 | 540 | with open(path, 'rb') as target: | ||
2778 | 541 | existing_content = target.read() | ||
2779 | 542 | stat = os.stat(path) | ||
2780 | 543 | existing_uid, existing_gid = stat.st_uid, stat.st_gid | ||
2781 | 544 | except: | ||
2782 | 545 | pass | ||
2783 | 546 | if content != existing_content: | ||
2784 | 547 | log("Writing file {} {}:{} {:o}".format(path, owner, group, perms), | ||
2785 | 548 | level=DEBUG) | ||
2786 | 549 | with open(path, 'wb') as target: | ||
2787 | 550 | os.fchown(target.fileno(), uid, gid) | ||
2788 | 551 | os.fchmod(target.fileno(), perms) | ||
2789 | 552 | if six.PY3 and isinstance(content, six.string_types): | ||
2790 | 553 | content = content.encode('UTF-8') | ||
2791 | 554 | target.write(content) | ||
2792 | 555 | return | ||
2793 | 556 | # the contents were the same, but we might still need to change the | ||
2794 | 557 | # ownership. | ||
2795 | 558 | if existing_uid != uid: | ||
2796 | 559 | log("Changing uid on already existing content: {} -> {}" | ||
2797 | 560 | .format(existing_uid, uid), level=DEBUG) | ||
2798 | 561 | os.chown(path, uid, -1) | ||
2799 | 562 | if existing_gid != gid: | ||
2800 | 563 | log("Changing gid on already existing content: {} -> {}" | ||
2801 | 564 | .format(existing_gid, gid), level=DEBUG) | ||
2802 | 565 | os.chown(path, -1, gid) | ||
2803 | 566 | |||
2804 | 567 | |||
2805 | 568 | def fstab_remove(mp): | ||
2806 | 569 | """Remove the given mountpoint entry from /etc/fstab""" | ||
2807 | 570 | return Fstab.remove_by_mountpoint(mp) | ||
2808 | 136 | 571 | ||
2809 | 137 | 572 | ||
2812 | 138 | def mount(device, mountpoint, options=None, persist=False): | 573 | def fstab_add(dev, mp, fs, options=None): |
2813 | 139 | '''Mount a filesystem''' | 574 | """Adds the given device entry to the /etc/fstab file""" |
2814 | 575 | return Fstab.add(dev, mp, fs, options=options) | ||
2815 | 576 | |||
2816 | 577 | |||
2817 | 578 | def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): | ||
2818 | 579 | """Mount a filesystem at a particular mountpoint""" | ||
2819 | 140 | cmd_args = ['mount'] | 580 | cmd_args = ['mount'] |
2820 | 141 | if options is not None: | 581 | if options is not None: |
2821 | 142 | cmd_args.extend(['-o', options]) | 582 | cmd_args.extend(['-o', options]) |
2822 | 143 | cmd_args.extend([device, mountpoint]) | 583 | cmd_args.extend([device, mountpoint]) |
2823 | 144 | try: | 584 | try: |
2824 | 145 | subprocess.check_output(cmd_args) | 585 | subprocess.check_output(cmd_args) |
2826 | 146 | except subprocess.CalledProcessError, e: | 586 | except subprocess.CalledProcessError as e: |
2827 | 147 | log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) | 587 | log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) |
2828 | 148 | return False | 588 | return False |
2829 | 589 | |||
2830 | 149 | if persist: | 590 | if persist: |
2833 | 150 | # TODO: update fstab | 591 | return fstab_add(device, mountpoint, filesystem, options=options) |
2832 | 151 | pass | ||
2834 | 152 | return True | 592 | return True |
2835 | 153 | 593 | ||
2836 | 154 | 594 | ||
2837 | 155 | def umount(mountpoint, persist=False): | 595 | def umount(mountpoint, persist=False): |
2839 | 156 | '''Unmount a filesystem''' | 596 | """Unmount a filesystem""" |
2840 | 157 | cmd_args = ['umount', mountpoint] | 597 | cmd_args = ['umount', mountpoint] |
2841 | 158 | try: | 598 | try: |
2842 | 159 | subprocess.check_output(cmd_args) | 599 | subprocess.check_output(cmd_args) |
2844 | 160 | except subprocess.CalledProcessError, e: | 600 | except subprocess.CalledProcessError as e: |
2845 | 161 | log('Error unmounting {}\n{}'.format(mountpoint, e.output)) | 601 | log('Error unmounting {}\n{}'.format(mountpoint, e.output)) |
2846 | 162 | return False | 602 | return False |
2847 | 603 | |||
2848 | 163 | if persist: | 604 | if persist: |
2851 | 164 | # TODO: update fstab | 605 | return fstab_remove(mountpoint) |
2850 | 165 | pass | ||
2852 | 166 | return True | 606 | return True |
2853 | 167 | 607 | ||
2854 | 168 | 608 | ||
2855 | 169 | def mounts(): | 609 | def mounts(): |
2857 | 170 | '''List of all mounted volumes as [[mountpoint,device],[...]]''' | 610 | """Get a list of all mounted volumes as [[mountpoint,device],[...]]""" |
2858 | 171 | with open('/proc/mounts') as f: | 611 | with open('/proc/mounts') as f: |
2859 | 172 | # [['/mount/point','/dev/path'],[...]] | 612 | # [['/mount/point','/dev/path'],[...]] |
2860 | 173 | system_mounts = [m[1::-1] for m in [l.strip().split() | 613 | system_mounts = [m[1::-1] for m in [l.strip().split() |
2861 | @@ -175,65 +615,428 @@ def mounts(): | |||
2862 | 175 | return system_mounts | 615 | return system_mounts |
2863 | 176 | 616 | ||
2864 | 177 | 617 | ||
2867 | 178 | def file_hash(path): | 618 | def fstab_mount(mountpoint): |
2868 | 179 | ''' Generate a md5 hash of the contents of 'path' or None if not found ''' | 619 | """Mount filesystem using fstab""" |
2869 | 620 | cmd_args = ['mount', mountpoint] | ||
2870 | 621 | try: | ||
2871 | 622 | subprocess.check_output(cmd_args) | ||
2872 | 623 | except subprocess.CalledProcessError as e: | ||
2873 | 624 | log('Error unmounting {}\n{}'.format(mountpoint, e.output)) | ||
2874 | 625 | return False | ||
2875 | 626 | return True | ||
2876 | 627 | |||
2877 | 628 | |||
2878 | 629 | def file_hash(path, hash_type='md5'): | ||
2879 | 630 | """Generate a hash checksum of the contents of 'path' or None if not found. | ||
2880 | 631 | |||
2881 | 632 | :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`, | ||
2882 | 633 | such as md5, sha1, sha256, sha512, etc. | ||
2883 | 634 | """ | ||
2884 | 180 | if os.path.exists(path): | 635 | if os.path.exists(path): |
2888 | 181 | h = hashlib.md5() | 636 | h = getattr(hashlib, hash_type)() |
2889 | 182 | with open(path, 'r') as source: | 637 | with open(path, 'rb') as source: |
2890 | 183 | h.update(source.read()) # IGNORE:E1101 - it does have update | 638 | h.update(source.read()) |
2891 | 184 | return h.hexdigest() | 639 | return h.hexdigest() |
2892 | 185 | else: | 640 | else: |
2893 | 186 | return None | 641 | return None |
2894 | 187 | 642 | ||
2895 | 188 | 643 | ||
2898 | 189 | def restart_on_change(restart_map): | 644 | def path_hash(path): |
2899 | 190 | ''' Restart services based on configuration files changing | 645 | """Generate a hash checksum of all files matching 'path'. Standard |
2900 | 646 | wildcards like '*' and '?' are supported, see documentation for the 'glob' | ||
2901 | 647 | module for more information. | ||
2902 | 648 | |||
2903 | 649 | :return: dict: A { filename: hash } dictionary for all matched files. | ||
2904 | 650 | Empty if none found. | ||
2905 | 651 | """ | ||
2906 | 652 | return { | ||
2907 | 653 | filename: file_hash(filename) | ||
2908 | 654 | for filename in glob.iglob(path) | ||
2909 | 655 | } | ||
2910 | 656 | |||
2911 | 191 | 657 | ||
2913 | 192 | This function is used a decorator, for example | 658 | def check_hash(path, checksum, hash_type='md5'): |
2914 | 659 | """Validate a file using a cryptographic checksum. | ||
2915 | 660 | |||
2916 | 661 | :param str checksum: Value of the checksum used to validate the file. | ||
2917 | 662 | :param str hash_type: Hash algorithm used to generate `checksum`. | ||
2918 | 663 | Can be any hash alrgorithm supported by :mod:`hashlib`, | ||
2919 | 664 | such as md5, sha1, sha256, sha512, etc. | ||
2920 | 665 | :raises ChecksumError: If the file fails the checksum | ||
2921 | 666 | |||
2922 | 667 | """ | ||
2923 | 668 | actual_checksum = file_hash(path, hash_type) | ||
2924 | 669 | if checksum != actual_checksum: | ||
2925 | 670 | raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum)) | ||
2926 | 671 | |||
2927 | 672 | |||
2928 | 673 | class ChecksumError(ValueError): | ||
2929 | 674 | """A class derived from Value error to indicate the checksum failed.""" | ||
2930 | 675 | pass | ||
2931 | 676 | |||
2932 | 677 | |||
2933 | 678 | def restart_on_change(restart_map, stopstart=False, restart_functions=None): | ||
2934 | 679 | """Restart services based on configuration files changing | ||
2935 | 680 | |||
2936 | 681 | This function is used a decorator, for example:: | ||
2937 | 193 | 682 | ||
2938 | 194 | @restart_on_change({ | 683 | @restart_on_change({ |
2939 | 195 | '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] | 684 | '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] |
2940 | 685 | '/etc/apache/sites-enabled/*': [ 'apache2' ] | ||
2941 | 196 | }) | 686 | }) |
2944 | 197 | def ceph_client_changed(): | 687 | def config_changed(): |
2945 | 198 | ... | 688 | pass # your code here |
2946 | 199 | 689 | ||
2947 | 200 | In this example, the cinder-api and cinder-volume services | 690 | In this example, the cinder-api and cinder-volume services |
2948 | 201 | would be restarted if /etc/ceph/ceph.conf is changed by the | 691 | would be restarted if /etc/ceph/ceph.conf is changed by the |
2951 | 202 | ceph_client_changed function. | 692 | ceph_client_changed function. The apache2 service would be |
2952 | 203 | ''' | 693 | restarted if any file matching the pattern got changed, created |
2953 | 694 | or removed. Standard wildcards are supported, see documentation | ||
2954 | 695 | for the 'glob' module for more information. | ||
2955 | 696 | |||
2956 | 697 | @param restart_map: {path_file_name: [service_name, ...] | ||
2957 | 698 | @param stopstart: DEFAULT false; whether to stop, start OR restart | ||
2958 | 699 | @param restart_functions: nonstandard functions to use to restart services | ||
2959 | 700 | {svc: func, ...} | ||
2960 | 701 | @returns result from decorated function | ||
2961 | 702 | """ | ||
2962 | 204 | def wrap(f): | 703 | def wrap(f): |
2974 | 205 | def wrapped_f(*args): | 704 | @functools.wraps(f) |
2975 | 206 | checksums = {} | 705 | def wrapped_f(*args, **kwargs): |
2976 | 207 | for path in restart_map: | 706 | return restart_on_change_helper( |
2977 | 208 | checksums[path] = file_hash(path) | 707 | (lambda: f(*args, **kwargs)), restart_map, stopstart, |
2978 | 209 | f(*args) | 708 | restart_functions) |
2968 | 210 | restarts = [] | ||
2969 | 211 | for path in restart_map: | ||
2970 | 212 | if checksums[path] != file_hash(path): | ||
2971 | 213 | restarts += restart_map[path] | ||
2972 | 214 | for service_name in list(OrderedDict.fromkeys(restarts)): | ||
2973 | 215 | service('restart', service_name) | ||
2979 | 216 | return wrapped_f | 709 | return wrapped_f |
2980 | 217 | return wrap | 710 | return wrap |
2981 | 218 | 711 | ||
2982 | 219 | 712 | ||
2991 | 220 | def lsb_release(): | 713 | def restart_on_change_helper(lambda_f, restart_map, stopstart=False, |
2992 | 221 | '''Return /etc/lsb-release in a dict''' | 714 | restart_functions=None): |
2993 | 222 | d = {} | 715 | """Helper function to perform the restart_on_change function. |
2994 | 223 | with open('/etc/lsb-release', 'r') as lsb: | 716 | |
2995 | 224 | for l in lsb: | 717 | This is provided for decorators to restart services if files described |
2996 | 225 | k, v = l.split('=') | 718 | in the restart_map have changed after an invocation of lambda_f(). |
2997 | 226 | d[k.strip()] = v.strip() | 719 | |
2998 | 227 | return d | 720 | @param lambda_f: function to call. |
2999 | 721 | @param restart_map: {file: [service, ...]} | ||
3000 | 722 | @param stopstart: whether to stop, start or restart a service | ||
3001 | 723 | @param restart_functions: nonstandard functions to use to restart services | ||
3002 | 724 | {svc: func, ...} | ||
3003 | 725 | @returns result of lambda_f() | ||
3004 | 726 | """ | ||
3005 | 727 | if restart_functions is None: | ||
3006 | 728 | restart_functions = {} | ||
3007 | 729 | checksums = {path: path_hash(path) for path in restart_map} | ||
3008 | 730 | r = lambda_f() | ||
3009 | 731 | # create a list of lists of the services to restart | ||
3010 | 732 | restarts = [restart_map[path] | ||
3011 | 733 | for path in restart_map | ||
3012 | 734 | if path_hash(path) != checksums[path]] | ||
3013 | 735 | # create a flat list of ordered services without duplicates from lists | ||
3014 | 736 | services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts))) | ||
3015 | 737 | if services_list: | ||
3016 | 738 | actions = ('stop', 'start') if stopstart else ('restart',) | ||
3017 | 739 | for service_name in services_list: | ||
3018 | 740 | if service_name in restart_functions: | ||
3019 | 741 | restart_functions[service_name](service_name) | ||
3020 | 742 | else: | ||
3021 | 743 | for action in actions: | ||
3022 | 744 | service(action, service_name) | ||
3023 | 745 | return r | ||
3024 | 228 | 746 | ||
3025 | 229 | 747 | ||
3026 | 230 | def pwgen(length=None): | 748 | def pwgen(length=None): |
3028 | 231 | '''Generate a random pasword.''' | 749 | """Generate a random pasword.""" |
3029 | 232 | if length is None: | 750 | if length is None: |
3030 | 751 | # A random length is ok to use a weak PRNG | ||
3031 | 233 | length = random.choice(range(35, 45)) | 752 | length = random.choice(range(35, 45)) |
3032 | 234 | alphanumeric_chars = [ | 753 | alphanumeric_chars = [ |
3034 | 235 | l for l in (string.letters + string.digits) | 754 | l for l in (string.ascii_letters + string.digits) |
3035 | 236 | if l not in 'l0QD1vAEIOUaeiou'] | 755 | if l not in 'l0QD1vAEIOUaeiou'] |
3036 | 756 | # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the | ||
3037 | 757 | # actual password | ||
3038 | 758 | random_generator = random.SystemRandom() | ||
3039 | 237 | random_chars = [ | 759 | random_chars = [ |
3041 | 238 | random.choice(alphanumeric_chars) for _ in range(length)] | 760 | random_generator.choice(alphanumeric_chars) for _ in range(length)] |
3042 | 239 | return(''.join(random_chars)) | 761 | return(''.join(random_chars)) |
3043 | 762 | |||
3044 | 763 | |||
3045 | 764 | def is_phy_iface(interface): | ||
3046 | 765 | """Returns True if interface is not virtual, otherwise False.""" | ||
3047 | 766 | if interface: | ||
3048 | 767 | sys_net = '/sys/class/net' | ||
3049 | 768 | if os.path.isdir(sys_net): | ||
3050 | 769 | for iface in glob.glob(os.path.join(sys_net, '*')): | ||
3051 | 770 | if '/virtual/' in os.path.realpath(iface): | ||
3052 | 771 | continue | ||
3053 | 772 | |||
3054 | 773 | if interface == os.path.basename(iface): | ||
3055 | 774 | return True | ||
3056 | 775 | |||
3057 | 776 | return False | ||
3058 | 777 | |||
3059 | 778 | |||
3060 | 779 | def get_bond_master(interface): | ||
3061 | 780 | """Returns bond master if interface is bond slave otherwise None. | ||
3062 | 781 | |||
3063 | 782 | NOTE: the provided interface is expected to be physical | ||
3064 | 783 | """ | ||
3065 | 784 | if interface: | ||
3066 | 785 | iface_path = '/sys/class/net/%s' % (interface) | ||
3067 | 786 | if os.path.exists(iface_path): | ||
3068 | 787 | if '/virtual/' in os.path.realpath(iface_path): | ||
3069 | 788 | return None | ||
3070 | 789 | |||
3071 | 790 | master = os.path.join(iface_path, 'master') | ||
3072 | 791 | if os.path.exists(master): | ||
3073 | 792 | master = os.path.realpath(master) | ||
3074 | 793 | # make sure it is a bond master | ||
3075 | 794 | if os.path.exists(os.path.join(master, 'bonding')): | ||
3076 | 795 | return os.path.basename(master) | ||
3077 | 796 | |||
3078 | 797 | return None | ||
3079 | 798 | |||
3080 | 799 | |||
3081 | 800 | def list_nics(nic_type=None): | ||
3082 | 801 | """Return a list of nics of given type(s)""" | ||
3083 | 802 | if isinstance(nic_type, six.string_types): | ||
3084 | 803 | int_types = [nic_type] | ||
3085 | 804 | else: | ||
3086 | 805 | int_types = nic_type | ||
3087 | 806 | |||
3088 | 807 | interfaces = [] | ||
3089 | 808 | if nic_type: | ||
3090 | 809 | for int_type in int_types: | ||
3091 | 810 | cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] | ||
3092 | 811 | ip_output = subprocess.check_output(cmd).decode('UTF-8') | ||
3093 | 812 | ip_output = ip_output.split('\n') | ||
3094 | 813 | ip_output = (line for line in ip_output if line) | ||
3095 | 814 | for line in ip_output: | ||
3096 | 815 | if line.split()[1].startswith(int_type): | ||
3097 | 816 | matched = re.search('.*: (' + int_type + | ||
3098 | 817 | r'[0-9]+\.[0-9]+)@.*', line) | ||
3099 | 818 | if matched: | ||
3100 | 819 | iface = matched.groups()[0] | ||
3101 | 820 | else: | ||
3102 | 821 | iface = line.split()[1].replace(":", "") | ||
3103 | 822 | |||
3104 | 823 | if iface not in interfaces: | ||
3105 | 824 | interfaces.append(iface) | ||
3106 | 825 | else: | ||
3107 | 826 | cmd = ['ip', 'a'] | ||
3108 | 827 | ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') | ||
3109 | 828 | ip_output = (line.strip() for line in ip_output if line) | ||
3110 | 829 | |||
3111 | 830 | key = re.compile('^[0-9]+:\s+(.+):') | ||
3112 | 831 | for line in ip_output: | ||
3113 | 832 | matched = re.search(key, line) | ||
3114 | 833 | if matched: | ||
3115 | 834 | iface = matched.group(1) | ||
3116 | 835 | iface = iface.partition("@")[0] | ||
3117 | 836 | if iface not in interfaces: | ||
3118 | 837 | interfaces.append(iface) | ||
3119 | 838 | |||
3120 | 839 | return interfaces | ||
3121 | 840 | |||
3122 | 841 | |||
3123 | 842 | def set_nic_mtu(nic, mtu): | ||
3124 | 843 | """Set the Maximum Transmission Unit (MTU) on a network interface.""" | ||
3125 | 844 | cmd = ['ip', 'link', 'set', nic, 'mtu', mtu] | ||
3126 | 845 | subprocess.check_call(cmd) | ||
3127 | 846 | |||
3128 | 847 | |||
3129 | 848 | def get_nic_mtu(nic): | ||
3130 | 849 | """Return the Maximum Transmission Unit (MTU) for a network interface.""" | ||
3131 | 850 | cmd = ['ip', 'addr', 'show', nic] | ||
3132 | 851 | ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') | ||
3133 | 852 | mtu = "" | ||
3134 | 853 | for line in ip_output: | ||
3135 | 854 | words = line.split() | ||
3136 | 855 | if 'mtu' in words: | ||
3137 | 856 | mtu = words[words.index("mtu") + 1] | ||
3138 | 857 | return mtu | ||
3139 | 858 | |||
3140 | 859 | |||
3141 | 860 | def get_nic_hwaddr(nic): | ||
3142 | 861 | """Return the Media Access Control (MAC) for a network interface.""" | ||
3143 | 862 | cmd = ['ip', '-o', '-0', 'addr', 'show', nic] | ||
3144 | 863 | ip_output = subprocess.check_output(cmd).decode('UTF-8') | ||
3145 | 864 | hwaddr = "" | ||
3146 | 865 | words = ip_output.split() | ||
3147 | 866 | if 'link/ether' in words: | ||
3148 | 867 | hwaddr = words[words.index('link/ether') + 1] | ||
3149 | 868 | return hwaddr | ||
3150 | 869 | |||
3151 | 870 | |||
@contextmanager
def chdir(directory):
    """Context manager that temporarily changes the working directory.

    Switches the process working directory to ``directory`` on entry and
    restores the previous directory on exit, whether the block completes
    normally or raises. Useful to run commands from a specified directory.

    :param str directory: The directory path to change to for this context.
    """
    previous = os.getcwd()
    try:
        # os.chdir returns None, so the 'as' target is always None.
        yield os.chdir(directory)
    finally:
        os.chdir(previous)
3165 | 884 | |||
3166 | 885 | |||
def chownr(path, owner, group, follow_links=True, chowntopdir=False):
    """Recursively change user and group ownership of files and directories
    in given path. Doesn't chown path itself by default, only its children.

    :param str path: The string path to start changing ownership.
    :param str owner: The owner string to use when looking up the uid.
    :param str group: The group string to use when looking up the gid.
    :param bool follow_links: Also follow and chown links if True
    :param bool chowntopdir: Also chown path itself if True
    """
    uid = pwd.getpwnam(owner).pw_uid
    gid = grp.getgrnam(group).gr_gid
    # os.chown follows symlinks; os.lchown changes the link itself.
    chown = os.chown if follow_links else os.lchown

    def _chown_unless_broken(target):
        # A broken symlink lexists() but does not exists(); skip those
        # to avoid chown raising on a dangling target.
        if os.path.exists(target) or not os.path.lexists(target):
            chown(target, uid, gid)

    if chowntopdir:
        _chown_unless_broken(path)
    for root, dirs, files in os.walk(path, followlinks=follow_links):
        for name in dirs + files:
            _chown_unless_broken(os.path.join(root, name))
3194 | 913 | |||
3195 | 914 | |||
def lchownr(path, owner, group):
    """Recursively change ownership under ``path`` without following
    symbolic links (thin wrapper over chownr; see 'os.lchown' docs).

    :param str path: The string path to start changing ownership.
    :param str owner: The owner string to use when looking up the uid.
    :param str group: The group string to use when looking up the gid.
    """
    chownr(path, owner, group, follow_links=False)
3206 | 925 | |||
3207 | 926 | |||
def owner(path):
    """Returns a tuple containing the username & groupname owning the path.

    :param str path: the string path to retrieve the ownership
    :return tuple(str, str): A (username, groupname) tuple containing the
             name of the user and group owning the path.
    :raises OSError: if the specified path does not exist
    """
    info = os.stat(path)
    username = pwd.getpwuid(info.st_uid).pw_name
    groupname = grp.getgrgid(info.st_gid).gr_name
    return username, groupname
3220 | 939 | |||
3221 | 940 | |||
def get_total_ram():
    """The total amount of system RAM in bytes.

    This is what is reported by the OS, and may be overcommitted when
    there are multiple containers hosted on the same machine.

    :raises NotImplementedError: if MemTotal is not found in /proc/meminfo.
    """
    with open('/proc/meminfo', 'r') as meminfo:
        for entry in meminfo:
            if not entry:
                continue
            # NOTE(review): three-way unpack assumes every scanned line
            # has a unit token; MemTotal is conventionally the first
            # line so the return fires before any two-token line.
            key, value, unit = entry.split()
            if key == 'MemTotal:':
                assert unit == 'kB', 'Unknown unit'
                return int(value) * 1024  # Classic, not KiB.
    raise NotImplementedError()
3236 | 955 | |||
3237 | 956 | |||
UPSTART_CONTAINER_TYPE = '/run/container_type'


def is_container():
    """Determine whether unit is running in a container

    @return: boolean indicating if unit is in a container
    """
    if not init_is_systemd():
        # Upstart marks containers with a file under /run.
        return os.path.exists(UPSTART_CONTAINER_TYPE)
    # systemd-detect-virt exits 0 when running inside a container.
    return subprocess.call(['systemd-detect-virt', '--container']) == 0
3253 | 972 | |||
3254 | 973 | |||
def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH):
    """Adds the specified path to the mlocate's updatedb.conf PRUNEPATH list.

    This method has no effect if the path specified by updatedb_path does not
    exist or is not a file.

    @param path: string the path to add to the updatedb.conf PRUNEPATHS value
    @param updatedb_path: the path to the updatedb.conf file
    """
    # Bail out silently when updatedb.conf is absent or not a regular
    # file: the package providing mlocate may simply not be installed.
    if os.path.isdir(updatedb_path) or not os.path.exists(updatedb_path):
        return

    with open(updatedb_path, 'r+') as conf:
        current_text = conf.read()
        rewritten = updatedb(current_text, path)
        # Rewrite in place: rewind, write, then truncate any leftover
        # bytes from the (possibly longer) original content.
        conf.seek(0)
        conf.write(rewritten)
        conf.truncate()
3276 | 995 | |||
3277 | 996 | |||
def updatedb(updatedb_text, new_path):
    """Return ``updatedb_text`` with ``new_path`` added to PRUNEPATHS.

    Every line starting with 'PRUNEPATHS=' is rewritten in normalised
    quoted form; ``new_path`` is appended only when not already present.
    All other lines pass through unchanged.

    :param str updatedb_text: Full contents of an updatedb.conf file.
    :param str new_path: Path to add to the PRUNEPATHS value.
    :return str: The rewritten file contents.
    """
    lines = updatedb_text.split("\n")
    for idx, entry in enumerate(lines):
        if not entry.startswith("PRUNEPATHS="):
            continue
        paths = entry.split("=")[1].replace('"', '').split(" ")
        if new_path not in paths:
            paths.append(new_path)
        lines[idx] = 'PRUNEPATHS="{}"'.format(' '.join(paths))
    return "\n".join(lines)
3289 | 1008 | |||
3290 | 1009 | |||
def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
    """Calculate a staggered wait time from the unit number.

    Groups units by ``unit_number % modulo`` and gives each group a wait
    of ``group * wait`` seconds. Useful in large-scale deployments to
    spread load during expensive operations such as service restarts:

        time.wait(modulo_distribution(modulo=100, wait=60))
        restart()

    For serial restarts, set modulo to the exact number of nodes and use
    a high constant wait time.

    @param modulo: int The modulo number creates the group distribution
    @param wait: int The constant time wait value
    @param non_zero_wait: boolean When True, units whose group is 0
                          return modulo * wait instead of 0. Used to
                          avoid collisions with leader nodes which are
                          often given priority.
    @return: int Calculated time to wait for unit operation
    """
    unit_number = int(local_unit().split('/')[1])
    offset = (unit_number % modulo) * wait
    if offset == 0 and non_zero_wait:
        return modulo * wait
    return offset
3324 | diff --git a/hooks/charmhelpers/core/host_factory/__init__.py b/hooks/charmhelpers/core/host_factory/__init__.py | |||
3325 | 240 | new file mode 100644 | 1043 | new file mode 100644 |
3326 | index 0000000..e69de29 | |||
3327 | --- /dev/null | |||
3328 | +++ b/hooks/charmhelpers/core/host_factory/__init__.py | |||
3329 | diff --git a/hooks/charmhelpers/core/host_factory/centos.py b/hooks/charmhelpers/core/host_factory/centos.py | |||
3330 | 241 | new file mode 100644 | 1044 | new file mode 100644 |
3331 | index 0000000..7781a39 | |||
3332 | --- /dev/null | |||
3333 | +++ b/hooks/charmhelpers/core/host_factory/centos.py | |||
3334 | @@ -0,0 +1,72 @@ | |||
3335 | 1 | import subprocess | ||
3336 | 2 | import yum | ||
3337 | 3 | import os | ||
3338 | 4 | |||
3339 | 5 | from charmhelpers.core.strutils import BasicStringComparator | ||
3340 | 6 | |||
3341 | 7 | |||
class CompareHostReleases(BasicStringComparator):
    """Stub comparator for CentOS host releases.

    Host release comparison is only implemented for Ubuntu; on CentOS
    any attempt to construct this comparator raises NotImplementedError.
    The class exists so that code importing CompareHostReleases from the
    platform-selected host_factory module fails loudly, not with an
    ImportError.
    """

    def __init__(self, item):
        # Deliberately unimplemented on CentOS.
        raise NotImplementedError(
            "CompareHostReleases() is not implemented for CentOS")
3354 | 20 | |||
3355 | 21 | |||
def service_available(service_name):
    """Determine whether a system service is available.

    :param str service_name: Name of the service to query.
    :return bool: True when the init system reports the service as
        enabled (exit status 0), False otherwise.
    """
    # The presence of /run/systemd/system is the documented way to
    # detect that systemd is the running init system.
    if os.path.isdir('/run/systemd/system'):
        cmd = ['systemctl', 'is-enabled', service_name]
    else:
        cmd = ['service', service_name, 'is-enabled']
    return subprocess.call(cmd) == 0
3363 | 29 | |||
3364 | 30 | |||
def add_new_group(group_name, system_group=False, gid=None):
    """Create a group via 'groupadd' (CentOS).

    :param str group_name: Name of the group to create.
    :param bool system_group: Create a system group ('-r') when True.
    :param gid: Optional numeric gid to assign ('--gid').
    """
    cmd = ['groupadd']
    if gid:
        cmd += ['--gid', str(gid)]
    if system_group:
        cmd += ['-r']
    subprocess.check_call(cmd + [group_name])
3373 | 39 | |||
3374 | 40 | |||
def lsb_release():
    """Return /etc/os-release in a dict.

    :return dict: Mapping of os-release keys to their stripped values.
        Blank or malformed lines (and lines whose value contains '=')
        are skipped.
    """
    release = {}
    with open('/etc/os-release', 'r') as os_release:
        for line in os_release:
            parts = line.split('=')
            if len(parts) != 2:
                continue
            key, value = parts
            release[key.strip()] = value.strip()
    return release
3385 | 51 | |||
3386 | 52 | |||
def cmp_pkgrevno(package, revno, pkgcache=None):
    """Compare supplied revno with the revno of the installed package.

    * 1 => Installed revno is greater than supplied arg
    * 0 => Installed revno is the same as supplied arg
    * -1 => Installed revno is less than supplied arg

    This function queries YumBase when the pkgcache argument is falsy.
    NOTE(review): the comparison is plain string comparison, so version
    strings with multi-digit components (e.g. '10' vs '9') may order
    unexpectedly — verify callers only pass comparable forms.
    """
    if not pkgcache:
        yb = yum.YumBase()
        listing = yb.doPackageLists()
        pkgcache = {item.Name: item.version for item in listing['installed']}
    installed = pkgcache[package]
    if installed > revno:
        return 1
    elif installed < revno:
        return -1
    return 0
3407 | diff --git a/hooks/charmhelpers/core/host_factory/ubuntu.py b/hooks/charmhelpers/core/host_factory/ubuntu.py | |||
3408 | 0 | new file mode 100644 | 73 | new file mode 100644 |
3409 | index 0000000..99451b5 | |||
3410 | --- /dev/null | |||
3411 | +++ b/hooks/charmhelpers/core/host_factory/ubuntu.py | |||
3412 | @@ -0,0 +1,90 @@ | |||
3413 | 1 | import subprocess | ||
3414 | 2 | |||
3415 | 3 | from charmhelpers.core.strutils import BasicStringComparator | ||
3416 | 4 | |||
3417 | 5 | |||
# Ubuntu release codenames in chronological order. The tuple's ordering
# is what CompareHostReleases (below) uses to compare releases, so new
# releases must be appended at the end.
UBUNTU_RELEASES = (
    'lucid',
    'maverick',
    'natty',
    'oneiric',
    'precise',
    'quantal',
    'raring',
    'saucy',
    'trusty',
    'utopic',
    'vivid',
    'wily',
    'xenial',
    'yakkety',
    'zesty',
    'artful',
    'bionic',
)
3437 | 25 | |||
3438 | 26 | |||
class CompareHostReleases(BasicStringComparator):
    """Provide comparisons of Ubuntu releases.

    Use in the form of

    if CompareHostReleases(release) > 'trusty':
        # do something with mitaka
    """
    # Ordering is positional within UBUNTU_RELEASES (oldest first).
    _list = UBUNTU_RELEASES
3448 | 36 | |||
3449 | 37 | |||
def service_available(service_name):
    """Determine whether a system service is available.

    :param str service_name: Name of the service to query.
    :return bool: True unless the init system reports the service as
        unrecognized.
    """
    cmd = ['service', service_name, 'status']
    try:
        subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8')
    except subprocess.CalledProcessError as err:
        # A non-zero exit alone is fine (the service may be stopped);
        # only an unknown service makes it unavailable. err.output is
        # bytes (only the success value above is decoded), hence b''.
        return b'unrecognized service' not in err.output
    return True
3460 | 48 | |||
3461 | 49 | |||
def add_new_group(group_name, system_group=False, gid=None):
    """Create a group via 'addgroup' (Ubuntu).

    :param str group_name: Name of the group to create.
    :param bool system_group: Create a system group when True, otherwise
        a regular ('--group') group.
    :param gid: Optional numeric gid to assign ('--gid').
    """
    cmd = ['addgroup']
    if gid:
        cmd += ['--gid', str(gid)]
    # '--system' and '--group' select mutually exclusive addgroup modes.
    cmd.append('--system' if system_group else '--group')
    cmd.append(group_name)
    subprocess.check_call(cmd)
3474 | 62 | |||
3475 | 63 | |||
def lsb_release():
    """Return /etc/lsb-release in a dict.

    :return dict: Mapping of DISTRIB_* keys to their stripped values.
    :raises ValueError: if a line contains no '=' at all (matches the
        historical behaviour for malformed files).
    """
    d = {}
    with open('/etc/lsb-release', 'r') as lsb:
        for line in lsb:
            # Split on the first '=' only, so values that themselves
            # contain '=' (possible in DISTRIB_DESCRIPTION) don't raise.
            k, v = line.split('=', 1)
            d[k.strip()] = v.strip()
    return d
3484 | 72 | |||
3485 | 73 | |||
def cmp_pkgrevno(package, revno, pkgcache=None):
    """Compare supplied revno with the revno of the installed package.

    * 1 => Installed revno is greater than supplied arg
    * 0 => Installed revno is the same as supplied arg
    * -1 => Installed revno is less than supplied arg

    This function imports apt_cache function from charmhelpers.fetch if
    the pkgcache argument is None. Be sure to add charmhelpers.fetch if
    you call this function, or pass an apt_pkg.Cache() instance.
    """
    import apt_pkg
    if not pkgcache:
        # Deferred import: charmhelpers.fetch pulls in apt machinery
        # that is only needed when no cache was supplied.
        from charmhelpers.fetch import apt_cache
        pkgcache = apt_cache()
    installed_version = pkgcache[package].current_ver.ver_str
    return apt_pkg.version_compare(installed_version, revno)
3503 | diff --git a/hooks/charmhelpers/core/hugepage.py b/hooks/charmhelpers/core/hugepage.py | |||
3504 | 0 | new file mode 100644 | 91 | new file mode 100644 |
3505 | index 0000000..54b5b5e | |||
3506 | --- /dev/null | |||
3507 | +++ b/hooks/charmhelpers/core/hugepage.py | |||
3508 | @@ -0,0 +1,69 @@ | |||
3509 | 1 | # -*- coding: utf-8 -*- | ||
3510 | 2 | |||
3511 | 3 | # Copyright 2014-2015 Canonical Limited. | ||
3512 | 4 | # | ||
3513 | 5 | # Licensed under the Apache License, Version 2.0 (the "License"); | ||
3514 | 6 | # you may not use this file except in compliance with the License. | ||
3515 | 7 | # You may obtain a copy of the License at | ||
3516 | 8 | # | ||
3517 | 9 | # http://www.apache.org/licenses/LICENSE-2.0 | ||
3518 | 10 | # | ||
3519 | 11 | # Unless required by applicable law or agreed to in writing, software | ||
3520 | 12 | # distributed under the License is distributed on an "AS IS" BASIS, | ||
3521 | 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
3522 | 14 | # See the License for the specific language governing permissions and | ||
3523 | 15 | # limitations under the License. | ||
3524 | 16 | |||
3525 | 17 | import yaml | ||
3526 | 18 | from charmhelpers.core import fstab | ||
3527 | 19 | from charmhelpers.core import sysctl | ||
3528 | 20 | from charmhelpers.core.host import ( | ||
3529 | 21 | add_group, | ||
3530 | 22 | add_user_to_group, | ||
3531 | 23 | fstab_mount, | ||
3532 | 24 | mkdir, | ||
3533 | 25 | ) | ||
3534 | 26 | from charmhelpers.core.strutils import bytes_from_string | ||
3535 | 27 | from subprocess import check_output | ||
3536 | 28 | |||
3537 | 29 | |||
def hugepage_support(user, group='hugetlb', nr_hugepages=256,
                     max_map_count=65536, mnt_point='/run/hugepages/kvm',
                     pagesize='2MB', mount=True, set_shmmax=False):
    """Enable hugepages on system.

    Creates the owning group, reserves pages via sysctl, and sets up a
    hugetlbfs mount in /etc/fstab (optionally mounting it immediately).

    Args:
    user (str) -- Username to allow access to hugepages to
    group (str) -- Group name to own hugepages
    nr_hugepages (int) -- Number of pages to reserve
    max_map_count (int) -- Number of Virtual Memory Areas a process can own
    mnt_point (str) -- Directory to mount hugepages on
    pagesize (str) -- Size of hugepages
    mount (bool) -- Whether to Mount hugepages
    set_shmmax (bool) -- Raise kernel.shmmax when it is smaller than
        nr_hugepages * pagesize, so the reserved pages fit in a single
        shared memory segment
    """
    group_info = add_group(group)
    gid = group_info.gr_gid
    add_user_to_group(user, group)
    # Ensure the VMA limit covers at least two areas per reserved page.
    # NOTE(review): the 2x factor is inherited from upstream
    # charm-helpers; confirm against kernel hugepage documentation.
    if max_map_count < 2 * nr_hugepages:
        max_map_count = 2 * nr_hugepages
    sysctl_settings = {
        'vm.nr_hugepages': nr_hugepages,
        'vm.max_map_count': max_map_count,
        'vm.hugetlb_shm_group': gid,
    }
    if set_shmmax:
        shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
        shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
        # Only ever raise shmmax; never lower the current setting.
        if shmmax_minsize > shmmax_current:
            sysctl_settings['kernel.shmmax'] = shmmax_minsize
    # Persist the settings to a sysctl.d drop-in (applied immediately by
    # sysctl.create) before creating and mounting the hugetlbfs mount.
    sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
    mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
    lfstab = fstab.Fstab()
    # Replace any stale fstab entry for the mount point so gid/pagesize
    # options reflect the current call.
    fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
    if fstab_entry:
        lfstab.remove_entry(fstab_entry)
    entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
                         'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
    lfstab.add_entry(entry)
    if mount:
        fstab_mount(mnt_point)
3578 | diff --git a/hooks/charmhelpers/core/kernel.py b/hooks/charmhelpers/core/kernel.py | |||
3579 | 0 | new file mode 100644 | 70 | new file mode 100644 |
3580 | index 0000000..2d40452 | |||
3581 | --- /dev/null | |||
3582 | +++ b/hooks/charmhelpers/core/kernel.py | |||
3583 | @@ -0,0 +1,72 @@ | |||
3584 | 1 | #!/usr/bin/env python | ||
3585 | 2 | # -*- coding: utf-8 -*- | ||
3586 | 3 | |||
3587 | 4 | # Copyright 2014-2015 Canonical Limited. | ||
3588 | 5 | # | ||
3589 | 6 | # Licensed under the Apache License, Version 2.0 (the "License"); | ||
3590 | 7 | # you may not use this file except in compliance with the License. | ||
3591 | 8 | # You may obtain a copy of the License at | ||
3592 | 9 | # | ||
3593 | 10 | # http://www.apache.org/licenses/LICENSE-2.0 | ||
3594 | 11 | # | ||
3595 | 12 | # Unless required by applicable law or agreed to in writing, software | ||
3596 | 13 | # distributed under the License is distributed on an "AS IS" BASIS, | ||
3597 | 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
3598 | 15 | # See the License for the specific language governing permissions and | ||
3599 | 16 | # limitations under the License. | ||
3600 | 17 | |||
3601 | 18 | import re | ||
3602 | 19 | import subprocess | ||
3603 | 20 | |||
3604 | 21 | from charmhelpers.osplatform import get_platform | ||
3605 | 22 | from charmhelpers.core.hookenv import ( | ||
3606 | 23 | log, | ||
3607 | 24 | INFO | ||
3608 | 25 | ) | ||
3609 | 26 | |||
# Select the platform-specific backends at import time so the module's
# modprobe()/update_initramfs() transparently dispatch to the right
# implementation for the detected OS.
__platform__ = get_platform()
if __platform__ == "ubuntu":
    from charmhelpers.core.kernel_factory.ubuntu import (
        persistent_modprobe,
        update_initramfs,
    )  # flake8: noqa -- ignore F401 for this import
elif __platform__ == "centos":
    from charmhelpers.core.kernel_factory.centos import (
        persistent_modprobe,
        update_initramfs,
    )  # flake8: noqa -- ignore F401 for this import

__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
3623 | 40 | |||
3624 | 41 | |||
def modprobe(module, persist=True):
    """Load a kernel module and configure for auto-load on reboot.

    :param str module: Name of the kernel module to load.
    :param bool persist: Also register the module for load at boot.
    :raises subprocess.CalledProcessError: if modprobe fails.
    """
    log('Loading kernel module %s' % module, level=INFO)
    subprocess.check_call(['modprobe', module])
    if persist:
        persistent_modprobe(module)
3634 | 51 | |||
3635 | 52 | |||
def rmmod(module, force=False):
    """Remove a module from the linux kernel.

    :param str module: Name of the kernel module to remove.
    :param bool force: Pass '-f' to rmmod when True.
    :return int: Zero on success (check_call raises otherwise).
    """
    log('Removing kernel module %s' % module, level=INFO)
    cmd = ['rmmod', '-f', module] if force else ['rmmod', module]
    return subprocess.check_call(cmd)
3644 | 61 | |||
3645 | 62 | |||
def lsmod():
    """Shows what kernel modules are currently loaded.

    :return str: Raw text output of the 'lsmod' command.
    """
    # universal_newlines=True makes check_output return str, not bytes.
    return subprocess.check_output(['lsmod'],
                                   universal_newlines=True)
3650 | 67 | |||
3651 | 68 | |||
def is_module_loaded(module):
    """Checks if a kernel module is already loaded.

    :param str module: Name of the kernel module to look for.
    :return bool: True when the module appears in lsmod output.
    """
    # Module names start an lsmod line and are followed by whitespace;
    # re.M anchors '^' at every line of the output.
    return re.search(r'^%s[ ]+' % module, lsmod(), re.M) is not None
3656 | diff --git a/hooks/charmhelpers/core/kernel_factory/__init__.py b/hooks/charmhelpers/core/kernel_factory/__init__.py | |||
3657 | 0 | new file mode 100644 | 73 | new file mode 100644 |
3658 | index 0000000..e69de29 | |||
3659 | --- /dev/null | |||
3660 | +++ b/hooks/charmhelpers/core/kernel_factory/__init__.py | |||
3661 | diff --git a/hooks/charmhelpers/core/kernel_factory/centos.py b/hooks/charmhelpers/core/kernel_factory/centos.py | |||
3662 | 1 | new file mode 100644 | 74 | new file mode 100644 |
3663 | index 0000000..1c402c1 | |||
3664 | --- /dev/null | |||
3665 | +++ b/hooks/charmhelpers/core/kernel_factory/centos.py | |||
3666 | @@ -0,0 +1,17 @@ | |||
3667 | 1 | import subprocess | ||
3668 | 2 | import os | ||
3669 | 3 | |||
3670 | 4 | |||
def persistent_modprobe(module):
    """Load a kernel module and configure for auto-load on reboot.

    Appends a 'modprobe <module>' line to /etc/rc.modules (creating the
    file with execute permission if needed) so the module is loaded at
    boot on CentOS.

    :param str module: Name of the kernel module to persist.
    """
    if not os.path.exists('/etc/rc.modules'):
        # Close the handle instead of leaking it.
        open('/etc/rc.modules', 'a').close()
        # rc.modules must be executable to run at boot: 0o111 is
        # --x--x--x. The original passed decimal 111 (mode 0o157),
        # which set the wrong permission bits.
        os.chmod('/etc/rc.modules', 0o111)
    with open('/etc/rc.modules', 'r+') as modules:
        if module not in modules.read():
            modules.write('modprobe %s\n' % module)
3679 | 13 | |||
3680 | 14 | |||
def update_initramfs(version='all'):
    """Updates an initramfs image (CentOS, via dracut).

    :param str version: Kernel version to rebuild for ('all' by default).
    :return int: Zero on success (check_call raises otherwise).
    """
    cmd = ["dracut", "-f", version]
    return subprocess.check_call(cmd)
3684 | diff --git a/hooks/charmhelpers/core/kernel_factory/ubuntu.py b/hooks/charmhelpers/core/kernel_factory/ubuntu.py | |||
3685 | 0 | new file mode 100644 | 18 | new file mode 100644 |
3686 | index 0000000..3de372f | |||
3687 | --- /dev/null | |||
3688 | +++ b/hooks/charmhelpers/core/kernel_factory/ubuntu.py | |||
3689 | @@ -0,0 +1,13 @@ | |||
3690 | 1 | import subprocess | ||
3691 | 2 | |||
3692 | 3 | |||
def persistent_modprobe(module):
    """Load a kernel module and configure for auto-load on reboot.

    :param str module: Name of the kernel module to persist.
    """
    # /etc/modules lists one module per line; append only when missing.
    with open('/etc/modules', 'r+') as modules_file:
        current = modules_file.read()
        if module not in current:
            modules_file.write(module + "\n")
3698 | 9 | |||
3699 | 10 | |||
def update_initramfs(version='all'):
    """Updates an initramfs image (Ubuntu, via update-initramfs).

    :param str version: Kernel version to rebuild for ('all' by default).
    :return int: Zero on success (check_call raises otherwise).
    """
    cmd = ["update-initramfs", "-k", version, "-u"]
    return subprocess.check_call(cmd)
3703 | diff --git a/hooks/charmhelpers/core/services/__init__.py b/hooks/charmhelpers/core/services/__init__.py | |||
3704 | 0 | new file mode 100644 | 14 | new file mode 100644 |
3705 | index 0000000..61fd074 | |||
3706 | --- /dev/null | |||
3707 | +++ b/hooks/charmhelpers/core/services/__init__.py | |||
3708 | @@ -0,0 +1,16 @@ | |||
3709 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
3710 | 2 | # | ||
3711 | 3 | # Licensed under the Apache License, Version 2.0 (the "License"); | ||
3712 | 4 | # you may not use this file except in compliance with the License. | ||
3713 | 5 | # You may obtain a copy of the License at | ||
3714 | 6 | # | ||
3715 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 | ||
3716 | 8 | # | ||
3717 | 9 | # Unless required by applicable law or agreed to in writing, software | ||
3718 | 10 | # distributed under the License is distributed on an "AS IS" BASIS, | ||
3719 | 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
3720 | 12 | # See the License for the specific language governing permissions and | ||
3721 | 13 | # limitations under the License. | ||
3722 | 14 | |||
3723 | 15 | from .base import * # NOQA | ||
3724 | 16 | from .helpers import * # NOQA | ||
3725 | diff --git a/hooks/charmhelpers/core/services/base.py b/hooks/charmhelpers/core/services/base.py | |||
3726 | 0 | new file mode 100644 | 17 | new file mode 100644 |
3727 | index 0000000..179ad4f | |||
3728 | --- /dev/null | |||
3729 | +++ b/hooks/charmhelpers/core/services/base.py | |||
3730 | @@ -0,0 +1,362 @@ | |||
3731 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
3732 | 2 | # | ||
3733 | 3 | # Licensed under the Apache License, Version 2.0 (the "License"); | ||
3734 | 4 | # you may not use this file except in compliance with the License. | ||
3735 | 5 | # You may obtain a copy of the License at | ||
3736 | 6 | # | ||
3737 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 | ||
3738 | 8 | # | ||
3739 | 9 | # Unless required by applicable law or agreed to in writing, software | ||
3740 | 10 | # distributed under the License is distributed on an "AS IS" BASIS, | ||
3741 | 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
3742 | 12 | # See the License for the specific language governing permissions and | ||
3743 | 13 | # limitations under the License. | ||
3744 | 14 | |||
3745 | 15 | import os | ||
3746 | 16 | import json | ||
3747 | 17 | from inspect import getargspec | ||
3748 | 18 | from collections import Iterable, OrderedDict | ||
3749 | 19 | |||
3750 | 20 | from charmhelpers.core import host | ||
3751 | 21 | from charmhelpers.core import hookenv | ||
3752 | 22 | |||
3753 | 23 | |||
3754 | 24 | __all__ = ['ServiceManager', 'ManagerCallback', | ||
3755 | 25 | 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports', | ||
3756 | 26 | 'service_restart', 'service_stop'] | ||
3757 | 27 | |||
3758 | 28 | |||
3759 | 29 | class ServiceManager(object): | ||
3760 | 30 | def __init__(self, services=None): | ||
3761 | 31 | """ | ||
3762 | 32 | Register a list of services, given their definitions. | ||
3763 | 33 | |||
3764 | 34 | Service definitions are dicts in the following formats (all keys except | ||
3765 | 35 | 'service' are optional):: | ||
3766 | 36 | |||
3767 | 37 | { | ||
3768 | 38 | "service": <service name>, | ||
3769 | 39 | "required_data": <list of required data contexts>, | ||
3770 | 40 | "provided_data": <list of provided data contexts>, | ||
3771 | 41 | "data_ready": <one or more callbacks>, | ||
3772 | 42 | "data_lost": <one or more callbacks>, | ||
3773 | 43 | "start": <one or more callbacks>, | ||
3774 | 44 | "stop": <one or more callbacks>, | ||
3775 | 45 | "ports": <list of ports to manage>, | ||
3776 | 46 | } | ||
3777 | 47 | |||
3778 | 48 | The 'required_data' list should contain dicts of required data (or | ||
3779 | 49 | dependency managers that act like dicts and know how to collect the data). | ||
3780 | 50 | Only when all items in the 'required_data' list are populated are the list | ||
3781 | 51 | of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more | ||
3782 | 52 | information. | ||
3783 | 53 | |||
3784 | 54 | The 'provided_data' list should contain relation data providers, most likely | ||
3785 | 55 | a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`, | ||
3786 | 56 | that will indicate a set of data to set on a given relation. | ||
3787 | 57 | |||
3788 | 58 | The 'data_ready' value should be either a single callback, or a list of | ||
3789 | 59 | callbacks, to be called when all items in 'required_data' pass `is_ready()`. | ||
3790 | 60 | Each callback will be called with the service name as the only parameter. | ||
3791 | 61 | After all of the 'data_ready' callbacks are called, the 'start' callbacks | ||
3792 | 62 | are fired. | ||
3793 | 63 | |||
3794 | 64 | The 'data_lost' value should be either a single callback, or a list of | ||
3795 | 65 | callbacks, to be called when a 'required_data' item no longer passes | ||
3796 | 66 | `is_ready()`. Each callback will be called with the service name as the | ||
3797 | 67 | only parameter. After all of the 'data_lost' callbacks are called, | ||
3798 | 68 | the 'stop' callbacks are fired. | ||
3799 | 69 | |||
3800 | 70 | The 'start' value should be either a single callback, or a list of | ||
3801 | 71 | callbacks, to be called when starting the service, after the 'data_ready' | ||
3802 | 72 | callbacks are complete. Each callback will be called with the service | ||
3803 | 73 | name as the only parameter. This defaults to | ||
3804 | 74 | `[host.service_start, services.open_ports]`. | ||
3805 | 75 | |||
3806 | 76 | The 'stop' value should be either a single callback, or a list of | ||
3807 | 77 | callbacks, to be called when stopping the service. If the service is | ||
3808 | 78 | being stopped because it no longer has all of its 'required_data', this | ||
3809 | 79 | will be called after all of the 'data_lost' callbacks are complete. | ||
3810 | 80 | Each callback will be called with the service name as the only parameter. | ||
3811 | 81 | This defaults to `[services.close_ports, host.service_stop]`. | ||
3812 | 82 | |||
3813 | 83 | The 'ports' value should be a list of ports to manage. The default | ||
3814 | 84 | 'start' handler will open the ports after the service is started, | ||
3815 | 85 | and the default 'stop' handler will close the ports prior to stopping | ||
3816 | 86 | the service. | ||
3817 | 87 | |||
3818 | 88 | |||
3819 | 89 | Examples: | ||
3820 | 90 | |||
3821 | 91 | The following registers an Upstart service called bingod that depends on | ||
3822 | 92 | a mongodb relation and which runs a custom `db_migrate` function prior to | ||
3823 | 93 | restarting the service, and a Runit service called spadesd:: | ||
3824 | 94 | |||
3825 | 95 | manager = services.ServiceManager([ | ||
3826 | 96 | { | ||
3827 | 97 | 'service': 'bingod', | ||
3828 | 98 | 'ports': [80, 443], | ||
3829 | 99 | 'required_data': [MongoRelation(), config(), {'my': 'data'}], | ||
3830 | 100 | 'data_ready': [ | ||
3831 | 101 | services.template(source='bingod.conf'), | ||
3832 | 102 | services.template(source='bingod.ini', | ||
3833 | 103 | target='/etc/bingod.ini', | ||
3834 | 104 | owner='bingo', perms=0400), | ||
3835 | 105 | ], | ||
3836 | 106 | }, | ||
3837 | 107 | { | ||
3838 | 108 | 'service': 'spadesd', | ||
3839 | 109 | 'data_ready': services.template(source='spadesd_run.j2', | ||
3840 | 110 | target='/etc/sv/spadesd/run', | ||
3841 | 111 | perms=0555), | ||
3842 | 112 | 'start': runit_start, | ||
3843 | 113 | 'stop': runit_stop, | ||
3844 | 114 | }, | ||
3845 | 115 | ]) | ||
3846 | 116 | manager.manage() | ||
3847 | 117 | """ | ||
3848 | 118 | self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') | ||
3849 | 119 | self._ready = None | ||
3850 | 120 | self.services = OrderedDict() | ||
3851 | 121 | for service in services or []: | ||
3852 | 122 | service_name = service['service'] | ||
3853 | 123 | self.services[service_name] = service | ||
3854 | 124 | |||
def manage(self):
    """
    Handle the current hook by doing The Right Thing with the registered services.
    """
    # Run callbacks registered via hookenv.atstart() before dispatching.
    hookenv._run_atstart()
    try:
        hook_name = hookenv.hook_name()
        if hook_name == 'stop':
            self.stop_services()
        else:
            self.reconfigure_services()
            self.provide_data()
    except SystemExit as x:
        # A clean sys.exit() / sys.exit(0) still gets the atexit callbacks;
        # a non-zero exit deliberately skips them here.
        if x.code is None or x.code == 0:
            hookenv._run_atexit()
    # NOTE(review): on the clean-SystemExit path this is a second call —
    # presumably hookenv._run_atexit() drains its callback list and is
    # idempotent; confirm against hookenv before changing this ordering.
    hookenv._run_atexit()
3871 | 141 | |||
def provide_data(self):
    """
    Set the relation data for each provider in the ``provided_data`` list.

    A provider must have a `name` attribute, which indicates which relation
    to set data on, and a `provide_data()` method, which returns a dict of
    data to set.

    The `provide_data()` method can optionally accept two parameters:

    * ``remote_service`` The name of the remote service that the data will
      be provided to.  The `provide_data()` method will be called once
      for each connected service (not unit).  This allows the method to
      tailor its data to the given service.
    * ``service_ready`` Whether or not the service definition had all of
      its requirements met, and thus the ``data_ready`` callbacks run.

    Note that the ``provided_data`` methods are now called **after** the
    ``data_ready`` callbacks are run.  This gives the ``data_ready`` callbacks
    a chance to generate any data necessary for the providing to the remote
    services.
    """
    # inspect.getargspec was removed in Python 3.11; prefer its
    # getfullargspec replacement, falling back for Python 2.
    try:
        from inspect import getfullargspec as _getargspec
    except ImportError:  # Python 2
        from inspect import getargspec as _getargspec
    for service_name, service in self.services.items():
        service_ready = self.is_ready(service_name)
        for provider in service.get('provided_data', []):
            for relid in hookenv.relation_ids(provider.name):
                units = hookenv.related_units(relid)
                if not units:
                    # Nothing connected on this relation yet.
                    continue
                # All units of a remote service share its service name.
                remote_service = units[0].split('/')[0]
                argspec = _getargspec(provider.provide_data)
                if len(argspec.args) > 1:
                    # Accepts more than 'self': pass the optional context.
                    data = provider.provide_data(remote_service, service_ready)
                else:
                    data = provider.provide_data()
                if data:
                    hookenv.relation_set(relid, data)
3909 | 179 | |||
def reconfigure_services(self, *service_names):
    """
    Update all files for one or more registered services, and,
    if ready, optionally restart them.

    If no service names are given, reconfigures all registered services.
    """
    targets = service_names if service_names else self.services.keys()
    for name in targets:
        if self.is_ready(name):
            # All required data present: (re)render and (re)start.
            self.fire_event('data_ready', name)
            self.fire_event('start', name,
                            default=[service_restart, manage_ports])
            self.save_ready(name)
        elif self.was_ready(name):
            # Previously complete but data has since gone away: tear down.
            self.fire_event('data_lost', name)
            self.fire_event('stop', name,
                            default=[manage_ports, service_stop])
            self.save_lost(name)
3931 | 201 | |||
def stop_services(self, *service_names):
    """
    Stop one or more registered services, by name.

    If no service names are given, stops all registered services.
    """
    names = service_names or self.services.keys()
    for name in names:
        # Close ports first, then stop the service itself.
        self.fire_event('stop', name, default=[manage_ports, service_stop])
3942 | 212 | |||
def get_service(self, service_name):
    """
    Given the name of a registered service, return its service definition.

    :raises KeyError: if no definition was registered under that name.
    """
    definition = self.services.get(service_name)
    if definition:
        return definition
    raise KeyError('Service not registered: %s' % service_name)
3951 | 221 | |||
def fire_event(self, event_name, service_name, default=None):
    """
    Fire a data_ready, data_lost, start, or stop event on a given service.
    """
    definition = self.get_service(service_name)
    handlers = definition.get(event_name, default)
    if not handlers:
        return
    # A single callable is allowed as shorthand for a one-element list.
    if not isinstance(handlers, Iterable):
        handlers = [handlers]
    for handler in handlers:
        if isinstance(handler, ManagerCallback):
            # ManagerCallbacks also receive the manager and event name.
            handler(self, service_name, event_name)
        else:
            handler(service_name)
3967 | 237 | |||
def is_ready(self, service_name):
    """
    Determine if a registered service is ready, by checking its 'required_data'.

    A 'required_data' item can be any mapping type, and is considered ready
    if `bool(item)` evaluates as True.
    """
    requirements = self.get_service(service_name).get('required_data', [])
    for requirement in requirements:
        if not requirement:
            return False
    return True
3978 | 248 | |||
def _load_ready_file(self):
    # Lazily populate the in-memory cache of ready service names;
    # subsequent calls are no-ops.
    if self._ready is not None:
        return
    if not os.path.exists(self._ready_file):
        self._ready = set()
        return
    with open(self._ready_file) as fp:
        self._ready = set(json.load(fp))
3987 | 257 | |||
def _save_ready_file(self):
    # Persist the cached ready-set as a JSON list; a no-op until
    # _load_ready_file (via save_ready/save_lost) has populated the cache.
    if self._ready is None:
        return
    with open(self._ready_file, 'w') as fp:
        json.dump(list(self._ready), fp)
3993 | 263 | |||
def save_ready(self, service_name):
    """
    Save an indicator that the given service is now data_ready.
    """
    # Load-modify-save so the flag survives across hook invocations.
    self._load_ready_file()
    self._ready.add(service_name)
    self._save_ready_file()
4001 | 271 | |||
def save_lost(self, service_name):
    """
    Save an indicator that the given service is no longer data_ready.
    """
    # discard() (not remove()) so this is safe even if it was never ready.
    self._load_ready_file()
    self._ready.discard(service_name)
    self._save_ready_file()
4009 | 279 | |||
def was_ready(self, service_name):
    """
    Determine if the given service was previously data_ready.
    """
    # Reads the state persisted by save_ready/save_lost.
    self._load_ready_file()
    return service_name in self._ready
4016 | 286 | |||
4017 | 287 | |||
class ManagerCallback(object):
    """
    Special case of a callback that takes the `ServiceManager` instance
    in addition to the service name.

    Subclasses should implement `__call__` which should accept three parameters:

    * `manager` The `ServiceManager` instance
    * `service_name` The name of the service it's being triggered for
    * `event_name` The name of the event that this callback is handling
    """
    def __call__(self, manager, service_name, event_name):
        # Abstract hook: concrete callbacks must override this.
        raise NotImplementedError()


class PortManagerCallback(ManagerCallback):
    """
    Callback class that will open or close ports, for use as either
    a start or stop action.
    """
    def __call__(self, manager, service_name, event_name):
        definition = manager.get_service(service_name)
        # Materialise the port collection; it is traversed more than once.
        desired_ports = list(definition.get('ports', []))
        port_file = os.path.join(
            hookenv.charm_dir(), '.{}.ports'.format(service_name))
        if os.path.exists(port_file):
            # Close any previously-opened ports that are no longer wanted.
            with open(port_file) as fp:
                previous_ports = fp.read().split(',')
            for previous_port in previous_ports:
                if previous_port and not self.ports_contains(previous_port,
                                                             desired_ports):
                    hookenv.close_port(previous_port)
        with open(port_file, 'w') as fp:
            fp.write(','.join(str(p) for p in desired_ports))
        for port in desired_ports:
            # A port is either a number or 'ICMP'.
            protocol = 'ICMP' if str(port).upper() == 'ICMP' else 'TCP'
            if event_name == 'start':
                hookenv.open_port(port, protocol)
            elif event_name == 'stop':
                hookenv.close_port(port, protocol)

    def ports_contains(self, port, ports):
        # Empty strings (from splitting an empty file) never match.
        if not port:
            return False
        if str(port).upper() != 'ICMP':
            # Numeric ports are stored as ints in the service definition.
            port = int(port)
        return port in ports
4068 | 338 | |||
4069 | 339 | |||
def service_stop(service_name):
    """
    Wrapper around host.service_stop to prevent spurious "unknown service"
    messages in the logs.
    """
    if not host.service_running(service_name):
        # Already stopped (or unknown): nothing to do.
        return
    host.service_stop(service_name)
4077 | 347 | |||
4078 | 348 | |||
def service_restart(service_name):
    """
    Wrapper around host.service_restart to prevent spurious "unknown service"
    messages in the logs.
    """
    if not host.service_available(service_name):
        # Unknown service: silently skip rather than spam the logs.
        return
    if host.service_running(service_name):
        host.service_restart(service_name)
    else:
        # Not running yet, so a plain start is the correct "restart".
        host.service_start(service_name)
4089 | 359 | |||
4090 | 360 | |||
# Convenience aliases: one shared PortManagerCallback instance that decides
# whether to open or close ports from the event name it is invoked with.
open_ports = close_ports = manage_ports = PortManagerCallback()
4093 | diff --git a/hooks/charmhelpers/core/services/helpers.py b/hooks/charmhelpers/core/services/helpers.py | |||
4094 | 0 | new file mode 100644 | 363 | new file mode 100644 |
4095 | index 0000000..3e6e30d | |||
4096 | --- /dev/null | |||
4097 | +++ b/hooks/charmhelpers/core/services/helpers.py | |||
4098 | @@ -0,0 +1,290 @@ | |||
4099 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
4100 | 2 | # | ||
4101 | 3 | # Licensed under the Apache License, Version 2.0 (the "License"); | ||
4102 | 4 | # you may not use this file except in compliance with the License. | ||
4103 | 5 | # You may obtain a copy of the License at | ||
4104 | 6 | # | ||
4105 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 | ||
4106 | 8 | # | ||
4107 | 9 | # Unless required by applicable law or agreed to in writing, software | ||
4108 | 10 | # distributed under the License is distributed on an "AS IS" BASIS, | ||
4109 | 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
4110 | 12 | # See the License for the specific language governing permissions and | ||
4111 | 13 | # limitations under the License. | ||
4112 | 14 | |||
4113 | 15 | import os | ||
4114 | 16 | import yaml | ||
4115 | 17 | |||
4116 | 18 | from charmhelpers.core import hookenv | ||
4117 | 19 | from charmhelpers.core import host | ||
4118 | 20 | from charmhelpers.core import templating | ||
4119 | 21 | |||
4120 | 22 | from charmhelpers.core.services.base import ManagerCallback | ||
4121 | 23 | |||
4122 | 24 | |||
4123 | 25 | __all__ = ['RelationContext', 'TemplateCallback', | ||
4124 | 26 | 'render_template', 'template'] | ||
4125 | 27 | |||
4126 | 28 | |||
class RelationContext(dict):
    """
    Base class for a context generator that gets relation data from juju.

    Subclasses must provide the attributes `name`, which is the name of the
    interface of interest, `interface`, which is the type of the interface of
    interest, and `required_keys`, which is the set of keys required for the
    relation to be considered complete.  The data for all interfaces matching
    the `name` attribute that are complete will used to populate the dictionary
    values (see `get_data`, below).

    The generated context will be namespaced under the relation :attr:`name`,
    to prevent potential naming conflicts.

    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
    """
    name = None
    interface = None

    def __init__(self, name=None, additional_required_keys=None):
        # Subclasses usually assign required_keys in their own __init__;
        # fall back to an empty list when they have not.
        if not hasattr(self, 'required_keys'):
            self.required_keys = []

        if name is not None:
            self.name = name
        if additional_required_keys:
            self.required_keys.extend(additional_required_keys)
        self.get_data()

    def __bool__(self):
        """
        Returns True if all of the required_keys are available.
        """
        return self.is_ready()

    __nonzero__ = __bool__

    def __repr__(self):
        return super(RelationContext, self).__repr__()

    def is_ready(self):
        """
        Returns True if all of the `required_keys` are available from any units.
        """
        complete = len(self.get(self.name, [])) > 0
        if not complete:
            hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__),
                        hookenv.DEBUG)
        return complete

    def _is_ready(self, unit_data):
        """
        Helper method that tests a set of relation data and returns True if
        all of the `required_keys` are present.
        """
        missing = set(self.required_keys) - set(unit_data.keys())
        return not missing

    def get_data(self):
        """
        Retrieve the relation data for each unit involved in a relation and,
        if complete, store it in a list under `self[self.name]`.  This
        is automatically called when the RelationContext is instantiated.

        The units are sorted lexographically first by the service ID, then by
        the unit ID.  Thus, if an interface has two other services, 'db:1'
        and 'db:2', with 'db:1' having two units, 'wordpress/0' and
        'wordpress/1', and 'db:2' having one unit, 'mediawiki/0', all of which
        have a complete set of data, the relation data for the units will be
        stored in the order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.

        If you only care about a single unit on the relation, you can just
        access it as `{{ interface[0]['key'] }}`.  However, if you can at all
        support multiple units on a relation, you should iterate over the list,
        like::

            {% for unit in interface -%}
                {{ unit['key'] }}{% if not loop.last %},{% endif %}
            {%- endfor %}

        Note that since all sets of relation data from all related services and
        units are in a single list, if you need to know which service or unit a
        set of data came from, you'll need to extend this class to preserve
        that information.
        """
        if not hookenv.relation_ids(self.name):
            return

        collected = self.setdefault(self.name, [])
        for relation_id in sorted(hookenv.relation_ids(self.name)):
            for unit in sorted(hookenv.related_units(relation_id)):
                unit_data = hookenv.relation_get(rid=relation_id, unit=unit)
                # Only units that published every required key count.
                if self._is_ready(unit_data):
                    collected.append(unit_data)

    def provide_data(self):
        """
        Return data to be relation_set for this interface.
        """
        return {}
4226 | 128 | |||
4227 | 129 | |||
class MysqlRelation(RelationContext):
    """
    Relation context for the `mysql` interface.

    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
    """
    name = 'db'
    interface = 'mysql'

    def __init__(self, *args, **kwargs):
        # Connection details every mysql provider is expected to publish.
        self.required_keys = ['host', 'user', 'password', 'database']
        super(MysqlRelation, self).__init__(*args, **kwargs)
4241 | 143 | |||
4242 | 144 | |||
class HttpRelation(RelationContext):
    """
    Relation context for the `http` interface.

    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
    """
    name = 'website'
    interface = 'http'

    def __init__(self, *args, **kwargs):
        # Minimum a web frontend needs to route traffic to us.
        self.required_keys = ['host', 'port']
        super(HttpRelation, self).__init__(*args, **kwargs)

    def provide_data(self):
        # Advertise this unit on the standard HTTP port.
        return {
            'host': hookenv.unit_get('private-address'),
            'port': 80,
        }
4262 | 164 | |||
4263 | 165 | |||
class RequiredConfig(dict):
    """
    Data context that loads config options with one or more mandatory options.

    Once the required options have been changed from their default values, all
    config options will be available, namespaced under `config` to prevent
    potential naming conflicts (for example, between a config option and a
    relation property).

    :param list *args: List of options that must be changed from their default values.
    """

    def __init__(self, *args):
        self.required_options = args
        self['config'] = hookenv.config()
        with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
            # safe_load: config.yaml is plain data, and the unrestricted
            # yaml.load is deprecated and can instantiate arbitrary objects.
            self.config = yaml.safe_load(fp).get('options', {})

    def __bool__(self):
        """
        True only when every required option differs from its declared default.
        """
        for option in self.required_options:
            if option not in self['config']:
                return False
            current_value = self['config'][option]
            default_value = self.config[option].get('default')
            if current_value == default_value:
                return False
            # Treat None and '' as interchangeable "unset" values.
            if current_value in (None, '') and default_value in (None, ''):
                return False
        return True

    def __nonzero__(self):
        # Python 2 truthiness hook; delegates to __bool__.
        return self.__bool__()
4296 | 198 | |||
4297 | 199 | |||
class StoredContext(dict):
    """
    A data context that always returns the data that it was first created with.

    This is useful to do a one-time generation of things like passwords, that
    will thereafter use the same value that was originally generated, instead
    of generating a new value each time it is run.
    """
    def __init__(self, file_name, config_data):
        """
        If the file exists, populate `self` with the data from the file.
        Otherwise, populate with the given data and persist it to the file.
        """
        # NOTE(review): this existence check uses file_name as given, while
        # store/read resolve relative paths against the charm dir — callers
        # appear to pass absolute paths; confirm before relying on relative
        # ones here.
        if os.path.exists(file_name):
            self.update(self.read_context(file_name))
        else:
            self.store_context(file_name, config_data)
            self.update(config_data)

    def store_context(self, file_name, config_data):
        """Persist `config_data` as YAML to `file_name` (charm-dir relative)."""
        if not os.path.isabs(file_name):
            file_name = os.path.join(hookenv.charm_dir(), file_name)
        with open(file_name, 'w') as file_stream:
            # May contain secrets (generated passwords): owner-only access.
            os.fchmod(file_stream.fileno(), 0o600)
            yaml.dump(config_data, file_stream)

    def read_context(self, file_name):
        """Load and return previously stored data; raise OSError if empty."""
        if not os.path.isabs(file_name):
            file_name = os.path.join(hookenv.charm_dir(), file_name)
        with open(file_name, 'r') as file_stream:
            # safe_load suffices: store_context only ever writes plain YAML
            # data, and the unrestricted yaml.load is deprecated/unsafe.
            data = yaml.safe_load(file_stream)
            if not data:
                raise OSError("%s is empty" % file_name)
            return data
4332 | 234 | |||
4333 | 235 | |||
class TemplateCallback(ManagerCallback):
    """
    Callback class that will render a Jinja2 template, for use as a ready
    action.

    :param str source: The template source file, relative to
        `$CHARM_DIR/templates`

    :param str target: The target to write the rendered template to (or None)
    :param str owner: The owner of the rendered file
    :param str group: The group of the rendered file
    :param int perms: The permissions of the rendered file
    :param partial on_change_action: functools partial to be executed when
        rendered file changes
    :param jinja2 loader template_loader: A jinja2 template loader

    :return str: The rendered template
    """
    def __init__(self, source, target,
                 owner='root', group='root', perms=0o444,
                 on_change_action=None, template_loader=None):
        self.source = source
        self.target = target
        self.owner = owner
        self.group = group
        self.perms = perms
        self.on_change_action = on_change_action
        self.template_loader = template_loader

    def __call__(self, manager, service_name, event_name):
        # Snapshot the target's hash so we can tell whether rendering
        # actually changed the file.
        pre_checksum = ''
        if self.on_change_action and os.path.isfile(self.target):
            pre_checksum = host.file_hash(self.target)

        definition = manager.get_service(service_name)
        context = {'ctx': {}}
        for data_source in definition.get('required_data', []):
            # Flat merge, plus everything mirrored under the 'ctx' namespace.
            context.update(data_source)
            context['ctx'].update(data_source)

        result = templating.render(self.source, self.target, context,
                                   self.owner, self.group, self.perms,
                                   template_loader=self.template_loader)

        if self.on_change_action:
            if pre_checksum == host.file_hash(self.target):
                hookenv.log(
                    'No change detected: {}'.format(self.target),
                    hookenv.DEBUG)
            else:
                self.on_change_action()

        return result
4385 | 287 | |||
4386 | 288 | |||
# Convenience aliases for templates, so callers can write
# services.template(...) or services.render_template(...).
render_template = template = TemplateCallback
4389 | diff --git a/hooks/charmhelpers/core/strutils.py b/hooks/charmhelpers/core/strutils.py | |||
4390 | 0 | new file mode 100644 | 291 | new file mode 100644 |
4391 | index 0000000..e8df045 | |||
4392 | --- /dev/null | |||
4393 | +++ b/hooks/charmhelpers/core/strutils.py | |||
4394 | @@ -0,0 +1,129 @@ | |||
4395 | 1 | #!/usr/bin/env python | ||
4396 | 2 | # -*- coding: utf-8 -*- | ||
4397 | 3 | |||
4398 | 4 | # Copyright 2014-2015 Canonical Limited. | ||
4399 | 5 | # | ||
4400 | 6 | # Licensed under the Apache License, Version 2.0 (the "License"); | ||
4401 | 7 | # you may not use this file except in compliance with the License. | ||
4402 | 8 | # You may obtain a copy of the License at | ||
4403 | 9 | # | ||
4404 | 10 | # http://www.apache.org/licenses/LICENSE-2.0 | ||
4405 | 11 | # | ||
4406 | 12 | # Unless required by applicable law or agreed to in writing, software | ||
4407 | 13 | # distributed under the License is distributed on an "AS IS" BASIS, | ||
4408 | 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
4409 | 15 | # See the License for the specific language governing permissions and | ||
4410 | 16 | # limitations under the License. | ||
4411 | 17 | |||
4412 | 18 | import six | ||
4413 | 19 | import re | ||
4414 | 20 | |||
4415 | 21 | |||
def bool_from_string(value):
    """Interpret string value as boolean.

    Returns True if value translates to True otherwise False.

    :raises ValueError: if `value` is not a string, or does not spell a
        recognised boolean word.
    """
    if not isinstance(value, six.string_types):
        msg = "Unable to interpret non-string value '%s' as boolean" % (value)
        raise ValueError(msg)

    normalized = six.text_type(value).strip().lower()

    if normalized in ('y', 'yes', 'true', 't', 'on'):
        return True
    if normalized in ('n', 'no', 'false', 'f', 'off'):
        return False

    msg = "Unable to interpret string value '%s' as boolean" % (normalized)
    raise ValueError(msg)
4436 | 42 | |||
4437 | 43 | |||
def bytes_from_string(value):
    """Interpret human readable string value as bytes.

    Accepts a plain integer string ("1024") or an integer with a binary
    unit suffix ("10K", "3MB", "1g", ... up to PB).  Suffixes are
    case-insensitive and use powers of 1024.

    :returns: int number of bytes
    :raises ValueError: if `value` is not a string or cannot be parsed
    """
    BYTE_POWER = {
        'K': 1,
        'KB': 1,
        'M': 2,
        'MB': 2,
        'G': 3,
        'GB': 3,
        'T': 4,
        'TB': 4,
        'P': 5,
        'PB': 5,
    }
    if isinstance(value, six.string_types):
        value = six.text_type(value)
    else:
        msg = "Unable to interpret non-string value '%s' as bytes" % (value)
        raise ValueError(msg)
    matches = re.match("([0-9]+)([a-zA-Z]+)", value)
    if matches:
        # Normalise the suffix so "10k" works, and report an unknown
        # suffix as the documented ValueError rather than leaking the
        # internal KeyError the dict lookup used to raise.
        suffix = matches.group(2).upper()
        if suffix not in BYTE_POWER:
            msg = "Unable to interpret string value '%s' as bytes" % (value)
            raise ValueError(msg)
        size = int(matches.group(1)) * (1024 ** BYTE_POWER[suffix])
    else:
        # Assume that value passed in is bytes
        try:
            size = int(value)
        except ValueError:
            msg = "Unable to interpret string value '%s' as bytes" % (value)
            raise ValueError(msg)
    return size
4471 | 77 | |||
4472 | 78 | |||
class BasicStringComparator(object):
    """Provides a class that will compare strings from an iterator type object.
    Used to provide > and < comparisons on strings that may not necessarily be
    alphanumerically ordered.  e.g. OpenStack or Ubuntu releases AFTER the
    z-wrap.
    """

    # Ordered list of valid items; subclasses MUST override this.
    _list = None

    def __init__(self, item):
        if self._list is None:
            raise Exception("Must define the _list in the class definition!")
        try:
            self.index = self._list.index(item)
        except Exception:
            raise KeyError("Item '{}' is not in list '{}'"
                           .format(item, self._list))

    def _rank(self, other):
        # Position of `other` (a raw string or another comparator) in _list;
        # comparing against anything else is a programming error.
        assert isinstance(other, str) or isinstance(other, self.__class__)
        return self._list.index(other)

    def __eq__(self, other):
        return self.index == self._rank(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        return self.index < self._rank(other)

    def __ge__(self, other):
        return not self.__lt__(other)

    def __gt__(self, other):
        return self.index > self._rank(other)

    def __le__(self, other):
        return not self.__gt__(other)

    def __str__(self):
        """Always give back the item at the index so it can be used in
        comparisons like:

            s_mitaka = CompareOpenStack('mitaka')
            s_newton = CompareOpenstack('newton')

            assert s_newton > s_mitaka

        @returns: <string>
        """
        return self._list[self.index]
4524 | diff --git a/hooks/charmhelpers/core/sysctl.py b/hooks/charmhelpers/core/sysctl.py | |||
4525 | 0 | new file mode 100644 | 130 | new file mode 100644 |
4526 | index 0000000..1f188d8 | |||
4527 | --- /dev/null | |||
4528 | +++ b/hooks/charmhelpers/core/sysctl.py | |||
4529 | @@ -0,0 +1,58 @@ | |||
4530 | 1 | #!/usr/bin/env python | ||
4531 | 2 | # -*- coding: utf-8 -*- | ||
4532 | 3 | |||
4533 | 4 | # Copyright 2014-2015 Canonical Limited. | ||
4534 | 5 | # | ||
4535 | 6 | # Licensed under the Apache License, Version 2.0 (the "License"); | ||
4536 | 7 | # you may not use this file except in compliance with the License. | ||
4537 | 8 | # You may obtain a copy of the License at | ||
4538 | 9 | # | ||
4539 | 10 | # http://www.apache.org/licenses/LICENSE-2.0 | ||
4540 | 11 | # | ||
4541 | 12 | # Unless required by applicable law or agreed to in writing, software | ||
4542 | 13 | # distributed under the License is distributed on an "AS IS" BASIS, | ||
4543 | 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
4544 | 15 | # See the License for the specific language governing permissions and | ||
4545 | 16 | # limitations under the License. | ||
4546 | 17 | |||
4547 | 18 | import yaml | ||
4548 | 19 | |||
4549 | 20 | from subprocess import check_call | ||
4550 | 21 | |||
4551 | 22 | from charmhelpers.core.hookenv import ( | ||
4552 | 23 | log, | ||
4553 | 24 | DEBUG, | ||
4554 | 25 | ERROR, | ||
4555 | 26 | ) | ||
4556 | 27 | |||
4557 | 28 | __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>' | ||
4558 | 29 | |||
4559 | 30 | |||
def create(sysctl_dict, sysctl_file):
    """Creates a sysctl.conf file from a YAML associative array

    :param sysctl_dict: a dict or YAML-formatted string of sysctl
        options eg "{ 'kernel.max_pid': 1337 }"
    :type sysctl_dict: str
    :param sysctl_file: path to the sysctl file to be saved
    :type sysctl_file: str or unicode
    :returns: None
    """
    if isinstance(sysctl_dict, dict):
        sysctl_dict_parsed = sysctl_dict
    else:
        try:
            sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
        except yaml.YAMLError:
            log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
                level=ERROR)
            return
        # yaml.safe_load('') yields None, and a scalar/list document is
        # equally unusable; bail out rather than crash on .items() below.
        if not isinstance(sysctl_dict_parsed, dict):
            log("Invalid sysctl_dict, expected a mapping: {}".format(
                sysctl_dict), level=ERROR)
            return

    with open(sysctl_file, "w") as fd:
        for key, value in sysctl_dict_parsed.items():
            fd.write("{}={}\n".format(key, value))

    log("Updating sysctl_file: %s values: %s" % (sysctl_file,
                                                 sysctl_dict_parsed),
        level=DEBUG)

    # Apply the new settings immediately.
    check_call(["sysctl", "-p", sysctl_file])
4588 | diff --git a/hooks/charmhelpers/core/templating.py b/hooks/charmhelpers/core/templating.py | |||
4589 | 0 | new file mode 100644 | 59 | new file mode 100644 |
4590 | index 0000000..9014015 | |||
4591 | --- /dev/null | |||
4592 | +++ b/hooks/charmhelpers/core/templating.py | |||
4593 | @@ -0,0 +1,93 @@ | |||
4594 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
4595 | 2 | # | ||
4596 | 3 | # Licensed under the Apache License, Version 2.0 (the "License"); | ||
4597 | 4 | # you may not use this file except in compliance with the License. | ||
4598 | 5 | # You may obtain a copy of the License at | ||
4599 | 6 | # | ||
4600 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 | ||
4601 | 8 | # | ||
4602 | 9 | # Unless required by applicable law or agreed to in writing, software | ||
4603 | 10 | # distributed under the License is distributed on an "AS IS" BASIS, | ||
4604 | 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
4605 | 12 | # See the License for the specific language governing permissions and | ||
4606 | 13 | # limitations under the License. | ||
4607 | 14 | |||
4608 | 15 | import os | ||
4609 | 16 | import sys | ||
4610 | 17 | |||
4611 | 18 | from charmhelpers.core import host | ||
4612 | 19 | from charmhelpers.core import hookenv | ||
4613 | 20 | |||
4614 | 21 | |||
def render(source, target, context, owner='root', group='root',
           perms=0o444, templates_dir=None, encoding='UTF-8',
           template_loader=None, config_template=None):
    """
    Render a template.

    The `source` path, if not absolute, is relative to the `templates_dir`.

    The `target` path should be absolute. It can also be `None`, in which
    case no file will be written.

    The context should be a dict containing the values to be replaced in the
    template.

    config_template may be provided to render from a provided template instead
    of loading from a file.

    The `owner`, `group`, and `perms` options will be passed to `write_file`.

    If omitted, `templates_dir` defaults to the `templates` folder in the charm.

    The rendered template will be written to the file as well as being returned
    as a string.

    Note: Using this requires python-jinja2 or python3-jinja2; if it is not
    installed, calling this will attempt to use charmhelpers.fetch.apt_install
    to install it.
    """
    try:
        from jinja2 import FileSystemLoader, Environment, exceptions
    except ImportError:
        try:
            from charmhelpers.fetch import apt_install
        except ImportError:
            hookenv.log('Could not import jinja2, and could not import '
                        'charmhelpers.fetch to install it',
                        level=hookenv.ERROR)
            raise
        if sys.version_info.major == 2:
            apt_install('python-jinja2', fatal=True)
        else:
            apt_install('python3-jinja2', fatal=True)
        from jinja2 import FileSystemLoader, Environment, exceptions

    if template_loader:
        template_env = Environment(loader=template_loader)
    else:
        if templates_dir is None:
            templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
        template_env = Environment(loader=FileSystemLoader(templates_dir))

    # load from a string if provided explicitly
    if config_template is not None:
        template = template_env.from_string(config_template)
    else:
        try:
            template = template_env.get_template(source)
        except exceptions.TemplateNotFound:
            hookenv.log('Could not load template %s from %s.' %
                        (source, templates_dir),
                        level=hookenv.ERROR)
            # Bare raise preserves the original traceback; ``raise e``
            # would truncate it on Python 2.
            raise
    content = template.render(context)
    if target is not None:
        target_dir = os.path.dirname(target)
        if not os.path.exists(target_dir):
            # This is a terrible default directory permission, as the file
            # or its siblings will often contain secrets.
            host.mkdir(target_dir, owner, group, perms=0o755)
        host.write_file(target, content.encode(encoding), owner, group, perms)
    return content
4687 | diff --git a/hooks/charmhelpers/core/unitdata.py b/hooks/charmhelpers/core/unitdata.py | |||
4688 | 0 | new file mode 100644 | 94 | new file mode 100644 |
4689 | index 0000000..ab55432 | |||
4690 | --- /dev/null | |||
4691 | +++ b/hooks/charmhelpers/core/unitdata.py | |||
4692 | @@ -0,0 +1,525 @@ | |||
4693 | 1 | #!/usr/bin/env python | ||
4694 | 2 | # -*- coding: utf-8 -*- | ||
4695 | 3 | # | ||
4696 | 4 | # Copyright 2014-2015 Canonical Limited. | ||
4697 | 5 | # | ||
4698 | 6 | # Licensed under the Apache License, Version 2.0 (the "License"); | ||
4699 | 7 | # you may not use this file except in compliance with the License. | ||
4700 | 8 | # You may obtain a copy of the License at | ||
4701 | 9 | # | ||
4702 | 10 | # http://www.apache.org/licenses/LICENSE-2.0 | ||
4703 | 11 | # | ||
4704 | 12 | # Unless required by applicable law or agreed to in writing, software | ||
4705 | 13 | # distributed under the License is distributed on an "AS IS" BASIS, | ||
4706 | 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
4707 | 15 | # See the License for the specific language governing permissions and | ||
4708 | 16 | # limitations under the License. | ||
4709 | 17 | # | ||
4710 | 18 | # Authors: | ||
4711 | 19 | # Kapil Thangavelu <kapil.foss@gmail.com> | ||
4712 | 20 | # | ||
4713 | 21 | """ | ||
4714 | 22 | Intro | ||
4715 | 23 | ----- | ||
4716 | 24 | |||
4717 | 25 | A simple way to store state in units. This provides a key value | ||
4718 | 26 | storage with support for versioned, transactional operation, | ||
4719 | 27 | and can calculate deltas from previous values to simplify unit logic | ||
4720 | 28 | when processing changes. | ||
4721 | 29 | |||
4722 | 30 | |||
4723 | 31 | Hook Integration | ||
4724 | 32 | ---------------- | ||
4725 | 33 | |||
4726 | 34 | There are several extant frameworks for hook execution, including | ||
4727 | 35 | |||
4728 | 36 | - charmhelpers.core.hookenv.Hooks | ||
4729 | 37 | - charmhelpers.core.services.ServiceManager | ||
4730 | 38 | |||
4731 | 39 | The storage classes are framework agnostic, one simple integration is | ||
4732 | 40 | via the HookData contextmanager. It will record the current hook | ||
4733 | 41 | execution environment (including relation data, config data, etc.), | ||
4734 | 42 | setup a transaction and allow easy access to the changes from | ||
4735 | 43 | previously seen values. One consequence of the integration is the | ||
4736 | 44 | reservation of particular keys ('rels', 'unit', 'env', 'config', | ||
4737 | 45 | 'charm_revisions') for their respective values. | ||
4738 | 46 | |||
4739 | 47 | Here's a fully worked integration example using hookenv.Hooks:: | ||
4740 | 48 | |||
4741 | 49 | from charmhelpers.core import hookenv, unitdata | ||
4742 | 50 | |||
4743 | 51 | hook_data = unitdata.HookData() | ||
4744 | 52 | db = unitdata.kv() | ||
4745 | 53 | hooks = hookenv.Hooks() | ||
4746 | 54 | |||
4747 | 55 | @hooks.hook | ||
4748 | 56 | def config_changed(): | ||
4749 | 57 | # Print all changes to configuration from previously seen | ||
4750 | 58 | # values. | ||
4751 | 59 | for changed, (prev, cur) in hook_data.conf.items(): | ||
4752 | 60 | print('config changed', changed, | ||
4753 | 61 | 'previous value', prev, | ||
4754 | 62 | 'current value', cur) | ||
4755 | 63 | |||
4756 | 64 | # Get some unit-specific bookkeeping | ||
4757 | 65 | if not db.get('pkg_key'): | ||
4758 | 66 | key = urllib.urlopen('https://example.com/pkg_key').read() | ||
4759 | 67 | db.set('pkg_key', key) | ||
4760 | 68 | |||
4761 | 69 | # Directly access all charm config as a mapping. | ||
4762 | 70 | conf = db.getrange('config', True) | ||
4763 | 71 | |||
4764 | 72 | # Directly access all relation data as a mapping | ||
4765 | 73 | rels = db.getrange('rels', True) | ||
4766 | 74 | |||
4767 | 75 | if __name__ == '__main__': | ||
4768 | 76 | with hook_data(): | ||
4769 | 77 | hooks.execute(sys.argv) | ||
4770 | 78 | |||
4771 | 79 | |||
4772 | 80 | A more basic integration is via the hook_scope context manager which simply | ||
4773 | 81 | manages transaction scope (and records hook name, and timestamp):: | ||
4774 | 82 | |||
4775 | 83 | >>> from unitdata import kv | ||
4776 | 84 | >>> db = kv() | ||
4777 | 85 | >>> with db.hook_scope('install'): | ||
4778 | 86 | ... # do work, in transactional scope. | ||
4779 | 87 | ... db.set('x', 1) | ||
4780 | 88 | >>> db.get('x') | ||
4781 | 89 | 1 | ||
4782 | 90 | |||
4783 | 91 | |||
4784 | 92 | Usage | ||
4785 | 93 | ----- | ||
4786 | 94 | |||
4787 | 95 | Values are automatically json de/serialized to preserve basic typing | ||
4788 | 96 | and complex data struct capabilities (dicts, lists, ints, booleans, etc). | ||
4789 | 97 | |||
4790 | 98 | Individual values can be manipulated via get/set:: | ||
4791 | 99 | |||
4792 | 100 | >>> kv.set('y', True) | ||
4793 | 101 | >>> kv.get('y') | ||
4794 | 102 | True | ||
4795 | 103 | |||
4796 | 104 | # We can set complex values (dicts, lists) as a single key. | ||
4797 | 105 | >>> kv.set('config', {'a': 1, 'b': True}) | ||
4798 | 106 | |||
4799 | 107 | # Also supports returning dictionaries as a record which | ||
4800 | 108 | # provides attribute access. | ||
4801 | 109 | >>> config = kv.get('config', record=True) | ||
4802 | 110 | >>> config.b | ||
4803 | 111 | True | ||
4804 | 112 | |||
4805 | 113 | |||
4806 | 114 | Groups of keys can be manipulated with update/getrange:: | ||
4807 | 115 | |||
4808 | 116 | >>> kv.update({'z': 1, 'y': 2}, prefix="gui.") | ||
4809 | 117 | >>> kv.getrange('gui.', strip=True) | ||
4810 | 118 | {'z': 1, 'y': 2} | ||
4811 | 119 | |||
4812 | 120 | When updating values, its very helpful to understand which values | ||
4813 | 121 | have actually changed and how have they changed. The storage | ||
4814 | 122 | provides a delta method to provide for this:: | ||
4815 | 123 | |||
4816 | 124 | >>> data = {'debug': True, 'option': 2} | ||
4817 | 125 | >>> delta = kv.delta(data, 'config.') | ||
4818 | 126 | >>> delta.debug.previous | ||
4819 | 127 | None | ||
4820 | 128 | >>> delta.debug.current | ||
4821 | 129 | True | ||
4822 | 130 | >>> delta | ||
4823 | 131 | {'debug': (None, True), 'option': (None, 2)} | ||
4824 | 132 | |||
4825 | 133 | Note the delta method does not persist the actual change, it needs to | ||
4826 | 134 | be explicitly saved via 'update' method:: | ||
4827 | 135 | |||
4828 | 136 | >>> kv.update(data, 'config.') | ||
4829 | 137 | |||
4830 | 138 | Values modified in the context of a hook scope retain historical values | ||
4831 | 139 | associated to the hookname. | ||
4832 | 140 | |||
4833 | 141 | >>> with db.hook_scope('config-changed'): | ||
4834 | 142 | ... db.set('x', 42) | ||
4835 | 143 | >>> db.gethistory('x') | ||
4836 | 144 | [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'), | ||
4837 | 145 | (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')] | ||
4838 | 146 | |||
4839 | 147 | """ | ||
4840 | 148 | |||
4841 | 149 | import collections | ||
4842 | 150 | import contextlib | ||
4843 | 151 | import datetime | ||
4844 | 152 | import itertools | ||
4845 | 153 | import json | ||
4846 | 154 | import os | ||
4847 | 155 | import pprint | ||
4848 | 156 | import sqlite3 | ||
4849 | 157 | import sys | ||
4850 | 158 | |||
4851 | 159 | __author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>' | ||
4852 | 160 | |||
4853 | 161 | |||
4854 | 162 | class Storage(object): | ||
4855 | 163 | """Simple key value database for local unit state within charms. | ||
4856 | 164 | |||
4857 | 165 | Modifications are not persisted unless :meth:`flush` is called. | ||
4858 | 166 | |||
4859 | 167 | To support dicts, lists, integer, floats, and booleans values | ||
4860 | 168 | are automatically json encoded/decoded. | ||
4861 | 169 | |||
4862 | 170 | Note: to facilitate unit testing, ':memory:' can be passed as the | ||
4863 | 171 | path parameter which causes sqlite3 to only build the db in memory. | ||
4864 | 172 | This should only be used for testing purposes. | ||
4865 | 173 | """ | ||
def __init__(self, path=None):
    """Open (creating if necessary) the unit-state database.

    Location is resolved in order: the explicit ``path`` argument, the
    ``UNIT_STATE_DB`` environment variable, then
    ``$CHARM_DIR/.unit-state.db``.
    """
    if path is not None:
        self.db_path = path
    elif 'UNIT_STATE_DB' in os.environ:
        self.db_path = os.environ['UNIT_STATE_DB']
    else:
        self.db_path = os.path.join(
            os.environ.get('CHARM_DIR', ''), '.unit-state.db')
    if self.db_path != ':memory:':
        # Make sure the backing file exists and stays private -- the
        # database frequently holds secrets.
        with open(self.db_path, 'a') as handle:
            os.fchmod(handle.fileno(), 0o600)
    self.conn = sqlite3.connect('%s' % self.db_path)
    self.cursor = self.conn.cursor()
    self.revision = None
    self._closed = False
    self._init()
4882 | 190 | |||
def close(self):
    """Flush pending changes and close the cursor and connection.

    Safe to call more than once; subsequent calls are no-ops.
    """
    if not self._closed:
        self.flush(False)
        self.cursor.close()
        self.conn.close()
        self._closed = True
4890 | 198 | |||
def get(self, key, default=None, record=False):
    """Fetch the JSON-decoded value stored under ``key``.

    Returns ``default`` when the key is absent.  With ``record=True`` the
    decoded mapping is wrapped in a :class:`Record` for attribute access.
    """
    self.cursor.execute('select data from kv where key=?', [key])
    row = self.cursor.fetchone()
    if row is None:
        return default
    value = json.loads(row[0])
    return Record(value) if record else value
4899 | 207 | |||
def getrange(self, key_prefix, strip=False):
    """
    Get a range of keys starting with a common prefix as a mapping of
    keys to values.

    :param str key_prefix: Common prefix among all keys
    :param bool strip: Optionally strip the common prefix from the key
        names in the returned dict
    :return dict: A (possibly empty) dict of key-value mappings
    """
    self.cursor.execute("select key, data from kv where key like ?",
                        ['%s%%' % key_prefix])
    rows = self.cursor.fetchall()
    if not rows:
        return {}
    # Number of leading characters to drop from each returned key name.
    offset = len(key_prefix) if strip else 0
    return {key[offset:]: json.loads(data) for key, data in rows}
4920 | 228 | |||
def update(self, mapping, prefix=""):
    """
    Set the values of multiple keys at once.

    :param dict mapping: Mapping of keys to values
    :param str prefix: Optional prefix to apply to all keys in `mapping`
        before setting
    """
    for key, value in mapping.items():
        self.set("{}{}".format(prefix, key), value)
4931 | 239 | |||
def unset(self, key):
    """
    Remove a key from the database entirely.

    When inside a hook scope, the deletion is also recorded against the
    current revision so history queries can see when the key disappeared.
    """
    self.cursor.execute('delete from kv where key=?', [key])
    deleted = bool(self.cursor.rowcount)
    if deleted and self.revision:
        self.cursor.execute(
            'insert into kv_revisions values (?, ?, ?)',
            [key, self.revision, json.dumps('DELETED')])
4941 | 249 | |||
def unsetrange(self, keys=None, prefix=""):
    """
    Remove a range of keys starting with a common prefix, from the database
    entirely.

    :param list keys: List of keys to remove.
    :param str prefix: Optional prefix to apply to all keys in ``keys``
        before removing.
    """
    if keys is not None:
        if not keys:
            # An empty key list would generate 'key in ()', which is a
            # SQLite syntax error; there is nothing to delete anyway.
            return
        keys = ['%s%s' % (prefix, key) for key in keys]
        self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
        if self.revision and self.cursor.rowcount:
            self.cursor.execute(
                'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
                list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
    else:
        self.cursor.execute('delete from kv where key like ?',
                            ['%s%%' % prefix])
        if self.revision and self.cursor.rowcount:
            self.cursor.execute(
                'insert into kv_revisions values (?, ?, ?)',
                ['%s%%' % prefix, self.revision, json.dumps('DELETED')])
4965 | 273 | |||
4966 | 274 | def set(self, key, value): | ||
4967 | 275 | """ | ||
4968 | 276 | Set a value in the database. | ||
4969 | 277 | |||
4970 | 278 | :param str key: Key to set the value for | ||
4971 | 279 | :param value: Any JSON-serializable value to be set | ||
4972 | 280 | """ | ||
4973 | 281 | serialized = json.dumps(value) | ||
4974 | 282 | |||
4975 | 283 | self.cursor.execute('select data from kv where key=?', [key]) | ||
4976 | 284 | exists = self.cursor.fetchone() | ||
4977 | 285 | |||
4978 | 286 | # Skip mutations to the same value | ||
4979 | 287 | if exists: | ||
4980 | 288 | if exists[0] == serialized: | ||
4981 | 289 | return value | ||
4982 | 290 | |||
4983 | 291 | if not exists: | ||
4984 | 292 | self.cursor.execute( | ||
4985 | 293 | 'insert into kv (key, data) values (?, ?)', | ||
4986 | 294 | (key, serialized)) | ||
4987 | 295 | else: | ||
4988 | 296 | self.cursor.execute(''' | ||
4989 | 297 | update kv | ||
4990 | 298 | set data = ? | ||
4991 | 299 | where key = ?''', [serialized, key]) | ||
4992 | 300 | |||
4993 | 301 | # Save | ||
4994 | 302 | if not self.revision: | ||
4995 | 303 | return value | ||
4996 | 304 | |||
4997 | 305 | self.cursor.execute( | ||
4998 | 306 | 'select 1 from kv_revisions where key=? and revision=?', | ||
4999 | 307 | [key, self.revision]) | ||
5000 | 308 | exists = self.cursor.fetchone() |
The diff has been truncated for viewing.
This merge proposal is being monitored by mergebot. Change the status to Approved to merge.