Merge lp:~plumgrid-team/charms/trusty/plumgrid-gateway/trunk into lp:charms/trusty/plumgrid-gateway
- Trusty Tahr (14.04)
- trunk
- Merge into trunk
Status: | Merged | ||||
---|---|---|---|---|---|
Merged at revision: | 15 | ||||
Proposed branch: | lp:~plumgrid-team/charms/trusty/plumgrid-gateway/trunk | ||||
Merge into: | lp:charms/trusty/plumgrid-gateway | ||||
Diff against target: |
9738 lines (+4393/-3429) 49 files modified
Makefile (+1/-1) bin/charm_helpers_sync.py (+253/-0) charm-helpers-sync.yaml (+6/-1) hooks/charmhelpers/contrib/amulet/deployment.py (+4/-2) hooks/charmhelpers/contrib/amulet/utils.py (+382/-86) hooks/charmhelpers/contrib/ansible/__init__.py (+0/-254) hooks/charmhelpers/contrib/benchmark/__init__.py (+0/-126) hooks/charmhelpers/contrib/charmhelpers/__init__.py (+0/-208) hooks/charmhelpers/contrib/charmsupport/__init__.py (+0/-15) hooks/charmhelpers/contrib/charmsupport/nrpe.py (+0/-360) hooks/charmhelpers/contrib/charmsupport/volumes.py (+0/-175) hooks/charmhelpers/contrib/database/mysql.py (+0/-412) hooks/charmhelpers/contrib/network/ip.py (+55/-23) hooks/charmhelpers/contrib/network/ovs/__init__.py (+6/-2) hooks/charmhelpers/contrib/network/ufw.py (+5/-6) hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+135/-14) hooks/charmhelpers/contrib/openstack/amulet/utils.py (+421/-13) hooks/charmhelpers/contrib/openstack/context.py (+318/-79) hooks/charmhelpers/contrib/openstack/ip.py (+35/-7) hooks/charmhelpers/contrib/openstack/neutron.py (+62/-21) hooks/charmhelpers/contrib/openstack/templating.py (+30/-2) hooks/charmhelpers/contrib/openstack/utils.py (+939/-70) hooks/charmhelpers/contrib/peerstorage/__init__.py (+0/-268) hooks/charmhelpers/contrib/python/packages.py (+35/-11) hooks/charmhelpers/contrib/saltstack/__init__.py (+0/-118) hooks/charmhelpers/contrib/ssl/__init__.py (+0/-94) hooks/charmhelpers/contrib/ssl/service.py (+0/-279) hooks/charmhelpers/contrib/storage/linux/ceph.py (+823/-61) hooks/charmhelpers/contrib/storage/linux/loopback.py (+10/-0) hooks/charmhelpers/contrib/storage/linux/utils.py (+8/-7) hooks/charmhelpers/contrib/templating/__init__.py (+0/-15) hooks/charmhelpers/contrib/templating/contexts.py (+0/-139) hooks/charmhelpers/contrib/templating/jinja.py (+0/-39) hooks/charmhelpers/contrib/templating/pyformat.py (+0/-29) hooks/charmhelpers/contrib/unison/__init__.py (+0/-313) hooks/charmhelpers/core/hookenv.py (+220/-13) 
hooks/charmhelpers/core/host.py (+298/-75) hooks/charmhelpers/core/hugepage.py (+71/-0) hooks/charmhelpers/core/kernel.py (+68/-0) hooks/charmhelpers/core/services/helpers.py (+30/-5) hooks/charmhelpers/core/strutils.py (+30/-0) hooks/charmhelpers/core/templating.py (+21/-8) hooks/charmhelpers/core/unitdata.py (+61/-17) hooks/charmhelpers/fetch/__init__.py (+18/-2) hooks/charmhelpers/fetch/archiveurl.py (+1/-1) hooks/charmhelpers/fetch/bzrurl.py (+22/-32) hooks/charmhelpers/fetch/giturl.py (+20/-23) hooks/pg_gw_utils.py (+3/-2) unit_tests/test_pg_gw_hooks.py (+2/-1) |
||||
To merge this branch: | bzr merge lp:~plumgrid-team/charms/trusty/plumgrid-gateway/trunk | ||||
Related bugs: |
|
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Review Queue (community) | automated testing | Needs Fixing | |
James Page | Pending | ||
Review via email: mp+295030@code.launchpad.net |
This proposal supersedes a proposal from 2016-01-16.
Commit message
Trusty - Liberty/Mitaka support added
Description of the change
- Liberty/Mitaka support
- Charmhelpers sync and improved pg-restart
Review Queue (review-queue) wrote : Posted in a previous version of this proposal | # |
Review Queue (review-queue) wrote : Posted in a previous version of this proposal | # |
This item has failed automated testing! Results available here http://
Bilal Baqar (bbaqar) wrote : Posted in a previous version of this proposal | # |
tests/files/
Review Queue (review-queue) wrote : | # |
This item has failed automated testing! Results available here http://
Review Queue (review-queue) wrote : | # |
This item has failed automated testing! Results available here http://
Bilal Baqar (bbaqar) wrote : | # |
Looking at the results. Will provide fix shortly.
Preview Diff
1 | === modified file 'Makefile' | |||
2 | --- Makefile 2016-03-03 21:49:53 +0000 | |||
3 | +++ Makefile 2016-05-18 10:06:26 +0000 | |||
4 | @@ -4,7 +4,7 @@ | |||
5 | 4 | virtualenv: | 4 | virtualenv: |
6 | 5 | virtualenv .venv | 5 | virtualenv .venv |
7 | 6 | .venv/bin/pip install flake8 nose coverage mock pyyaml netifaces \ | 6 | .venv/bin/pip install flake8 nose coverage mock pyyaml netifaces \ |
9 | 7 | netaddr jinja2 | 7 | netaddr jinja2 pyflakes pep8 six pbr funcsigs psutil |
10 | 8 | 8 | ||
11 | 9 | lint: virtualenv | 9 | lint: virtualenv |
12 | 10 | .venv/bin/flake8 --exclude hooks/charmhelpers hooks unit_tests tests --ignore E402 | 10 | .venv/bin/flake8 --exclude hooks/charmhelpers hooks unit_tests tests --ignore E402 |
13 | 11 | 11 | ||
14 | === added directory 'bin' | |||
15 | === added file 'bin/charm_helpers_sync.py' | |||
16 | --- bin/charm_helpers_sync.py 1970-01-01 00:00:00 +0000 | |||
17 | +++ bin/charm_helpers_sync.py 2016-05-18 10:06:26 +0000 | |||
18 | @@ -0,0 +1,253 @@ | |||
19 | 1 | #!/usr/bin/python | ||
20 | 2 | |||
21 | 3 | # Copyright 2014-2015 Canonical Limited. | ||
22 | 4 | # | ||
23 | 5 | # This file is part of charm-helpers. | ||
24 | 6 | # | ||
25 | 7 | # charm-helpers is free software: you can redistribute it and/or modify | ||
26 | 8 | # it under the terms of the GNU Lesser General Public License version 3 as | ||
27 | 9 | # published by the Free Software Foundation. | ||
28 | 10 | # | ||
29 | 11 | # charm-helpers is distributed in the hope that it will be useful, | ||
30 | 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
31 | 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
32 | 14 | # GNU Lesser General Public License for more details. | ||
33 | 15 | # | ||
34 | 16 | # You should have received a copy of the GNU Lesser General Public License | ||
35 | 17 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | ||
36 | 18 | |||
37 | 19 | # Authors: | ||
38 | 20 | # Adam Gandelman <adamg@ubuntu.com> | ||
39 | 21 | |||
40 | 22 | import logging | ||
41 | 23 | import optparse | ||
42 | 24 | import os | ||
43 | 25 | import subprocess | ||
44 | 26 | import shutil | ||
45 | 27 | import sys | ||
46 | 28 | import tempfile | ||
47 | 29 | import yaml | ||
48 | 30 | from fnmatch import fnmatch | ||
49 | 31 | |||
50 | 32 | import six | ||
51 | 33 | |||
52 | 34 | CHARM_HELPERS_BRANCH = 'lp:charm-helpers' | ||
53 | 35 | |||
54 | 36 | |||
def parse_config(conf_file):
    """Load the sync configuration from *conf_file*.

    :param conf_file: path to a YAML config file
    :returns: the parsed YAML mapping, or False when the path is not a
              regular file (callers treat a falsy result as "no config").
    """
    if not os.path.isfile(conf_file):
        logging.error('Invalid config file: %s.' % conf_file)
        return False
    # safe_load avoids arbitrary Python object construction from the YAML
    # stream; the context manager guarantees the handle is closed.
    with open(conf_file) as f:
        return yaml.safe_load(f.read())
60 | 42 | |||
61 | 43 | |||
def clone_helpers(work_dir, branch):
    """Check out *branch* into <work_dir>/charm-helpers and return that path.

    Uses a lightweight bzr checkout to keep the temporary clone small.
    """
    destination = os.path.join(work_dir, 'charm-helpers')
    logging.info('Checking out %s to %s.' % (branch, destination))
    subprocess.check_call(
        ['bzr', 'checkout', '--lightweight', branch, destination])
    return destination
68 | 50 | |||
69 | 51 | |||
70 | 52 | def _module_path(module): | ||
71 | 53 | return os.path.join(*module.split('.')) | ||
72 | 54 | |||
73 | 55 | |||
def _src_path(src, module):
    """Return the path of dotted *module* inside the checkout at *src*."""
    module_dir = _module_path(module)
    return os.path.join(src, 'charmhelpers', module_dir)
76 | 58 | |||
77 | 59 | |||
def _dest_path(dest, module):
    """Return the sync target path of dotted *module* under *dest*."""
    module_dir = _module_path(module)
    return os.path.join(dest, module_dir)
80 | 62 | |||
81 | 63 | |||
82 | 64 | def _is_pyfile(path): | ||
83 | 65 | return os.path.isfile(path + '.py') | ||
84 | 66 | |||
85 | 67 | |||
def ensure_init(path):
    """Make the tree below path's first two components importable.

    Walks the tree rooted at the first two path components (omitting the
    top-level parent directory), creating an empty __init__.py wherever
    one is missing, e.g. for path='hooks/helpers/foo':
        hooks/helpers/__init__.py
        hooks/helpers/foo/__init__.py
    """
    root = os.path.join(*path.split('/')[:2])
    for directory, _subdirs, _files in os.walk(root):
        init_file = os.path.join(directory, '__init__.py')
        if os.path.exists(init_file):
            continue
        logging.info('Adding missing __init__.py: %s' % init_file)
        open(init_file, 'wb').close()
99 | 81 | |||
100 | 82 | |||
def sync_pyfile(src, dest):
    """Copy the single module *src* (path sans '.py') into directory *dest*.

    Also carries along the source directory's __init__.py when one is
    present, then ensures *dest* and its parents are importable packages.
    """
    src_file = src + '.py'
    logging.info('Syncing pyfile: %s -> %s.' % (src_file, dest))
    if not os.path.exists(dest):
        os.makedirs(dest)
    shutil.copy(src_file, dest)
    init_py = os.path.join(os.path.dirname(src_file), '__init__.py')
    if os.path.isfile(init_py):
        shutil.copy(init_py, dest)
    ensure_init(dest)
112 | 94 | |||
113 | 95 | |||
def get_filter(opts=None):
    """Build an ignore-callback for shutil.copytree honouring inc= options.

    Returns None (no filtering at all) when 'inc=*' is requested;
    otherwise a callable(dir, ls) returning the entry names to skip.
    """
    opts = opts or []
    if 'inc=*' in opts:
        # Explicit wildcard: include everything, no ignore callback needed.
        return None

    def _filter(dir, ls):
        includes = [opt.split('=').pop() for opt in opts if 'inc=' in opt]
        ignored = []
        for name in ls:
            full = os.path.join(dir, name)
            if not os.path.isdir(full) and not full.endswith('.py') and includes:
                # Non-.py file with include patterns in force: keep it
                # only when some pattern matches.
                if any(fnmatch(full, pattern) for pattern in includes):
                    logging.debug('Including file, which matches include '
                                  'filters (%s): %s' % (includes, full))
                else:
                    logging.debug('Not syncing %s, does not match include '
                                  'filters (%s)' % (full, includes))
                    ignored.append(name)
            elif os.path.isfile(full) and not full.endswith('.py'):
                logging.debug('Not syncing file: %s' % name)
                ignored.append(name)
            elif (os.path.isdir(full) and
                    not os.path.isfile(os.path.join(full, '__init__.py'))):
                # Directories without __init__.py are not packages; skip.
                logging.debug('Not syncing directory: %s' % name)
                ignored.append(name)
        return ignored
    return _filter
143 | 125 | |||
144 | 126 | |||
def sync_directory(src, dest, opts=None):
    """Mirror the tree at *src* into *dest*, honouring include options.

    Any pre-existing *dest* is removed first so stale files never linger.
    """
    if os.path.exists(dest):
        logging.debug('Removing existing directory: %s' % dest)
        shutil.rmtree(dest)
    logging.info('Syncing directory: %s -> %s.' % (src, dest))
    shutil.copytree(src, dest, ignore=get_filter(opts))
    ensure_init(dest)
153 | 135 | |||
154 | 136 | |||
def sync(src, dest, module, opts=None):
    """Sync one dotted *module* from the checkout *src* into *dest*.

    Bootstraps charmhelpers/__init__.py, then the __init__.py of every
    intermediate package on the module path, and finally the module
    itself (either a package directory or a single .py file).
    """
    # Sync charmhelpers/__init__.py for bootstrap code.
    sync_pyfile(_src_path(src, '__init__'), dest)

    # Sync every __init__.py on the package path leading to module.
    prefix = []
    for step in module.split('.')[:-1]:
        prefix.append(step)
        init = '.'.join(prefix + ['__init__'])
        sync_pyfile(_src_path(src, init),
                    os.path.dirname(_dest_path(dest, init)))

    # Sync the module itself: a package directory or a lone .py file.
    module_src = _src_path(src, module)
    if os.path.isdir(module_src):
        sync_directory(module_src, _dest_path(dest, module), opts)
    elif _is_pyfile(module_src):
        sync_pyfile(module_src,
                    os.path.dirname(_dest_path(dest, module)))
    else:
        logging.warn('Could not sync: %s. Neither a pyfile or directory, '
                     'does it even exist?' % module)
178 | 160 | |||
179 | 161 | |||
def parse_sync_options(options):
    """Split a comma-separated option string into a list ([] when empty)."""
    return options.split(',') if options else []
184 | 166 | |||
185 | 167 | |||
def extract_options(inc, global_options=None):
    """Split 'module|opt1,opt2' into (module, local opts + global opts).

    A bare string passed for *global_options* is treated as one option.
    """
    global_options = global_options or []
    if global_options and isinstance(global_options, six.string_types):
        global_options = [global_options]
    if '|' not in inc:
        # No per-module options attached; just pass the globals along.
        return (inc, global_options)
    module, opt_str = inc.split('|')
    return (module, parse_sync_options(opt_str) + global_options)
194 | 176 | |||
195 | 177 | |||
def sync_helpers(include, src, dest, options=None):
    """Sync every entry of *include* from checkout *src* into *dest*.

    Entries are either plain module strings (with an optional '|opts'
    suffix) or single-level dicts mapping a parent package to a list of
    submodule strings.
    """
    if not os.path.isdir(dest):
        os.makedirs(dest)

    global_options = parse_sync_options(options)

    for entry in include:
        if isinstance(entry, str):
            module, opts = extract_options(entry, global_options)
            sync(src, dest, module, opts)
        elif isinstance(entry, dict):
            # could also do nested dicts here.
            for parent, children in six.iteritems(entry):
                if not isinstance(children, list):
                    continue
                for child in children:
                    module, opts = extract_options(child, global_options)
                    sync(src, dest, '%s.%s' % (parent, module), opts)
213 | 195 | |||
if __name__ == '__main__':
    # Command-line entry point: load config, apply CLI overrides, then
    # check out charm-helpers into a temp dir and sync the requested
    # modules into the destination.
    parser = optparse.OptionParser()
    parser.add_option('-c', '--config', action='store', dest='config',
                      default=None, help='helper config file')
    parser.add_option('-D', '--debug', action='store_true', dest='debug',
                      default=False, help='debug')
    parser.add_option('-b', '--branch', action='store', dest='branch',
                      help='charm-helpers bzr branch (overrides config)')
    parser.add_option('-d', '--destination', action='store', dest='dest_dir',
                      help='sync destination dir (overrides config)')
    (opts, args) = parser.parse_args()

    if opts.debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    if opts.config:
        logging.info('Loading charm helper config from %s.' % opts.config)
        config = parse_config(opts.config)
        if not config:
            logging.error('Could not parse config from %s.' % opts.config)
            sys.exit(1)
    else:
        config = {}

    # Command-line options take precedence over the config file.
    if 'branch' not in config:
        config['branch'] = CHARM_HELPERS_BRANCH
    if opts.branch:
        config['branch'] = opts.branch
    if opts.dest_dir:
        config['destination'] = opts.dest_dir

    if 'destination' not in config:
        logging.error('No destination dir. specified as option or config.')
        sys.exit(1)

    if 'include' not in config:
        if not args:
            logging.error('No modules to sync specified as option or config.')
            sys.exit(1)
        # BUGFIX: was a list comprehension used purely for its append side
        # effect; build the list directly instead.
        config['include'] = list(args)

    sync_options = None
    if 'options' in config:
        sync_options = config['options']
    tmpd = tempfile.mkdtemp()
    try:
        checkout = clone_helpers(tmpd, config['branch'])
        sync_helpers(config['include'], checkout, config['destination'],
                     options=sync_options)
    except Exception as e:
        logging.error("Could not sync: %s" % e)
        # BUGFIX: bare raise preserves the original traceback; 'raise e'
        # resets it on Python 2.
        raise
    finally:
        logging.debug('Cleaning up %s' % tmpd)
        shutil.rmtree(tmpd)
272 | 0 | 254 | ||
273 | === modified file 'charm-helpers-sync.yaml' | |||
274 | --- charm-helpers-sync.yaml 2015-07-29 18:23:55 +0000 | |||
275 | +++ charm-helpers-sync.yaml 2016-05-18 10:06:26 +0000 | |||
276 | @@ -3,5 +3,10 @@ | |||
277 | 3 | include: | 3 | include: |
278 | 4 | - core | 4 | - core |
279 | 5 | - fetch | 5 | - fetch |
281 | 6 | - contrib | 6 | - contrib.amulet |
282 | 7 | - contrib.hahelpers | ||
283 | 8 | - contrib.network | ||
284 | 9 | - contrib.openstack | ||
285 | 10 | - contrib.python | ||
286 | 11 | - contrib.storage | ||
287 | 7 | - payload | 12 | - payload |
288 | 8 | 13 | ||
289 | === modified file 'hooks/charmhelpers/contrib/amulet/deployment.py' | |||
290 | --- hooks/charmhelpers/contrib/amulet/deployment.py 2015-07-29 18:23:55 +0000 | |||
291 | +++ hooks/charmhelpers/contrib/amulet/deployment.py 2016-05-18 10:06:26 +0000 | |||
292 | @@ -51,7 +51,8 @@ | |||
293 | 51 | if 'units' not in this_service: | 51 | if 'units' not in this_service: |
294 | 52 | this_service['units'] = 1 | 52 | this_service['units'] = 1 |
295 | 53 | 53 | ||
297 | 54 | self.d.add(this_service['name'], units=this_service['units']) | 54 | self.d.add(this_service['name'], units=this_service['units'], |
298 | 55 | constraints=this_service.get('constraints')) | ||
299 | 55 | 56 | ||
300 | 56 | for svc in other_services: | 57 | for svc in other_services: |
301 | 57 | if 'location' in svc: | 58 | if 'location' in svc: |
302 | @@ -64,7 +65,8 @@ | |||
303 | 64 | if 'units' not in svc: | 65 | if 'units' not in svc: |
304 | 65 | svc['units'] = 1 | 66 | svc['units'] = 1 |
305 | 66 | 67 | ||
307 | 67 | self.d.add(svc['name'], charm=branch_location, units=svc['units']) | 68 | self.d.add(svc['name'], charm=branch_location, units=svc['units'], |
308 | 69 | constraints=svc.get('constraints')) | ||
309 | 68 | 70 | ||
310 | 69 | def _add_relations(self, relations): | 71 | def _add_relations(self, relations): |
311 | 70 | """Add all of the relations for the services.""" | 72 | """Add all of the relations for the services.""" |
312 | 71 | 73 | ||
313 | === modified file 'hooks/charmhelpers/contrib/amulet/utils.py' | |||
314 | --- hooks/charmhelpers/contrib/amulet/utils.py 2015-07-29 18:23:55 +0000 | |||
315 | +++ hooks/charmhelpers/contrib/amulet/utils.py 2016-05-18 10:06:26 +0000 | |||
316 | @@ -14,17 +14,25 @@ | |||
317 | 14 | # You should have received a copy of the GNU Lesser General Public License | 14 | # You should have received a copy of the GNU Lesser General Public License |
318 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
319 | 16 | 16 | ||
320 | 17 | import amulet | ||
321 | 18 | import ConfigParser | ||
322 | 19 | import distro_info | ||
323 | 20 | import io | 17 | import io |
324 | 18 | import json | ||
325 | 21 | import logging | 19 | import logging |
326 | 22 | import os | 20 | import os |
327 | 23 | import re | 21 | import re |
329 | 24 | import six | 22 | import socket |
330 | 23 | import subprocess | ||
331 | 25 | import sys | 24 | import sys |
332 | 26 | import time | 25 | import time |
334 | 27 | import urlparse | 26 | import uuid |
335 | 27 | |||
336 | 28 | import amulet | ||
337 | 29 | import distro_info | ||
338 | 30 | import six | ||
339 | 31 | from six.moves import configparser | ||
340 | 32 | if six.PY3: | ||
341 | 33 | from urllib import parse as urlparse | ||
342 | 34 | else: | ||
343 | 35 | import urlparse | ||
344 | 28 | 36 | ||
345 | 29 | 37 | ||
346 | 30 | class AmuletUtils(object): | 38 | class AmuletUtils(object): |
347 | @@ -108,7 +116,7 @@ | |||
348 | 108 | # /!\ DEPRECATION WARNING (beisner): | 116 | # /!\ DEPRECATION WARNING (beisner): |
349 | 109 | # New and existing tests should be rewritten to use | 117 | # New and existing tests should be rewritten to use |
350 | 110 | # validate_services_by_name() as it is aware of init systems. | 118 | # validate_services_by_name() as it is aware of init systems. |
352 | 111 | self.log.warn('/!\\ DEPRECATION WARNING: use ' | 119 | self.log.warn('DEPRECATION WARNING: use ' |
353 | 112 | 'validate_services_by_name instead of validate_services ' | 120 | 'validate_services_by_name instead of validate_services ' |
354 | 113 | 'due to init system differences.') | 121 | 'due to init system differences.') |
355 | 114 | 122 | ||
356 | @@ -142,19 +150,23 @@ | |||
357 | 142 | 150 | ||
358 | 143 | for service_name in services_list: | 151 | for service_name in services_list: |
359 | 144 | if (self.ubuntu_releases.index(release) >= systemd_switch or | 152 | if (self.ubuntu_releases.index(release) >= systemd_switch or |
362 | 145 | service_name == "rabbitmq-server"): | 153 | service_name in ['rabbitmq-server', 'apache2']): |
363 | 146 | # init is systemd | 154 | # init is systemd (or regular sysv) |
364 | 147 | cmd = 'sudo service {} status'.format(service_name) | 155 | cmd = 'sudo service {} status'.format(service_name) |
365 | 156 | output, code = sentry_unit.run(cmd) | ||
366 | 157 | service_running = code == 0 | ||
367 | 148 | elif self.ubuntu_releases.index(release) < systemd_switch: | 158 | elif self.ubuntu_releases.index(release) < systemd_switch: |
368 | 149 | # init is upstart | 159 | # init is upstart |
369 | 150 | cmd = 'sudo status {}'.format(service_name) | 160 | cmd = 'sudo status {}'.format(service_name) |
370 | 161 | output, code = sentry_unit.run(cmd) | ||
371 | 162 | service_running = code == 0 and "start/running" in output | ||
372 | 151 | 163 | ||
373 | 152 | output, code = sentry_unit.run(cmd) | ||
374 | 153 | self.log.debug('{} `{}` returned ' | 164 | self.log.debug('{} `{}` returned ' |
375 | 154 | '{}'.format(sentry_unit.info['unit_name'], | 165 | '{}'.format(sentry_unit.info['unit_name'], |
376 | 155 | cmd, code)) | 166 | cmd, code)) |
379 | 156 | if code != 0: | 167 | if not service_running: |
380 | 157 | return "command `{}` returned {}".format(cmd, str(code)) | 168 | return u"command `{}` returned {} {}".format( |
381 | 169 | cmd, output, str(code)) | ||
382 | 158 | return None | 170 | return None |
383 | 159 | 171 | ||
384 | 160 | def _get_config(self, unit, filename): | 172 | def _get_config(self, unit, filename): |
385 | @@ -164,7 +176,7 @@ | |||
386 | 164 | # NOTE(beisner): by default, ConfigParser does not handle options | 176 | # NOTE(beisner): by default, ConfigParser does not handle options |
387 | 165 | # with no value, such as the flags used in the mysql my.cnf file. | 177 | # with no value, such as the flags used in the mysql my.cnf file. |
388 | 166 | # https://bugs.python.org/issue7005 | 178 | # https://bugs.python.org/issue7005 |
390 | 167 | config = ConfigParser.ConfigParser(allow_no_value=True) | 179 | config = configparser.ConfigParser(allow_no_value=True) |
391 | 168 | config.readfp(io.StringIO(file_contents)) | 180 | config.readfp(io.StringIO(file_contents)) |
392 | 169 | return config | 181 | return config |
393 | 170 | 182 | ||
394 | @@ -259,33 +271,52 @@ | |||
395 | 259 | """Get last modification time of directory.""" | 271 | """Get last modification time of directory.""" |
396 | 260 | return sentry_unit.directory_stat(directory)['mtime'] | 272 | return sentry_unit.directory_stat(directory)['mtime'] |
397 | 261 | 273 | ||
416 | 262 | def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False): | 274 | def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None): |
417 | 263 | """Get process' start time. | 275 | """Get start time of a process based on the last modification time |
418 | 264 | 276 | of the /proc/pid directory. | |
419 | 265 | Determine start time of the process based on the last modification | 277 | |
420 | 266 | time of the /proc/pid directory. If pgrep_full is True, the process | 278 | :sentry_unit: The sentry unit to check for the service on |
421 | 267 | name is matched against the full command line. | 279 | :service: service name to look for in process table |
422 | 268 | """ | 280 | :pgrep_full: [Deprecated] Use full command line search mode with pgrep |
423 | 269 | if pgrep_full: | 281 | :returns: epoch time of service process start |
424 | 270 | cmd = 'pgrep -o -f {}'.format(service) | 282 | :param commands: list of bash commands |
425 | 271 | else: | 283 | :param sentry_units: list of sentry unit pointers |
426 | 272 | cmd = 'pgrep -o {}'.format(service) | 284 | :returns: None if successful; Failure message otherwise |
427 | 273 | cmd = cmd + ' | grep -v pgrep || exit 0' | 285 | """ |
428 | 274 | cmd_out = sentry_unit.run(cmd) | 286 | if pgrep_full is not None: |
429 | 275 | self.log.debug('CMDout: ' + str(cmd_out)) | 287 | # /!\ DEPRECATION WARNING (beisner): |
430 | 276 | if cmd_out[0]: | 288 | # No longer implemented, as pidof is now used instead of pgrep. |
431 | 277 | self.log.debug('Pid for %s %s' % (service, str(cmd_out[0]))) | 289 | # https://bugs.launchpad.net/charm-helpers/+bug/1474030 |
432 | 278 | proc_dir = '/proc/{}'.format(cmd_out[0].strip()) | 290 | self.log.warn('DEPRECATION WARNING: pgrep_full bool is no ' |
433 | 279 | return self._get_dir_mtime(sentry_unit, proc_dir) | 291 | 'longer implemented re: lp 1474030.') |
434 | 292 | |||
435 | 293 | pid_list = self.get_process_id_list(sentry_unit, service) | ||
436 | 294 | pid = pid_list[0] | ||
437 | 295 | proc_dir = '/proc/{}'.format(pid) | ||
438 | 296 | self.log.debug('Pid for {} on {}: {}'.format( | ||
439 | 297 | service, sentry_unit.info['unit_name'], pid)) | ||
440 | 298 | |||
441 | 299 | return self._get_dir_mtime(sentry_unit, proc_dir) | ||
442 | 280 | 300 | ||
443 | 281 | def service_restarted(self, sentry_unit, service, filename, | 301 | def service_restarted(self, sentry_unit, service, filename, |
445 | 282 | pgrep_full=False, sleep_time=20): | 302 | pgrep_full=None, sleep_time=20): |
446 | 283 | """Check if service was restarted. | 303 | """Check if service was restarted. |
447 | 284 | 304 | ||
448 | 285 | Compare a service's start time vs a file's last modification time | 305 | Compare a service's start time vs a file's last modification time |
449 | 286 | (such as a config file for that service) to determine if the service | 306 | (such as a config file for that service) to determine if the service |
450 | 287 | has been restarted. | 307 | has been restarted. |
451 | 288 | """ | 308 | """ |
452 | 309 | # /!\ DEPRECATION WARNING (beisner): | ||
453 | 310 | # This method is prone to races in that no before-time is known. | ||
454 | 311 | # Use validate_service_config_changed instead. | ||
455 | 312 | |||
456 | 313 | # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now | ||
457 | 314 | # used instead of pgrep. pgrep_full is still passed through to ensure | ||
458 | 315 | # deprecation WARNS. lp1474030 | ||
459 | 316 | self.log.warn('DEPRECATION WARNING: use ' | ||
460 | 317 | 'validate_service_config_changed instead of ' | ||
461 | 318 | 'service_restarted due to known races.') | ||
462 | 319 | |||
463 | 289 | time.sleep(sleep_time) | 320 | time.sleep(sleep_time) |
464 | 290 | if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= | 321 | if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= |
465 | 291 | self._get_file_mtime(sentry_unit, filename)): | 322 | self._get_file_mtime(sentry_unit, filename)): |
466 | @@ -294,78 +325,122 @@ | |||
467 | 294 | return False | 325 | return False |
468 | 295 | 326 | ||
469 | 296 | def service_restarted_since(self, sentry_unit, mtime, service, | 327 | def service_restarted_since(self, sentry_unit, mtime, service, |
472 | 297 | pgrep_full=False, sleep_time=20, | 328 | pgrep_full=None, sleep_time=20, |
473 | 298 | retry_count=2): | 329 | retry_count=30, retry_sleep_time=10): |
474 | 299 | """Check if service was been started after a given time. | 330 | """Check if service was been started after a given time. |
475 | 300 | 331 | ||
476 | 301 | Args: | 332 | Args: |
477 | 302 | sentry_unit (sentry): The sentry unit to check for the service on | 333 | sentry_unit (sentry): The sentry unit to check for the service on |
478 | 303 | mtime (float): The epoch time to check against | 334 | mtime (float): The epoch time to check against |
479 | 304 | service (string): service name to look for in process table | 335 | service (string): service name to look for in process table |
483 | 305 | pgrep_full (boolean): Use full command line search mode with pgrep | 336 | pgrep_full: [Deprecated] Use full command line search mode with pgrep |
484 | 306 | sleep_time (int): Seconds to sleep before looking for process | 337 | sleep_time (int): Initial sleep time (s) before looking for file |
485 | 307 | retry_count (int): If service is not found, how many times to retry | 338 | retry_sleep_time (int): Time (s) to sleep between retries |
486 | 339 | retry_count (int): If file is not found, how many times to retry | ||
487 | 308 | 340 | ||
488 | 309 | Returns: | 341 | Returns: |
489 | 310 | bool: True if service found and its start time it newer than mtime, | 342 | bool: True if service found and its start time it newer than mtime, |
490 | 311 | False if service is older than mtime or if service was | 343 | False if service is older than mtime or if service was |
491 | 312 | not found. | 344 | not found. |
492 | 313 | """ | 345 | """ |
494 | 314 | self.log.debug('Checking %s restarted since %s' % (service, mtime)) | 346 | # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now |
495 | 347 | # used instead of pgrep. pgrep_full is still passed through to ensure | ||
496 | 348 | # deprecation WARNS. lp1474030 | ||
497 | 349 | |||
498 | 350 | unit_name = sentry_unit.info['unit_name'] | ||
499 | 351 | self.log.debug('Checking that %s service restarted since %s on ' | ||
500 | 352 | '%s' % (service, mtime, unit_name)) | ||
501 | 315 | time.sleep(sleep_time) | 353 | time.sleep(sleep_time) |
511 | 316 | proc_start_time = self._get_proc_start_time(sentry_unit, service, | 354 | proc_start_time = None |
512 | 317 | pgrep_full) | 355 | tries = 0 |
513 | 318 | while retry_count > 0 and not proc_start_time: | 356 | while tries <= retry_count and not proc_start_time: |
514 | 319 | self.log.debug('No pid file found for service %s, will retry %i ' | 357 | try: |
515 | 320 | 'more times' % (service, retry_count)) | 358 | proc_start_time = self._get_proc_start_time(sentry_unit, |
516 | 321 | time.sleep(30) | 359 | service, |
517 | 322 | proc_start_time = self._get_proc_start_time(sentry_unit, service, | 360 | pgrep_full) |
518 | 323 | pgrep_full) | 361 | self.log.debug('Attempt {} to get {} proc start time on {} ' |
519 | 324 | retry_count = retry_count - 1 | 362 | 'OK'.format(tries, service, unit_name)) |
520 | 363 | except IOError as e: | ||
521 | 364 | # NOTE(beisner) - race avoidance, proc may not exist yet. | ||
522 | 365 | # https://bugs.launchpad.net/charm-helpers/+bug/1474030 | ||
523 | 366 | self.log.debug('Attempt {} to get {} proc start time on {} ' | ||
524 | 367 | 'failed\n{}'.format(tries, service, | ||
525 | 368 | unit_name, e)) | ||
526 | 369 | time.sleep(retry_sleep_time) | ||
527 | 370 | tries += 1 | ||
528 | 325 | 371 | ||
529 | 326 | if not proc_start_time: | 372 | if not proc_start_time: |
530 | 327 | self.log.warn('No proc start time found, assuming service did ' | 373 | self.log.warn('No proc start time found, assuming service did ' |
531 | 328 | 'not start') | 374 | 'not start') |
532 | 329 | return False | 375 | return False |
533 | 330 | if proc_start_time >= mtime: | 376 | if proc_start_time >= mtime: |
536 | 331 | self.log.debug('proc start time is newer than provided mtime' | 377 | self.log.debug('Proc start time is newer than provided mtime' |
537 | 332 | '(%s >= %s)' % (proc_start_time, mtime)) | 378 | '(%s >= %s) on %s (OK)' % (proc_start_time, |
538 | 379 | mtime, unit_name)) | ||
539 | 333 | return True | 380 | return True |
540 | 334 | else: | 381 | else: |
544 | 335 | self.log.warn('proc start time (%s) is older than provided mtime ' | 382 | self.log.warn('Proc start time (%s) is older than provided mtime ' |
545 | 336 | '(%s), service did not restart' % (proc_start_time, | 383 | '(%s) on %s, service did not ' |
546 | 337 | mtime)) | 384 | 'restart' % (proc_start_time, mtime, unit_name)) |
547 | 338 | return False | 385 | return False |
548 | 339 | 386 | ||
549 | 340 | def config_updated_since(self, sentry_unit, filename, mtime, | 387 | def config_updated_since(self, sentry_unit, filename, mtime, |
551 | 341 | sleep_time=20): | 388 | sleep_time=20, retry_count=30, |
552 | 389 | retry_sleep_time=10): | ||
553 | 342 | """Check if file was modified after a given time. | 390 | """Check if file was modified after a given time. |
554 | 343 | 391 | ||
555 | 344 | Args: | 392 | Args: |
556 | 345 | sentry_unit (sentry): The sentry unit to check the file mtime on | 393 | sentry_unit (sentry): The sentry unit to check the file mtime on |
557 | 346 | filename (string): The file to check mtime of | 394 | filename (string): The file to check mtime of |
558 | 347 | mtime (float): The epoch time to check against | 395 | mtime (float): The epoch time to check against |
560 | 348 | sleep_time (int): Seconds to sleep before looking for process | 396 | sleep_time (int): Initial sleep time (s) before looking for file |
561 | 397 | retry_sleep_time (int): Time (s) to sleep between retries | ||
562 | 398 | retry_count (int): If file is not found, how many times to retry | ||
563 | 349 | 399 | ||
564 | 350 | Returns: | 400 | Returns: |
565 | 351 | bool: True if file was modified more recently than mtime, False if | 401 | bool: True if file was modified more recently than mtime, False if |
567 | 352 | file was modified before mtime, | 402 | file was modified before mtime, or if file not found. |
568 | 353 | """ | 403 | """ |
570 | 354 | self.log.debug('Checking %s updated since %s' % (filename, mtime)) | 404 | unit_name = sentry_unit.info['unit_name'] |
571 | 405 | self.log.debug('Checking that %s updated since %s on ' | ||
572 | 406 | '%s' % (filename, mtime, unit_name)) | ||
573 | 355 | time.sleep(sleep_time) | 407 | time.sleep(sleep_time) |
575 | 356 | file_mtime = self._get_file_mtime(sentry_unit, filename) | 408 | file_mtime = None |
576 | 409 | tries = 0 | ||
577 | 410 | while tries <= retry_count and not file_mtime: | ||
578 | 411 | try: | ||
579 | 412 | file_mtime = self._get_file_mtime(sentry_unit, filename) | ||
580 | 413 | self.log.debug('Attempt {} to get {} file mtime on {} ' | ||
581 | 414 | 'OK'.format(tries, filename, unit_name)) | ||
582 | 415 | except IOError as e: | ||
583 | 416 | # NOTE(beisner) - race avoidance, file may not exist yet. | ||
584 | 417 | # https://bugs.launchpad.net/charm-helpers/+bug/1474030 | ||
585 | 418 | self.log.debug('Attempt {} to get {} file mtime on {} ' | ||
586 | 419 | 'failed\n{}'.format(tries, filename, | ||
587 | 420 | unit_name, e)) | ||
588 | 421 | time.sleep(retry_sleep_time) | ||
589 | 422 | tries += 1 | ||
590 | 423 | |||
591 | 424 | if not file_mtime: | ||
592 | 425 | self.log.warn('Could not determine file mtime, assuming ' | ||
593 | 426 | 'file does not exist') | ||
594 | 427 | return False | ||
595 | 428 | |||
596 | 357 | if file_mtime >= mtime: | 429 | if file_mtime >= mtime: |
597 | 358 | self.log.debug('File mtime is newer than provided mtime ' | 430 | self.log.debug('File mtime is newer than provided mtime ' |
599 | 359 | '(%s >= %s)' % (file_mtime, mtime)) | 431 | '(%s >= %s) on %s (OK)' % (file_mtime, |
600 | 432 | mtime, unit_name)) | ||
601 | 360 | return True | 433 | return True |
602 | 361 | else: | 434 | else: |
605 | 362 | self.log.warn('File mtime %s is older than provided mtime %s' | 435 | self.log.warn('File mtime is older than provided mtime' |
606 | 363 | % (file_mtime, mtime)) | 436 | '(%s < on %s) on %s' % (file_mtime, |
607 | 437 | mtime, unit_name)) | ||
608 | 364 | return False | 438 | return False |
609 | 365 | 439 | ||
610 | 366 | def validate_service_config_changed(self, sentry_unit, mtime, service, | 440 | def validate_service_config_changed(self, sentry_unit, mtime, service, |
613 | 367 | filename, pgrep_full=False, | 441 | filename, pgrep_full=None, |
614 | 368 | sleep_time=20, retry_count=2): | 442 | sleep_time=20, retry_count=30, |
615 | 443 | retry_sleep_time=10): | ||
616 | 369 | """Check service and file were updated after mtime | 444 | """Check service and file were updated after mtime |
617 | 370 | 445 | ||
618 | 371 | Args: | 446 | Args: |
619 | @@ -373,9 +448,10 @@ | |||
620 | 373 | mtime (float): The epoch time to check against | 448 | mtime (float): The epoch time to check against |
621 | 374 | service (string): service name to look for in process table | 449 | service (string): service name to look for in process table |
622 | 375 | filename (string): The file to check mtime of | 450 | filename (string): The file to check mtime of |
625 | 376 | pgrep_full (boolean): Use full command line search mode with pgrep | 451 | pgrep_full: [Deprecated] Use full command line search mode with pgrep |
626 | 377 | sleep_time (int): Seconds to sleep before looking for process | 452 | sleep_time (int): Initial sleep in seconds to pass to test helpers |
627 | 378 | retry_count (int): If service is not found, how many times to retry | 453 | retry_count (int): If service is not found, how many times to retry |
628 | 454 | retry_sleep_time (int): Time in seconds to wait between retries | ||
629 | 379 | 455 | ||
630 | 380 | Typical Usage: | 456 | Typical Usage: |
631 | 381 | u = OpenStackAmuletUtils(ERROR) | 457 | u = OpenStackAmuletUtils(ERROR) |
632 | @@ -392,15 +468,27 @@ | |||
633 | 392 | mtime, False if service is older than mtime or if service was | 468 | mtime, False if service is older than mtime or if service was |
634 | 393 | not found or if filename was modified before mtime. | 469 | not found or if filename was modified before mtime. |
635 | 394 | """ | 470 | """ |
645 | 395 | self.log.debug('Checking %s restarted since %s' % (service, mtime)) | 471 | |
646 | 396 | time.sleep(sleep_time) | 472 | # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now |
647 | 397 | service_restart = self.service_restarted_since(sentry_unit, mtime, | 473 | # used instead of pgrep. pgrep_full is still passed through to ensure |
648 | 398 | service, | 474 | # deprecation WARNS. lp1474030 |
649 | 399 | pgrep_full=pgrep_full, | 475 | |
650 | 400 | sleep_time=0, | 476 | service_restart = self.service_restarted_since( |
651 | 401 | retry_count=retry_count) | 477 | sentry_unit, mtime, |
652 | 402 | config_update = self.config_updated_since(sentry_unit, filename, mtime, | 478 | service, |
653 | 403 | sleep_time=0) | 479 | pgrep_full=pgrep_full, |
654 | 480 | sleep_time=sleep_time, | ||
655 | 481 | retry_count=retry_count, | ||
656 | 482 | retry_sleep_time=retry_sleep_time) | ||
657 | 483 | |||
658 | 484 | config_update = self.config_updated_since( | ||
659 | 485 | sentry_unit, | ||
660 | 486 | filename, | ||
661 | 487 | mtime, | ||
662 | 488 | sleep_time=sleep_time, | ||
663 | 489 | retry_count=retry_count, | ||
664 | 490 | retry_sleep_time=retry_sleep_time) | ||
665 | 491 | |||
666 | 404 | return service_restart and config_update | 492 | return service_restart and config_update |
667 | 405 | 493 | ||
668 | 406 | def get_sentry_time(self, sentry_unit): | 494 | def get_sentry_time(self, sentry_unit): |
669 | @@ -418,7 +506,6 @@ | |||
670 | 418 | """Return a list of all Ubuntu releases in order of release.""" | 506 | """Return a list of all Ubuntu releases in order of release.""" |
671 | 419 | _d = distro_info.UbuntuDistroInfo() | 507 | _d = distro_info.UbuntuDistroInfo() |
672 | 420 | _release_list = _d.all | 508 | _release_list = _d.all |
673 | 421 | self.log.debug('Ubuntu release list: {}'.format(_release_list)) | ||
674 | 422 | return _release_list | 509 | return _release_list |
675 | 423 | 510 | ||
676 | 424 | def file_to_url(self, file_rel_path): | 511 | def file_to_url(self, file_rel_path): |
677 | @@ -450,15 +537,20 @@ | |||
678 | 450 | cmd, code, output)) | 537 | cmd, code, output)) |
679 | 451 | return None | 538 | return None |
680 | 452 | 539 | ||
682 | 453 | def get_process_id_list(self, sentry_unit, process_name): | 540 | def get_process_id_list(self, sentry_unit, process_name, |
683 | 541 | expect_success=True): | ||
684 | 454 | """Get a list of process ID(s) from a single sentry juju unit | 542 | """Get a list of process ID(s) from a single sentry juju unit |
685 | 455 | for a single process name. | 543 | for a single process name. |
686 | 456 | 544 | ||
688 | 457 | :param sentry_unit: Pointer to amulet sentry instance (juju unit) | 545 | :param sentry_unit: Amulet sentry instance (juju unit) |
689 | 458 | :param process_name: Process name | 546 | :param process_name: Process name |
690 | 547 | :param expect_success: If False, expect the PID to be missing, | ||
691 | 548 | raise if it is present. | ||
692 | 459 | :returns: List of process IDs | 549 | :returns: List of process IDs |
693 | 460 | """ | 550 | """ |
695 | 461 | cmd = 'pidof {}'.format(process_name) | 551 | cmd = 'pidof -x {}'.format(process_name) |
696 | 552 | if not expect_success: | ||
697 | 553 | cmd += " || exit 0 && exit 1" | ||
698 | 462 | output, code = sentry_unit.run(cmd) | 554 | output, code = sentry_unit.run(cmd) |
699 | 463 | if code != 0: | 555 | if code != 0: |
700 | 464 | msg = ('{} `{}` returned {} ' | 556 | msg = ('{} `{}` returned {} ' |
701 | @@ -467,14 +559,23 @@ | |||
702 | 467 | amulet.raise_status(amulet.FAIL, msg=msg) | 559 | amulet.raise_status(amulet.FAIL, msg=msg) |
703 | 468 | return str(output).split() | 560 | return str(output).split() |
704 | 469 | 561 | ||
706 | 470 | def get_unit_process_ids(self, unit_processes): | 562 | def get_unit_process_ids(self, unit_processes, expect_success=True): |
707 | 471 | """Construct a dict containing unit sentries, process names, and | 563 | """Construct a dict containing unit sentries, process names, and |
709 | 472 | process IDs.""" | 564 | process IDs. |
710 | 565 | |||
711 | 566 | :param unit_processes: A dictionary of Amulet sentry instance | ||
712 | 567 | to list of process names. | ||
713 | 568 | :param expect_success: if False expect the processes to not be | ||
714 | 569 | running, raise if they are. | ||
715 | 570 | :returns: Dictionary of Amulet sentry instance to dictionary | ||
716 | 571 | of process names to PIDs. | ||
717 | 572 | """ | ||
718 | 473 | pid_dict = {} | 573 | pid_dict = {} |
720 | 474 | for sentry_unit, process_list in unit_processes.iteritems(): | 574 | for sentry_unit, process_list in six.iteritems(unit_processes): |
721 | 475 | pid_dict[sentry_unit] = {} | 575 | pid_dict[sentry_unit] = {} |
722 | 476 | for process in process_list: | 576 | for process in process_list: |
724 | 477 | pids = self.get_process_id_list(sentry_unit, process) | 577 | pids = self.get_process_id_list( |
725 | 578 | sentry_unit, process, expect_success=expect_success) | ||
726 | 478 | pid_dict[sentry_unit].update({process: pids}) | 579 | pid_dict[sentry_unit].update({process: pids}) |
727 | 479 | return pid_dict | 580 | return pid_dict |
728 | 480 | 581 | ||
729 | @@ -488,7 +589,7 @@ | |||
730 | 488 | return ('Unit count mismatch. expected, actual: {}, ' | 589 | return ('Unit count mismatch. expected, actual: {}, ' |
731 | 489 | '{} '.format(len(expected), len(actual))) | 590 | '{} '.format(len(expected), len(actual))) |
732 | 490 | 591 | ||
734 | 491 | for (e_sentry, e_proc_names) in expected.iteritems(): | 592 | for (e_sentry, e_proc_names) in six.iteritems(expected): |
735 | 492 | e_sentry_name = e_sentry.info['unit_name'] | 593 | e_sentry_name = e_sentry.info['unit_name'] |
736 | 493 | if e_sentry in actual.keys(): | 594 | if e_sentry in actual.keys(): |
737 | 494 | a_proc_names = actual[e_sentry] | 595 | a_proc_names = actual[e_sentry] |
738 | @@ -500,22 +601,40 @@ | |||
739 | 500 | return ('Process name count mismatch. expected, actual: {}, ' | 601 | return ('Process name count mismatch. expected, actual: {}, ' |
740 | 501 | '{}'.format(len(expected), len(actual))) | 602 | '{}'.format(len(expected), len(actual))) |
741 | 502 | 603 | ||
743 | 503 | for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \ | 604 | for (e_proc_name, e_pids), (a_proc_name, a_pids) in \ |
744 | 504 | zip(e_proc_names.items(), a_proc_names.items()): | 605 | zip(e_proc_names.items(), a_proc_names.items()): |
745 | 505 | if e_proc_name != a_proc_name: | 606 | if e_proc_name != a_proc_name: |
746 | 506 | return ('Process name mismatch. expected, actual: {}, ' | 607 | return ('Process name mismatch. expected, actual: {}, ' |
747 | 507 | '{}'.format(e_proc_name, a_proc_name)) | 608 | '{}'.format(e_proc_name, a_proc_name)) |
748 | 508 | 609 | ||
749 | 509 | a_pids_length = len(a_pids) | 610 | a_pids_length = len(a_pids) |
752 | 510 | if e_pids_length != a_pids_length: | 611 | fail_msg = ('PID count mismatch. {} ({}) expected, actual: ' |
751 | 511 | return ('PID count mismatch. {} ({}) expected, actual: ' | ||
753 | 512 | '{}, {} ({})'.format(e_sentry_name, e_proc_name, | 612 | '{}, {} ({})'.format(e_sentry_name, e_proc_name, |
755 | 513 | e_pids_length, a_pids_length, | 613 | e_pids, a_pids_length, |
756 | 514 | a_pids)) | 614 | a_pids)) |
757 | 615 | |||
758 | 616 | # If expected is a list, ensure at least one PID quantity match | ||
759 | 617 | if isinstance(e_pids, list) and \ | ||
760 | 618 | a_pids_length not in e_pids: | ||
761 | 619 | return fail_msg | ||
762 | 620 | # If expected is not bool and not list, | ||
763 | 621 | # ensure PID quantities match | ||
764 | 622 | elif not isinstance(e_pids, bool) and \ | ||
765 | 623 | not isinstance(e_pids, list) and \ | ||
766 | 624 | a_pids_length != e_pids: | ||
767 | 625 | return fail_msg | ||
768 | 626 | # If expected is bool True, ensure 1 or more PIDs exist | ||
769 | 627 | elif isinstance(e_pids, bool) and \ | ||
770 | 628 | e_pids is True and a_pids_length < 1: | ||
771 | 629 | return fail_msg | ||
772 | 630 | # If expected is bool False, ensure 0 PIDs exist | ||
773 | 631 | elif isinstance(e_pids, bool) and \ | ||
774 | 632 | e_pids is False and a_pids_length != 0: | ||
775 | 633 | return fail_msg | ||
776 | 515 | else: | 634 | else: |
777 | 516 | self.log.debug('PID check OK: {} {} {}: ' | 635 | self.log.debug('PID check OK: {} {} {}: ' |
778 | 517 | '{}'.format(e_sentry_name, e_proc_name, | 636 | '{}'.format(e_sentry_name, e_proc_name, |
780 | 518 | e_pids_length, a_pids)) | 637 | e_pids, a_pids)) |
781 | 519 | return None | 638 | return None |
782 | 520 | 639 | ||
783 | 521 | def validate_list_of_identical_dicts(self, list_of_dicts): | 640 | def validate_list_of_identical_dicts(self, list_of_dicts): |
784 | @@ -531,3 +650,180 @@ | |||
785 | 531 | return 'Dicts within list are not identical' | 650 | return 'Dicts within list are not identical' |
786 | 532 | 651 | ||
787 | 533 | return None | 652 | return None |
788 | 653 | |||
789 | 654 | def validate_sectionless_conf(self, file_contents, expected): | ||
790 | 655 | """A crude conf parser. Useful to inspect configuration files which | ||
791 | 656 | do not have section headers (as would be necessary in order to use | ||
792 | 657 | the configparser). Such as openstack-dashboard or rabbitmq confs.""" | ||
793 | 658 | for line in file_contents.split('\n'): | ||
794 | 659 | if '=' in line: | ||
795 | 660 | args = line.split('=') | ||
796 | 661 | if len(args) <= 1: | ||
797 | 662 | continue | ||
798 | 663 | key = args[0].strip() | ||
799 | 664 | value = args[1].strip() | ||
800 | 665 | if key in expected.keys(): | ||
801 | 666 | if expected[key] != value: | ||
802 | 667 | msg = ('Config mismatch. Expected, actual: {}, ' | ||
803 | 668 | '{}'.format(expected[key], value)) | ||
804 | 669 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
805 | 670 | |||
806 | 671 | def get_unit_hostnames(self, units): | ||
807 | 672 | """Return a dict of juju unit names to hostnames.""" | ||
808 | 673 | host_names = {} | ||
809 | 674 | for unit in units: | ||
810 | 675 | host_names[unit.info['unit_name']] = \ | ||
811 | 676 | str(unit.file_contents('/etc/hostname').strip()) | ||
812 | 677 | self.log.debug('Unit host names: {}'.format(host_names)) | ||
813 | 678 | return host_names | ||
814 | 679 | |||
815 | 680 | def run_cmd_unit(self, sentry_unit, cmd): | ||
816 | 681 | """Run a command on a unit, return the output and exit code.""" | ||
817 | 682 | output, code = sentry_unit.run(cmd) | ||
818 | 683 | if code == 0: | ||
819 | 684 | self.log.debug('{} `{}` command returned {} ' | ||
820 | 685 | '(OK)'.format(sentry_unit.info['unit_name'], | ||
821 | 686 | cmd, code)) | ||
822 | 687 | else: | ||
823 | 688 | msg = ('{} `{}` command returned {} ' | ||
824 | 689 | '{}'.format(sentry_unit.info['unit_name'], | ||
825 | 690 | cmd, code, output)) | ||
826 | 691 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
827 | 692 | return str(output), code | ||
828 | 693 | |||
829 | 694 | def file_exists_on_unit(self, sentry_unit, file_name): | ||
830 | 695 | """Check if a file exists on a unit.""" | ||
831 | 696 | try: | ||
832 | 697 | sentry_unit.file_stat(file_name) | ||
833 | 698 | return True | ||
834 | 699 | except IOError: | ||
835 | 700 | return False | ||
836 | 701 | except Exception as e: | ||
837 | 702 | msg = 'Error checking file {}: {}'.format(file_name, e) | ||
838 | 703 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
839 | 704 | |||
840 | 705 | def file_contents_safe(self, sentry_unit, file_name, | ||
841 | 706 | max_wait=60, fatal=False): | ||
842 | 707 | """Get file contents from a sentry unit. Wrap amulet file_contents | ||
843 | 708 | with retry logic to address races where a file checks as existing, | ||
844 | 709 | but no longer exists by the time file_contents is called. | ||
845 | 710 | Return None if file not found. Optionally raise if fatal is True.""" | ||
846 | 711 | unit_name = sentry_unit.info['unit_name'] | ||
847 | 712 | file_contents = False | ||
848 | 713 | tries = 0 | ||
849 | 714 | while not file_contents and tries < (max_wait / 4): | ||
850 | 715 | try: | ||
851 | 716 | file_contents = sentry_unit.file_contents(file_name) | ||
852 | 717 | except IOError: | ||
853 | 718 | self.log.debug('Attempt {} to open file {} from {} ' | ||
854 | 719 | 'failed'.format(tries, file_name, | ||
855 | 720 | unit_name)) | ||
856 | 721 | time.sleep(4) | ||
857 | 722 | tries += 1 | ||
858 | 723 | |||
859 | 724 | if file_contents: | ||
860 | 725 | return file_contents | ||
861 | 726 | elif not fatal: | ||
862 | 727 | return None | ||
863 | 728 | elif fatal: | ||
864 | 729 | msg = 'Failed to get file contents from unit.' | ||
865 | 730 | amulet.raise_status(amulet.FAIL, msg) | ||
866 | 731 | |||
867 | 732 | def port_knock_tcp(self, host="localhost", port=22, timeout=15): | ||
868 | 733 | """Open a TCP socket to check for a listening sevice on a host. | ||
869 | 734 | |||
870 | 735 | :param host: host name or IP address, default to localhost | ||
871 | 736 | :param port: TCP port number, default to 22 | ||
872 | 737 | :param timeout: Connect timeout, default to 15 seconds | ||
873 | 738 | :returns: True if successful, False if connect failed | ||
874 | 739 | """ | ||
875 | 740 | |||
876 | 741 | # Resolve host name if possible | ||
877 | 742 | try: | ||
878 | 743 | connect_host = socket.gethostbyname(host) | ||
879 | 744 | host_human = "{} ({})".format(connect_host, host) | ||
880 | 745 | except socket.error as e: | ||
881 | 746 | self.log.warn('Unable to resolve address: ' | ||
882 | 747 | '{} ({}) Trying anyway!'.format(host, e)) | ||
883 | 748 | connect_host = host | ||
884 | 749 | host_human = connect_host | ||
885 | 750 | |||
886 | 751 | # Attempt socket connection | ||
887 | 752 | try: | ||
888 | 753 | knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) | ||
889 | 754 | knock.settimeout(timeout) | ||
890 | 755 | knock.connect((connect_host, port)) | ||
891 | 756 | knock.close() | ||
892 | 757 | self.log.debug('Socket connect OK for host ' | ||
893 | 758 | '{} on port {}.'.format(host_human, port)) | ||
894 | 759 | return True | ||
895 | 760 | except socket.error as e: | ||
896 | 761 | self.log.debug('Socket connect FAIL for' | ||
897 | 762 | ' {} port {} ({})'.format(host_human, port, e)) | ||
898 | 763 | return False | ||
899 | 764 | |||
900 | 765 | def port_knock_units(self, sentry_units, port=22, | ||
901 | 766 | timeout=15, expect_success=True): | ||
902 | 767 | """Open a TCP socket to check for a listening sevice on each | ||
903 | 768 | listed juju unit. | ||
904 | 769 | |||
905 | 770 | :param sentry_units: list of sentry unit pointers | ||
906 | 771 | :param port: TCP port number, default to 22 | ||
907 | 772 | :param timeout: Connect timeout, default to 15 seconds | ||
908 | 773 | :expect_success: True by default, set False to invert logic | ||
909 | 774 | :returns: None if successful, Failure message otherwise | ||
910 | 775 | """ | ||
911 | 776 | for unit in sentry_units: | ||
912 | 777 | host = unit.info['public-address'] | ||
913 | 778 | connected = self.port_knock_tcp(host, port, timeout) | ||
914 | 779 | if not connected and expect_success: | ||
915 | 780 | return 'Socket connect failed.' | ||
916 | 781 | elif connected and not expect_success: | ||
917 | 782 | return 'Socket connected unexpectedly.' | ||
918 | 783 | |||
919 | 784 | def get_uuid_epoch_stamp(self): | ||
920 | 785 | """Returns a stamp string based on uuid4 and epoch time. Useful in | ||
921 | 786 | generating test messages which need to be unique-ish.""" | ||
922 | 787 | return '[{}-{}]'.format(uuid.uuid4(), time.time()) | ||
923 | 788 | |||
924 | 789 | # amulet juju action helpers: | ||
925 | 790 | def run_action(self, unit_sentry, action, | ||
926 | 791 | _check_output=subprocess.check_output, | ||
927 | 792 | params=None): | ||
928 | 793 | """Run the named action on a given unit sentry. | ||
929 | 794 | |||
930 | 795 | params a dict of parameters to use | ||
931 | 796 | _check_output parameter is used for dependency injection. | ||
932 | 797 | |||
933 | 798 | @return action_id. | ||
934 | 799 | """ | ||
935 | 800 | unit_id = unit_sentry.info["unit_name"] | ||
936 | 801 | command = ["juju", "action", "do", "--format=json", unit_id, action] | ||
937 | 802 | if params is not None: | ||
938 | 803 | for key, value in params.iteritems(): | ||
939 | 804 | command.append("{}={}".format(key, value)) | ||
940 | 805 | self.log.info("Running command: %s\n" % " ".join(command)) | ||
941 | 806 | output = _check_output(command, universal_newlines=True) | ||
942 | 807 | data = json.loads(output) | ||
943 | 808 | action_id = data[u'Action queued with id'] | ||
944 | 809 | return action_id | ||
945 | 810 | |||
946 | 811 | def wait_on_action(self, action_id, _check_output=subprocess.check_output): | ||
947 | 812 | """Wait for a given action, returning if it completed or not. | ||
948 | 813 | |||
949 | 814 | _check_output parameter is used for dependency injection. | ||
950 | 815 | """ | ||
951 | 816 | command = ["juju", "action", "fetch", "--format=json", "--wait=0", | ||
952 | 817 | action_id] | ||
953 | 818 | output = _check_output(command, universal_newlines=True) | ||
954 | 819 | data = json.loads(output) | ||
955 | 820 | return data.get(u"status") == "completed" | ||
956 | 821 | |||
957 | 822 | def status_get(self, unit): | ||
958 | 823 | """Return the current service status of this unit.""" | ||
959 | 824 | raw_status, return_code = unit.run( | ||
960 | 825 | "status-get --format=json --include-data") | ||
961 | 826 | if return_code != 0: | ||
962 | 827 | return ("unknown", "") | ||
963 | 828 | status = json.loads(raw_status) | ||
964 | 829 | return (status["status"], status["message"]) | ||
965 | 534 | 830 | ||
966 | === removed directory 'hooks/charmhelpers/contrib/ansible' | |||
967 | === removed file 'hooks/charmhelpers/contrib/ansible/__init__.py' | |||
968 | --- hooks/charmhelpers/contrib/ansible/__init__.py 2015-07-29 18:23:55 +0000 | |||
969 | +++ hooks/charmhelpers/contrib/ansible/__init__.py 1970-01-01 00:00:00 +0000 | |||
970 | @@ -1,254 +0,0 @@ | |||
971 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
972 | 2 | # | ||
973 | 3 | # This file is part of charm-helpers. | ||
974 | 4 | # | ||
975 | 5 | # charm-helpers is free software: you can redistribute it and/or modify | ||
976 | 6 | # it under the terms of the GNU Lesser General Public License version 3 as | ||
977 | 7 | # published by the Free Software Foundation. | ||
978 | 8 | # | ||
979 | 9 | # charm-helpers is distributed in the hope that it will be useful, | ||
980 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
981 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
982 | 12 | # GNU Lesser General Public License for more details. | ||
983 | 13 | # | ||
984 | 14 | # You should have received a copy of the GNU Lesser General Public License | ||
985 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | ||
986 | 16 | |||
987 | 17 | # Copyright 2013 Canonical Ltd. | ||
988 | 18 | # | ||
989 | 19 | # Authors: | ||
990 | 20 | # Charm Helpers Developers <juju@lists.ubuntu.com> | ||
991 | 21 | """Charm Helpers ansible - declare the state of your machines. | ||
992 | 22 | |||
993 | 23 | This helper enables you to declare your machine state, rather than | ||
994 | 24 | program it procedurally (and have to test each change to your procedures). | ||
995 | 25 | Your install hook can be as simple as:: | ||
996 | 26 | |||
997 | 27 | {{{ | ||
998 | 28 | import charmhelpers.contrib.ansible | ||
999 | 29 | |||
1000 | 30 | |||
1001 | 31 | def install(): | ||
1002 | 32 | charmhelpers.contrib.ansible.install_ansible_support() | ||
1003 | 33 | charmhelpers.contrib.ansible.apply_playbook('playbooks/install.yaml') | ||
1004 | 34 | }}} | ||
1005 | 35 | |||
1006 | 36 | and won't need to change (nor will its tests) when you change the machine | ||
1007 | 37 | state. | ||
1008 | 38 | |||
1009 | 39 | All of your juju config and relation-data are available as template | ||
1010 | 40 | variables within your playbooks and templates. An install playbook looks | ||
1011 | 41 | something like:: | ||
1012 | 42 | |||
1013 | 43 | {{{ | ||
1014 | 44 | --- | ||
1015 | 45 | - hosts: localhost | ||
1016 | 46 | user: root | ||
1017 | 47 | |||
1018 | 48 | tasks: | ||
1019 | 49 | - name: Add private repositories. | ||
1020 | 50 | template: | ||
1021 | 51 | src: ../templates/private-repositories.list.jinja2 | ||
1022 | 52 | dest: /etc/apt/sources.list.d/private.list | ||
1023 | 53 | |||
1024 | 54 | - name: Update the cache. | ||
1025 | 55 | apt: update_cache=yes | ||
1026 | 56 | |||
1027 | 57 | - name: Install dependencies. | ||
1028 | 58 | apt: pkg={{ item }} | ||
1029 | 59 | with_items: | ||
1030 | 60 | - python-mimeparse | ||
1031 | 61 | - python-webob | ||
1032 | 62 | - sunburnt | ||
1033 | 63 | |||
1034 | 64 | - name: Setup groups. | ||
1035 | 65 | group: name={{ item.name }} gid={{ item.gid }} | ||
1036 | 66 | with_items: | ||
1037 | 67 | - { name: 'deploy_user', gid: 1800 } | ||
1038 | 68 | - { name: 'service_user', gid: 1500 } | ||
1039 | 69 | |||
1040 | 70 | ... | ||
1041 | 71 | }}} | ||
1042 | 72 | |||
1043 | 73 | Read more online about `playbooks`_ and standard ansible `modules`_. | ||
1044 | 74 | |||
1045 | 75 | .. _playbooks: http://www.ansibleworks.com/docs/playbooks.html | ||
1046 | 76 | .. _modules: http://www.ansibleworks.com/docs/modules.html | ||
1047 | 77 | |||
1048 | 78 | A further feature os the ansible hooks is to provide a light weight "action" | ||
1049 | 79 | scripting tool. This is a decorator that you apply to a function, and that | ||
1050 | 80 | function can now receive cli args, and can pass extra args to the playbook. | ||
1051 | 81 | |||
1052 | 82 | e.g. | ||
1053 | 83 | |||
1054 | 84 | |||
1055 | 85 | @hooks.action() | ||
1056 | 86 | def some_action(amount, force="False"): | ||
1057 | 87 | "Usage: some-action AMOUNT [force=True]" # <-- shown on error | ||
1058 | 88 | # process the arguments | ||
1059 | 89 | # do some calls | ||
1060 | 90 | # return extra-vars to be passed to ansible-playbook | ||
1061 | 91 | return { | ||
1062 | 92 | 'amount': int(amount), | ||
1063 | 93 | 'type': force, | ||
1064 | 94 | } | ||
1065 | 95 | |||
1066 | 96 | You can now create a symlink to hooks.py that can be invoked like a hook, but | ||
1067 | 97 | with cli params: | ||
1068 | 98 | |||
1069 | 99 | # link actions/some-action to hooks/hooks.py | ||
1070 | 100 | |||
1071 | 101 | actions/some-action amount=10 force=true | ||
1072 | 102 | |||
1073 | 103 | """ | ||
1074 | 104 | import os | ||
1075 | 105 | import stat | ||
1076 | 106 | import subprocess | ||
1077 | 107 | import functools | ||
1078 | 108 | |||
1079 | 109 | import charmhelpers.contrib.templating.contexts | ||
1080 | 110 | import charmhelpers.core.host | ||
1081 | 111 | import charmhelpers.core.hookenv | ||
1082 | 112 | import charmhelpers.fetch | ||
1083 | 113 | |||
1084 | 114 | |||
1085 | 115 | charm_dir = os.environ.get('CHARM_DIR', '') | ||
1086 | 116 | ansible_hosts_path = '/etc/ansible/hosts' | ||
1087 | 117 | # Ansible will automatically include any vars in the following | ||
1088 | 118 | # file in its inventory when run locally. | ||
1089 | 119 | ansible_vars_path = '/etc/ansible/host_vars/localhost' | ||
1090 | 120 | |||
1091 | 121 | |||
1092 | 122 | def install_ansible_support(from_ppa=True, ppa_location='ppa:rquillo/ansible'): | ||
1093 | 123 | """Installs the ansible package. | ||
1094 | 124 | |||
1095 | 125 | By default it is installed from the `PPA`_ linked from | ||
1096 | 126 | the ansible `website`_ or from a ppa specified by a charm config.. | ||
1097 | 127 | |||
1098 | 128 | .. _PPA: https://launchpad.net/~rquillo/+archive/ansible | ||
1099 | 129 | .. _website: http://docs.ansible.com/intro_installation.html#latest-releases-via-apt-ubuntu | ||
1100 | 130 | |||
1101 | 131 | If from_ppa is empty, you must ensure that the package is available | ||
1102 | 132 | from a configured repository. | ||
1103 | 133 | """ | ||
1104 | 134 | if from_ppa: | ||
1105 | 135 | charmhelpers.fetch.add_source(ppa_location) | ||
1106 | 136 | charmhelpers.fetch.apt_update(fatal=True) | ||
1107 | 137 | charmhelpers.fetch.apt_install('ansible') | ||
1108 | 138 | with open(ansible_hosts_path, 'w+') as hosts_file: | ||
1109 | 139 | hosts_file.write('localhost ansible_connection=local') | ||
1110 | 140 | |||
1111 | 141 | |||
1112 | 142 | def apply_playbook(playbook, tags=None, extra_vars=None): | ||
1113 | 143 | tags = tags or [] | ||
1114 | 144 | tags = ",".join(tags) | ||
1115 | 145 | charmhelpers.contrib.templating.contexts.juju_state_to_yaml( | ||
1116 | 146 | ansible_vars_path, namespace_separator='__', | ||
1117 | 147 | allow_hyphens_in_keys=False, mode=(stat.S_IRUSR | stat.S_IWUSR)) | ||
1118 | 148 | |||
1119 | 149 | # we want ansible's log output to be unbuffered | ||
1120 | 150 | env = os.environ.copy() | ||
1121 | 151 | env['PYTHONUNBUFFERED'] = "1" | ||
1122 | 152 | call = [ | ||
1123 | 153 | 'ansible-playbook', | ||
1124 | 154 | '-c', | ||
1125 | 155 | 'local', | ||
1126 | 156 | playbook, | ||
1127 | 157 | ] | ||
1128 | 158 | if tags: | ||
1129 | 159 | call.extend(['--tags', '{}'.format(tags)]) | ||
1130 | 160 | if extra_vars: | ||
1131 | 161 | extra = ["%s=%s" % (k, v) for k, v in extra_vars.items()] | ||
1132 | 162 | call.extend(['--extra-vars', " ".join(extra)]) | ||
1133 | 163 | subprocess.check_call(call, env=env) | ||
1134 | 164 | |||
1135 | 165 | |||
1136 | 166 | class AnsibleHooks(charmhelpers.core.hookenv.Hooks): | ||
1137 | 167 | """Run a playbook with the hook-name as the tag. | ||
1138 | 168 | |||
1139 | 169 | This helper builds on the standard hookenv.Hooks helper, | ||
1140 | 170 | but additionally runs the playbook with the hook-name specified | ||
1141 | 171 | using --tags (ie. running all the tasks tagged with the hook-name). | ||
1142 | 172 | |||
1143 | 173 | Example:: | ||
1144 | 174 | |||
1145 | 175 | hooks = AnsibleHooks(playbook_path='playbooks/my_machine_state.yaml') | ||
1146 | 176 | |||
1147 | 177 | # All the tasks within my_machine_state.yaml tagged with 'install' | ||
1148 | 178 | # will be run automatically after do_custom_work() | ||
1149 | 179 | @hooks.hook() | ||
1150 | 180 | def install(): | ||
1151 | 181 | do_custom_work() | ||
1152 | 182 | |||
1153 | 183 | # For most of your hooks, you won't need to do anything other | ||
1154 | 184 | # than run the tagged tasks for the hook: | ||
1155 | 185 | @hooks.hook('config-changed', 'start', 'stop') | ||
1156 | 186 | def just_use_playbook(): | ||
1157 | 187 | pass | ||
1158 | 188 | |||
1159 | 189 | # As a convenience, you can avoid the above noop function by specifying | ||
1160 | 190 | # the hooks which are handled by ansible-only and they'll be registered | ||
1161 | 191 | # for you: | ||
1162 | 192 | # hooks = AnsibleHooks( | ||
1163 | 193 | # 'playbooks/my_machine_state.yaml', | ||
1164 | 194 | # default_hooks=['config-changed', 'start', 'stop']) | ||
1165 | 195 | |||
1166 | 196 | if __name__ == "__main__": | ||
1167 | 197 | # execute a hook based on the name the program is called by | ||
1168 | 198 | hooks.execute(sys.argv) | ||
1169 | 199 | |||
1170 | 200 | """ | ||
1171 | 201 | |||
1172 | 202 | def __init__(self, playbook_path, default_hooks=None): | ||
1173 | 203 | """Register any hooks handled by ansible.""" | ||
1174 | 204 | super(AnsibleHooks, self).__init__() | ||
1175 | 205 | |||
1176 | 206 | self._actions = {} | ||
1177 | 207 | self.playbook_path = playbook_path | ||
1178 | 208 | |||
1179 | 209 | default_hooks = default_hooks or [] | ||
1180 | 210 | |||
1181 | 211 | def noop(*args, **kwargs): | ||
1182 | 212 | pass | ||
1183 | 213 | |||
1184 | 214 | for hook in default_hooks: | ||
1185 | 215 | self.register(hook, noop) | ||
1186 | 216 | |||
1187 | 217 | def register_action(self, name, function): | ||
1188 | 218 | """Register a hook""" | ||
1189 | 219 | self._actions[name] = function | ||
1190 | 220 | |||
1191 | 221 | def execute(self, args): | ||
1192 | 222 | """Execute the hook followed by the playbook using the hook as tag.""" | ||
1193 | 223 | hook_name = os.path.basename(args[0]) | ||
1194 | 224 | extra_vars = None | ||
1195 | 225 | if hook_name in self._actions: | ||
1196 | 226 | extra_vars = self._actions[hook_name](args[1:]) | ||
1197 | 227 | else: | ||
1198 | 228 | super(AnsibleHooks, self).execute(args) | ||
1199 | 229 | |||
1200 | 230 | charmhelpers.contrib.ansible.apply_playbook( | ||
1201 | 231 | self.playbook_path, tags=[hook_name], extra_vars=extra_vars) | ||
1202 | 232 | |||
1203 | 233 | def action(self, *action_names): | ||
1204 | 234 | """Decorator, registering them as actions""" | ||
1205 | 235 | def action_wrapper(decorated): | ||
1206 | 236 | |||
1207 | 237 | @functools.wraps(decorated) | ||
1208 | 238 | def wrapper(argv): | ||
1209 | 239 | kwargs = dict(arg.split('=') for arg in argv) | ||
1210 | 240 | try: | ||
1211 | 241 | return decorated(**kwargs) | ||
1212 | 242 | except TypeError as e: | ||
1213 | 243 | if decorated.__doc__: | ||
1214 | 244 | e.args += (decorated.__doc__,) | ||
1215 | 245 | raise | ||
1216 | 246 | |||
1217 | 247 | self.register_action(decorated.__name__, wrapper) | ||
1218 | 248 | if '_' in decorated.__name__: | ||
1219 | 249 | self.register_action( | ||
1220 | 250 | decorated.__name__.replace('_', '-'), wrapper) | ||
1221 | 251 | |||
1222 | 252 | return wrapper | ||
1223 | 253 | |||
1224 | 254 | return action_wrapper | ||
1225 | 255 | 0 | ||
1226 | === removed directory 'hooks/charmhelpers/contrib/benchmark' | |||
1227 | === removed file 'hooks/charmhelpers/contrib/benchmark/__init__.py' | |||
1228 | --- hooks/charmhelpers/contrib/benchmark/__init__.py 2015-07-29 18:23:55 +0000 | |||
1229 | +++ hooks/charmhelpers/contrib/benchmark/__init__.py 1970-01-01 00:00:00 +0000 | |||
1230 | @@ -1,126 +0,0 @@ | |||
1231 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
1232 | 2 | # | ||
1233 | 3 | # This file is part of charm-helpers. | ||
1234 | 4 | # | ||
1235 | 5 | # charm-helpers is free software: you can redistribute it and/or modify | ||
1236 | 6 | # it under the terms of the GNU Lesser General Public License version 3 as | ||
1237 | 7 | # published by the Free Software Foundation. | ||
1238 | 8 | # | ||
1239 | 9 | # charm-helpers is distributed in the hope that it will be useful, | ||
1240 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
1241 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
1242 | 12 | # GNU Lesser General Public License for more details. | ||
1243 | 13 | # | ||
1244 | 14 | # You should have received a copy of the GNU Lesser General Public License | ||
1245 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | ||
1246 | 16 | |||
1247 | 17 | import subprocess | ||
1248 | 18 | import time | ||
1249 | 19 | import os | ||
1250 | 20 | from distutils.spawn import find_executable | ||
1251 | 21 | |||
1252 | 22 | from charmhelpers.core.hookenv import ( | ||
1253 | 23 | in_relation_hook, | ||
1254 | 24 | relation_ids, | ||
1255 | 25 | relation_set, | ||
1256 | 26 | relation_get, | ||
1257 | 27 | ) | ||
1258 | 28 | |||
1259 | 29 | |||
1260 | 30 | def action_set(key, val): | ||
1261 | 31 | if find_executable('action-set'): | ||
1262 | 32 | action_cmd = ['action-set'] | ||
1263 | 33 | |||
1264 | 34 | if isinstance(val, dict): | ||
1265 | 35 | for k, v in iter(val.items()): | ||
1266 | 36 | action_set('%s.%s' % (key, k), v) | ||
1267 | 37 | return True | ||
1268 | 38 | |||
1269 | 39 | action_cmd.append('%s=%s' % (key, val)) | ||
1270 | 40 | subprocess.check_call(action_cmd) | ||
1271 | 41 | return True | ||
1272 | 42 | return False | ||
1273 | 43 | |||
1274 | 44 | |||
1275 | 45 | class Benchmark(): | ||
1276 | 46 | """ | ||
1277 | 47 | Helper class for the `benchmark` interface. | ||
1278 | 48 | |||
1279 | 49 | :param list actions: Define the actions that are also benchmarks | ||
1280 | 50 | |||
1281 | 51 | From inside the benchmark-relation-changed hook, you would | ||
1282 | 52 | Benchmark(['memory', 'cpu', 'disk', 'smoke', 'custom']) | ||
1283 | 53 | |||
1284 | 54 | Examples: | ||
1285 | 55 | |||
1286 | 56 | siege = Benchmark(['siege']) | ||
1287 | 57 | siege.start() | ||
1288 | 58 | [... run siege ...] | ||
1289 | 59 | # The higher the score, the better the benchmark | ||
1290 | 60 | siege.set_composite_score(16.70, 'trans/sec', 'desc') | ||
1291 | 61 | siege.finish() | ||
1292 | 62 | |||
1293 | 63 | |||
1294 | 64 | """ | ||
1295 | 65 | |||
1296 | 66 | BENCHMARK_CONF = '/etc/benchmark.conf' # Replaced in testing | ||
1297 | 67 | |||
1298 | 68 | required_keys = [ | ||
1299 | 69 | 'hostname', | ||
1300 | 70 | 'port', | ||
1301 | 71 | 'graphite_port', | ||
1302 | 72 | 'graphite_endpoint', | ||
1303 | 73 | 'api_port' | ||
1304 | 74 | ] | ||
1305 | 75 | |||
1306 | 76 | def __init__(self, benchmarks=None): | ||
1307 | 77 | if in_relation_hook(): | ||
1308 | 78 | if benchmarks is not None: | ||
1309 | 79 | for rid in sorted(relation_ids('benchmark')): | ||
1310 | 80 | relation_set(relation_id=rid, relation_settings={ | ||
1311 | 81 | 'benchmarks': ",".join(benchmarks) | ||
1312 | 82 | }) | ||
1313 | 83 | |||
1314 | 84 | # Check the relation data | ||
1315 | 85 | config = {} | ||
1316 | 86 | for key in self.required_keys: | ||
1317 | 87 | val = relation_get(key) | ||
1318 | 88 | if val is not None: | ||
1319 | 89 | config[key] = val | ||
1320 | 90 | else: | ||
1321 | 91 | # We don't have all of the required keys | ||
1322 | 92 | config = {} | ||
1323 | 93 | break | ||
1324 | 94 | |||
1325 | 95 | if len(config): | ||
1326 | 96 | with open(self.BENCHMARK_CONF, 'w') as f: | ||
1327 | 97 | for key, val in iter(config.items()): | ||
1328 | 98 | f.write("%s=%s\n" % (key, val)) | ||
1329 | 99 | |||
1330 | 100 | @staticmethod | ||
1331 | 101 | def start(): | ||
1332 | 102 | action_set('meta.start', time.strftime('%Y-%m-%dT%H:%M:%SZ')) | ||
1333 | 103 | |||
1334 | 104 | """ | ||
1335 | 105 | If the collectd charm is also installed, tell it to send a snapshot | ||
1336 | 106 | of the current profile data. | ||
1337 | 107 | """ | ||
1338 | 108 | COLLECT_PROFILE_DATA = '/usr/local/bin/collect-profile-data' | ||
1339 | 109 | if os.path.exists(COLLECT_PROFILE_DATA): | ||
1340 | 110 | subprocess.check_output([COLLECT_PROFILE_DATA]) | ||
1341 | 111 | |||
1342 | 112 | @staticmethod | ||
1343 | 113 | def finish(): | ||
1344 | 114 | action_set('meta.stop', time.strftime('%Y-%m-%dT%H:%M:%SZ')) | ||
1345 | 115 | |||
1346 | 116 | @staticmethod | ||
1347 | 117 | def set_composite_score(value, units, direction='asc'): | ||
1348 | 118 | """ | ||
1349 | 119 | Set the composite score for a benchmark run. This is a single number | ||
1350 | 120 | representative of the benchmark results. This could be the most | ||
1351 | 121 | important metric, or an amalgamation of metric scores. | ||
1352 | 122 | """ | ||
1353 | 123 | return action_set( | ||
1354 | 124 | "meta.composite", | ||
1355 | 125 | {'value': value, 'units': units, 'direction': direction} | ||
1356 | 126 | ) | ||
1357 | 127 | 0 | ||
1358 | === removed directory 'hooks/charmhelpers/contrib/charmhelpers' | |||
1359 | === removed file 'hooks/charmhelpers/contrib/charmhelpers/__init__.py' | |||
1360 | --- hooks/charmhelpers/contrib/charmhelpers/__init__.py 2015-07-29 18:23:55 +0000 | |||
1361 | +++ hooks/charmhelpers/contrib/charmhelpers/__init__.py 1970-01-01 00:00:00 +0000 | |||
1362 | @@ -1,208 +0,0 @@ | |||
1363 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
1364 | 2 | # | ||
1365 | 3 | # This file is part of charm-helpers. | ||
1366 | 4 | # | ||
1367 | 5 | # charm-helpers is free software: you can redistribute it and/or modify | ||
1368 | 6 | # it under the terms of the GNU Lesser General Public License version 3 as | ||
1369 | 7 | # published by the Free Software Foundation. | ||
1370 | 8 | # | ||
1371 | 9 | # charm-helpers is distributed in the hope that it will be useful, | ||
1372 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
1373 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
1374 | 12 | # GNU Lesser General Public License for more details. | ||
1375 | 13 | # | ||
1376 | 14 | # You should have received a copy of the GNU Lesser General Public License | ||
1377 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | ||
1378 | 16 | |||
1379 | 17 | # Copyright 2012 Canonical Ltd. This software is licensed under the | ||
1380 | 18 | # GNU Affero General Public License version 3 (see the file LICENSE). | ||
1381 | 19 | |||
1382 | 20 | import warnings | ||
1383 | 21 | warnings.warn("contrib.charmhelpers is deprecated", DeprecationWarning) # noqa | ||
1384 | 22 | |||
1385 | 23 | import operator | ||
1386 | 24 | import tempfile | ||
1387 | 25 | import time | ||
1388 | 26 | import yaml | ||
1389 | 27 | import subprocess | ||
1390 | 28 | |||
1391 | 29 | import six | ||
1392 | 30 | if six.PY3: | ||
1393 | 31 | from urllib.request import urlopen | ||
1394 | 32 | from urllib.error import (HTTPError, URLError) | ||
1395 | 33 | else: | ||
1396 | 34 | from urllib2 import (urlopen, HTTPError, URLError) | ||
1397 | 35 | |||
1398 | 36 | """Helper functions for writing Juju charms in Python.""" | ||
1399 | 37 | |||
1400 | 38 | __metaclass__ = type | ||
1401 | 39 | __all__ = [ | ||
1402 | 40 | # 'get_config', # core.hookenv.config() | ||
1403 | 41 | # 'log', # core.hookenv.log() | ||
1404 | 42 | # 'log_entry', # core.hookenv.log() | ||
1405 | 43 | # 'log_exit', # core.hookenv.log() | ||
1406 | 44 | # 'relation_get', # core.hookenv.relation_get() | ||
1407 | 45 | # 'relation_set', # core.hookenv.relation_set() | ||
1408 | 46 | # 'relation_ids', # core.hookenv.relation_ids() | ||
1409 | 47 | # 'relation_list', # core.hookenv.relation_units() | ||
1410 | 48 | # 'config_get', # core.hookenv.config() | ||
1411 | 49 | # 'unit_get', # core.hookenv.unit_get() | ||
1412 | 50 | # 'open_port', # core.hookenv.open_port() | ||
1413 | 51 | # 'close_port', # core.hookenv.close_port() | ||
1414 | 52 | # 'service_control', # core.host.service() | ||
1415 | 53 | 'unit_info', # client-side, NOT IMPLEMENTED | ||
1416 | 54 | 'wait_for_machine', # client-side, NOT IMPLEMENTED | ||
1417 | 55 | 'wait_for_page_contents', # client-side, NOT IMPLEMENTED | ||
1418 | 56 | 'wait_for_relation', # client-side, NOT IMPLEMENTED | ||
1419 | 57 | 'wait_for_unit', # client-side, NOT IMPLEMENTED | ||
1420 | 58 | ] | ||
1421 | 59 | |||
1422 | 60 | |||
1423 | 61 | SLEEP_AMOUNT = 0.1 | ||
1424 | 62 | |||
1425 | 63 | |||
1426 | 64 | # We create a juju_status Command here because it makes testing much, | ||
1427 | 65 | # much easier. | ||
1428 | 66 | def juju_status(): | ||
1429 | 67 | subprocess.check_call(['juju', 'status']) | ||
1430 | 68 | |||
1431 | 69 | # re-implemented as charmhelpers.fetch.configure_sources() | ||
1432 | 70 | # def configure_source(update=False): | ||
1433 | 71 | # source = config_get('source') | ||
1434 | 72 | # if ((source.startswith('ppa:') or | ||
1435 | 73 | # source.startswith('cloud:') or | ||
1436 | 74 | # source.startswith('http:'))): | ||
1437 | 75 | # run('add-apt-repository', source) | ||
1438 | 76 | # if source.startswith("http:"): | ||
1439 | 77 | # run('apt-key', 'import', config_get('key')) | ||
1440 | 78 | # if update: | ||
1441 | 79 | # run('apt-get', 'update') | ||
1442 | 80 | |||
1443 | 81 | |||
1444 | 82 | # DEPRECATED: client-side only | ||
1445 | 83 | def make_charm_config_file(charm_config): | ||
1446 | 84 | charm_config_file = tempfile.NamedTemporaryFile(mode='w+') | ||
1447 | 85 | charm_config_file.write(yaml.dump(charm_config)) | ||
1448 | 86 | charm_config_file.flush() | ||
1449 | 87 | # The NamedTemporaryFile instance is returned instead of just the name | ||
1450 | 88 | # because we want to take advantage of garbage collection-triggered | ||
1451 | 89 | # deletion of the temp file when it goes out of scope in the caller. | ||
1452 | 90 | return charm_config_file | ||
1453 | 91 | |||
1454 | 92 | |||
1455 | 93 | # DEPRECATED: client-side only | ||
1456 | 94 | def unit_info(service_name, item_name, data=None, unit=None): | ||
1457 | 95 | if data is None: | ||
1458 | 96 | data = yaml.safe_load(juju_status()) | ||
1459 | 97 | service = data['services'].get(service_name) | ||
1460 | 98 | if service is None: | ||
1461 | 99 | # XXX 2012-02-08 gmb: | ||
1462 | 100 | # This allows us to cope with the race condition that we | ||
1463 | 101 | # have between deploying a service and having it come up in | ||
1464 | 102 | # `juju status`. We could probably do with cleaning it up so | ||
1465 | 103 | # that it fails a bit more noisily after a while. | ||
1466 | 104 | return '' | ||
1467 | 105 | units = service['units'] | ||
1468 | 106 | if unit is not None: | ||
1469 | 107 | item = units[unit][item_name] | ||
1470 | 108 | else: | ||
1471 | 109 | # It might seem odd to sort the units here, but we do it to | ||
1472 | 110 | # ensure that when no unit is specified, the first unit for the | ||
1473 | 111 | # service (or at least the one with the lowest number) is the | ||
1474 | 112 | # one whose data gets returned. | ||
1475 | 113 | sorted_unit_names = sorted(units.keys()) | ||
1476 | 114 | item = units[sorted_unit_names[0]][item_name] | ||
1477 | 115 | return item | ||
1478 | 116 | |||
1479 | 117 | |||
1480 | 118 | # DEPRECATED: client-side only | ||
1481 | 119 | def get_machine_data(): | ||
1482 | 120 | return yaml.safe_load(juju_status())['machines'] | ||
1483 | 121 | |||
1484 | 122 | |||
1485 | 123 | # DEPRECATED: client-side only | ||
1486 | 124 | def wait_for_machine(num_machines=1, timeout=300): | ||
1487 | 125 | """Wait `timeout` seconds for `num_machines` machines to come up. | ||
1488 | 126 | |||
1489 | 127 | This wait_for... function can be called by other wait_for functions | ||
1490 | 128 | whose timeouts might be too short in situations where only a bare | ||
1491 | 129 | Juju setup has been bootstrapped. | ||
1492 | 130 | |||
1493 | 131 | :return: A tuple of (num_machines, time_taken). This is used for | ||
1494 | 132 | testing. | ||
1495 | 133 | """ | ||
1496 | 134 | # You may think this is a hack, and you'd be right. The easiest way | ||
1497 | 135 | # to tell what environment we're working in (LXC vs EC2) is to check | ||
1498 | 136 | # the dns-name of the first machine. If it's localhost we're in LXC | ||
1499 | 137 | # and we can just return here. | ||
1500 | 138 | if get_machine_data()[0]['dns-name'] == 'localhost': | ||
1501 | 139 | return 1, 0 | ||
1502 | 140 | start_time = time.time() | ||
1503 | 141 | while True: | ||
1504 | 142 | # Drop the first machine, since it's the Zookeeper and that's | ||
1505 | 143 | # not a machine that we need to wait for. This will only work | ||
1506 | 144 | # for EC2 environments, which is why we return early above if | ||
1507 | 145 | # we're in LXC. | ||
1508 | 146 | machine_data = get_machine_data() | ||
1509 | 147 | non_zookeeper_machines = [ | ||
1510 | 148 | machine_data[key] for key in list(machine_data.keys())[1:]] | ||
1511 | 149 | if len(non_zookeeper_machines) >= num_machines: | ||
1512 | 150 | all_machines_running = True | ||
1513 | 151 | for machine in non_zookeeper_machines: | ||
1514 | 152 | if machine.get('instance-state') != 'running': | ||
1515 | 153 | all_machines_running = False | ||
1516 | 154 | break | ||
1517 | 155 | if all_machines_running: | ||
1518 | 156 | break | ||
1519 | 157 | if time.time() - start_time >= timeout: | ||
1520 | 158 | raise RuntimeError('timeout waiting for service to start') | ||
1521 | 159 | time.sleep(SLEEP_AMOUNT) | ||
1522 | 160 | return num_machines, time.time() - start_time | ||
1523 | 161 | |||
1524 | 162 | |||
1525 | 163 | # DEPRECATED: client-side only | ||
1526 | 164 | def wait_for_unit(service_name, timeout=480): | ||
1527 | 165 | """Wait `timeout` seconds for a given service name to come up.""" | ||
1528 | 166 | wait_for_machine(num_machines=1) | ||
1529 | 167 | start_time = time.time() | ||
1530 | 168 | while True: | ||
1531 | 169 | state = unit_info(service_name, 'agent-state') | ||
1532 | 170 | if 'error' in state or state == 'started': | ||
1533 | 171 | break | ||
1534 | 172 | if time.time() - start_time >= timeout: | ||
1535 | 173 | raise RuntimeError('timeout waiting for service to start') | ||
1536 | 174 | time.sleep(SLEEP_AMOUNT) | ||
1537 | 175 | if state != 'started': | ||
1538 | 176 | raise RuntimeError('unit did not start, agent-state: ' + state) | ||
1539 | 177 | |||
1540 | 178 | |||
1541 | 179 | # DEPRECATED: client-side only | ||
1542 | 180 | def wait_for_relation(service_name, relation_name, timeout=120): | ||
1543 | 181 | """Wait `timeout` seconds for a given relation to come up.""" | ||
1544 | 182 | start_time = time.time() | ||
1545 | 183 | while True: | ||
1546 | 184 | relation = unit_info(service_name, 'relations').get(relation_name) | ||
1547 | 185 | if relation is not None and relation['state'] == 'up': | ||
1548 | 186 | break | ||
1549 | 187 | if time.time() - start_time >= timeout: | ||
1550 | 188 | raise RuntimeError('timeout waiting for relation to be up') | ||
1551 | 189 | time.sleep(SLEEP_AMOUNT) | ||
1552 | 190 | |||
1553 | 191 | |||
1554 | 192 | # DEPRECATED: client-side only | ||
1555 | 193 | def wait_for_page_contents(url, contents, timeout=120, validate=None): | ||
1556 | 194 | if validate is None: | ||
1557 | 195 | validate = operator.contains | ||
1558 | 196 | start_time = time.time() | ||
1559 | 197 | while True: | ||
1560 | 198 | try: | ||
1561 | 199 | stream = urlopen(url) | ||
1562 | 200 | except (HTTPError, URLError): | ||
1563 | 201 | pass | ||
1564 | 202 | else: | ||
1565 | 203 | page = stream.read() | ||
1566 | 204 | if validate(page, contents): | ||
1567 | 205 | return page | ||
1568 | 206 | if time.time() - start_time >= timeout: | ||
1569 | 207 | raise RuntimeError('timeout waiting for contents of ' + url) | ||
1570 | 208 | time.sleep(SLEEP_AMOUNT) | ||
1571 | 209 | 0 | ||
1572 | === removed directory 'hooks/charmhelpers/contrib/charmsupport' | |||
1573 | === removed file 'hooks/charmhelpers/contrib/charmsupport/__init__.py' | |||
1574 | --- hooks/charmhelpers/contrib/charmsupport/__init__.py 2015-07-29 18:23:55 +0000 | |||
1575 | +++ hooks/charmhelpers/contrib/charmsupport/__init__.py 1970-01-01 00:00:00 +0000 | |||
1576 | @@ -1,15 +0,0 @@ | |||
1577 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
1578 | 2 | # | ||
1579 | 3 | # This file is part of charm-helpers. | ||
1580 | 4 | # | ||
1581 | 5 | # charm-helpers is free software: you can redistribute it and/or modify | ||
1582 | 6 | # it under the terms of the GNU Lesser General Public License version 3 as | ||
1583 | 7 | # published by the Free Software Foundation. | ||
1584 | 8 | # | ||
1585 | 9 | # charm-helpers is distributed in the hope that it will be useful, | ||
1586 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
1587 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
1588 | 12 | # GNU Lesser General Public License for more details. | ||
1589 | 13 | # | ||
1590 | 14 | # You should have received a copy of the GNU Lesser General Public License | ||
1591 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | ||
1592 | 16 | 0 | ||
1593 | === removed file 'hooks/charmhelpers/contrib/charmsupport/nrpe.py' | |||
1594 | --- hooks/charmhelpers/contrib/charmsupport/nrpe.py 2015-07-29 18:23:55 +0000 | |||
1595 | +++ hooks/charmhelpers/contrib/charmsupport/nrpe.py 1970-01-01 00:00:00 +0000 | |||
1596 | @@ -1,360 +0,0 @@ | |||
1597 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
1598 | 2 | # | ||
1599 | 3 | # This file is part of charm-helpers. | ||
1600 | 4 | # | ||
1601 | 5 | # charm-helpers is free software: you can redistribute it and/or modify | ||
1602 | 6 | # it under the terms of the GNU Lesser General Public License version 3 as | ||
1603 | 7 | # published by the Free Software Foundation. | ||
1604 | 8 | # | ||
1605 | 9 | # charm-helpers is distributed in the hope that it will be useful, | ||
1606 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
1607 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
1608 | 12 | # GNU Lesser General Public License for more details. | ||
1609 | 13 | # | ||
1610 | 14 | # You should have received a copy of the GNU Lesser General Public License | ||
1611 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | ||
1612 | 16 | |||
1613 | 17 | """Compatibility with the nrpe-external-master charm""" | ||
1614 | 18 | # Copyright 2012 Canonical Ltd. | ||
1615 | 19 | # | ||
1616 | 20 | # Authors: | ||
1617 | 21 | # Matthew Wedgwood <matthew.wedgwood@canonical.com> | ||
1618 | 22 | |||
1619 | 23 | import subprocess | ||
1620 | 24 | import pwd | ||
1621 | 25 | import grp | ||
1622 | 26 | import os | ||
1623 | 27 | import glob | ||
1624 | 28 | import shutil | ||
1625 | 29 | import re | ||
1626 | 30 | import shlex | ||
1627 | 31 | import yaml | ||
1628 | 32 | |||
1629 | 33 | from charmhelpers.core.hookenv import ( | ||
1630 | 34 | config, | ||
1631 | 35 | local_unit, | ||
1632 | 36 | log, | ||
1633 | 37 | relation_ids, | ||
1634 | 38 | relation_set, | ||
1635 | 39 | relations_of_type, | ||
1636 | 40 | ) | ||
1637 | 41 | |||
1638 | 42 | from charmhelpers.core.host import service | ||
1639 | 43 | |||
1640 | 44 | # This module adds compatibility with the nrpe-external-master and plain nrpe | ||
1641 | 45 | # subordinate charms. To use it in your charm: | ||
1642 | 46 | # | ||
1643 | 47 | # 1. Update metadata.yaml | ||
1644 | 48 | # | ||
1645 | 49 | # provides: | ||
1646 | 50 | # (...) | ||
1647 | 51 | # nrpe-external-master: | ||
1648 | 52 | # interface: nrpe-external-master | ||
1649 | 53 | # scope: container | ||
1650 | 54 | # | ||
1651 | 55 | # and/or | ||
1652 | 56 | # | ||
1653 | 57 | # provides: | ||
1654 | 58 | # (...) | ||
1655 | 59 | # local-monitors: | ||
1656 | 60 | # interface: local-monitors | ||
1657 | 61 | # scope: container | ||
1658 | 62 | |||
1659 | 63 | # | ||
1660 | 64 | # 2. Add the following to config.yaml | ||
1661 | 65 | # | ||
1662 | 66 | # nagios_context: | ||
1663 | 67 | # default: "juju" | ||
1664 | 68 | # type: string | ||
1665 | 69 | # description: | | ||
1666 | 70 | # Used by the nrpe subordinate charms. | ||
1667 | 71 | # A string that will be prepended to instance name to set the host name | ||
1668 | 72 | # in nagios. So for instance the hostname would be something like: | ||
1669 | 73 | # juju-myservice-0 | ||
1670 | 74 | # If you're running multiple environments with the same services in them | ||
1671 | 75 | # this allows you to differentiate between them. | ||
1672 | 76 | # nagios_servicegroups: | ||
1673 | 77 | # default: "" | ||
1674 | 78 | # type: string | ||
1675 | 79 | # description: | | ||
1676 | 80 | # A comma-separated list of nagios servicegroups. | ||
1677 | 81 | # If left empty, the nagios_context will be used as the servicegroup | ||
1678 | 82 | # | ||
1679 | 83 | # 3. Add custom checks (Nagios plugins) to files/nrpe-external-master | ||
1680 | 84 | # | ||
1681 | 85 | # 4. Update your hooks.py with something like this: | ||
1682 | 86 | # | ||
1683 | 87 | # from charmsupport.nrpe import NRPE | ||
1684 | 88 | # (...) | ||
1685 | 89 | # def update_nrpe_config(): | ||
1686 | 90 | # nrpe_compat = NRPE() | ||
1687 | 91 | # nrpe_compat.add_check( | ||
1688 | 92 | # shortname = "myservice", | ||
1689 | 93 | # description = "Check MyService", | ||
1690 | 94 | # check_cmd = "check_http -w 2 -c 10 http://localhost" | ||
1691 | 95 | # ) | ||
1692 | 96 | # nrpe_compat.add_check( | ||
1693 | 97 | # "myservice_other", | ||
1694 | 98 | # "Check for widget failures", | ||
1695 | 99 | # check_cmd = "/srv/myapp/scripts/widget_check" | ||
1696 | 100 | # ) | ||
1697 | 101 | # nrpe_compat.write() | ||
1698 | 102 | # | ||
1699 | 103 | # def config_changed(): | ||
1700 | 104 | # (...) | ||
1701 | 105 | # update_nrpe_config() | ||
1702 | 106 | # | ||
1703 | 107 | # def nrpe_external_master_relation_changed(): | ||
1704 | 108 | # update_nrpe_config() | ||
1705 | 109 | # | ||
1706 | 110 | # def local_monitors_relation_changed(): | ||
1707 | 111 | # update_nrpe_config() | ||
1708 | 112 | # | ||
1709 | 113 | # 5. ln -s hooks.py nrpe-external-master-relation-changed | ||
1710 | 114 | # ln -s hooks.py local-monitors-relation-changed | ||
1711 | 115 | |||
1712 | 116 | |||
1713 | 117 | class CheckException(Exception): | ||
1714 | 118 | pass | ||
1715 | 119 | |||
1716 | 120 | |||
1717 | 121 | class Check(object): | ||
1718 | 122 | shortname_re = '[A-Za-z0-9-_]+$' | ||
1719 | 123 | service_template = (""" | ||
1720 | 124 | #--------------------------------------------------- | ||
1721 | 125 | # This file is Juju managed | ||
1722 | 126 | #--------------------------------------------------- | ||
1723 | 127 | define service {{ | ||
1724 | 128 | use active-service | ||
1725 | 129 | host_name {nagios_hostname} | ||
1726 | 130 | service_description {nagios_hostname}[{shortname}] """ | ||
1727 | 131 | """{description} | ||
1728 | 132 | check_command check_nrpe!{command} | ||
1729 | 133 | servicegroups {nagios_servicegroup} | ||
1730 | 134 | }} | ||
1731 | 135 | """) | ||
1732 | 136 | |||
1733 | 137 | def __init__(self, shortname, description, check_cmd): | ||
1734 | 138 | super(Check, self).__init__() | ||
1735 | 139 | # XXX: could be better to calculate this from the service name | ||
1736 | 140 | if not re.match(self.shortname_re, shortname): | ||
1737 | 141 | raise CheckException("shortname must match {}".format( | ||
1738 | 142 | Check.shortname_re)) | ||
1739 | 143 | self.shortname = shortname | ||
1740 | 144 | self.command = "check_{}".format(shortname) | ||
1741 | 145 | # Note: a set of invalid characters is defined by the | ||
1742 | 146 | # Nagios server config | ||
1743 | 147 | # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()= | ||
1744 | 148 | self.description = description | ||
1745 | 149 | self.check_cmd = self._locate_cmd(check_cmd) | ||
1746 | 150 | |||
1747 | 151 | def _locate_cmd(self, check_cmd): | ||
1748 | 152 | search_path = ( | ||
1749 | 153 | '/usr/lib/nagios/plugins', | ||
1750 | 154 | '/usr/local/lib/nagios/plugins', | ||
1751 | 155 | ) | ||
1752 | 156 | parts = shlex.split(check_cmd) | ||
1753 | 157 | for path in search_path: | ||
1754 | 158 | if os.path.exists(os.path.join(path, parts[0])): | ||
1755 | 159 | command = os.path.join(path, parts[0]) | ||
1756 | 160 | if len(parts) > 1: | ||
1757 | 161 | command += " " + " ".join(parts[1:]) | ||
1758 | 162 | return command | ||
1759 | 163 | log('Check command not found: {}'.format(parts[0])) | ||
1760 | 164 | return '' | ||
1761 | 165 | |||
1762 | 166 | def write(self, nagios_context, hostname, nagios_servicegroups): | ||
1763 | 167 | nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format( | ||
1764 | 168 | self.command) | ||
1765 | 169 | with open(nrpe_check_file, 'w') as nrpe_check_config: | ||
1766 | 170 | nrpe_check_config.write("# check {}\n".format(self.shortname)) | ||
1767 | 171 | nrpe_check_config.write("command[{}]={}\n".format( | ||
1768 | 172 | self.command, self.check_cmd)) | ||
1769 | 173 | |||
1770 | 174 | if not os.path.exists(NRPE.nagios_exportdir): | ||
1771 | 175 | log('Not writing service config as {} is not accessible'.format( | ||
1772 | 176 | NRPE.nagios_exportdir)) | ||
1773 | 177 | else: | ||
1774 | 178 | self.write_service_config(nagios_context, hostname, | ||
1775 | 179 | nagios_servicegroups) | ||
1776 | 180 | |||
1777 | 181 | def write_service_config(self, nagios_context, hostname, | ||
1778 | 182 | nagios_servicegroups): | ||
1779 | 183 | for f in os.listdir(NRPE.nagios_exportdir): | ||
1780 | 184 | if re.search('.*{}.cfg'.format(self.command), f): | ||
1781 | 185 | os.remove(os.path.join(NRPE.nagios_exportdir, f)) | ||
1782 | 186 | |||
1783 | 187 | templ_vars = { | ||
1784 | 188 | 'nagios_hostname': hostname, | ||
1785 | 189 | 'nagios_servicegroup': nagios_servicegroups, | ||
1786 | 190 | 'description': self.description, | ||
1787 | 191 | 'shortname': self.shortname, | ||
1788 | 192 | 'command': self.command, | ||
1789 | 193 | } | ||
1790 | 194 | nrpe_service_text = Check.service_template.format(**templ_vars) | ||
1791 | 195 | nrpe_service_file = '{}/service__{}_{}.cfg'.format( | ||
1792 | 196 | NRPE.nagios_exportdir, hostname, self.command) | ||
1793 | 197 | with open(nrpe_service_file, 'w') as nrpe_service_config: | ||
1794 | 198 | nrpe_service_config.write(str(nrpe_service_text)) | ||
1795 | 199 | |||
1796 | 200 | def run(self): | ||
1797 | 201 | subprocess.call(self.check_cmd) | ||
1798 | 202 | |||
1799 | 203 | |||
1800 | 204 | class NRPE(object): | ||
1801 | 205 | nagios_logdir = '/var/log/nagios' | ||
1802 | 206 | nagios_exportdir = '/var/lib/nagios/export' | ||
1803 | 207 | nrpe_confdir = '/etc/nagios/nrpe.d' | ||
1804 | 208 | |||
1805 | 209 | def __init__(self, hostname=None): | ||
1806 | 210 | super(NRPE, self).__init__() | ||
1807 | 211 | self.config = config() | ||
1808 | 212 | self.nagios_context = self.config['nagios_context'] | ||
1809 | 213 | if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']: | ||
1810 | 214 | self.nagios_servicegroups = self.config['nagios_servicegroups'] | ||
1811 | 215 | else: | ||
1812 | 216 | self.nagios_servicegroups = self.nagios_context | ||
1813 | 217 | self.unit_name = local_unit().replace('/', '-') | ||
1814 | 218 | if hostname: | ||
1815 | 219 | self.hostname = hostname | ||
1816 | 220 | else: | ||
1817 | 221 | self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) | ||
1818 | 222 | self.checks = [] | ||
1819 | 223 | |||
1820 | 224 | def add_check(self, *args, **kwargs): | ||
1821 | 225 | self.checks.append(Check(*args, **kwargs)) | ||
1822 | 226 | |||
1823 | 227 | def write(self): | ||
1824 | 228 | try: | ||
1825 | 229 | nagios_uid = pwd.getpwnam('nagios').pw_uid | ||
1826 | 230 | nagios_gid = grp.getgrnam('nagios').gr_gid | ||
1827 | 231 | except: | ||
1828 | 232 | log("Nagios user not set up, nrpe checks not updated") | ||
1829 | 233 | return | ||
1830 | 234 | |||
1831 | 235 | if not os.path.exists(NRPE.nagios_logdir): | ||
1832 | 236 | os.mkdir(NRPE.nagios_logdir) | ||
1833 | 237 | os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid) | ||
1834 | 238 | |||
1835 | 239 | nrpe_monitors = {} | ||
1836 | 240 | monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}} | ||
1837 | 241 | for nrpecheck in self.checks: | ||
1838 | 242 | nrpecheck.write(self.nagios_context, self.hostname, | ||
1839 | 243 | self.nagios_servicegroups) | ||
1840 | 244 | nrpe_monitors[nrpecheck.shortname] = { | ||
1841 | 245 | "command": nrpecheck.command, | ||
1842 | 246 | } | ||
1843 | 247 | |||
1844 | 248 | service('restart', 'nagios-nrpe-server') | ||
1845 | 249 | |||
1846 | 250 | monitor_ids = relation_ids("local-monitors") + \ | ||
1847 | 251 | relation_ids("nrpe-external-master") | ||
1848 | 252 | for rid in monitor_ids: | ||
1849 | 253 | relation_set(relation_id=rid, monitors=yaml.dump(monitors)) | ||
1850 | 254 | |||
1851 | 255 | |||
1852 | 256 | def get_nagios_hostcontext(relation_name='nrpe-external-master'): | ||
1853 | 257 | """ | ||
1854 | 258 | Query relation with nrpe subordinate, return the nagios_host_context | ||
1855 | 259 | |||
1856 | 260 | :param str relation_name: Name of relation nrpe sub joined to | ||
1857 | 261 | """ | ||
1858 | 262 | for rel in relations_of_type(relation_name): | ||
1859 | 263 | if 'nagios_hostname' in rel: | ||
1860 | 264 | return rel['nagios_host_context'] | ||
1861 | 265 | |||
1862 | 266 | |||
1863 | 267 | def get_nagios_hostname(relation_name='nrpe-external-master'): | ||
1864 | 268 | """ | ||
1865 | 269 | Query relation with nrpe subordinate, return the nagios_hostname | ||
1866 | 270 | |||
1867 | 271 | :param str relation_name: Name of relation nrpe sub joined to | ||
1868 | 272 | """ | ||
1869 | 273 | for rel in relations_of_type(relation_name): | ||
1870 | 274 | if 'nagios_hostname' in rel: | ||
1871 | 275 | return rel['nagios_hostname'] | ||
1872 | 276 | |||
1873 | 277 | |||
1874 | 278 | def get_nagios_unit_name(relation_name='nrpe-external-master'): | ||
1875 | 279 | """ | ||
1876 | 280 | Return the nagios unit name prepended with host_context if needed | ||
1877 | 281 | |||
1878 | 282 | :param str relation_name: Name of relation nrpe sub joined to | ||
1879 | 283 | """ | ||
1880 | 284 | host_context = get_nagios_hostcontext(relation_name) | ||
1881 | 285 | if host_context: | ||
1882 | 286 | unit = "%s:%s" % (host_context, local_unit()) | ||
1883 | 287 | else: | ||
1884 | 288 | unit = local_unit() | ||
1885 | 289 | return unit | ||
1886 | 290 | |||
1887 | 291 | |||
1888 | 292 | def add_init_service_checks(nrpe, services, unit_name): | ||
1889 | 293 | """ | ||
1890 | 294 | Add checks for each service in list | ||
1891 | 295 | |||
1892 | 296 | :param NRPE nrpe: NRPE object to add check to | ||
1893 | 297 | :param list services: List of services to check | ||
1894 | 298 | :param str unit_name: Unit name to use in check description | ||
1895 | 299 | """ | ||
1896 | 300 | for svc in services: | ||
1897 | 301 | upstart_init = '/etc/init/%s.conf' % svc | ||
1898 | 302 | sysv_init = '/etc/init.d/%s' % svc | ||
1899 | 303 | if os.path.exists(upstart_init): | ||
1900 | 304 | nrpe.add_check( | ||
1901 | 305 | shortname=svc, | ||
1902 | 306 | description='process check {%s}' % unit_name, | ||
1903 | 307 | check_cmd='check_upstart_job %s' % svc | ||
1904 | 308 | ) | ||
1905 | 309 | elif os.path.exists(sysv_init): | ||
1906 | 310 | cronpath = '/etc/cron.d/nagios-service-check-%s' % svc | ||
1907 | 311 | cron_file = ('*/5 * * * * root ' | ||
1908 | 312 | '/usr/local/lib/nagios/plugins/check_exit_status.pl ' | ||
1909 | 313 | '-s /etc/init.d/%s status > ' | ||
1910 | 314 | '/var/lib/nagios/service-check-%s.txt\n' % (svc, | ||
1911 | 315 | svc) | ||
1912 | 316 | ) | ||
1913 | 317 | f = open(cronpath, 'w') | ||
1914 | 318 | f.write(cron_file) | ||
1915 | 319 | f.close() | ||
1916 | 320 | nrpe.add_check( | ||
1917 | 321 | shortname=svc, | ||
1918 | 322 | description='process check {%s}' % unit_name, | ||
1919 | 323 | check_cmd='check_status_file.py -f ' | ||
1920 | 324 | '/var/lib/nagios/service-check-%s.txt' % svc, | ||
1921 | 325 | ) | ||
1922 | 326 | |||
1923 | 327 | |||
1924 | 328 | def copy_nrpe_checks(): | ||
1925 | 329 | """ | ||
1926 | 330 | Copy the nrpe checks into place | ||
1927 | 331 | |||
1928 | 332 | """ | ||
1929 | 333 | NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' | ||
1930 | 334 | nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks', | ||
1931 | 335 | 'charmhelpers', 'contrib', 'openstack', | ||
1932 | 336 | 'files') | ||
1933 | 337 | |||
1934 | 338 | if not os.path.exists(NAGIOS_PLUGINS): | ||
1935 | 339 | os.makedirs(NAGIOS_PLUGINS) | ||
1936 | 340 | for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")): | ||
1937 | 341 | if os.path.isfile(fname): | ||
1938 | 342 | shutil.copy2(fname, | ||
1939 | 343 | os.path.join(NAGIOS_PLUGINS, os.path.basename(fname))) | ||
1940 | 344 | |||
1941 | 345 | |||
1942 | 346 | def add_haproxy_checks(nrpe, unit_name): | ||
1943 | 347 | """ | ||
1944 | 348 | Add checks for each service in list | ||
1945 | 349 | |||
1946 | 350 | :param NRPE nrpe: NRPE object to add check to | ||
1947 | 351 | :param str unit_name: Unit name to use in check description | ||
1948 | 352 | """ | ||
1949 | 353 | nrpe.add_check( | ||
1950 | 354 | shortname='haproxy_servers', | ||
1951 | 355 | description='Check HAProxy {%s}' % unit_name, | ||
1952 | 356 | check_cmd='check_haproxy.sh') | ||
1953 | 357 | nrpe.add_check( | ||
1954 | 358 | shortname='haproxy_queue', | ||
1955 | 359 | description='Check HAProxy queue depth {%s}' % unit_name, | ||
1956 | 360 | check_cmd='check_haproxy_queue_depth.sh') | ||
1957 | 361 | 0 | ||
1958 | === removed file 'hooks/charmhelpers/contrib/charmsupport/volumes.py' | |||
1959 | --- hooks/charmhelpers/contrib/charmsupport/volumes.py 2015-07-29 18:23:55 +0000 | |||
1960 | +++ hooks/charmhelpers/contrib/charmsupport/volumes.py 1970-01-01 00:00:00 +0000 | |||
1961 | @@ -1,175 +0,0 @@ | |||
1962 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
1963 | 2 | # | ||
1964 | 3 | # This file is part of charm-helpers. | ||
1965 | 4 | # | ||
1966 | 5 | # charm-helpers is free software: you can redistribute it and/or modify | ||
1967 | 6 | # it under the terms of the GNU Lesser General Public License version 3 as | ||
1968 | 7 | # published by the Free Software Foundation. | ||
1969 | 8 | # | ||
1970 | 9 | # charm-helpers is distributed in the hope that it will be useful, | ||
1971 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
1972 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
1973 | 12 | # GNU Lesser General Public License for more details. | ||
1974 | 13 | # | ||
1975 | 14 | # You should have received a copy of the GNU Lesser General Public License | ||
1976 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | ||
1977 | 16 | |||
1978 | 17 | ''' | ||
1979 | 18 | Functions for managing volumes in juju units. One volume is supported per unit. | ||
1980 | 19 | Subordinates may have their own storage, provided it is on its own partition. | ||
1981 | 20 | |||
1982 | 21 | Configuration stanzas:: | ||
1983 | 22 | |||
1984 | 23 | volume-ephemeral: | ||
1985 | 24 | type: boolean | ||
1986 | 25 | default: true | ||
1987 | 26 | description: > | ||
1988 | 27 | If false, a volume is mounted as sepecified in "volume-map" | ||
1989 | 28 | If true, ephemeral storage will be used, meaning that log data | ||
1990 | 29 | will only exist as long as the machine. YOU HAVE BEEN WARNED. | ||
1991 | 30 | volume-map: | ||
1992 | 31 | type: string | ||
1993 | 32 | default: {} | ||
1994 | 33 | description: > | ||
1995 | 34 | YAML map of units to device names, e.g: | ||
1996 | 35 | "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }" | ||
1997 | 36 | Service units will raise a configure-error if volume-ephemeral | ||
1998 | 37 | is 'true' and no volume-map value is set. Use 'juju set' to set a | ||
1999 | 38 | value and 'juju resolved' to complete configuration. | ||
2000 | 39 | |||
2001 | 40 | Usage:: | ||
2002 | 41 | |||
2003 | 42 | from charmsupport.volumes import configure_volume, VolumeConfigurationError | ||
2004 | 43 | from charmsupport.hookenv import log, ERROR | ||
2005 | 44 | def post_mount_hook(): | ||
2006 | 45 | stop_service('myservice') | ||
2007 | 46 | def post_mount_hook(): | ||
2008 | 47 | start_service('myservice') | ||
2009 | 48 | |||
2010 | 49 | if __name__ == '__main__': | ||
2011 | 50 | try: | ||
2012 | 51 | configure_volume(before_change=pre_mount_hook, | ||
2013 | 52 | after_change=post_mount_hook) | ||
2014 | 53 | except VolumeConfigurationError: | ||
2015 | 54 | log('Storage could not be configured', ERROR) | ||
2016 | 55 | |||
2017 | 56 | ''' | ||
2018 | 57 | |||
2019 | 58 | # XXX: Known limitations | ||
2020 | 59 | # - fstab is neither consulted nor updated | ||
2021 | 60 | |||
2022 | 61 | import os | ||
2023 | 62 | from charmhelpers.core import hookenv | ||
2024 | 63 | from charmhelpers.core import host | ||
2025 | 64 | import yaml | ||
2026 | 65 | |||
2027 | 66 | |||
2028 | 67 | MOUNT_BASE = '/srv/juju/volumes' | ||
2029 | 68 | |||
2030 | 69 | |||
2031 | 70 | class VolumeConfigurationError(Exception): | ||
2032 | 71 | '''Volume configuration data is missing or invalid''' | ||
2033 | 72 | pass | ||
2034 | 73 | |||
2035 | 74 | |||
2036 | 75 | def get_config(): | ||
2037 | 76 | '''Gather and sanity-check volume configuration data''' | ||
2038 | 77 | volume_config = {} | ||
2039 | 78 | config = hookenv.config() | ||
2040 | 79 | |||
2041 | 80 | errors = False | ||
2042 | 81 | |||
2043 | 82 | if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'): | ||
2044 | 83 | volume_config['ephemeral'] = True | ||
2045 | 84 | else: | ||
2046 | 85 | volume_config['ephemeral'] = False | ||
2047 | 86 | |||
2048 | 87 | try: | ||
2049 | 88 | volume_map = yaml.safe_load(config.get('volume-map', '{}')) | ||
2050 | 89 | except yaml.YAMLError as e: | ||
2051 | 90 | hookenv.log("Error parsing YAML volume-map: {}".format(e), | ||
2052 | 91 | hookenv.ERROR) | ||
2053 | 92 | errors = True | ||
2054 | 93 | if volume_map is None: | ||
2055 | 94 | # probably an empty string | ||
2056 | 95 | volume_map = {} | ||
2057 | 96 | elif not isinstance(volume_map, dict): | ||
2058 | 97 | hookenv.log("Volume-map should be a dictionary, not {}".format( | ||
2059 | 98 | type(volume_map))) | ||
2060 | 99 | errors = True | ||
2061 | 100 | |||
2062 | 101 | volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME']) | ||
2063 | 102 | if volume_config['device'] and volume_config['ephemeral']: | ||
2064 | 103 | # asked for ephemeral storage but also defined a volume ID | ||
2065 | 104 | hookenv.log('A volume is defined for this unit, but ephemeral ' | ||
2066 | 105 | 'storage was requested', hookenv.ERROR) | ||
2067 | 106 | errors = True | ||
2068 | 107 | elif not volume_config['device'] and not volume_config['ephemeral']: | ||
2069 | 108 | # asked for permanent storage but did not define volume ID | ||
2070 | 109 | hookenv.log('Ephemeral storage was requested, but there is no volume ' | ||
2071 | 110 | 'defined for this unit.', hookenv.ERROR) | ||
2072 | 111 | errors = True | ||
2073 | 112 | |||
2074 | 113 | unit_mount_name = hookenv.local_unit().replace('/', '-') | ||
2075 | 114 | volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name) | ||
2076 | 115 | |||
2077 | 116 | if errors: | ||
2078 | 117 | return None | ||
2079 | 118 | return volume_config | ||
2080 | 119 | |||
2081 | 120 | |||
2082 | 121 | def mount_volume(config): | ||
2083 | 122 | if os.path.exists(config['mountpoint']): | ||
2084 | 123 | if not os.path.isdir(config['mountpoint']): | ||
2085 | 124 | hookenv.log('Not a directory: {}'.format(config['mountpoint'])) | ||
2086 | 125 | raise VolumeConfigurationError() | ||
2087 | 126 | else: | ||
2088 | 127 | host.mkdir(config['mountpoint']) | ||
2089 | 128 | if os.path.ismount(config['mountpoint']): | ||
2090 | 129 | unmount_volume(config) | ||
2091 | 130 | if not host.mount(config['device'], config['mountpoint'], persist=True): | ||
2092 | 131 | raise VolumeConfigurationError() | ||
2093 | 132 | |||
2094 | 133 | |||
2095 | 134 | def unmount_volume(config): | ||
2096 | 135 | if os.path.ismount(config['mountpoint']): | ||
2097 | 136 | if not host.umount(config['mountpoint'], persist=True): | ||
2098 | 137 | raise VolumeConfigurationError() | ||
2099 | 138 | |||
2100 | 139 | |||
2101 | 140 | def managed_mounts(): | ||
2102 | 141 | '''List of all mounted managed volumes''' | ||
2103 | 142 | return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts()) | ||
2104 | 143 | |||
2105 | 144 | |||
2106 | 145 | def configure_volume(before_change=lambda: None, after_change=lambda: None): | ||
2107 | 146 | '''Set up storage (or don't) according to the charm's volume configuration. | ||
2108 | 147 | Returns the mount point or "ephemeral". before_change and after_change | ||
2109 | 148 | are optional functions to be called if the volume configuration changes. | ||
2110 | 149 | ''' | ||
2111 | 150 | |||
2112 | 151 | config = get_config() | ||
2113 | 152 | if not config: | ||
2114 | 153 | hookenv.log('Failed to read volume configuration', hookenv.CRITICAL) | ||
2115 | 154 | raise VolumeConfigurationError() | ||
2116 | 155 | |||
2117 | 156 | if config['ephemeral']: | ||
2118 | 157 | if os.path.ismount(config['mountpoint']): | ||
2119 | 158 | before_change() | ||
2120 | 159 | unmount_volume(config) | ||
2121 | 160 | after_change() | ||
2122 | 161 | return 'ephemeral' | ||
2123 | 162 | else: | ||
2124 | 163 | # persistent storage | ||
2125 | 164 | if os.path.ismount(config['mountpoint']): | ||
2126 | 165 | mounts = dict(managed_mounts()) | ||
2127 | 166 | if mounts.get(config['mountpoint']) != config['device']: | ||
2128 | 167 | before_change() | ||
2129 | 168 | unmount_volume(config) | ||
2130 | 169 | mount_volume(config) | ||
2131 | 170 | after_change() | ||
2132 | 171 | else: | ||
2133 | 172 | before_change() | ||
2134 | 173 | mount_volume(config) | ||
2135 | 174 | after_change() | ||
2136 | 175 | return config['mountpoint'] | ||
2137 | 176 | 0 | ||
2138 | === removed directory 'hooks/charmhelpers/contrib/database' | |||
2139 | === removed file 'hooks/charmhelpers/contrib/database/__init__.py' | |||
2140 | === removed file 'hooks/charmhelpers/contrib/database/mysql.py' | |||
2141 | --- hooks/charmhelpers/contrib/database/mysql.py 2015-07-29 18:23:55 +0000 | |||
2142 | +++ hooks/charmhelpers/contrib/database/mysql.py 1970-01-01 00:00:00 +0000 | |||
2143 | @@ -1,412 +0,0 @@ | |||
2144 | 1 | """Helper for working with a MySQL database""" | ||
2145 | 2 | import json | ||
2146 | 3 | import re | ||
2147 | 4 | import sys | ||
2148 | 5 | import platform | ||
2149 | 6 | import os | ||
2150 | 7 | import glob | ||
2151 | 8 | |||
2152 | 9 | # from string import upper | ||
2153 | 10 | |||
2154 | 11 | from charmhelpers.core.host import ( | ||
2155 | 12 | mkdir, | ||
2156 | 13 | pwgen, | ||
2157 | 14 | write_file | ||
2158 | 15 | ) | ||
2159 | 16 | from charmhelpers.core.hookenv import ( | ||
2160 | 17 | config as config_get, | ||
2161 | 18 | relation_get, | ||
2162 | 19 | related_units, | ||
2163 | 20 | unit_get, | ||
2164 | 21 | log, | ||
2165 | 22 | DEBUG, | ||
2166 | 23 | INFO, | ||
2167 | 24 | WARNING, | ||
2168 | 25 | ) | ||
2169 | 26 | from charmhelpers.fetch import ( | ||
2170 | 27 | apt_install, | ||
2171 | 28 | apt_update, | ||
2172 | 29 | filter_installed_packages, | ||
2173 | 30 | ) | ||
2174 | 31 | from charmhelpers.contrib.peerstorage import ( | ||
2175 | 32 | peer_store, | ||
2176 | 33 | peer_retrieve, | ||
2177 | 34 | ) | ||
2178 | 35 | from charmhelpers.contrib.network.ip import get_host_ip | ||
2179 | 36 | |||
2180 | 37 | try: | ||
2181 | 38 | import MySQLdb | ||
2182 | 39 | except ImportError: | ||
2183 | 40 | apt_update(fatal=True) | ||
2184 | 41 | apt_install(filter_installed_packages(['python-mysqldb']), fatal=True) | ||
2185 | 42 | import MySQLdb | ||
2186 | 43 | |||
2187 | 44 | |||
2188 | 45 | class MySQLHelper(object): | ||
2189 | 46 | |||
2190 | 47 | def __init__(self, rpasswdf_template, upasswdf_template, host='localhost', | ||
2191 | 48 | migrate_passwd_to_peer_relation=True, | ||
2192 | 49 | delete_ondisk_passwd_file=True): | ||
2193 | 50 | self.host = host | ||
2194 | 51 | # Password file path templates | ||
2195 | 52 | self.root_passwd_file_template = rpasswdf_template | ||
2196 | 53 | self.user_passwd_file_template = upasswdf_template | ||
2197 | 54 | |||
2198 | 55 | self.migrate_passwd_to_peer_relation = migrate_passwd_to_peer_relation | ||
2199 | 56 | # If we migrate we have the option to delete local copy of root passwd | ||
2200 | 57 | self.delete_ondisk_passwd_file = delete_ondisk_passwd_file | ||
2201 | 58 | |||
2202 | 59 | def connect(self, user='root', password=None): | ||
2203 | 60 | log("Opening db connection for %s@%s" % (user, self.host), level=DEBUG) | ||
2204 | 61 | self.connection = MySQLdb.connect(user=user, host=self.host, | ||
2205 | 62 | passwd=password) | ||
2206 | 63 | |||
2207 | 64 | def database_exists(self, db_name): | ||
2208 | 65 | cursor = self.connection.cursor() | ||
2209 | 66 | try: | ||
2210 | 67 | cursor.execute("SHOW DATABASES") | ||
2211 | 68 | databases = [i[0] for i in cursor.fetchall()] | ||
2212 | 69 | finally: | ||
2213 | 70 | cursor.close() | ||
2214 | 71 | |||
2215 | 72 | return db_name in databases | ||
2216 | 73 | |||
2217 | 74 | def create_database(self, db_name): | ||
2218 | 75 | cursor = self.connection.cursor() | ||
2219 | 76 | try: | ||
2220 | 77 | cursor.execute("CREATE DATABASE {} CHARACTER SET UTF8" | ||
2221 | 78 | .format(db_name)) | ||
2222 | 79 | finally: | ||
2223 | 80 | cursor.close() | ||
2224 | 81 | |||
2225 | 82 | def grant_exists(self, db_name, db_user, remote_ip): | ||
2226 | 83 | cursor = self.connection.cursor() | ||
2227 | 84 | priv_string = "GRANT ALL PRIVILEGES ON `{}`.* " \ | ||
2228 | 85 | "TO '{}'@'{}'".format(db_name, db_user, remote_ip) | ||
2229 | 86 | try: | ||
2230 | 87 | cursor.execute("SHOW GRANTS for '{}'@'{}'".format(db_user, | ||
2231 | 88 | remote_ip)) | ||
2232 | 89 | grants = [i[0] for i in cursor.fetchall()] | ||
2233 | 90 | except MySQLdb.OperationalError: | ||
2234 | 91 | return False | ||
2235 | 92 | finally: | ||
2236 | 93 | cursor.close() | ||
2237 | 94 | |||
2238 | 95 | # TODO: review for different grants | ||
2239 | 96 | return priv_string in grants | ||
2240 | 97 | |||
2241 | 98 | def create_grant(self, db_name, db_user, remote_ip, password): | ||
2242 | 99 | cursor = self.connection.cursor() | ||
2243 | 100 | try: | ||
2244 | 101 | # TODO: review for different grants | ||
2245 | 102 | cursor.execute("GRANT ALL PRIVILEGES ON {}.* TO '{}'@'{}' " | ||
2246 | 103 | "IDENTIFIED BY '{}'".format(db_name, | ||
2247 | 104 | db_user, | ||
2248 | 105 | remote_ip, | ||
2249 | 106 | password)) | ||
2250 | 107 | finally: | ||
2251 | 108 | cursor.close() | ||
2252 | 109 | |||
2253 | 110 | def create_admin_grant(self, db_user, remote_ip, password): | ||
2254 | 111 | cursor = self.connection.cursor() | ||
2255 | 112 | try: | ||
2256 | 113 | cursor.execute("GRANT ALL PRIVILEGES ON *.* TO '{}'@'{}' " | ||
2257 | 114 | "IDENTIFIED BY '{}'".format(db_user, | ||
2258 | 115 | remote_ip, | ||
2259 | 116 | password)) | ||
2260 | 117 | finally: | ||
2261 | 118 | cursor.close() | ||
2262 | 119 | |||
2263 | 120 | def cleanup_grant(self, db_user, remote_ip): | ||
2264 | 121 | cursor = self.connection.cursor() | ||
2265 | 122 | try: | ||
2266 | 123 | cursor.execute("DROP FROM mysql.user WHERE user='{}' " | ||
2267 | 124 | "AND HOST='{}'".format(db_user, | ||
2268 | 125 | remote_ip)) | ||
2269 | 126 | finally: | ||
2270 | 127 | cursor.close() | ||
2271 | 128 | |||
2272 | 129 | def execute(self, sql): | ||
2273 | 130 | """Execute arbitary SQL against the database.""" | ||
2274 | 131 | cursor = self.connection.cursor() | ||
2275 | 132 | try: | ||
2276 | 133 | cursor.execute(sql) | ||
2277 | 134 | finally: | ||
2278 | 135 | cursor.close() | ||
2279 | 136 | |||
2280 | 137 | def migrate_passwords_to_peer_relation(self, excludes=None): | ||
2281 | 138 | """Migrate any passwords storage on disk to cluster peer relation.""" | ||
2282 | 139 | dirname = os.path.dirname(self.root_passwd_file_template) | ||
2283 | 140 | path = os.path.join(dirname, '*.passwd') | ||
2284 | 141 | for f in glob.glob(path): | ||
2285 | 142 | if excludes and f in excludes: | ||
2286 | 143 | log("Excluding %s from peer migration" % (f), level=DEBUG) | ||
2287 | 144 | continue | ||
2288 | 145 | |||
2289 | 146 | key = os.path.basename(f) | ||
2290 | 147 | with open(f, 'r') as passwd: | ||
2291 | 148 | _value = passwd.read().strip() | ||
2292 | 149 | |||
2293 | 150 | try: | ||
2294 | 151 | peer_store(key, _value) | ||
2295 | 152 | |||
2296 | 153 | if self.delete_ondisk_passwd_file: | ||
2297 | 154 | os.unlink(f) | ||
2298 | 155 | except ValueError: | ||
2299 | 156 | # NOTE cluster relation not yet ready - skip for now | ||
2300 | 157 | pass | ||
2301 | 158 | |||
2302 | 159 | def get_mysql_password_on_disk(self, username=None, password=None): | ||
2303 | 160 | """Retrieve, generate or store a mysql password for the provided | ||
2304 | 161 | username on disk.""" | ||
2305 | 162 | if username: | ||
2306 | 163 | template = self.user_passwd_file_template | ||
2307 | 164 | passwd_file = template.format(username) | ||
2308 | 165 | else: | ||
2309 | 166 | passwd_file = self.root_passwd_file_template | ||
2310 | 167 | |||
2311 | 168 | _password = None | ||
2312 | 169 | if os.path.exists(passwd_file): | ||
2313 | 170 | log("Using existing password file '%s'" % passwd_file, level=DEBUG) | ||
2314 | 171 | with open(passwd_file, 'r') as passwd: | ||
2315 | 172 | _password = passwd.read().strip() | ||
2316 | 173 | else: | ||
2317 | 174 | log("Generating new password file '%s'" % passwd_file, level=DEBUG) | ||
2318 | 175 | if not os.path.isdir(os.path.dirname(passwd_file)): | ||
2319 | 176 | # NOTE: need to ensure this is not mysql root dir (which needs | ||
2320 | 177 | # to be mysql readable) | ||
2321 | 178 | mkdir(os.path.dirname(passwd_file), owner='root', group='root', | ||
2322 | 179 | perms=0o770) | ||
2323 | 180 | # Force permissions - for some reason the chmod in makedirs | ||
2324 | 181 | # fails | ||
2325 | 182 | os.chmod(os.path.dirname(passwd_file), 0o770) | ||
2326 | 183 | |||
2327 | 184 | _password = password or pwgen(length=32) | ||
2328 | 185 | write_file(passwd_file, _password, owner='root', group='root', | ||
2329 | 186 | perms=0o660) | ||
2330 | 187 | |||
2331 | 188 | return _password | ||
2332 | 189 | |||
2333 | 190 | def passwd_keys(self, username): | ||
2334 | 191 | """Generator to return keys used to store passwords in peer store. | ||
2335 | 192 | |||
2336 | 193 | NOTE: we support both legacy and new format to support mysql | ||
2337 | 194 | charm prior to refactor. This is necessary to avoid LP 1451890. | ||
2338 | 195 | """ | ||
2339 | 196 | keys = [] | ||
2340 | 197 | if username == 'mysql': | ||
2341 | 198 | log("Bad username '%s'" % (username), level=WARNING) | ||
2342 | 199 | |||
2343 | 200 | if username: | ||
2344 | 201 | # IMPORTANT: *newer* format must be returned first | ||
2345 | 202 | keys.append('mysql-%s.passwd' % (username)) | ||
2346 | 203 | keys.append('%s.passwd' % (username)) | ||
2347 | 204 | else: | ||
2348 | 205 | keys.append('mysql.passwd') | ||
2349 | 206 | |||
2350 | 207 | for key in keys: | ||
2351 | 208 | yield key | ||
2352 | 209 | |||
2353 | 210 | def get_mysql_password(self, username=None, password=None): | ||
2354 | 211 | """Retrieve, generate or store a mysql password for the provided | ||
2355 | 212 | username using peer relation cluster.""" | ||
2356 | 213 | excludes = [] | ||
2357 | 214 | |||
2358 | 215 | # First check peer relation. | ||
2359 | 216 | try: | ||
2360 | 217 | for key in self.passwd_keys(username): | ||
2361 | 218 | _password = peer_retrieve(key) | ||
2362 | 219 | if _password: | ||
2363 | 220 | break | ||
2364 | 221 | |||
2365 | 222 | # If root password available don't update peer relation from local | ||
2366 | 223 | if _password and not username: | ||
2367 | 224 | excludes.append(self.root_passwd_file_template) | ||
2368 | 225 | |||
2369 | 226 | except ValueError: | ||
2370 | 227 | # cluster relation is not yet started; use on-disk | ||
2371 | 228 | _password = None | ||
2372 | 229 | |||
2373 | 230 | # If none available, generate new one | ||
2374 | 231 | if not _password: | ||
2375 | 232 | _password = self.get_mysql_password_on_disk(username, password) | ||
2376 | 233 | |||
2377 | 234 | # Put on wire if required | ||
2378 | 235 | if self.migrate_passwd_to_peer_relation: | ||
2379 | 236 | self.migrate_passwords_to_peer_relation(excludes=excludes) | ||
2380 | 237 | |||
2381 | 238 | return _password | ||
2382 | 239 | |||
2383 | 240 | def get_mysql_root_password(self, password=None): | ||
2384 | 241 | """Retrieve or generate mysql root password for service units.""" | ||
2385 | 242 | return self.get_mysql_password(username=None, password=password) | ||
2386 | 243 | |||
2387 | 244 | def normalize_address(self, hostname): | ||
2388 | 245 | """Ensure that address returned is an IP address (i.e. not fqdn)""" | ||
2389 | 246 | if config_get('prefer-ipv6'): | ||
2390 | 247 | # TODO: add support for ipv6 dns | ||
2391 | 248 | return hostname | ||
2392 | 249 | |||
2393 | 250 | if hostname != unit_get('private-address'): | ||
2394 | 251 | return get_host_ip(hostname, fallback=hostname) | ||
2395 | 252 | |||
2396 | 253 | # Otherwise assume localhost | ||
2397 | 254 | return '127.0.0.1' | ||
2398 | 255 | |||
2399 | 256 | def get_allowed_units(self, database, username, relation_id=None): | ||
2400 | 257 | """Get list of units with access grants for database with username. | ||
2401 | 258 | |||
2402 | 259 | This is typically used to provide shared-db relations with a list of | ||
2403 | 260 | which units have been granted access to the given database. | ||
2404 | 261 | """ | ||
2405 | 262 | self.connect(password=self.get_mysql_root_password()) | ||
2406 | 263 | allowed_units = set() | ||
2407 | 264 | for unit in related_units(relation_id): | ||
2408 | 265 | settings = relation_get(rid=relation_id, unit=unit) | ||
2409 | 266 | # First check for setting with prefix, then without | ||
2410 | 267 | for attr in ["%s_hostname" % (database), 'hostname']: | ||
2411 | 268 | hosts = settings.get(attr, None) | ||
2412 | 269 | if hosts: | ||
2413 | 270 | break | ||
2414 | 271 | |||
2415 | 272 | if hosts: | ||
2416 | 273 | # hostname can be json-encoded list of hostnames | ||
2417 | 274 | try: | ||
2418 | 275 | hosts = json.loads(hosts) | ||
2419 | 276 | except ValueError: | ||
2420 | 277 | hosts = [hosts] | ||
2421 | 278 | else: | ||
2422 | 279 | hosts = [settings['private-address']] | ||
2423 | 280 | |||
2424 | 281 | if hosts: | ||
2425 | 282 | for host in hosts: | ||
2426 | 283 | host = self.normalize_address(host) | ||
2427 | 284 | if self.grant_exists(database, username, host): | ||
2428 | 285 | log("Grant exists for host '%s' on db '%s'" % | ||
2429 | 286 | (host, database), level=DEBUG) | ||
2430 | 287 | if unit not in allowed_units: | ||
2431 | 288 | allowed_units.add(unit) | ||
2432 | 289 | else: | ||
2433 | 290 | log("Grant does NOT exist for host '%s' on db '%s'" % | ||
2434 | 291 | (host, database), level=DEBUG) | ||
2435 | 292 | else: | ||
2436 | 293 | log("No hosts found for grant check", level=INFO) | ||
2437 | 294 | |||
2438 | 295 | return allowed_units | ||
2439 | 296 | |||
2440 | 297 | def configure_db(self, hostname, database, username, admin=False): | ||
2441 | 298 | """Configure access to database for username from hostname.""" | ||
2442 | 299 | self.connect(password=self.get_mysql_root_password()) | ||
2443 | 300 | if not self.database_exists(database): | ||
2444 | 301 | self.create_database(database) | ||
2445 | 302 | |||
2446 | 303 | remote_ip = self.normalize_address(hostname) | ||
2447 | 304 | password = self.get_mysql_password(username) | ||
2448 | 305 | if not self.grant_exists(database, username, remote_ip): | ||
2449 | 306 | if not admin: | ||
2450 | 307 | self.create_grant(database, username, remote_ip, password) | ||
2451 | 308 | else: | ||
2452 | 309 | self.create_admin_grant(username, remote_ip, password) | ||
2453 | 310 | |||
2454 | 311 | return password | ||
2455 | 312 | |||
2456 | 313 | |||
2457 | 314 | class PerconaClusterHelper(object): | ||
2458 | 315 | |||
2459 | 316 | # Going for the biggest page size to avoid wasted bytes. | ||
2460 | 317 | # InnoDB page size is 16MB | ||
2461 | 318 | |||
2462 | 319 | DEFAULT_PAGE_SIZE = 16 * 1024 * 1024 | ||
2463 | 320 | DEFAULT_INNODB_BUFFER_FACTOR = 0.50 | ||
2464 | 321 | |||
2465 | 322 | def human_to_bytes(self, human): | ||
2466 | 323 | """Convert human readable configuration options to bytes.""" | ||
2467 | 324 | num_re = re.compile('^[0-9]+$') | ||
2468 | 325 | if num_re.match(human): | ||
2469 | 326 | return human | ||
2470 | 327 | |||
2471 | 328 | factors = { | ||
2472 | 329 | 'K': 1024, | ||
2473 | 330 | 'M': 1048576, | ||
2474 | 331 | 'G': 1073741824, | ||
2475 | 332 | 'T': 1099511627776 | ||
2476 | 333 | } | ||
2477 | 334 | modifier = human[-1] | ||
2478 | 335 | if modifier in factors: | ||
2479 | 336 | return int(human[:-1]) * factors[modifier] | ||
2480 | 337 | |||
2481 | 338 | if modifier == '%': | ||
2482 | 339 | total_ram = self.human_to_bytes(self.get_mem_total()) | ||
2483 | 340 | if self.is_32bit_system() and total_ram > self.sys_mem_limit(): | ||
2484 | 341 | total_ram = self.sys_mem_limit() | ||
2485 | 342 | factor = int(human[:-1]) * 0.01 | ||
2486 | 343 | pctram = total_ram * factor | ||
2487 | 344 | return int(pctram - (pctram % self.DEFAULT_PAGE_SIZE)) | ||
2488 | 345 | |||
2489 | 346 | raise ValueError("Can only convert K,M,G, or T") | ||
2490 | 347 | |||
2491 | 348 | def is_32bit_system(self): | ||
2492 | 349 | """Determine whether system is 32 or 64 bit.""" | ||
2493 | 350 | try: | ||
2494 | 351 | return sys.maxsize < 2 ** 32 | ||
2495 | 352 | except OverflowError: | ||
2496 | 353 | return False | ||
2497 | 354 | |||
2498 | 355 | def sys_mem_limit(self): | ||
2499 | 356 | """Determine the default memory limit for the current service unit.""" | ||
2500 | 357 | if platform.machine() in ['armv7l']: | ||
2501 | 358 | _mem_limit = self.human_to_bytes('2700M') # experimentally determined | ||
2502 | 359 | else: | ||
2503 | 360 | # Limit for x86 based 32bit systems | ||
2504 | 361 | _mem_limit = self.human_to_bytes('4G') | ||
2505 | 362 | |||
2506 | 363 | return _mem_limit | ||
2507 | 364 | |||
2508 | 365 | def get_mem_total(self): | ||
2509 | 366 | """Calculate the total memory in the current service unit.""" | ||
2510 | 367 | with open('/proc/meminfo') as meminfo_file: | ||
2511 | 368 | for line in meminfo_file: | ||
2512 | 369 | key, mem = line.split(':', 2) | ||
2513 | 370 | if key == 'MemTotal': | ||
2514 | 371 | mtot, modifier = mem.strip().split(' ') | ||
2515 | 372 | return '%s%s' % (mtot, modifier[0].upper()) | ||
2516 | 373 | |||
2517 | 374 | def parse_config(self): | ||
2518 | 375 | """Parse charm configuration and calculate values for config files.""" | ||
2519 | 376 | config = config_get() | ||
2520 | 377 | mysql_config = {} | ||
2521 | 378 | if 'max-connections' in config: | ||
2522 | 379 | mysql_config['max_connections'] = config['max-connections'] | ||
2523 | 380 | |||
2524 | 381 | if 'wait-timeout' in config: | ||
2525 | 382 | mysql_config['wait_timeout'] = config['wait-timeout'] | ||
2526 | 383 | |||
2527 | 384 | if 'innodb-flush-log-at-trx-commit' in config: | ||
2528 | 385 | mysql_config['innodb_flush_log_at_trx_commit'] = config['innodb-flush-log-at-trx-commit'] | ||
2529 | 386 | |||
2530 | 387 | # Set a sane default key_buffer size | ||
2531 | 388 | mysql_config['key_buffer'] = self.human_to_bytes('32M') | ||
2532 | 389 | total_memory = self.human_to_bytes(self.get_mem_total()) | ||
2533 | 390 | |||
2534 | 391 | dataset_bytes = config.get('dataset-size', None) | ||
2535 | 392 | innodb_buffer_pool_size = config.get('innodb-buffer-pool-size', None) | ||
2536 | 393 | |||
2537 | 394 | if innodb_buffer_pool_size: | ||
2538 | 395 | innodb_buffer_pool_size = self.human_to_bytes( | ||
2539 | 396 | innodb_buffer_pool_size) | ||
2540 | 397 | elif dataset_bytes: | ||
2541 | 398 | log("Option 'dataset-size' has been deprecated, please use" | ||
2542 | 399 | "innodb_buffer_pool_size option instead", level="WARN") | ||
2543 | 400 | innodb_buffer_pool_size = self.human_to_bytes( | ||
2544 | 401 | dataset_bytes) | ||
2545 | 402 | else: | ||
2546 | 403 | innodb_buffer_pool_size = int( | ||
2547 | 404 | total_memory * self.DEFAULT_INNODB_BUFFER_FACTOR) | ||
2548 | 405 | |||
2549 | 406 | if innodb_buffer_pool_size > total_memory: | ||
2550 | 407 | log("innodb_buffer_pool_size; {} is greater than system available memory:{}".format( | ||
2551 | 408 | innodb_buffer_pool_size, | ||
2552 | 409 | total_memory), level='WARN') | ||
2553 | 410 | |||
2554 | 411 | mysql_config['innodb_buffer_pool_size'] = innodb_buffer_pool_size | ||
2555 | 412 | return mysql_config | ||
2556 | 413 | 0 | ||
2557 | === modified file 'hooks/charmhelpers/contrib/network/ip.py' | |||
2558 | --- hooks/charmhelpers/contrib/network/ip.py 2015-05-19 21:32:01 +0000 | |||
2559 | +++ hooks/charmhelpers/contrib/network/ip.py 2016-05-18 10:06:26 +0000 | |||
2560 | @@ -23,7 +23,7 @@ | |||
2561 | 23 | from functools import partial | 23 | from functools import partial |
2562 | 24 | 24 | ||
2563 | 25 | from charmhelpers.core.hookenv import unit_get | 25 | from charmhelpers.core.hookenv import unit_get |
2565 | 26 | from charmhelpers.fetch import apt_install | 26 | from charmhelpers.fetch import apt_install, apt_update |
2566 | 27 | from charmhelpers.core.hookenv import ( | 27 | from charmhelpers.core.hookenv import ( |
2567 | 28 | log, | 28 | log, |
2568 | 29 | WARNING, | 29 | WARNING, |
2569 | @@ -32,13 +32,15 @@ | |||
2570 | 32 | try: | 32 | try: |
2571 | 33 | import netifaces | 33 | import netifaces |
2572 | 34 | except ImportError: | 34 | except ImportError: |
2574 | 35 | apt_install('python-netifaces') | 35 | apt_update(fatal=True) |
2575 | 36 | apt_install('python-netifaces', fatal=True) | ||
2576 | 36 | import netifaces | 37 | import netifaces |
2577 | 37 | 38 | ||
2578 | 38 | try: | 39 | try: |
2579 | 39 | import netaddr | 40 | import netaddr |
2580 | 40 | except ImportError: | 41 | except ImportError: |
2582 | 41 | apt_install('python-netaddr') | 42 | apt_update(fatal=True) |
2583 | 43 | apt_install('python-netaddr', fatal=True) | ||
2584 | 42 | import netaddr | 44 | import netaddr |
2585 | 43 | 45 | ||
2586 | 44 | 46 | ||
2587 | @@ -51,7 +53,7 @@ | |||
2588 | 51 | 53 | ||
2589 | 52 | 54 | ||
2590 | 53 | def no_ip_found_error_out(network): | 55 | def no_ip_found_error_out(network): |
2592 | 54 | errmsg = ("No IP address found in network: %s" % network) | 56 | errmsg = ("No IP address found in network(s): %s" % network) |
2593 | 55 | raise ValueError(errmsg) | 57 | raise ValueError(errmsg) |
2594 | 56 | 58 | ||
2595 | 57 | 59 | ||
2596 | @@ -59,7 +61,7 @@ | |||
2597 | 59 | """Get an IPv4 or IPv6 address within the network from the host. | 61 | """Get an IPv4 or IPv6 address within the network from the host. |
2598 | 60 | 62 | ||
2599 | 61 | :param network (str): CIDR presentation format. For example, | 63 | :param network (str): CIDR presentation format. For example, |
2601 | 62 | '192.168.1.0/24'. | 64 | '192.168.1.0/24'. Supports multiple networks as a space-delimited list. |
2602 | 63 | :param fallback (str): If no address is found, return fallback. | 65 | :param fallback (str): If no address is found, return fallback. |
2603 | 64 | :param fatal (boolean): If no address is found, fallback is not | 66 | :param fatal (boolean): If no address is found, fallback is not |
2604 | 65 | set and fatal is True then exit(1). | 67 | set and fatal is True then exit(1). |
2605 | @@ -73,24 +75,26 @@ | |||
2606 | 73 | else: | 75 | else: |
2607 | 74 | return None | 76 | return None |
2608 | 75 | 77 | ||
2619 | 76 | _validate_cidr(network) | 78 | networks = network.split() or [network] |
2620 | 77 | network = netaddr.IPNetwork(network) | 79 | for network in networks: |
2621 | 78 | for iface in netifaces.interfaces(): | 80 | _validate_cidr(network) |
2622 | 79 | addresses = netifaces.ifaddresses(iface) | 81 | network = netaddr.IPNetwork(network) |
2623 | 80 | if network.version == 4 and netifaces.AF_INET in addresses: | 82 | for iface in netifaces.interfaces(): |
2624 | 81 | addr = addresses[netifaces.AF_INET][0]['addr'] | 83 | addresses = netifaces.ifaddresses(iface) |
2625 | 82 | netmask = addresses[netifaces.AF_INET][0]['netmask'] | 84 | if network.version == 4 and netifaces.AF_INET in addresses: |
2626 | 83 | cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) | 85 | addr = addresses[netifaces.AF_INET][0]['addr'] |
2627 | 84 | if cidr in network: | 86 | netmask = addresses[netifaces.AF_INET][0]['netmask'] |
2628 | 85 | return str(cidr.ip) | 87 | cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) |
2629 | 88 | if cidr in network: | ||
2630 | 89 | return str(cidr.ip) | ||
2631 | 86 | 90 | ||
2639 | 87 | if network.version == 6 and netifaces.AF_INET6 in addresses: | 91 | if network.version == 6 and netifaces.AF_INET6 in addresses: |
2640 | 88 | for addr in addresses[netifaces.AF_INET6]: | 92 | for addr in addresses[netifaces.AF_INET6]: |
2641 | 89 | if not addr['addr'].startswith('fe80'): | 93 | if not addr['addr'].startswith('fe80'): |
2642 | 90 | cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], | 94 | cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], |
2643 | 91 | addr['netmask'])) | 95 | addr['netmask'])) |
2644 | 92 | if cidr in network: | 96 | if cidr in network: |
2645 | 93 | return str(cidr.ip) | 97 | return str(cidr.ip) |
2646 | 94 | 98 | ||
2647 | 95 | if fallback is not None: | 99 | if fallback is not None: |
2648 | 96 | return fallback | 100 | return fallback |
2649 | @@ -187,6 +191,15 @@ | |||
2650 | 187 | get_netmask_for_address = partial(_get_for_address, key='netmask') | 191 | get_netmask_for_address = partial(_get_for_address, key='netmask') |
2651 | 188 | 192 | ||
2652 | 189 | 193 | ||
2653 | 194 | def resolve_network_cidr(ip_address): | ||
2654 | 195 | ''' | ||
2655 | 196 | Resolves the full address cidr of an ip_address based on | ||
2656 | 197 | configured network interfaces | ||
2657 | 198 | ''' | ||
2658 | 199 | netmask = get_netmask_for_address(ip_address) | ||
2659 | 200 | return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr) | ||
2660 | 201 | |||
2661 | 202 | |||
2662 | 190 | def format_ipv6_addr(address): | 203 | def format_ipv6_addr(address): |
2663 | 191 | """If address is IPv6, wrap it in '[]' otherwise return None. | 204 | """If address is IPv6, wrap it in '[]' otherwise return None. |
2664 | 192 | 205 | ||
2665 | @@ -435,8 +448,12 @@ | |||
2666 | 435 | 448 | ||
2667 | 436 | rev = dns.reversename.from_address(address) | 449 | rev = dns.reversename.from_address(address) |
2668 | 437 | result = ns_query(rev) | 450 | result = ns_query(rev) |
2669 | 451 | |||
2670 | 438 | if not result: | 452 | if not result: |
2672 | 439 | return None | 453 | try: |
2673 | 454 | result = socket.gethostbyaddr(address)[0] | ||
2674 | 455 | except: | ||
2675 | 456 | return None | ||
2676 | 440 | else: | 457 | else: |
2677 | 441 | result = address | 458 | result = address |
2678 | 442 | 459 | ||
2679 | @@ -448,3 +465,18 @@ | |||
2680 | 448 | return result | 465 | return result |
2681 | 449 | else: | 466 | else: |
2682 | 450 | return result.split('.')[0] | 467 | return result.split('.')[0] |
2683 | 468 | |||
2684 | 469 | |||
2685 | 470 | def port_has_listener(address, port): | ||
2686 | 471 | """ | ||
2687 | 472 | Returns True if the address:port is open and being listened to, | ||
2688 | 473 | else False. | ||
2689 | 474 | |||
2690 | 475 | @param address: an IP address or hostname | ||
2691 | 476 | @param port: integer port | ||
2692 | 477 | |||
2693 | 478 | Note calls 'zc' via a subprocess shell | ||
2694 | 479 | """ | ||
2695 | 480 | cmd = ['nc', '-z', address, str(port)] | ||
2696 | 481 | result = subprocess.call(cmd) | ||
2697 | 482 | return not(bool(result)) | ||
2698 | 451 | 483 | ||
2699 | === modified file 'hooks/charmhelpers/contrib/network/ovs/__init__.py' | |||
2700 | --- hooks/charmhelpers/contrib/network/ovs/__init__.py 2015-05-19 21:32:01 +0000 | |||
2701 | +++ hooks/charmhelpers/contrib/network/ovs/__init__.py 2016-05-18 10:06:26 +0000 | |||
2702 | @@ -25,10 +25,14 @@ | |||
2703 | 25 | ) | 25 | ) |
2704 | 26 | 26 | ||
2705 | 27 | 27 | ||
2707 | 28 | def add_bridge(name): | 28 | def add_bridge(name, datapath_type=None): |
2708 | 29 | ''' Add the named bridge to openvswitch ''' | 29 | ''' Add the named bridge to openvswitch ''' |
2709 | 30 | log('Creating bridge {}'.format(name)) | 30 | log('Creating bridge {}'.format(name)) |
2711 | 31 | subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-br", name]) | 31 | cmd = ["ovs-vsctl", "--", "--may-exist", "add-br", name] |
2712 | 32 | if datapath_type is not None: | ||
2713 | 33 | cmd += ['--', 'set', 'bridge', name, | ||
2714 | 34 | 'datapath_type={}'.format(datapath_type)] | ||
2715 | 35 | subprocess.check_call(cmd) | ||
2716 | 32 | 36 | ||
2717 | 33 | 37 | ||
2718 | 34 | def del_bridge(name): | 38 | def del_bridge(name): |
2719 | 35 | 39 | ||
2720 | === modified file 'hooks/charmhelpers/contrib/network/ufw.py' | |||
2721 | --- hooks/charmhelpers/contrib/network/ufw.py 2015-07-29 18:23:55 +0000 | |||
2722 | +++ hooks/charmhelpers/contrib/network/ufw.py 2016-05-18 10:06:26 +0000 | |||
2723 | @@ -40,7 +40,9 @@ | |||
2724 | 40 | import re | 40 | import re |
2725 | 41 | import os | 41 | import os |
2726 | 42 | import subprocess | 42 | import subprocess |
2727 | 43 | |||
2728 | 43 | from charmhelpers.core import hookenv | 44 | from charmhelpers.core import hookenv |
2729 | 45 | from charmhelpers.core.kernel import modprobe, is_module_loaded | ||
2730 | 44 | 46 | ||
2731 | 45 | __author__ = "Felipe Reyes <felipe.reyes@canonical.com>" | 47 | __author__ = "Felipe Reyes <felipe.reyes@canonical.com>" |
2732 | 46 | 48 | ||
2733 | @@ -82,14 +84,11 @@ | |||
2734 | 82 | # do we have IPv6 in the machine? | 84 | # do we have IPv6 in the machine? |
2735 | 83 | if os.path.isdir('/proc/sys/net/ipv6'): | 85 | if os.path.isdir('/proc/sys/net/ipv6'): |
2736 | 84 | # is ip6tables kernel module loaded? | 86 | # is ip6tables kernel module loaded? |
2740 | 85 | lsmod = subprocess.check_output(['lsmod'], universal_newlines=True) | 87 | if not is_module_loaded('ip6_tables'): |
2738 | 86 | matches = re.findall('^ip6_tables[ ]+', lsmod, re.M) | ||
2739 | 87 | if len(matches) == 0: | ||
2741 | 88 | # ip6tables support isn't complete, let's try to load it | 88 | # ip6tables support isn't complete, let's try to load it |
2742 | 89 | try: | 89 | try: |
2746 | 90 | subprocess.check_output(['modprobe', 'ip6_tables'], | 90 | modprobe('ip6_tables') |
2747 | 91 | universal_newlines=True) | 91 | # great, we can load the module |
2745 | 92 | # great, we could load the module | ||
2748 | 93 | return True | 92 | return True |
2749 | 94 | except subprocess.CalledProcessError as ex: | 93 | except subprocess.CalledProcessError as ex: |
2750 | 95 | hookenv.log("Couldn't load ip6_tables module: %s" % ex.output, | 94 | hookenv.log("Couldn't load ip6_tables module: %s" % ex.output, |
2751 | 96 | 95 | ||
2752 | === modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py' | |||
2753 | --- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-07-29 18:23:55 +0000 | |||
2754 | +++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2016-05-18 10:06:26 +0000 | |||
2755 | @@ -14,12 +14,18 @@ | |||
2756 | 14 | # You should have received a copy of the GNU Lesser General Public License | 14 | # You should have received a copy of the GNU Lesser General Public License |
2757 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
2758 | 16 | 16 | ||
2759 | 17 | import logging | ||
2760 | 18 | import re | ||
2761 | 19 | import sys | ||
2762 | 17 | import six | 20 | import six |
2763 | 18 | from collections import OrderedDict | 21 | from collections import OrderedDict |
2764 | 19 | from charmhelpers.contrib.amulet.deployment import ( | 22 | from charmhelpers.contrib.amulet.deployment import ( |
2765 | 20 | AmuletDeployment | 23 | AmuletDeployment |
2766 | 21 | ) | 24 | ) |
2767 | 22 | 25 | ||
2768 | 26 | DEBUG = logging.DEBUG | ||
2769 | 27 | ERROR = logging.ERROR | ||
2770 | 28 | |||
2771 | 23 | 29 | ||
2772 | 24 | class OpenStackAmuletDeployment(AmuletDeployment): | 30 | class OpenStackAmuletDeployment(AmuletDeployment): |
2773 | 25 | """OpenStack amulet deployment. | 31 | """OpenStack amulet deployment. |
2774 | @@ -28,9 +34,12 @@ | |||
2775 | 28 | that is specifically for use by OpenStack charms. | 34 | that is specifically for use by OpenStack charms. |
2776 | 29 | """ | 35 | """ |
2777 | 30 | 36 | ||
2779 | 31 | def __init__(self, series=None, openstack=None, source=None, stable=True): | 37 | def __init__(self, series=None, openstack=None, source=None, |
2780 | 38 | stable=True, log_level=DEBUG): | ||
2781 | 32 | """Initialize the deployment environment.""" | 39 | """Initialize the deployment environment.""" |
2782 | 33 | super(OpenStackAmuletDeployment, self).__init__(series) | 40 | super(OpenStackAmuletDeployment, self).__init__(series) |
2783 | 41 | self.log = self.get_logger(level=log_level) | ||
2784 | 42 | self.log.info('OpenStackAmuletDeployment: init') | ||
2785 | 34 | self.openstack = openstack | 43 | self.openstack = openstack |
2786 | 35 | self.source = source | 44 | self.source = source |
2787 | 36 | self.stable = stable | 45 | self.stable = stable |
2788 | @@ -38,26 +47,55 @@ | |||
2789 | 38 | # out. | 47 | # out. |
2790 | 39 | self.current_next = "trusty" | 48 | self.current_next = "trusty" |
2791 | 40 | 49 | ||
2792 | 50 | def get_logger(self, name="deployment-logger", level=logging.DEBUG): | ||
2793 | 51 | """Get a logger object that will log to stdout.""" | ||
2794 | 52 | log = logging | ||
2795 | 53 | logger = log.getLogger(name) | ||
2796 | 54 | fmt = log.Formatter("%(asctime)s %(funcName)s " | ||
2797 | 55 | "%(levelname)s: %(message)s") | ||
2798 | 56 | |||
2799 | 57 | handler = log.StreamHandler(stream=sys.stdout) | ||
2800 | 58 | handler.setLevel(level) | ||
2801 | 59 | handler.setFormatter(fmt) | ||
2802 | 60 | |||
2803 | 61 | logger.addHandler(handler) | ||
2804 | 62 | logger.setLevel(level) | ||
2805 | 63 | |||
2806 | 64 | return logger | ||
2807 | 65 | |||
2808 | 41 | def _determine_branch_locations(self, other_services): | 66 | def _determine_branch_locations(self, other_services): |
2809 | 42 | """Determine the branch locations for the other services. | 67 | """Determine the branch locations for the other services. |
2810 | 43 | 68 | ||
2811 | 44 | Determine if the local branch being tested is derived from its | 69 | Determine if the local branch being tested is derived from its |
2812 | 45 | stable or next (dev) branch, and based on this, use the corresonding | 70 | stable or next (dev) branch, and based on this, use the corresonding |
2813 | 46 | stable or next branches for the other_services.""" | 71 | stable or next branches for the other_services.""" |
2815 | 47 | base_charms = ['mysql', 'mongodb'] | 72 | |
2816 | 73 | self.log.info('OpenStackAmuletDeployment: determine branch locations') | ||
2817 | 74 | |||
2818 | 75 | # Charms outside the lp:~openstack-charmers namespace | ||
2819 | 76 | base_charms = ['mysql', 'mongodb', 'nrpe'] | ||
2820 | 77 | |||
2821 | 78 | # Force these charms to current series even when using an older series. | ||
2822 | 79 | # ie. Use trusty/nrpe even when series is precise, as the P charm | ||
2823 | 80 | # does not possess the necessary external master config and hooks. | ||
2824 | 81 | force_series_current = ['nrpe'] | ||
2825 | 48 | 82 | ||
2826 | 49 | if self.series in ['precise', 'trusty']: | 83 | if self.series in ['precise', 'trusty']: |
2827 | 50 | base_series = self.series | 84 | base_series = self.series |
2828 | 51 | else: | 85 | else: |
2829 | 52 | base_series = self.current_next | 86 | base_series = self.current_next |
2830 | 53 | 87 | ||
2833 | 54 | if self.stable: | 88 | for svc in other_services: |
2834 | 55 | for svc in other_services: | 89 | if svc['name'] in force_series_current: |
2835 | 90 | base_series = self.current_next | ||
2836 | 91 | # If a location has been explicitly set, use it | ||
2837 | 92 | if svc.get('location'): | ||
2838 | 93 | continue | ||
2839 | 94 | if self.stable: | ||
2840 | 56 | temp = 'lp:charms/{}/{}' | 95 | temp = 'lp:charms/{}/{}' |
2841 | 57 | svc['location'] = temp.format(base_series, | 96 | svc['location'] = temp.format(base_series, |
2842 | 58 | svc['name']) | 97 | svc['name']) |
2845 | 59 | else: | 98 | else: |
2844 | 60 | for svc in other_services: | ||
2846 | 61 | if svc['name'] in base_charms: | 99 | if svc['name'] in base_charms: |
2847 | 62 | temp = 'lp:charms/{}/{}' | 100 | temp = 'lp:charms/{}/{}' |
2848 | 63 | svc['location'] = temp.format(base_series, | 101 | svc['location'] = temp.format(base_series, |
2849 | @@ -66,10 +104,13 @@ | |||
2850 | 66 | temp = 'lp:~openstack-charmers/charms/{}/{}/next' | 104 | temp = 'lp:~openstack-charmers/charms/{}/{}/next' |
2851 | 67 | svc['location'] = temp.format(self.current_next, | 105 | svc['location'] = temp.format(self.current_next, |
2852 | 68 | svc['name']) | 106 | svc['name']) |
2853 | 107 | |||
2854 | 69 | return other_services | 108 | return other_services |
2855 | 70 | 109 | ||
2856 | 71 | def _add_services(self, this_service, other_services): | 110 | def _add_services(self, this_service, other_services): |
2857 | 72 | """Add services to the deployment and set openstack-origin/source.""" | 111 | """Add services to the deployment and set openstack-origin/source.""" |
2858 | 112 | self.log.info('OpenStackAmuletDeployment: adding services') | ||
2859 | 113 | |||
2860 | 73 | other_services = self._determine_branch_locations(other_services) | 114 | other_services = self._determine_branch_locations(other_services) |
2861 | 74 | 115 | ||
2862 | 75 | super(OpenStackAmuletDeployment, self)._add_services(this_service, | 116 | super(OpenStackAmuletDeployment, self)._add_services(this_service, |
2863 | @@ -77,29 +118,105 @@ | |||
2864 | 77 | 118 | ||
2865 | 78 | services = other_services | 119 | services = other_services |
2866 | 79 | services.append(this_service) | 120 | services.append(this_service) |
2867 | 121 | |||
2868 | 122 | # Charms which should use the source config option | ||
2869 | 80 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', | 123 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', |
2874 | 81 | 'ceph-osd', 'ceph-radosgw'] | 124 | 'ceph-osd', 'ceph-radosgw', 'ceph-mon'] |
2875 | 82 | # Most OpenStack subordinate charms do not expose an origin option | 125 | |
2876 | 83 | # as that is controlled by the principle. | 126 | # Charms which can not use openstack-origin, ie. many subordinates |
2877 | 84 | ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch'] | 127 | no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', |
2878 | 128 | 'openvswitch-odl', 'neutron-api-odl', 'odl-controller', | ||
2879 | 129 | 'cinder-backup', 'nexentaedge-data', | ||
2880 | 130 | 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', | ||
2881 | 131 | 'cinder-nexentaedge', 'nexentaedge-mgmt'] | ||
2882 | 85 | 132 | ||
2883 | 86 | if self.openstack: | 133 | if self.openstack: |
2884 | 87 | for svc in services: | 134 | for svc in services: |
2886 | 88 | if svc['name'] not in use_source + ignore: | 135 | if svc['name'] not in use_source + no_origin: |
2887 | 89 | config = {'openstack-origin': self.openstack} | 136 | config = {'openstack-origin': self.openstack} |
2888 | 90 | self.d.configure(svc['name'], config) | 137 | self.d.configure(svc['name'], config) |
2889 | 91 | 138 | ||
2890 | 92 | if self.source: | 139 | if self.source: |
2891 | 93 | for svc in services: | 140 | for svc in services: |
2893 | 94 | if svc['name'] in use_source and svc['name'] not in ignore: | 141 | if svc['name'] in use_source and svc['name'] not in no_origin: |
2894 | 95 | config = {'source': self.source} | 142 | config = {'source': self.source} |
2895 | 96 | self.d.configure(svc['name'], config) | 143 | self.d.configure(svc['name'], config) |
2896 | 97 | 144 | ||
2897 | 98 | def _configure_services(self, configs): | 145 | def _configure_services(self, configs): |
2898 | 99 | """Configure all of the services.""" | 146 | """Configure all of the services.""" |
2899 | 147 | self.log.info('OpenStackAmuletDeployment: configure services') | ||
2900 | 100 | for service, config in six.iteritems(configs): | 148 | for service, config in six.iteritems(configs): |
2901 | 101 | self.d.configure(service, config) | 149 | self.d.configure(service, config) |
2902 | 102 | 150 | ||
2903 | 151 | def _auto_wait_for_status(self, message=None, exclude_services=None, | ||
2904 | 152 | include_only=None, timeout=1800): | ||
2905 | 153 | """Wait for all units to have a specific extended status, except | ||
2906 | 154 | for any defined as excluded. Unless specified via message, any | ||
2907 | 155 | status containing any case of 'ready' will be considered a match. | ||
2908 | 156 | |||
2909 | 157 | Examples of message usage: | ||
2910 | 158 | |||
2911 | 159 | Wait for all unit status to CONTAIN any case of 'ready' or 'ok': | ||
2912 | 160 | message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE) | ||
2913 | 161 | |||
2914 | 162 | Wait for all units to reach this status (exact match): | ||
2915 | 163 | message = re.compile('^Unit is ready and clustered$') | ||
2916 | 164 | |||
2917 | 165 | Wait for all units to reach any one of these (exact match): | ||
2918 | 166 | message = re.compile('Unit is ready|OK|Ready') | ||
2919 | 167 | |||
2920 | 168 | Wait for at least one unit to reach this status (exact match): | ||
2921 | 169 | message = {'ready'} | ||
2922 | 170 | |||
2923 | 171 | See Amulet's sentry.wait_for_messages() for message usage detail. | ||
2924 | 172 | https://github.com/juju/amulet/blob/master/amulet/sentry.py | ||
2925 | 173 | |||
2926 | 174 | :param message: Expected status match | ||
2927 | 175 | :param exclude_services: List of juju service names to ignore, | ||
2928 | 176 | not to be used in conjuction with include_only. | ||
2929 | 177 | :param include_only: List of juju service names to exclusively check, | ||
2930 | 178 | not to be used in conjuction with exclude_services. | ||
2931 | 179 | :param timeout: Maximum time in seconds to wait for status match | ||
2932 | 180 | :returns: None. Raises if timeout is hit. | ||
2933 | 181 | """ | ||
2934 | 182 | self.log.info('Waiting for extended status on units...') | ||
2935 | 183 | |||
2936 | 184 | all_services = self.d.services.keys() | ||
2937 | 185 | |||
2938 | 186 | if exclude_services and include_only: | ||
2939 | 187 | raise ValueError('exclude_services can not be used ' | ||
2940 | 188 | 'with include_only') | ||
2941 | 189 | |||
2942 | 190 | if message: | ||
2943 | 191 | if isinstance(message, re._pattern_type): | ||
2944 | 192 | match = message.pattern | ||
2945 | 193 | else: | ||
2946 | 194 | match = message | ||
2947 | 195 | |||
2948 | 196 | self.log.debug('Custom extended status wait match: ' | ||
2949 | 197 | '{}'.format(match)) | ||
2950 | 198 | else: | ||
2951 | 199 | self.log.debug('Default extended status wait match: contains ' | ||
2952 | 200 | 'READY (case-insensitive)') | ||
2953 | 201 | message = re.compile('.*ready.*', re.IGNORECASE) | ||
2954 | 202 | |||
2955 | 203 | if exclude_services: | ||
2956 | 204 | self.log.debug('Excluding services from extended status match: ' | ||
2957 | 205 | '{}'.format(exclude_services)) | ||
2958 | 206 | else: | ||
2959 | 207 | exclude_services = [] | ||
2960 | 208 | |||
2961 | 209 | if include_only: | ||
2962 | 210 | services = include_only | ||
2963 | 211 | else: | ||
2964 | 212 | services = list(set(all_services) - set(exclude_services)) | ||
2965 | 213 | |||
2966 | 214 | self.log.debug('Waiting up to {}s for extended status on services: ' | ||
2967 | 215 | '{}'.format(timeout, services)) | ||
2968 | 216 | service_messages = {service: message for service in services} | ||
2969 | 217 | self.d.sentry.wait_for_messages(service_messages, timeout=timeout) | ||
2970 | 218 | self.log.info('OK') | ||
2971 | 219 | |||
2972 | 103 | def _get_openstack_release(self): | 220 | def _get_openstack_release(self): |
2973 | 104 | """Get openstack release. | 221 | """Get openstack release. |
2974 | 105 | 222 | ||
2975 | @@ -111,7 +228,8 @@ | |||
2976 | 111 | self.precise_havana, self.precise_icehouse, | 228 | self.precise_havana, self.precise_icehouse, |
2977 | 112 | self.trusty_icehouse, self.trusty_juno, self.utopic_juno, | 229 | self.trusty_icehouse, self.trusty_juno, self.utopic_juno, |
2978 | 113 | self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, | 230 | self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, |
2980 | 114 | self.wily_liberty) = range(12) | 231 | self.wily_liberty, self.trusty_mitaka, |
2981 | 232 | self.xenial_mitaka) = range(14) | ||
2982 | 115 | 233 | ||
2983 | 116 | releases = { | 234 | releases = { |
2984 | 117 | ('precise', None): self.precise_essex, | 235 | ('precise', None): self.precise_essex, |
2985 | @@ -123,9 +241,11 @@ | |||
2986 | 123 | ('trusty', 'cloud:trusty-juno'): self.trusty_juno, | 241 | ('trusty', 'cloud:trusty-juno'): self.trusty_juno, |
2987 | 124 | ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, | 242 | ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, |
2988 | 125 | ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, | 243 | ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, |
2989 | 244 | ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, | ||
2990 | 126 | ('utopic', None): self.utopic_juno, | 245 | ('utopic', None): self.utopic_juno, |
2991 | 127 | ('vivid', None): self.vivid_kilo, | 246 | ('vivid', None): self.vivid_kilo, |
2993 | 128 | ('wily', None): self.wily_liberty} | 247 | ('wily', None): self.wily_liberty, |
2994 | 248 | ('xenial', None): self.xenial_mitaka} | ||
2995 | 129 | return releases[(self.series, self.openstack)] | 249 | return releases[(self.series, self.openstack)] |
2996 | 130 | 250 | ||
2997 | 131 | def _get_openstack_release_string(self): | 251 | def _get_openstack_release_string(self): |
2998 | @@ -142,6 +262,7 @@ | |||
2999 | 142 | ('utopic', 'juno'), | 262 | ('utopic', 'juno'), |
3000 | 143 | ('vivid', 'kilo'), | 263 | ('vivid', 'kilo'), |
3001 | 144 | ('wily', 'liberty'), | 264 | ('wily', 'liberty'), |
3002 | 265 | ('xenial', 'mitaka'), | ||
3003 | 145 | ]) | 266 | ]) |
3004 | 146 | if self.openstack: | 267 | if self.openstack: |
3005 | 147 | os_origin = self.openstack.split(':')[1] | 268 | os_origin = self.openstack.split(':')[1] |
3006 | 148 | 269 | ||
3007 | === modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py' | |||
3008 | --- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-07-29 18:23:55 +0000 | |||
3009 | +++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2016-05-18 10:06:26 +0000 | |||
3010 | @@ -18,6 +18,7 @@ | |||
3011 | 18 | import json | 18 | import json |
3012 | 19 | import logging | 19 | import logging |
3013 | 20 | import os | 20 | import os |
3014 | 21 | import re | ||
3015 | 21 | import six | 22 | import six |
3016 | 22 | import time | 23 | import time |
3017 | 23 | import urllib | 24 | import urllib |
3018 | @@ -26,7 +27,12 @@ | |||
3019 | 26 | import glanceclient.v1.client as glance_client | 27 | import glanceclient.v1.client as glance_client |
3020 | 27 | import heatclient.v1.client as heat_client | 28 | import heatclient.v1.client as heat_client |
3021 | 28 | import keystoneclient.v2_0 as keystone_client | 29 | import keystoneclient.v2_0 as keystone_client |
3023 | 29 | import novaclient.v1_1.client as nova_client | 30 | from keystoneclient.auth.identity import v3 as keystone_id_v3 |
3024 | 31 | from keystoneclient import session as keystone_session | ||
3025 | 32 | from keystoneclient.v3 import client as keystone_client_v3 | ||
3026 | 33 | |||
3027 | 34 | import novaclient.client as nova_client | ||
3028 | 35 | import pika | ||
3029 | 30 | import swiftclient | 36 | import swiftclient |
3030 | 31 | 37 | ||
3031 | 32 | from charmhelpers.contrib.amulet.utils import ( | 38 | from charmhelpers.contrib.amulet.utils import ( |
3032 | @@ -36,6 +42,8 @@ | |||
3033 | 36 | DEBUG = logging.DEBUG | 42 | DEBUG = logging.DEBUG |
3034 | 37 | ERROR = logging.ERROR | 43 | ERROR = logging.ERROR |
3035 | 38 | 44 | ||
3036 | 45 | NOVA_CLIENT_VERSION = "2" | ||
3037 | 46 | |||
3038 | 39 | 47 | ||
3039 | 40 | class OpenStackAmuletUtils(AmuletUtils): | 48 | class OpenStackAmuletUtils(AmuletUtils): |
3040 | 41 | """OpenStack amulet utilities. | 49 | """OpenStack amulet utilities. |
3041 | @@ -137,7 +145,7 @@ | |||
3042 | 137 | return "role {} does not exist".format(e['name']) | 145 | return "role {} does not exist".format(e['name']) |
3043 | 138 | return ret | 146 | return ret |
3044 | 139 | 147 | ||
3046 | 140 | def validate_user_data(self, expected, actual): | 148 | def validate_user_data(self, expected, actual, api_version=None): |
3047 | 141 | """Validate user data. | 149 | """Validate user data. |
3048 | 142 | 150 | ||
3049 | 143 | Validate a list of actual user data vs a list of expected user | 151 | Validate a list of actual user data vs a list of expected user |
3050 | @@ -148,10 +156,15 @@ | |||
3051 | 148 | for e in expected: | 156 | for e in expected: |
3052 | 149 | found = False | 157 | found = False |
3053 | 150 | for act in actual: | 158 | for act in actual: |
3058 | 151 | a = {'enabled': act.enabled, 'name': act.name, | 159 | if e['name'] == act.name: |
3059 | 152 | 'email': act.email, 'tenantId': act.tenantId, | 160 | a = {'enabled': act.enabled, 'name': act.name, |
3060 | 153 | 'id': act.id} | 161 | 'email': act.email, 'id': act.id} |
3061 | 154 | if e['name'] == a['name']: | 162 | if api_version == 3: |
3062 | 163 | a['default_project_id'] = getattr(act, | ||
3063 | 164 | 'default_project_id', | ||
3064 | 165 | 'none') | ||
3065 | 166 | else: | ||
3066 | 167 | a['tenantId'] = act.tenantId | ||
3067 | 155 | found = True | 168 | found = True |
3068 | 156 | ret = self._validate_dict_data(e, a) | 169 | ret = self._validate_dict_data(e, a) |
3069 | 157 | if ret: | 170 | if ret: |
3070 | @@ -186,15 +199,30 @@ | |||
3071 | 186 | return cinder_client.Client(username, password, tenant, ept) | 199 | return cinder_client.Client(username, password, tenant, ept) |
3072 | 187 | 200 | ||
3073 | 188 | def authenticate_keystone_admin(self, keystone_sentry, user, password, | 201 | def authenticate_keystone_admin(self, keystone_sentry, user, password, |
3075 | 189 | tenant): | 202 | tenant=None, api_version=None, |
3076 | 203 | keystone_ip=None): | ||
3077 | 190 | """Authenticates admin user with the keystone admin endpoint.""" | 204 | """Authenticates admin user with the keystone admin endpoint.""" |
3078 | 191 | self.log.debug('Authenticating keystone admin...') | 205 | self.log.debug('Authenticating keystone admin...') |
3079 | 192 | unit = keystone_sentry | 206 | unit = keystone_sentry |
3085 | 193 | service_ip = unit.relation('shared-db', | 207 | if not keystone_ip: |
3086 | 194 | 'mysql:shared-db')['private-address'] | 208 | keystone_ip = unit.relation('shared-db', |
3087 | 195 | ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) | 209 | 'mysql:shared-db')['private-address'] |
3088 | 196 | return keystone_client.Client(username=user, password=password, | 210 | base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8')) |
3089 | 197 | tenant_name=tenant, auth_url=ep) | 211 | if not api_version or api_version == 2: |
3090 | 212 | ep = base_ep + "/v2.0" | ||
3091 | 213 | return keystone_client.Client(username=user, password=password, | ||
3092 | 214 | tenant_name=tenant, auth_url=ep) | ||
3093 | 215 | else: | ||
3094 | 216 | ep = base_ep + "/v3" | ||
3095 | 217 | auth = keystone_id_v3.Password( | ||
3096 | 218 | user_domain_name='admin_domain', | ||
3097 | 219 | username=user, | ||
3098 | 220 | password=password, | ||
3099 | 221 | domain_name='admin_domain', | ||
3100 | 222 | auth_url=ep, | ||
3101 | 223 | ) | ||
3102 | 224 | sess = keystone_session.Session(auth=auth) | ||
3103 | 225 | return keystone_client_v3.Client(session=sess) | ||
3104 | 198 | 226 | ||
3105 | 199 | def authenticate_keystone_user(self, keystone, user, password, tenant): | 227 | def authenticate_keystone_user(self, keystone, user, password, tenant): |
3106 | 200 | """Authenticates a regular user with the keystone public endpoint.""" | 228 | """Authenticates a regular user with the keystone public endpoint.""" |
3107 | @@ -223,7 +251,8 @@ | |||
3108 | 223 | self.log.debug('Authenticating nova user ({})...'.format(user)) | 251 | self.log.debug('Authenticating nova user ({})...'.format(user)) |
3109 | 224 | ep = keystone.service_catalog.url_for(service_type='identity', | 252 | ep = keystone.service_catalog.url_for(service_type='identity', |
3110 | 225 | endpoint_type='publicURL') | 253 | endpoint_type='publicURL') |
3112 | 226 | return nova_client.Client(username=user, api_key=password, | 254 | return nova_client.Client(NOVA_CLIENT_VERSION, |
3113 | 255 | username=user, api_key=password, | ||
3114 | 227 | project_id=tenant, auth_url=ep) | 256 | project_id=tenant, auth_url=ep) |
3115 | 228 | 257 | ||
3116 | 229 | def authenticate_swift_user(self, keystone, user, password, tenant): | 258 | def authenticate_swift_user(self, keystone, user, password, tenant): |
3117 | @@ -602,3 +631,382 @@ | |||
3118 | 602 | self.log.debug('Ceph {} samples (OK): ' | 631 | self.log.debug('Ceph {} samples (OK): ' |
3119 | 603 | '{}'.format(sample_type, samples)) | 632 | '{}'.format(sample_type, samples)) |
3120 | 604 | return None | 633 | return None |
3121 | 634 | |||
3122 | 635 | # rabbitmq/amqp specific helpers: | ||
3123 | 636 | |||
3124 | 637 | def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200): | ||
3125 | 638 | """Wait for rmq units extended status to show cluster readiness, | ||
3126 | 639 | after an optional initial sleep period. Initial sleep is likely | ||
3127 | 640 | necessary to be effective following a config change, as status | ||
3128 | 641 | message may not instantly update to non-ready.""" | ||
3129 | 642 | |||
3130 | 643 | if init_sleep: | ||
3131 | 644 | time.sleep(init_sleep) | ||
3132 | 645 | |||
3133 | 646 | message = re.compile('^Unit is ready and clustered$') | ||
3134 | 647 | deployment._auto_wait_for_status(message=message, | ||
3135 | 648 | timeout=timeout, | ||
3136 | 649 | include_only=['rabbitmq-server']) | ||
3137 | 650 | |||
3138 | 651 | def add_rmq_test_user(self, sentry_units, | ||
3139 | 652 | username="testuser1", password="changeme"): | ||
3140 | 653 | """Add a test user via the first rmq juju unit, check connection as | ||
3141 | 654 | the new user against all sentry units. | ||
3142 | 655 | |||
3143 | 656 | :param sentry_units: list of sentry unit pointers | ||
3144 | 657 | :param username: amqp user name, default to testuser1 | ||
3145 | 658 | :param password: amqp user password | ||
3146 | 659 | :returns: None if successful. Raise on error. | ||
3147 | 660 | """ | ||
3148 | 661 | self.log.debug('Adding rmq user ({})...'.format(username)) | ||
3149 | 662 | |||
3150 | 663 | # Check that user does not already exist | ||
3151 | 664 | cmd_user_list = 'rabbitmqctl list_users' | ||
3152 | 665 | output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) | ||
3153 | 666 | if username in output: | ||
3154 | 667 | self.log.warning('User ({}) already exists, returning ' | ||
3155 | 668 | 'gracefully.'.format(username)) | ||
3156 | 669 | return | ||
3157 | 670 | |||
3158 | 671 | perms = '".*" ".*" ".*"' | ||
3159 | 672 | cmds = ['rabbitmqctl add_user {} {}'.format(username, password), | ||
3160 | 673 | 'rabbitmqctl set_permissions {} {}'.format(username, perms)] | ||
3161 | 674 | |||
3162 | 675 | # Add user via first unit | ||
3163 | 676 | for cmd in cmds: | ||
3164 | 677 | output, _ = self.run_cmd_unit(sentry_units[0], cmd) | ||
3165 | 678 | |||
3166 | 679 | # Check connection against the other sentry_units | ||
3167 | 680 | self.log.debug('Checking user connect against units...') | ||
3168 | 681 | for sentry_unit in sentry_units: | ||
3169 | 682 | connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, | ||
3170 | 683 | username=username, | ||
3171 | 684 | password=password) | ||
3172 | 685 | connection.close() | ||
3173 | 686 | |||
3174 | 687 | def delete_rmq_test_user(self, sentry_units, username="testuser1"): | ||
3175 | 688 | """Delete a rabbitmq user via the first rmq juju unit. | ||
3176 | 689 | |||
3177 | 690 | :param sentry_units: list of sentry unit pointers | ||
3178 | 691 | :param username: amqp user name, default to testuser1 | ||
3179 | 692 | :param password: amqp user password | ||
3180 | 693 | :returns: None if successful or no such user. | ||
3181 | 694 | """ | ||
3182 | 695 | self.log.debug('Deleting rmq user ({})...'.format(username)) | ||
3183 | 696 | |||
3184 | 697 | # Check that the user exists | ||
3185 | 698 | cmd_user_list = 'rabbitmqctl list_users' | ||
3186 | 699 | output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) | ||
3187 | 700 | |||
3188 | 701 | if username not in output: | ||
3189 | 702 | self.log.warning('User ({}) does not exist, returning ' | ||
3190 | 703 | 'gracefully.'.format(username)) | ||
3191 | 704 | return | ||
3192 | 705 | |||
3193 | 706 | # Delete the user | ||
3194 | 707 | cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) | ||
3195 | 708 | output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) | ||
3196 | 709 | |||
3197 | 710 | def get_rmq_cluster_status(self, sentry_unit): | ||
3198 | 711 | """Execute rabbitmq cluster status command on a unit and return | ||
3199 | 712 | the full output. | ||
3200 | 713 | |||
3201 | 714 | :param unit: sentry unit | ||
3202 | 715 | :returns: String containing console output of cluster status command | ||
3203 | 716 | """ | ||
3204 | 717 | cmd = 'rabbitmqctl cluster_status' | ||
3205 | 718 | output, _ = self.run_cmd_unit(sentry_unit, cmd) | ||
3206 | 719 | self.log.debug('{} cluster_status:\n{}'.format( | ||
3207 | 720 | sentry_unit.info['unit_name'], output)) | ||
3208 | 721 | return str(output) | ||
3209 | 722 | |||
3210 | 723 | def get_rmq_cluster_running_nodes(self, sentry_unit): | ||
3211 | 724 | """Parse rabbitmqctl cluster_status output string, return list of | ||
3212 | 725 | running rabbitmq cluster nodes. | ||
3213 | 726 | |||
3214 | 727 | :param unit: sentry unit | ||
3215 | 728 | :returns: List containing node names of running nodes | ||
3216 | 729 | """ | ||
3217 | 730 | # NOTE(beisner): rabbitmqctl cluster_status output is not | ||
3218 | 731 | # json-parsable, do string chop foo, then json.loads that. | ||
3219 | 732 | str_stat = self.get_rmq_cluster_status(sentry_unit) | ||
3220 | 733 | if 'running_nodes' in str_stat: | ||
3221 | 734 | pos_start = str_stat.find("{running_nodes,") + 15 | ||
3222 | 735 | pos_end = str_stat.find("]},", pos_start) + 1 | ||
3223 | 736 | str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') | ||
3224 | 737 | run_nodes = json.loads(str_run_nodes) | ||
3225 | 738 | return run_nodes | ||
3226 | 739 | else: | ||
3227 | 740 | return [] | ||
3228 | 741 | |||
3229 | 742 | def validate_rmq_cluster_running_nodes(self, sentry_units): | ||
3230 | 743 | """Check that all rmq unit hostnames are represented in the | ||
3231 | 744 | cluster_status output of all units. | ||
3232 | 745 | |||
3233 | 746 | :param host_names: dict of juju unit names to host names | ||
3234 | 747 | :param units: list of sentry unit pointers (all rmq units) | ||
3235 | 748 | :returns: None if successful, otherwise return error message | ||
3236 | 749 | """ | ||
3237 | 750 | host_names = self.get_unit_hostnames(sentry_units) | ||
3238 | 751 | errors = [] | ||
3239 | 752 | |||
3240 | 753 | # Query every unit for cluster_status running nodes | ||
3241 | 754 | for query_unit in sentry_units: | ||
3242 | 755 | query_unit_name = query_unit.info['unit_name'] | ||
3243 | 756 | running_nodes = self.get_rmq_cluster_running_nodes(query_unit) | ||
3244 | 757 | |||
3245 | 758 | # Confirm that every unit is represented in the queried unit's | ||
3246 | 759 | # cluster_status running nodes output. | ||
3247 | 760 | for validate_unit in sentry_units: | ||
3248 | 761 | val_host_name = host_names[validate_unit.info['unit_name']] | ||
3249 | 762 | val_node_name = 'rabbit@{}'.format(val_host_name) | ||
3250 | 763 | |||
3251 | 764 | if val_node_name not in running_nodes: | ||
3252 | 765 | errors.append('Cluster member check failed on {}: {} not ' | ||
3253 | 766 | 'in {}\n'.format(query_unit_name, | ||
3254 | 767 | val_node_name, | ||
3255 | 768 | running_nodes)) | ||
3256 | 769 | if errors: | ||
3257 | 770 | return ''.join(errors) | ||
3258 | 771 | |||
3259 | 772 | def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): | ||
3260 | 773 | """Check a single juju rmq unit for ssl and port in the config file.""" | ||
3261 | 774 | host = sentry_unit.info['public-address'] | ||
3262 | 775 | unit_name = sentry_unit.info['unit_name'] | ||
3263 | 776 | |||
3264 | 777 | conf_file = '/etc/rabbitmq/rabbitmq.config' | ||
3265 | 778 | conf_contents = str(self.file_contents_safe(sentry_unit, | ||
3266 | 779 | conf_file, max_wait=16)) | ||
3267 | 780 | # Checks | ||
3268 | 781 | conf_ssl = 'ssl' in conf_contents | ||
3269 | 782 | conf_port = str(port) in conf_contents | ||
3270 | 783 | |||
3271 | 784 | # Port explicitly checked in config | ||
3272 | 785 | if port and conf_port and conf_ssl: | ||
3273 | 786 | self.log.debug('SSL is enabled @{}:{} ' | ||
3274 | 787 | '({})'.format(host, port, unit_name)) | ||
3275 | 788 | return True | ||
3276 | 789 | elif port and not conf_port and conf_ssl: | ||
3277 | 790 | self.log.debug('SSL is enabled @{} but not on port {} ' | ||
3278 | 791 | '({})'.format(host, port, unit_name)) | ||
3279 | 792 | return False | ||
3280 | 793 | # Port not checked (useful when checking that ssl is disabled) | ||
3281 | 794 | elif not port and conf_ssl: | ||
3282 | 795 | self.log.debug('SSL is enabled @{}:{} ' | ||
3283 | 796 | '({})'.format(host, port, unit_name)) | ||
3284 | 797 | return True | ||
3285 | 798 | elif not conf_ssl: | ||
3286 | 799 | self.log.debug('SSL not enabled @{}:{} ' | ||
3287 | 800 | '({})'.format(host, port, unit_name)) | ||
3288 | 801 | return False | ||
3289 | 802 | else: | ||
3290 | 803 | msg = ('Unknown condition when checking SSL status @{}:{} ' | ||
3291 | 804 | '({})'.format(host, port, unit_name)) | ||
3292 | 805 | amulet.raise_status(amulet.FAIL, msg) | ||
3293 | 806 | |||
3294 | 807 | def validate_rmq_ssl_enabled_units(self, sentry_units, port=None): | ||
3295 | 808 | """Check that ssl is enabled on rmq juju sentry units. | ||
3296 | 809 | |||
3297 | 810 | :param sentry_units: list of all rmq sentry units | ||
3298 | 811 | :param port: optional ssl port override to validate | ||
3299 | 812 | :returns: None if successful, otherwise return error message | ||
3300 | 813 | """ | ||
3301 | 814 | for sentry_unit in sentry_units: | ||
3302 | 815 | if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port): | ||
3303 | 816 | return ('Unexpected condition: ssl is disabled on unit ' | ||
3304 | 817 | '({})'.format(sentry_unit.info['unit_name'])) | ||
3305 | 818 | return None | ||
3306 | 819 | |||
3307 | 820 | def validate_rmq_ssl_disabled_units(self, sentry_units): | ||
3308 | 821 | """Check that ssl is enabled on listed rmq juju sentry units. | ||
3309 | 822 | |||
3310 | 823 | :param sentry_units: list of all rmq sentry units | ||
3311 | 824 | :returns: True if successful. Raise on error. | ||
3312 | 825 | """ | ||
3313 | 826 | for sentry_unit in sentry_units: | ||
3314 | 827 | if self.rmq_ssl_is_enabled_on_unit(sentry_unit): | ||
3315 | 828 | return ('Unexpected condition: ssl is enabled on unit ' | ||
3316 | 829 | '({})'.format(sentry_unit.info['unit_name'])) | ||
3317 | 830 | return None | ||
3318 | 831 | |||
3319 | 832 | def configure_rmq_ssl_on(self, sentry_units, deployment, | ||
3320 | 833 | port=None, max_wait=60): | ||
3321 | 834 | """Turn ssl charm config option on, with optional non-default | ||
3322 | 835 | ssl port specification. Confirm that it is enabled on every | ||
3323 | 836 | unit. | ||
3324 | 837 | |||
3325 | 838 | :param sentry_units: list of sentry units | ||
3326 | 839 | :param deployment: amulet deployment object pointer | ||
3327 | 840 | :param port: amqp port, use defaults if None | ||
3328 | 841 | :param max_wait: maximum time to wait in seconds to confirm | ||
3329 | 842 | :returns: None if successful. Raise on error. | ||
3330 | 843 | """ | ||
3331 | 844 | self.log.debug('Setting ssl charm config option: on') | ||
3332 | 845 | |||
3333 | 846 | # Enable RMQ SSL | ||
3334 | 847 | config = {'ssl': 'on'} | ||
3335 | 848 | if port: | ||
3336 | 849 | config['ssl_port'] = port | ||
3337 | 850 | |||
3338 | 851 | deployment.d.configure('rabbitmq-server', config) | ||
3339 | 852 | |||
3340 | 853 | # Wait for unit status | ||
3341 | 854 | self.rmq_wait_for_cluster(deployment) | ||
3342 | 855 | |||
3343 | 856 | # Confirm | ||
3344 | 857 | tries = 0 | ||
3345 | 858 | ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) | ||
3346 | 859 | while ret and tries < (max_wait / 4): | ||
3347 | 860 | time.sleep(4) | ||
3348 | 861 | self.log.debug('Attempt {}: {}'.format(tries, ret)) | ||
3349 | 862 | ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) | ||
3350 | 863 | tries += 1 | ||
3351 | 864 | |||
3352 | 865 | if ret: | ||
3353 | 866 | amulet.raise_status(amulet.FAIL, ret) | ||
3354 | 867 | |||
3355 | 868 | def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): | ||
3356 | 869 | """Turn ssl charm config option off, confirm that it is disabled | ||
3357 | 870 | on every unit. | ||
3358 | 871 | |||
3359 | 872 | :param sentry_units: list of sentry units | ||
3360 | 873 | :param deployment: amulet deployment object pointer | ||
3361 | 874 | :param max_wait: maximum time to wait in seconds to confirm | ||
3362 | 875 | :returns: None if successful. Raise on error. | ||
3363 | 876 | """ | ||
3364 | 877 | self.log.debug('Setting ssl charm config option: off') | ||
3365 | 878 | |||
3366 | 879 | # Disable RMQ SSL | ||
3367 | 880 | config = {'ssl': 'off'} | ||
3368 | 881 | deployment.d.configure('rabbitmq-server', config) | ||
3369 | 882 | |||
3370 | 883 | # Wait for unit status | ||
3371 | 884 | self.rmq_wait_for_cluster(deployment) | ||
3372 | 885 | |||
3373 | 886 | # Confirm | ||
3374 | 887 | tries = 0 | ||
3375 | 888 | ret = self.validate_rmq_ssl_disabled_units(sentry_units) | ||
3376 | 889 | while ret and tries < (max_wait / 4): | ||
3377 | 890 | time.sleep(4) | ||
3378 | 891 | self.log.debug('Attempt {}: {}'.format(tries, ret)) | ||
3379 | 892 | ret = self.validate_rmq_ssl_disabled_units(sentry_units) | ||
3380 | 893 | tries += 1 | ||
3381 | 894 | |||
3382 | 895 | if ret: | ||
3383 | 896 | amulet.raise_status(amulet.FAIL, ret) | ||
3384 | 897 | |||
3385 | 898 | def connect_amqp_by_unit(self, sentry_unit, ssl=False, | ||
3386 | 899 | port=None, fatal=True, | ||
3387 | 900 | username="testuser1", password="changeme"): | ||
3388 | 901 | """Establish and return a pika amqp connection to the rabbitmq service | ||
3389 | 902 | running on a rmq juju unit. | ||
3390 | 903 | |||
3391 | 904 | :param sentry_unit: sentry unit pointer | ||
3392 | 905 | :param ssl: boolean, default to False | ||
3393 | 906 | :param port: amqp port, use defaults if None | ||
3394 | 907 | :param fatal: boolean, default to True (raises on connect error) | ||
3395 | 908 | :param username: amqp user name, default to testuser1 | ||
3396 | 909 | :param password: amqp user password | ||
3397 | 910 | :returns: pika amqp connection pointer or None if failed and non-fatal | ||
3398 | 911 | """ | ||
3399 | 912 | host = sentry_unit.info['public-address'] | ||
3400 | 913 | unit_name = sentry_unit.info['unit_name'] | ||
3401 | 914 | |||
3402 | 915 | # Default port logic if port is not specified | ||
3403 | 916 | if ssl and not port: | ||
3404 | 917 | port = 5671 | ||
3405 | 918 | elif not ssl and not port: | ||
3406 | 919 | port = 5672 | ||
3407 | 920 | |||
3408 | 921 | self.log.debug('Connecting to amqp on {}:{} ({}) as ' | ||
3409 | 922 | '{}...'.format(host, port, unit_name, username)) | ||
3410 | 923 | |||
3411 | 924 | try: | ||
3412 | 925 | credentials = pika.PlainCredentials(username, password) | ||
3413 | 926 | parameters = pika.ConnectionParameters(host=host, port=port, | ||
3414 | 927 | credentials=credentials, | ||
3415 | 928 | ssl=ssl, | ||
3416 | 929 | connection_attempts=3, | ||
3417 | 930 | retry_delay=5, | ||
3418 | 931 | socket_timeout=1) | ||
3419 | 932 | connection = pika.BlockingConnection(parameters) | ||
3420 | 933 | assert connection.server_properties['product'] == 'RabbitMQ' | ||
3421 | 934 | self.log.debug('Connect OK') | ||
3422 | 935 | return connection | ||
3423 | 936 | except Exception as e: | ||
3424 | 937 | msg = ('amqp connection failed to {}:{} as ' | ||
3425 | 938 | '{} ({})'.format(host, port, username, str(e))) | ||
3426 | 939 | if fatal: | ||
3427 | 940 | amulet.raise_status(amulet.FAIL, msg) | ||
3428 | 941 | else: | ||
3429 | 942 | self.log.warn(msg) | ||
3430 | 943 | return None | ||
3431 | 944 | |||
3432 | 945 | def publish_amqp_message_by_unit(self, sentry_unit, message, | ||
3433 | 946 | queue="test", ssl=False, | ||
3434 | 947 | username="testuser1", | ||
3435 | 948 | password="changeme", | ||
3436 | 949 | port=None): | ||
3437 | 950 | """Publish an amqp message to a rmq juju unit. | ||
3438 | 951 | |||
3439 | 952 | :param sentry_unit: sentry unit pointer | ||
3440 | 953 | :param message: amqp message string | ||
3441 | 954 | :param queue: message queue, default to test | ||
3442 | 955 | :param username: amqp user name, default to testuser1 | ||
3443 | 956 | :param password: amqp user password | ||
3444 | 957 | :param ssl: boolean, default to False | ||
3445 | 958 | :param port: amqp port, use defaults if None | ||
3446 | 959 | :returns: None. Raises exception if publish failed. | ||
3447 | 960 | """ | ||
3448 | 961 | self.log.debug('Publishing message to {} queue:\n{}'.format(queue, | ||
3449 | 962 | message)) | ||
3450 | 963 | connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, | ||
3451 | 964 | port=port, | ||
3452 | 965 | username=username, | ||
3453 | 966 | password=password) | ||
3454 | 967 | |||
3455 | 968 | # NOTE(beisner): extra debug here re: pika hang potential: | ||
3456 | 969 | # https://github.com/pika/pika/issues/297 | ||
3457 | 970 | # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw | ||
3458 | 971 | self.log.debug('Defining channel...') | ||
3459 | 972 | channel = connection.channel() | ||
3460 | 973 | self.log.debug('Declaring queue...') | ||
3461 | 974 | channel.queue_declare(queue=queue, auto_delete=False, durable=True) | ||
3462 | 975 | self.log.debug('Publishing message...') | ||
3463 | 976 | channel.basic_publish(exchange='', routing_key=queue, body=message) | ||
3464 | 977 | self.log.debug('Closing channel...') | ||
3465 | 978 | channel.close() | ||
3466 | 979 | self.log.debug('Closing connection...') | ||
3467 | 980 | connection.close() | ||
3468 | 981 | |||
3469 | 982 | def get_amqp_message_by_unit(self, sentry_unit, queue="test", | ||
3470 | 983 | username="testuser1", | ||
3471 | 984 | password="changeme", | ||
3472 | 985 | ssl=False, port=None): | ||
3473 | 986 | """Get an amqp message from a rmq juju unit. | ||
3474 | 987 | |||
3475 | 988 | :param sentry_unit: sentry unit pointer | ||
3476 | 989 | :param queue: message queue, default to test | ||
3477 | 990 | :param username: amqp user name, default to testuser1 | ||
3478 | 991 | :param password: amqp user password | ||
3479 | 992 | :param ssl: boolean, default to False | ||
3480 | 993 | :param port: amqp port, use defaults if None | ||
3481 | 994 | :returns: amqp message body as string. Raise if get fails. | ||
3482 | 995 | """ | ||
3483 | 996 | connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, | ||
3484 | 997 | port=port, | ||
3485 | 998 | username=username, | ||
3486 | 999 | password=password) | ||
3487 | 1000 | channel = connection.channel() | ||
3488 | 1001 | method_frame, _, body = channel.basic_get(queue) | ||
3489 | 1002 | |||
3490 | 1003 | if method_frame: | ||
3491 | 1004 | self.log.debug('Retreived message from {} queue:\n{}'.format(queue, | ||
3492 | 1005 | body)) | ||
3493 | 1006 | channel.basic_ack(method_frame.delivery_tag) | ||
3494 | 1007 | channel.close() | ||
3495 | 1008 | connection.close() | ||
3496 | 1009 | return body | ||
3497 | 1010 | else: | ||
3498 | 1011 | msg = 'No message retrieved.' | ||
3499 | 1012 | amulet.raise_status(amulet.FAIL, msg) | ||
3500 | 605 | 1013 | ||
3501 | === modified file 'hooks/charmhelpers/contrib/openstack/context.py' | |||
3502 | --- hooks/charmhelpers/contrib/openstack/context.py 2015-07-29 18:23:55 +0000 | |||
3503 | +++ hooks/charmhelpers/contrib/openstack/context.py 2016-05-18 10:06:26 +0000 | |||
3504 | @@ -14,12 +14,13 @@ | |||
3505 | 14 | # You should have received a copy of the GNU Lesser General Public License | 14 | # You should have received a copy of the GNU Lesser General Public License |
3506 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
3507 | 16 | 16 | ||
3508 | 17 | import glob | ||
3509 | 17 | import json | 18 | import json |
3510 | 18 | import os | 19 | import os |
3511 | 19 | import re | 20 | import re |
3512 | 20 | import time | 21 | import time |
3513 | 21 | from base64 import b64decode | 22 | from base64 import b64decode |
3515 | 22 | from subprocess import check_call | 23 | from subprocess import check_call, CalledProcessError |
3516 | 23 | 24 | ||
3517 | 24 | import six | 25 | import six |
3518 | 25 | import yaml | 26 | import yaml |
3519 | @@ -44,16 +45,20 @@ | |||
3520 | 44 | INFO, | 45 | INFO, |
3521 | 45 | WARNING, | 46 | WARNING, |
3522 | 46 | ERROR, | 47 | ERROR, |
3523 | 48 | status_set, | ||
3524 | 47 | ) | 49 | ) |
3525 | 48 | 50 | ||
3526 | 49 | from charmhelpers.core.sysctl import create as sysctl_create | 51 | from charmhelpers.core.sysctl import create as sysctl_create |
3527 | 50 | from charmhelpers.core.strutils import bool_from_string | 52 | from charmhelpers.core.strutils import bool_from_string |
3528 | 51 | 53 | ||
3529 | 52 | from charmhelpers.core.host import ( | 54 | from charmhelpers.core.host import ( |
3530 | 55 | get_bond_master, | ||
3531 | 56 | is_phy_iface, | ||
3532 | 53 | list_nics, | 57 | list_nics, |
3533 | 54 | get_nic_hwaddr, | 58 | get_nic_hwaddr, |
3534 | 55 | mkdir, | 59 | mkdir, |
3535 | 56 | write_file, | 60 | write_file, |
3536 | 61 | pwgen, | ||
3537 | 57 | ) | 62 | ) |
3538 | 58 | from charmhelpers.contrib.hahelpers.cluster import ( | 63 | from charmhelpers.contrib.hahelpers.cluster import ( |
3539 | 59 | determine_apache_port, | 64 | determine_apache_port, |
3540 | @@ -84,6 +89,14 @@ | |||
3541 | 84 | is_bridge_member, | 89 | is_bridge_member, |
3542 | 85 | ) | 90 | ) |
3543 | 86 | from charmhelpers.contrib.openstack.utils import get_host_ip | 91 | from charmhelpers.contrib.openstack.utils import get_host_ip |
3544 | 92 | from charmhelpers.core.unitdata import kv | ||
3545 | 93 | |||
3546 | 94 | try: | ||
3547 | 95 | import psutil | ||
3548 | 96 | except ImportError: | ||
3549 | 97 | apt_install('python-psutil', fatal=True) | ||
3550 | 98 | import psutil | ||
3551 | 99 | |||
3552 | 87 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' | 100 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' |
3553 | 88 | ADDRESS_TYPES = ['admin', 'internal', 'public'] | 101 | ADDRESS_TYPES = ['admin', 'internal', 'public'] |
3554 | 89 | 102 | ||
3555 | @@ -192,10 +205,50 @@ | |||
3556 | 192 | class OSContextGenerator(object): | 205 | class OSContextGenerator(object): |
3557 | 193 | """Base class for all context generators.""" | 206 | """Base class for all context generators.""" |
3558 | 194 | interfaces = [] | 207 | interfaces = [] |
3559 | 208 | related = False | ||
3560 | 209 | complete = False | ||
3561 | 210 | missing_data = [] | ||
3562 | 195 | 211 | ||
3563 | 196 | def __call__(self): | 212 | def __call__(self): |
3564 | 197 | raise NotImplementedError | 213 | raise NotImplementedError |
3565 | 198 | 214 | ||
3566 | 215 | def context_complete(self, ctxt): | ||
3567 | 216 | """Check for missing data for the required context data. | ||
3568 | 217 | Set self.missing_data if it exists and return False. | ||
3569 | 218 | Set self.complete if no missing data and return True. | ||
3570 | 219 | """ | ||
3571 | 220 | # Fresh start | ||
3572 | 221 | self.complete = False | ||
3573 | 222 | self.missing_data = [] | ||
3574 | 223 | for k, v in six.iteritems(ctxt): | ||
3575 | 224 | if v is None or v == '': | ||
3576 | 225 | if k not in self.missing_data: | ||
3577 | 226 | self.missing_data.append(k) | ||
3578 | 227 | |||
3579 | 228 | if self.missing_data: | ||
3580 | 229 | self.complete = False | ||
3581 | 230 | log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO) | ||
3582 | 231 | else: | ||
3583 | 232 | self.complete = True | ||
3584 | 233 | return self.complete | ||
3585 | 234 | |||
3586 | 235 | def get_related(self): | ||
3587 | 236 | """Check if any of the context interfaces have relation ids. | ||
3588 | 237 | Set self.related and return True if one of the interfaces | ||
3589 | 238 | has relation ids. | ||
3590 | 239 | """ | ||
3591 | 240 | # Fresh start | ||
3592 | 241 | self.related = False | ||
3593 | 242 | try: | ||
3594 | 243 | for interface in self.interfaces: | ||
3595 | 244 | if relation_ids(interface): | ||
3596 | 245 | self.related = True | ||
3597 | 246 | return self.related | ||
3598 | 247 | except AttributeError as e: | ||
3599 | 248 | log("{} {}" | ||
3600 | 249 | "".format(self, e), 'INFO') | ||
3601 | 250 | return self.related | ||
3602 | 251 | |||
3603 | 199 | 252 | ||
3604 | 200 | class SharedDBContext(OSContextGenerator): | 253 | class SharedDBContext(OSContextGenerator): |
3605 | 201 | interfaces = ['shared-db'] | 254 | interfaces = ['shared-db'] |
3606 | @@ -211,6 +264,7 @@ | |||
3607 | 211 | self.database = database | 264 | self.database = database |
3608 | 212 | self.user = user | 265 | self.user = user |
3609 | 213 | self.ssl_dir = ssl_dir | 266 | self.ssl_dir = ssl_dir |
3610 | 267 | self.rel_name = self.interfaces[0] | ||
3611 | 214 | 268 | ||
3612 | 215 | def __call__(self): | 269 | def __call__(self): |
3613 | 216 | self.database = self.database or config('database') | 270 | self.database = self.database or config('database') |
3614 | @@ -244,6 +298,7 @@ | |||
3615 | 244 | password_setting = self.relation_prefix + '_password' | 298 | password_setting = self.relation_prefix + '_password' |
3616 | 245 | 299 | ||
3617 | 246 | for rid in relation_ids(self.interfaces[0]): | 300 | for rid in relation_ids(self.interfaces[0]): |
3618 | 301 | self.related = True | ||
3619 | 247 | for unit in related_units(rid): | 302 | for unit in related_units(rid): |
3620 | 248 | rdata = relation_get(rid=rid, unit=unit) | 303 | rdata = relation_get(rid=rid, unit=unit) |
3621 | 249 | host = rdata.get('db_host') | 304 | host = rdata.get('db_host') |
3622 | @@ -255,7 +310,7 @@ | |||
3623 | 255 | 'database_password': rdata.get(password_setting), | 310 | 'database_password': rdata.get(password_setting), |
3624 | 256 | 'database_type': 'mysql' | 311 | 'database_type': 'mysql' |
3625 | 257 | } | 312 | } |
3627 | 258 | if context_complete(ctxt): | 313 | if self.context_complete(ctxt): |
3628 | 259 | db_ssl(rdata, ctxt, self.ssl_dir) | 314 | db_ssl(rdata, ctxt, self.ssl_dir) |
3629 | 260 | return ctxt | 315 | return ctxt |
3630 | 261 | return {} | 316 | return {} |
3631 | @@ -276,6 +331,7 @@ | |||
3632 | 276 | 331 | ||
3633 | 277 | ctxt = {} | 332 | ctxt = {} |
3634 | 278 | for rid in relation_ids(self.interfaces[0]): | 333 | for rid in relation_ids(self.interfaces[0]): |
3635 | 334 | self.related = True | ||
3636 | 279 | for unit in related_units(rid): | 335 | for unit in related_units(rid): |
3637 | 280 | rel_host = relation_get('host', rid=rid, unit=unit) | 336 | rel_host = relation_get('host', rid=rid, unit=unit) |
3638 | 281 | rel_user = relation_get('user', rid=rid, unit=unit) | 337 | rel_user = relation_get('user', rid=rid, unit=unit) |
3639 | @@ -285,7 +341,7 @@ | |||
3640 | 285 | 'database_user': rel_user, | 341 | 'database_user': rel_user, |
3641 | 286 | 'database_password': rel_passwd, | 342 | 'database_password': rel_passwd, |
3642 | 287 | 'database_type': 'postgresql'} | 343 | 'database_type': 'postgresql'} |
3644 | 288 | if context_complete(ctxt): | 344 | if self.context_complete(ctxt): |
3645 | 289 | return ctxt | 345 | return ctxt |
3646 | 290 | 346 | ||
3647 | 291 | return {} | 347 | return {} |
3648 | @@ -346,6 +402,7 @@ | |||
3649 | 346 | ctxt['signing_dir'] = cachedir | 402 | ctxt['signing_dir'] = cachedir |
3650 | 347 | 403 | ||
3651 | 348 | for rid in relation_ids(self.rel_name): | 404 | for rid in relation_ids(self.rel_name): |
3652 | 405 | self.related = True | ||
3653 | 349 | for unit in related_units(rid): | 406 | for unit in related_units(rid): |
3654 | 350 | rdata = relation_get(rid=rid, unit=unit) | 407 | rdata = relation_get(rid=rid, unit=unit) |
3655 | 351 | serv_host = rdata.get('service_host') | 408 | serv_host = rdata.get('service_host') |
3656 | @@ -354,6 +411,7 @@ | |||
3657 | 354 | auth_host = format_ipv6_addr(auth_host) or auth_host | 411 | auth_host = format_ipv6_addr(auth_host) or auth_host |
3658 | 355 | svc_protocol = rdata.get('service_protocol') or 'http' | 412 | svc_protocol = rdata.get('service_protocol') or 'http' |
3659 | 356 | auth_protocol = rdata.get('auth_protocol') or 'http' | 413 | auth_protocol = rdata.get('auth_protocol') or 'http' |
3660 | 414 | api_version = rdata.get('api_version') or '2.0' | ||
3661 | 357 | ctxt.update({'service_port': rdata.get('service_port'), | 415 | ctxt.update({'service_port': rdata.get('service_port'), |
3662 | 358 | 'service_host': serv_host, | 416 | 'service_host': serv_host, |
3663 | 359 | 'auth_host': auth_host, | 417 | 'auth_host': auth_host, |
3664 | @@ -362,9 +420,10 @@ | |||
3665 | 362 | 'admin_user': rdata.get('service_username'), | 420 | 'admin_user': rdata.get('service_username'), |
3666 | 363 | 'admin_password': rdata.get('service_password'), | 421 | 'admin_password': rdata.get('service_password'), |
3667 | 364 | 'service_protocol': svc_protocol, | 422 | 'service_protocol': svc_protocol, |
3669 | 365 | 'auth_protocol': auth_protocol}) | 423 | 'auth_protocol': auth_protocol, |
3670 | 424 | 'api_version': api_version}) | ||
3671 | 366 | 425 | ||
3673 | 367 | if context_complete(ctxt): | 426 | if self.context_complete(ctxt): |
3674 | 368 | # NOTE(jamespage) this is required for >= icehouse | 427 | # NOTE(jamespage) this is required for >= icehouse |
3675 | 369 | # so a missing value just indicates keystone needs | 428 | # so a missing value just indicates keystone needs |
3676 | 370 | # upgrading | 429 | # upgrading |
3677 | @@ -403,6 +462,7 @@ | |||
3678 | 403 | ctxt = {} | 462 | ctxt = {} |
3679 | 404 | for rid in relation_ids(self.rel_name): | 463 | for rid in relation_ids(self.rel_name): |
3680 | 405 | ha_vip_only = False | 464 | ha_vip_only = False |
3681 | 465 | self.related = True | ||
3682 | 406 | for unit in related_units(rid): | 466 | for unit in related_units(rid): |
3683 | 407 | if relation_get('clustered', rid=rid, unit=unit): | 467 | if relation_get('clustered', rid=rid, unit=unit): |
3684 | 408 | ctxt['clustered'] = True | 468 | ctxt['clustered'] = True |
3685 | @@ -435,7 +495,7 @@ | |||
3686 | 435 | ha_vip_only = relation_get('ha-vip-only', | 495 | ha_vip_only = relation_get('ha-vip-only', |
3687 | 436 | rid=rid, unit=unit) is not None | 496 | rid=rid, unit=unit) is not None |
3688 | 437 | 497 | ||
3690 | 438 | if context_complete(ctxt): | 498 | if self.context_complete(ctxt): |
3691 | 439 | if 'rabbit_ssl_ca' in ctxt: | 499 | if 'rabbit_ssl_ca' in ctxt: |
3692 | 440 | if not self.ssl_dir: | 500 | if not self.ssl_dir: |
3693 | 441 | log("Charm not setup for ssl support but ssl ca " | 501 | log("Charm not setup for ssl support but ssl ca " |
3694 | @@ -467,7 +527,7 @@ | |||
3695 | 467 | ctxt['oslo_messaging_flags'] = config_flags_parser( | 527 | ctxt['oslo_messaging_flags'] = config_flags_parser( |
3696 | 468 | oslo_messaging_flags) | 528 | oslo_messaging_flags) |
3697 | 469 | 529 | ||
3699 | 470 | if not context_complete(ctxt): | 530 | if not self.complete: |
3700 | 471 | return {} | 531 | return {} |
3701 | 472 | 532 | ||
3702 | 473 | return ctxt | 533 | return ctxt |
3703 | @@ -483,13 +543,15 @@ | |||
3704 | 483 | 543 | ||
3705 | 484 | log('Generating template context for ceph', level=DEBUG) | 544 | log('Generating template context for ceph', level=DEBUG) |
3706 | 485 | mon_hosts = [] | 545 | mon_hosts = [] |
3710 | 486 | auth = None | 546 | ctxt = { |
3711 | 487 | key = None | 547 | 'use_syslog': str(config('use-syslog')).lower() |
3712 | 488 | use_syslog = str(config('use-syslog')).lower() | 548 | } |
3713 | 489 | for rid in relation_ids('ceph'): | 549 | for rid in relation_ids('ceph'): |
3714 | 490 | for unit in related_units(rid): | 550 | for unit in related_units(rid): |
3717 | 491 | auth = relation_get('auth', rid=rid, unit=unit) | 551 | if not ctxt.get('auth'): |
3718 | 492 | key = relation_get('key', rid=rid, unit=unit) | 552 | ctxt['auth'] = relation_get('auth', rid=rid, unit=unit) |
3719 | 553 | if not ctxt.get('key'): | ||
3720 | 554 | ctxt['key'] = relation_get('key', rid=rid, unit=unit) | ||
3721 | 493 | ceph_pub_addr = relation_get('ceph-public-address', rid=rid, | 555 | ceph_pub_addr = relation_get('ceph-public-address', rid=rid, |
3722 | 494 | unit=unit) | 556 | unit=unit) |
3723 | 495 | unit_priv_addr = relation_get('private-address', rid=rid, | 557 | unit_priv_addr = relation_get('private-address', rid=rid, |
3724 | @@ -498,15 +560,12 @@ | |||
3725 | 498 | ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr | 560 | ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr |
3726 | 499 | mon_hosts.append(ceph_addr) | 561 | mon_hosts.append(ceph_addr) |
3727 | 500 | 562 | ||
3732 | 501 | ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)), | 563 | ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts)) |
3729 | 502 | 'auth': auth, | ||
3730 | 503 | 'key': key, | ||
3731 | 504 | 'use_syslog': use_syslog} | ||
3733 | 505 | 564 | ||
3734 | 506 | if not os.path.isdir('/etc/ceph'): | 565 | if not os.path.isdir('/etc/ceph'): |
3735 | 507 | os.mkdir('/etc/ceph') | 566 | os.mkdir('/etc/ceph') |
3736 | 508 | 567 | ||
3738 | 509 | if not context_complete(ctxt): | 568 | if not self.context_complete(ctxt): |
3739 | 510 | return {} | 569 | return {} |
3740 | 511 | 570 | ||
3741 | 512 | ensure_packages(['ceph-common']) | 571 | ensure_packages(['ceph-common']) |
3742 | @@ -579,15 +638,28 @@ | |||
3743 | 579 | if config('haproxy-client-timeout'): | 638 | if config('haproxy-client-timeout'): |
3744 | 580 | ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') | 639 | ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') |
3745 | 581 | 640 | ||
3746 | 641 | if config('haproxy-queue-timeout'): | ||
3747 | 642 | ctxt['haproxy_queue_timeout'] = config('haproxy-queue-timeout') | ||
3748 | 643 | |||
3749 | 644 | if config('haproxy-connect-timeout'): | ||
3750 | 645 | ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout') | ||
3751 | 646 | |||
3752 | 582 | if config('prefer-ipv6'): | 647 | if config('prefer-ipv6'): |
3753 | 583 | ctxt['ipv6'] = True | 648 | ctxt['ipv6'] = True |
3754 | 584 | ctxt['local_host'] = 'ip6-localhost' | 649 | ctxt['local_host'] = 'ip6-localhost' |
3755 | 585 | ctxt['haproxy_host'] = '::' | 650 | ctxt['haproxy_host'] = '::' |
3756 | 586 | ctxt['stat_port'] = ':::8888' | ||
3757 | 587 | else: | 651 | else: |
3758 | 588 | ctxt['local_host'] = '127.0.0.1' | 652 | ctxt['local_host'] = '127.0.0.1' |
3759 | 589 | ctxt['haproxy_host'] = '0.0.0.0' | 653 | ctxt['haproxy_host'] = '0.0.0.0' |
3761 | 590 | ctxt['stat_port'] = ':8888' | 654 | |
3762 | 655 | ctxt['stat_port'] = '8888' | ||
3763 | 656 | |||
3764 | 657 | db = kv() | ||
3765 | 658 | ctxt['stat_password'] = db.get('stat-password') | ||
3766 | 659 | if not ctxt['stat_password']: | ||
3767 | 660 | ctxt['stat_password'] = db.set('stat-password', | ||
3768 | 661 | pwgen(32)) | ||
3769 | 662 | db.flush() | ||
3770 | 591 | 663 | ||
3771 | 592 | for frontend in cluster_hosts: | 664 | for frontend in cluster_hosts: |
3772 | 593 | if (len(cluster_hosts[frontend]['backends']) > 1 or | 665 | if (len(cluster_hosts[frontend]['backends']) > 1 or |
3773 | @@ -878,19 +950,6 @@ | |||
3774 | 878 | 950 | ||
3775 | 879 | return calico_ctxt | 951 | return calico_ctxt |
3776 | 880 | 952 | ||
3777 | 881 | def pg_ctxt(self): | ||
3778 | 882 | driver = neutron_plugin_attribute(self.plugin, 'driver', | ||
3779 | 883 | self.network_manager) | ||
3780 | 884 | config = neutron_plugin_attribute(self.plugin, 'config', | ||
3781 | 885 | self.network_manager) | ||
3782 | 886 | ovs_ctxt = {'core_plugin': driver, | ||
3783 | 887 | 'neutron_plugin': 'plumgrid', | ||
3784 | 888 | 'neutron_security_groups': self.neutron_security_groups, | ||
3785 | 889 | 'local_ip': unit_private_ip(), | ||
3786 | 890 | 'config': config} | ||
3787 | 891 | |||
3788 | 892 | return ovs_ctxt | ||
3789 | 893 | |||
3790 | 894 | def neutron_ctxt(self): | 953 | def neutron_ctxt(self): |
3791 | 895 | if https(): | 954 | if https(): |
3792 | 896 | proto = 'https' | 955 | proto = 'https' |
3793 | @@ -906,6 +965,31 @@ | |||
3794 | 906 | 'neutron_url': '%s://%s:%s' % (proto, host, '9696')} | 965 | 'neutron_url': '%s://%s:%s' % (proto, host, '9696')} |
3795 | 907 | return ctxt | 966 | return ctxt |
3796 | 908 | 967 | ||
3797 | 968 | def pg_ctxt(self): | ||
3798 | 969 | driver = neutron_plugin_attribute(self.plugin, 'driver', | ||
3799 | 970 | self.network_manager) | ||
3800 | 971 | config = neutron_plugin_attribute(self.plugin, 'config', | ||
3801 | 972 | self.network_manager) | ||
3802 | 973 | ovs_ctxt = {'core_plugin': driver, | ||
3803 | 974 | 'neutron_plugin': 'plumgrid', | ||
3804 | 975 | 'neutron_security_groups': self.neutron_security_groups, | ||
3805 | 976 | 'local_ip': unit_private_ip(), | ||
3806 | 977 | 'config': config} | ||
3807 | 978 | return ovs_ctxt | ||
3808 | 979 | |||
3809 | 980 | def midonet_ctxt(self): | ||
3810 | 981 | driver = neutron_plugin_attribute(self.plugin, 'driver', | ||
3811 | 982 | self.network_manager) | ||
3812 | 983 | midonet_config = neutron_plugin_attribute(self.plugin, 'config', | ||
3813 | 984 | self.network_manager) | ||
3814 | 985 | mido_ctxt = {'core_plugin': driver, | ||
3815 | 986 | 'neutron_plugin': 'midonet', | ||
3816 | 987 | 'neutron_security_groups': self.neutron_security_groups, | ||
3817 | 988 | 'local_ip': unit_private_ip(), | ||
3818 | 989 | 'config': midonet_config} | ||
3819 | 990 | |||
3820 | 991 | return mido_ctxt | ||
3821 | 992 | |||
3822 | 909 | def __call__(self): | 993 | def __call__(self): |
3823 | 910 | if self.network_manager not in ['quantum', 'neutron']: | 994 | if self.network_manager not in ['quantum', 'neutron']: |
3824 | 911 | return {} | 995 | return {} |
3825 | @@ -927,6 +1011,8 @@ | |||
3826 | 927 | ctxt.update(self.nuage_ctxt()) | 1011 | ctxt.update(self.nuage_ctxt()) |
3827 | 928 | elif self.plugin == 'plumgrid': | 1012 | elif self.plugin == 'plumgrid': |
3828 | 929 | ctxt.update(self.pg_ctxt()) | 1013 | ctxt.update(self.pg_ctxt()) |
3829 | 1014 | elif self.plugin == 'midonet': | ||
3830 | 1015 | ctxt.update(self.midonet_ctxt()) | ||
3831 | 930 | 1016 | ||
3832 | 931 | alchemy_flags = config('neutron-alchemy-flags') | 1017 | alchemy_flags = config('neutron-alchemy-flags') |
3833 | 932 | if alchemy_flags: | 1018 | if alchemy_flags: |
3834 | @@ -938,7 +1024,6 @@ | |||
3835 | 938 | 1024 | ||
3836 | 939 | 1025 | ||
3837 | 940 | class NeutronPortContext(OSContextGenerator): | 1026 | class NeutronPortContext(OSContextGenerator): |
3838 | 941 | NIC_PREFIXES = ['eth', 'bond'] | ||
3839 | 942 | 1027 | ||
3840 | 943 | def resolve_ports(self, ports): | 1028 | def resolve_ports(self, ports): |
3841 | 944 | """Resolve NICs not yet bound to bridge(s) | 1029 | """Resolve NICs not yet bound to bridge(s) |
3842 | @@ -950,7 +1035,18 @@ | |||
3843 | 950 | 1035 | ||
3844 | 951 | hwaddr_to_nic = {} | 1036 | hwaddr_to_nic = {} |
3845 | 952 | hwaddr_to_ip = {} | 1037 | hwaddr_to_ip = {} |
3847 | 953 | for nic in list_nics(self.NIC_PREFIXES): | 1038 | for nic in list_nics(): |
3848 | 1039 | # Ignore virtual interfaces (bond masters will be identified from | ||
3849 | 1040 | # their slaves) | ||
3850 | 1041 | if not is_phy_iface(nic): | ||
3851 | 1042 | continue | ||
3852 | 1043 | |||
3853 | 1044 | _nic = get_bond_master(nic) | ||
3854 | 1045 | if _nic: | ||
3855 | 1046 | log("Replacing iface '%s' with bond master '%s'" % (nic, _nic), | ||
3856 | 1047 | level=DEBUG) | ||
3857 | 1048 | nic = _nic | ||
3858 | 1049 | |||
3859 | 954 | hwaddr = get_nic_hwaddr(nic) | 1050 | hwaddr = get_nic_hwaddr(nic) |
3860 | 955 | hwaddr_to_nic[hwaddr] = nic | 1051 | hwaddr_to_nic[hwaddr] = nic |
3861 | 956 | addresses = get_ipv4_addr(nic, fatal=False) | 1052 | addresses = get_ipv4_addr(nic, fatal=False) |
3862 | @@ -976,7 +1072,8 @@ | |||
3863 | 976 | # trust it to be the real external network). | 1072 | # trust it to be the real external network). |
3864 | 977 | resolved.append(entry) | 1073 | resolved.append(entry) |
3865 | 978 | 1074 | ||
3867 | 979 | return resolved | 1075 | # Ensure no duplicates |
3868 | 1076 | return list(set(resolved)) | ||
3869 | 980 | 1077 | ||
3870 | 981 | 1078 | ||
3871 | 982 | class OSConfigFlagContext(OSContextGenerator): | 1079 | class OSConfigFlagContext(OSContextGenerator): |
3872 | @@ -1016,6 +1113,20 @@ | |||
3873 | 1016 | config_flags_parser(config_flags)} | 1113 | config_flags_parser(config_flags)} |
3874 | 1017 | 1114 | ||
3875 | 1018 | 1115 | ||
3876 | 1116 | class LibvirtConfigFlagsContext(OSContextGenerator): | ||
3877 | 1117 | """ | ||
3878 | 1118 | This context provides support for extending | ||
3879 | 1119 | the libvirt section through user-defined flags. | ||
3880 | 1120 | """ | ||
3881 | 1121 | def __call__(self): | ||
3882 | 1122 | ctxt = {} | ||
3883 | 1123 | libvirt_flags = config('libvirt-flags') | ||
3884 | 1124 | if libvirt_flags: | ||
3885 | 1125 | ctxt['libvirt_flags'] = config_flags_parser( | ||
3886 | 1126 | libvirt_flags) | ||
3887 | 1127 | return ctxt | ||
3888 | 1128 | |||
3889 | 1129 | |||
3890 | 1019 | class SubordinateConfigContext(OSContextGenerator): | 1130 | class SubordinateConfigContext(OSContextGenerator): |
3891 | 1020 | 1131 | ||
3892 | 1021 | """ | 1132 | """ |
3893 | @@ -1048,7 +1159,7 @@ | |||
3894 | 1048 | 1159 | ||
3895 | 1049 | ctxt = { | 1160 | ctxt = { |
3896 | 1050 | ... other context ... | 1161 | ... other context ... |
3898 | 1051 | 'subordinate_config': { | 1162 | 'subordinate_configuration': { |
3899 | 1052 | 'DEFAULT': { | 1163 | 'DEFAULT': { |
3900 | 1053 | 'key1': 'value1', | 1164 | 'key1': 'value1', |
3901 | 1054 | }, | 1165 | }, |
3902 | @@ -1066,13 +1177,22 @@ | |||
3903 | 1066 | :param config_file : Service's config file to query sections | 1177 | :param config_file : Service's config file to query sections |
3904 | 1067 | :param interface : Subordinate interface to inspect | 1178 | :param interface : Subordinate interface to inspect |
3905 | 1068 | """ | 1179 | """ |
3906 | 1069 | self.service = service | ||
3907 | 1070 | self.config_file = config_file | 1180 | self.config_file = config_file |
3909 | 1071 | self.interface = interface | 1181 | if isinstance(service, list): |
3910 | 1182 | self.services = service | ||
3911 | 1183 | else: | ||
3912 | 1184 | self.services = [service] | ||
3913 | 1185 | if isinstance(interface, list): | ||
3914 | 1186 | self.interfaces = interface | ||
3915 | 1187 | else: | ||
3916 | 1188 | self.interfaces = [interface] | ||
3917 | 1072 | 1189 | ||
3918 | 1073 | def __call__(self): | 1190 | def __call__(self): |
3919 | 1074 | ctxt = {'sections': {}} | 1191 | ctxt = {'sections': {}} |
3921 | 1075 | for rid in relation_ids(self.interface): | 1192 | rids = [] |
3922 | 1193 | for interface in self.interfaces: | ||
3923 | 1194 | rids.extend(relation_ids(interface)) | ||
3924 | 1195 | for rid in rids: | ||
3925 | 1076 | for unit in related_units(rid): | 1196 | for unit in related_units(rid): |
3926 | 1077 | sub_config = relation_get('subordinate_configuration', | 1197 | sub_config = relation_get('subordinate_configuration', |
3927 | 1078 | rid=rid, unit=unit) | 1198 | rid=rid, unit=unit) |
3928 | @@ -1080,33 +1200,37 @@ | |||
3929 | 1080 | try: | 1200 | try: |
3930 | 1081 | sub_config = json.loads(sub_config) | 1201 | sub_config = json.loads(sub_config) |
3931 | 1082 | except: | 1202 | except: |
3959 | 1083 | log('Could not parse JSON from subordinate_config ' | 1203 | log('Could not parse JSON from ' |
3960 | 1084 | 'setting from %s' % rid, level=ERROR) | 1204 | 'subordinate_configuration setting from %s' |
3961 | 1085 | continue | 1205 | % rid, level=ERROR) |
3962 | 1086 | 1206 | continue | |
3963 | 1087 | if self.service not in sub_config: | 1207 | |
3964 | 1088 | log('Found subordinate_config on %s but it contained' | 1208 | for service in self.services: |
3965 | 1089 | 'nothing for %s service' % (rid, self.service), | 1209 | if service not in sub_config: |
3966 | 1090 | level=INFO) | 1210 | log('Found subordinate_configuration on %s but it ' |
3967 | 1091 | continue | 1211 | 'contained nothing for %s service' |
3968 | 1092 | 1212 | % (rid, service), level=INFO) | |
3969 | 1093 | sub_config = sub_config[self.service] | 1213 | continue |
3970 | 1094 | if self.config_file not in sub_config: | 1214 | |
3971 | 1095 | log('Found subordinate_config on %s but it contained' | 1215 | sub_config = sub_config[service] |
3972 | 1096 | 'nothing for %s' % (rid, self.config_file), | 1216 | if self.config_file not in sub_config: |
3973 | 1097 | level=INFO) | 1217 | log('Found subordinate_configuration on %s but it ' |
3974 | 1098 | continue | 1218 | 'contained nothing for %s' |
3975 | 1099 | 1219 | % (rid, self.config_file), level=INFO) | |
3976 | 1100 | sub_config = sub_config[self.config_file] | 1220 | continue |
3977 | 1101 | for k, v in six.iteritems(sub_config): | 1221 | |
3978 | 1102 | if k == 'sections': | 1222 | sub_config = sub_config[self.config_file] |
3979 | 1103 | for section, config_dict in six.iteritems(v): | 1223 | for k, v in six.iteritems(sub_config): |
3980 | 1104 | log("adding section '%s'" % (section), | 1224 | if k == 'sections': |
3981 | 1105 | level=DEBUG) | 1225 | for section, config_list in six.iteritems(v): |
3982 | 1106 | ctxt[k][section] = config_dict | 1226 | log("adding section '%s'" % (section), |
3983 | 1107 | else: | 1227 | level=DEBUG) |
3984 | 1108 | ctxt[k] = v | 1228 | if ctxt[k].get(section): |
3985 | 1109 | 1229 | ctxt[k][section].extend(config_list) | |
3986 | 1230 | else: | ||
3987 | 1231 | ctxt[k][section] = config_list | ||
3988 | 1232 | else: | ||
3989 | 1233 | ctxt[k] = v | ||
3990 | 1110 | log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) | 1234 | log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) |
3991 | 1111 | return ctxt | 1235 | return ctxt |
3992 | 1112 | 1236 | ||
3993 | @@ -1143,13 +1267,11 @@ | |||
3994 | 1143 | 1267 | ||
3995 | 1144 | @property | 1268 | @property |
3996 | 1145 | def num_cpus(self): | 1269 | def num_cpus(self): |
4004 | 1146 | try: | 1270 | # NOTE: use cpu_count if present (16.04 support) |
4005 | 1147 | from psutil import NUM_CPUS | 1271 | if hasattr(psutil, 'cpu_count'): |
4006 | 1148 | except ImportError: | 1272 | return psutil.cpu_count() |
4007 | 1149 | apt_install('python-psutil', fatal=True) | 1273 | else: |
4008 | 1150 | from psutil import NUM_CPUS | 1274 | return psutil.NUM_CPUS |
4002 | 1151 | |||
4003 | 1152 | return NUM_CPUS | ||
4009 | 1153 | 1275 | ||
4010 | 1154 | def __call__(self): | 1276 | def __call__(self): |
4011 | 1155 | multiplier = config('worker-multiplier') or 0 | 1277 | multiplier = config('worker-multiplier') or 0 |
4012 | @@ -1283,15 +1405,19 @@ | |||
4013 | 1283 | def __call__(self): | 1405 | def __call__(self): |
4014 | 1284 | ports = config('data-port') | 1406 | ports = config('data-port') |
4015 | 1285 | if ports: | 1407 | if ports: |
4016 | 1408 | # Map of {port/mac:bridge} | ||
4017 | 1286 | portmap = parse_data_port_mappings(ports) | 1409 | portmap = parse_data_port_mappings(ports) |
4019 | 1287 | ports = portmap.values() | 1410 | ports = portmap.keys() |
4020 | 1411 | # Resolve provided ports or mac addresses and filter out those | ||
4021 | 1412 | # already attached to a bridge. | ||
4022 | 1288 | resolved = self.resolve_ports(ports) | 1413 | resolved = self.resolve_ports(ports) |
4023 | 1414 | # FIXME: is this necessary? | ||
4024 | 1289 | normalized = {get_nic_hwaddr(port): port for port in resolved | 1415 | normalized = {get_nic_hwaddr(port): port for port in resolved |
4025 | 1290 | if port not in ports} | 1416 | if port not in ports} |
4026 | 1291 | normalized.update({port: port for port in resolved | 1417 | normalized.update({port: port for port in resolved |
4027 | 1292 | if port in ports}) | 1418 | if port in ports}) |
4028 | 1293 | if resolved: | 1419 | if resolved: |
4030 | 1294 | return {bridge: normalized[port] for bridge, port in | 1420 | return {normalized[port]: bridge for port, bridge in |
4031 | 1295 | six.iteritems(portmap) if port in normalized.keys()} | 1421 | six.iteritems(portmap) if port in normalized.keys()} |
4032 | 1296 | 1422 | ||
4033 | 1297 | return None | 1423 | return None |
4034 | @@ -1302,12 +1428,22 @@ | |||
4035 | 1302 | def __call__(self): | 1428 | def __call__(self): |
4036 | 1303 | ctxt = {} | 1429 | ctxt = {} |
4037 | 1304 | mappings = super(PhyNICMTUContext, self).__call__() | 1430 | mappings = super(PhyNICMTUContext, self).__call__() |
4040 | 1305 | if mappings and mappings.values(): | 1431 | if mappings and mappings.keys(): |
4041 | 1306 | ports = mappings.values() | 1432 | ports = sorted(mappings.keys()) |
4042 | 1307 | napi_settings = NeutronAPIContext()() | 1433 | napi_settings = NeutronAPIContext()() |
4043 | 1308 | mtu = napi_settings.get('network_device_mtu') | 1434 | mtu = napi_settings.get('network_device_mtu') |
4044 | 1435 | all_ports = set() | ||
4045 | 1436 | # If any of ports is a vlan device, its underlying device must have | ||
4046 | 1437 | # mtu applied first. | ||
4047 | 1438 | for port in ports: | ||
4048 | 1439 | for lport in glob.glob("/sys/class/net/%s/lower_*" % port): | ||
4049 | 1440 | lport = os.path.basename(lport) | ||
4050 | 1441 | all_ports.add(lport.split('_')[1]) | ||
4051 | 1442 | |||
4052 | 1443 | all_ports = list(all_ports) | ||
4053 | 1444 | all_ports.extend(ports) | ||
4054 | 1309 | if mtu: | 1445 | if mtu: |
4056 | 1310 | ctxt["devs"] = '\\n'.join(ports) | 1446 | ctxt["devs"] = '\\n'.join(all_ports) |
4057 | 1311 | ctxt['mtu'] = mtu | 1447 | ctxt['mtu'] = mtu |
4058 | 1312 | 1448 | ||
4059 | 1313 | return ctxt | 1449 | return ctxt |
4060 | @@ -1338,7 +1474,110 @@ | |||
4061 | 1338 | rdata.get('service_protocol') or 'http', | 1474 | rdata.get('service_protocol') or 'http', |
4062 | 1339 | 'auth_protocol': | 1475 | 'auth_protocol': |
4063 | 1340 | rdata.get('auth_protocol') or 'http', | 1476 | rdata.get('auth_protocol') or 'http', |
4064 | 1477 | 'api_version': | ||
4065 | 1478 | rdata.get('api_version') or '2.0', | ||
4066 | 1341 | } | 1479 | } |
4068 | 1342 | if context_complete(ctxt): | 1480 | if self.context_complete(ctxt): |
4069 | 1343 | return ctxt | 1481 | return ctxt |
4070 | 1344 | return {} | 1482 | return {} |
4071 | 1483 | |||
4072 | 1484 | |||
4073 | 1485 | class InternalEndpointContext(OSContextGenerator): | ||
4074 | 1486 | """Internal endpoint context. | ||
4075 | 1487 | |||
4076 | 1488 | This context provides the endpoint type used for communication between | ||
4077 | 1489 | services e.g. between Nova and Cinder internally. Openstack uses Public | ||
4078 | 1490 | endpoints by default so this allows admins to optionally use internal | ||
4079 | 1491 | endpoints. | ||
4080 | 1492 | """ | ||
4081 | 1493 | def __call__(self): | ||
4082 | 1494 | return {'use_internal_endpoints': config('use-internal-endpoints')} | ||
4083 | 1495 | |||
4084 | 1496 | |||
4085 | 1497 | class AppArmorContext(OSContextGenerator): | ||
4086 | 1498 | """Base class for apparmor contexts.""" | ||
4087 | 1499 | |||
4088 | 1500 | def __init__(self): | ||
4089 | 1501 | self._ctxt = None | ||
4090 | 1502 | self.aa_profile = None | ||
4091 | 1503 | self.aa_utils_packages = ['apparmor-utils'] | ||
4092 | 1504 | |||
4093 | 1505 | @property | ||
4094 | 1506 | def ctxt(self): | ||
4095 | 1507 | if self._ctxt is not None: | ||
4096 | 1508 | return self._ctxt | ||
4097 | 1509 | self._ctxt = self._determine_ctxt() | ||
4098 | 1510 | return self._ctxt | ||
4099 | 1511 | |||
4100 | 1512 | def _determine_ctxt(self): | ||
4101 | 1513 | """ | ||
4102 | 1514 | Validate aa-profile-mode settings is disable, enforce, or complain. | ||
4103 | 1515 | |||
4104 | 1516 | :return ctxt: Dictionary of the apparmor profile or None | ||
4105 | 1517 | """ | ||
4106 | 1518 | if config('aa-profile-mode') in ['disable', 'enforce', 'complain']: | ||
4107 | 1519 | ctxt = {'aa-profile-mode': config('aa-profile-mode')} | ||
4108 | 1520 | else: | ||
4109 | 1521 | ctxt = None | ||
4110 | 1522 | return ctxt | ||
4111 | 1523 | |||
4112 | 1524 | def __call__(self): | ||
4113 | 1525 | return self.ctxt | ||
4114 | 1526 | |||
4115 | 1527 | def install_aa_utils(self): | ||
4116 | 1528 | """ | ||
4117 | 1529 | Install packages required for apparmor configuration. | ||
4118 | 1530 | """ | ||
4119 | 1531 | log("Installing apparmor utils.") | ||
4120 | 1532 | ensure_packages(self.aa_utils_packages) | ||
4121 | 1533 | |||
4122 | 1534 | def manually_disable_aa_profile(self): | ||
4123 | 1535 | """ | ||
4124 | 1536 | Manually disable an apparmor profile. | ||
4125 | 1537 | |||
4126 | 1538 | If aa-profile-mode is set to disabled (default) this is required as the | ||
4127 | 1539 | template has been written but apparmor is yet unaware of the profile | ||
4128 | 1540 | and aa-disable aa-profile fails. Without this the profile would kick | ||
4129 | 1541 | into enforce mode on the next service restart. | ||
4130 | 1542 | |||
4131 | 1543 | """ | ||
4132 | 1544 | profile_path = '/etc/apparmor.d' | ||
4133 | 1545 | disable_path = '/etc/apparmor.d/disable' | ||
4134 | 1546 | if not os.path.lexists(os.path.join(disable_path, self.aa_profile)): | ||
4135 | 1547 | os.symlink(os.path.join(profile_path, self.aa_profile), | ||
4136 | 1548 | os.path.join(disable_path, self.aa_profile)) | ||
4137 | 1549 | |||
4138 | 1550 | def setup_aa_profile(self): | ||
4139 | 1551 | """ | ||
4140 | 1552 | Setup an apparmor profile. | ||
4141 | 1553 | The ctxt dictionary will contain the apparmor profile mode and | ||
4142 | 1554 | the apparmor profile name. | ||
4143 | 1555 | Makes calls out to aa-disable, aa-complain, or aa-enforce to setup | ||
4144 | 1556 | the apparmor profile. | ||
4145 | 1557 | """ | ||
4146 | 1558 | self() | ||
4147 | 1559 | if not self.ctxt: | ||
4148 | 1560 | log("Not enabling apparmor Profile") | ||
4149 | 1561 | return | ||
4150 | 1562 | self.install_aa_utils() | ||
4151 | 1563 | cmd = ['aa-{}'.format(self.ctxt['aa-profile-mode'])] | ||
4152 | 1564 | cmd.append(self.ctxt['aa-profile']) | ||
4153 | 1565 | log("Setting up the apparmor profile for {} in {} mode." | ||
4154 | 1566 | "".format(self.ctxt['aa-profile'], self.ctxt['aa-profile-mode'])) | ||
4155 | 1567 | try: | ||
4156 | 1568 | check_call(cmd) | ||
4157 | 1569 | except CalledProcessError as e: | ||
4158 | 1570 | # If aa-profile-mode is set to disabled (default) manual | ||
4159 | 1571 | # disabling is required as the template has been written but | ||
4160 | 1572 | # apparmor is yet unaware of the profile and aa-disable aa-profile | ||
4161 | 1573 | # fails. If aa-disable learns to read profile files first this can | ||
4162 | 1574 | # be removed. | ||
4163 | 1575 | if self.ctxt['aa-profile-mode'] == 'disable': | ||
4164 | 1576 | log("Manually disabling the apparmor profile for {}." | ||
4165 | 1577 | "".format(self.ctxt['aa-profile'])) | ||
4166 | 1578 | self.manually_disable_aa_profile() | ||
4167 | 1579 | return | ||
4168 | 1580 | status_set('blocked', "Apparmor profile {} failed to be set to {}." | ||
4169 | 1581 | "".format(self.ctxt['aa-profile'], | ||
4170 | 1582 | self.ctxt['aa-profile-mode'])) | ||
4171 | 1583 | raise e | ||
4172 | 1345 | 1584 | ||
4173 | === modified file 'hooks/charmhelpers/contrib/openstack/ip.py' | |||
4174 | --- hooks/charmhelpers/contrib/openstack/ip.py 2015-07-29 18:23:55 +0000 | |||
4175 | +++ hooks/charmhelpers/contrib/openstack/ip.py 2016-05-18 10:06:26 +0000 | |||
4176 | @@ -14,16 +14,19 @@ | |||
4177 | 14 | # You should have received a copy of the GNU Lesser General Public License | 14 | # You should have received a copy of the GNU Lesser General Public License |
4178 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
4179 | 16 | 16 | ||
4180 | 17 | |||
4181 | 17 | from charmhelpers.core.hookenv import ( | 18 | from charmhelpers.core.hookenv import ( |
4182 | 18 | config, | 19 | config, |
4183 | 19 | unit_get, | 20 | unit_get, |
4184 | 20 | service_name, | 21 | service_name, |
4185 | 22 | network_get_primary_address, | ||
4186 | 21 | ) | 23 | ) |
4187 | 22 | from charmhelpers.contrib.network.ip import ( | 24 | from charmhelpers.contrib.network.ip import ( |
4188 | 23 | get_address_in_network, | 25 | get_address_in_network, |
4189 | 24 | is_address_in_network, | 26 | is_address_in_network, |
4190 | 25 | is_ipv6, | 27 | is_ipv6, |
4191 | 26 | get_ipv6_addr, | 28 | get_ipv6_addr, |
4192 | 29 | resolve_network_cidr, | ||
4193 | 27 | ) | 30 | ) |
4194 | 28 | from charmhelpers.contrib.hahelpers.cluster import is_clustered | 31 | from charmhelpers.contrib.hahelpers.cluster import is_clustered |
4195 | 29 | 32 | ||
4196 | @@ -33,16 +36,19 @@ | |||
4197 | 33 | 36 | ||
4198 | 34 | ADDRESS_MAP = { | 37 | ADDRESS_MAP = { |
4199 | 35 | PUBLIC: { | 38 | PUBLIC: { |
4200 | 39 | 'binding': 'public', | ||
4201 | 36 | 'config': 'os-public-network', | 40 | 'config': 'os-public-network', |
4202 | 37 | 'fallback': 'public-address', | 41 | 'fallback': 'public-address', |
4203 | 38 | 'override': 'os-public-hostname', | 42 | 'override': 'os-public-hostname', |
4204 | 39 | }, | 43 | }, |
4205 | 40 | INTERNAL: { | 44 | INTERNAL: { |
4206 | 45 | 'binding': 'internal', | ||
4207 | 41 | 'config': 'os-internal-network', | 46 | 'config': 'os-internal-network', |
4208 | 42 | 'fallback': 'private-address', | 47 | 'fallback': 'private-address', |
4209 | 43 | 'override': 'os-internal-hostname', | 48 | 'override': 'os-internal-hostname', |
4210 | 44 | }, | 49 | }, |
4211 | 45 | ADMIN: { | 50 | ADMIN: { |
4212 | 51 | 'binding': 'admin', | ||
4213 | 46 | 'config': 'os-admin-network', | 52 | 'config': 'os-admin-network', |
4214 | 47 | 'fallback': 'private-address', | 53 | 'fallback': 'private-address', |
4215 | 48 | 'override': 'os-admin-hostname', | 54 | 'override': 'os-admin-hostname', |
4216 | @@ -110,7 +116,7 @@ | |||
4217 | 110 | correct network. If clustered with no nets defined, return primary vip. | 116 | correct network. If clustered with no nets defined, return primary vip. |
4218 | 111 | 117 | ||
4219 | 112 | If not clustered, return unit address ensuring address is on configured net | 118 | If not clustered, return unit address ensuring address is on configured net |
4221 | 113 | split if one is configured. | 119 | split if one is configured, or a Juju 2.0 extra-binding has been used. |
4222 | 114 | 120 | ||
4223 | 115 | :param endpoint_type: Network endpoing type | 121 | :param endpoint_type: Network endpoing type |
4224 | 116 | """ | 122 | """ |
4225 | @@ -125,23 +131,45 @@ | |||
4226 | 125 | net_type = ADDRESS_MAP[endpoint_type]['config'] | 131 | net_type = ADDRESS_MAP[endpoint_type]['config'] |
4227 | 126 | net_addr = config(net_type) | 132 | net_addr = config(net_type) |
4228 | 127 | net_fallback = ADDRESS_MAP[endpoint_type]['fallback'] | 133 | net_fallback = ADDRESS_MAP[endpoint_type]['fallback'] |
4229 | 134 | binding = ADDRESS_MAP[endpoint_type]['binding'] | ||
4230 | 128 | clustered = is_clustered() | 135 | clustered = is_clustered() |
4236 | 129 | if clustered: | 136 | |
4237 | 130 | if not net_addr: | 137 | if clustered and vips: |
4238 | 131 | # If no net-splits defined, we expect a single vip | 138 | if net_addr: |
4234 | 132 | resolved_address = vips[0] | ||
4235 | 133 | else: | ||
4239 | 134 | for vip in vips: | 139 | for vip in vips: |
4240 | 135 | if is_address_in_network(net_addr, vip): | 140 | if is_address_in_network(net_addr, vip): |
4241 | 136 | resolved_address = vip | 141 | resolved_address = vip |
4242 | 137 | break | 142 | break |
4243 | 143 | else: | ||
4244 | 144 | # NOTE: endeavour to check vips against network space | ||
4245 | 145 | # bindings | ||
4246 | 146 | try: | ||
4247 | 147 | bound_cidr = resolve_network_cidr( | ||
4248 | 148 | network_get_primary_address(binding) | ||
4249 | 149 | ) | ||
4250 | 150 | for vip in vips: | ||
4251 | 151 | if is_address_in_network(bound_cidr, vip): | ||
4252 | 152 | resolved_address = vip | ||
4253 | 153 | break | ||
4254 | 154 | except NotImplementedError: | ||
4255 | 155 | # If no net-splits configured and no support for extra | ||
4256 | 156 | # bindings/network spaces so we expect a single vip | ||
4257 | 157 | resolved_address = vips[0] | ||
4258 | 138 | else: | 158 | else: |
4259 | 139 | if config('prefer-ipv6'): | 159 | if config('prefer-ipv6'): |
4260 | 140 | fallback_addr = get_ipv6_addr(exc_list=vips)[0] | 160 | fallback_addr = get_ipv6_addr(exc_list=vips)[0] |
4261 | 141 | else: | 161 | else: |
4262 | 142 | fallback_addr = unit_get(net_fallback) | 162 | fallback_addr = unit_get(net_fallback) |
4263 | 143 | 163 | ||
4265 | 144 | resolved_address = get_address_in_network(net_addr, fallback_addr) | 164 | if net_addr: |
4266 | 165 | resolved_address = get_address_in_network(net_addr, fallback_addr) | ||
4267 | 166 | else: | ||
4268 | 167 | # NOTE: only try to use extra bindings if legacy network | ||
4269 | 168 | # configuration is not in use | ||
4270 | 169 | try: | ||
4271 | 170 | resolved_address = network_get_primary_address(binding) | ||
4272 | 171 | except NotImplementedError: | ||
4273 | 172 | resolved_address = fallback_addr | ||
4274 | 145 | 173 | ||
4275 | 146 | if resolved_address is None: | 174 | if resolved_address is None: |
4276 | 147 | raise ValueError("Unable to resolve a suitable IP address based on " | 175 | raise ValueError("Unable to resolve a suitable IP address based on " |
4277 | 148 | 176 | ||
4278 | === modified file 'hooks/charmhelpers/contrib/openstack/neutron.py' | |||
4279 | --- hooks/charmhelpers/contrib/openstack/neutron.py 2015-10-02 15:08:10 +0000 | |||
4280 | +++ hooks/charmhelpers/contrib/openstack/neutron.py 2016-05-18 10:06:26 +0000 | |||
4281 | @@ -50,7 +50,7 @@ | |||
4282 | 50 | if kernel_version() >= (3, 13): | 50 | if kernel_version() >= (3, 13): |
4283 | 51 | return [] | 51 | return [] |
4284 | 52 | else: | 52 | else: |
4286 | 53 | return ['openvswitch-datapath-dkms'] | 53 | return [headers_package(), 'openvswitch-datapath-dkms'] |
4287 | 54 | 54 | ||
4288 | 55 | 55 | ||
4289 | 56 | # legacy | 56 | # legacy |
4290 | @@ -70,7 +70,7 @@ | |||
4291 | 70 | relation_prefix='neutron', | 70 | relation_prefix='neutron', |
4292 | 71 | ssl_dir=QUANTUM_CONF_DIR)], | 71 | ssl_dir=QUANTUM_CONF_DIR)], |
4293 | 72 | 'services': ['quantum-plugin-openvswitch-agent'], | 72 | 'services': ['quantum-plugin-openvswitch-agent'], |
4295 | 73 | 'packages': [[headers_package()] + determine_dkms_package(), | 73 | 'packages': [determine_dkms_package(), |
4296 | 74 | ['quantum-plugin-openvswitch-agent']], | 74 | ['quantum-plugin-openvswitch-agent']], |
4297 | 75 | 'server_packages': ['quantum-server', | 75 | 'server_packages': ['quantum-server', |
4298 | 76 | 'quantum-plugin-openvswitch'], | 76 | 'quantum-plugin-openvswitch'], |
4299 | @@ -111,7 +111,7 @@ | |||
4300 | 111 | relation_prefix='neutron', | 111 | relation_prefix='neutron', |
4301 | 112 | ssl_dir=NEUTRON_CONF_DIR)], | 112 | ssl_dir=NEUTRON_CONF_DIR)], |
4302 | 113 | 'services': ['neutron-plugin-openvswitch-agent'], | 113 | 'services': ['neutron-plugin-openvswitch-agent'], |
4304 | 114 | 'packages': [[headers_package()] + determine_dkms_package(), | 114 | 'packages': [determine_dkms_package(), |
4305 | 115 | ['neutron-plugin-openvswitch-agent']], | 115 | ['neutron-plugin-openvswitch-agent']], |
4306 | 116 | 'server_packages': ['neutron-server', | 116 | 'server_packages': ['neutron-server', |
4307 | 117 | 'neutron-plugin-openvswitch'], | 117 | 'neutron-plugin-openvswitch'], |
4308 | @@ -155,7 +155,7 @@ | |||
4309 | 155 | relation_prefix='neutron', | 155 | relation_prefix='neutron', |
4310 | 156 | ssl_dir=NEUTRON_CONF_DIR)], | 156 | ssl_dir=NEUTRON_CONF_DIR)], |
4311 | 157 | 'services': [], | 157 | 'services': [], |
4313 | 158 | 'packages': [[headers_package()] + determine_dkms_package(), | 158 | 'packages': [determine_dkms_package(), |
4314 | 159 | ['neutron-plugin-cisco']], | 159 | ['neutron-plugin-cisco']], |
4315 | 160 | 'server_packages': ['neutron-server', | 160 | 'server_packages': ['neutron-server', |
4316 | 161 | 'neutron-plugin-cisco'], | 161 | 'neutron-plugin-cisco'], |
4317 | @@ -174,7 +174,7 @@ | |||
4318 | 174 | 'neutron-dhcp-agent', | 174 | 'neutron-dhcp-agent', |
4319 | 175 | 'nova-api-metadata', | 175 | 'nova-api-metadata', |
4320 | 176 | 'etcd'], | 176 | 'etcd'], |
4322 | 177 | 'packages': [[headers_package()] + determine_dkms_package(), | 177 | 'packages': [determine_dkms_package(), |
4323 | 178 | ['calico-compute', | 178 | ['calico-compute', |
4324 | 179 | 'bird', | 179 | 'bird', |
4325 | 180 | 'neutron-dhcp-agent', | 180 | 'neutron-dhcp-agent', |
4326 | @@ -209,6 +209,20 @@ | |||
4327 | 209 | 'server_packages': ['neutron-server', | 209 | 'server_packages': ['neutron-server', |
4328 | 210 | 'neutron-plugin-plumgrid'], | 210 | 'neutron-plugin-plumgrid'], |
4329 | 211 | 'server_services': ['neutron-server'] | 211 | 'server_services': ['neutron-server'] |
4330 | 212 | }, | ||
4331 | 213 | 'midonet': { | ||
4332 | 214 | 'config': '/etc/neutron/plugins/midonet/midonet.ini', | ||
4333 | 215 | 'driver': 'midonet.neutron.plugin.MidonetPluginV2', | ||
4334 | 216 | 'contexts': [ | ||
4335 | 217 | context.SharedDBContext(user=config('neutron-database-user'), | ||
4336 | 218 | database=config('neutron-database'), | ||
4337 | 219 | relation_prefix='neutron', | ||
4338 | 220 | ssl_dir=NEUTRON_CONF_DIR)], | ||
4339 | 221 | 'services': [], | ||
4340 | 222 | 'packages': [determine_dkms_package()], | ||
4341 | 223 | 'server_packages': ['neutron-server', | ||
4342 | 224 | 'python-neutron-plugin-midonet'], | ||
4343 | 225 | 'server_services': ['neutron-server'] | ||
4344 | 212 | } | 226 | } |
4345 | 213 | } | 227 | } |
4346 | 214 | if release >= 'icehouse': | 228 | if release >= 'icehouse': |
4347 | @@ -219,6 +233,20 @@ | |||
4348 | 219 | 'neutron-plugin-ml2'] | 233 | 'neutron-plugin-ml2'] |
4349 | 220 | # NOTE: patch in vmware renames nvp->nsx for icehouse onwards | 234 | # NOTE: patch in vmware renames nvp->nsx for icehouse onwards |
4350 | 221 | plugins['nvp'] = plugins['nsx'] | 235 | plugins['nvp'] = plugins['nsx'] |
4351 | 236 | if release >= 'kilo': | ||
4352 | 237 | plugins['midonet']['driver'] = ( | ||
4353 | 238 | 'neutron.plugins.midonet.plugin.MidonetPluginV2') | ||
4354 | 239 | if release >= 'liberty': | ||
4355 | 240 | plugins['midonet']['driver'] = ( | ||
4356 | 241 | 'midonet.neutron.plugin_v1.MidonetPluginV2') | ||
4357 | 242 | plugins['midonet']['server_packages'].remove( | ||
4358 | 243 | 'python-neutron-plugin-midonet') | ||
4359 | 244 | plugins['midonet']['server_packages'].append( | ||
4360 | 245 | 'python-networking-midonet') | ||
4361 | 246 | plugins['plumgrid']['driver'] = ( | ||
4362 | 247 | 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2') | ||
4363 | 248 | plugins['plumgrid']['server_packages'].remove( | ||
4364 | 249 | 'neutron-plugin-plumgrid') | ||
4365 | 222 | return plugins | 250 | return plugins |
4366 | 223 | 251 | ||
4367 | 224 | 252 | ||
4368 | @@ -269,17 +297,30 @@ | |||
4369 | 269 | return 'neutron' | 297 | return 'neutron' |
4370 | 270 | 298 | ||
4371 | 271 | 299 | ||
4373 | 272 | def parse_mappings(mappings): | 300 | def parse_mappings(mappings, key_rvalue=False): |
4374 | 301 | """By default mappings are lvalue keyed. | ||
4375 | 302 | |||
4376 | 303 | If key_rvalue is True, the mapping will be reversed to allow multiple | ||
4377 | 304 | configs for the same lvalue. | ||
4378 | 305 | """ | ||
4379 | 273 | parsed = {} | 306 | parsed = {} |
4380 | 274 | if mappings: | 307 | if mappings: |
4381 | 275 | mappings = mappings.split() | 308 | mappings = mappings.split() |
4382 | 276 | for m in mappings: | 309 | for m in mappings: |
4383 | 277 | p = m.partition(':') | 310 | p = m.partition(':') |
4387 | 278 | key = p[0].strip() | 311 | |
4388 | 279 | if p[1]: | 312 | if key_rvalue: |
4389 | 280 | parsed[key] = p[2].strip() | 313 | key_index = 2 |
4390 | 314 | val_index = 0 | ||
4391 | 315 | # if there is no rvalue skip to next | ||
4392 | 316 | if not p[1]: | ||
4393 | 317 | continue | ||
4394 | 281 | else: | 318 | else: |
4396 | 282 | parsed[key] = '' | 319 | key_index = 0 |
4397 | 320 | val_index = 2 | ||
4398 | 321 | |||
4399 | 322 | key = p[key_index].strip() | ||
4400 | 323 | parsed[key] = p[val_index].strip() | ||
4401 | 283 | 324 | ||
4402 | 284 | return parsed | 325 | return parsed |
4403 | 285 | 326 | ||
4404 | @@ -297,25 +338,25 @@ | |||
4405 | 297 | def parse_data_port_mappings(mappings, default_bridge='br-data'): | 338 | def parse_data_port_mappings(mappings, default_bridge='br-data'): |
4406 | 298 | """Parse data port mappings. | 339 | """Parse data port mappings. |
4407 | 299 | 340 | ||
4409 | 300 | Mappings must be a space-delimited list of bridge:port mappings. | 341 | Mappings must be a space-delimited list of bridge:port. |
4410 | 301 | 342 | ||
4412 | 302 | Returns dict of the form {bridge:port}. | 343 | Returns dict of the form {port:bridge} where ports may be mac addresses or |
4413 | 344 | interface names. | ||
4414 | 303 | """ | 345 | """ |
4416 | 304 | _mappings = parse_mappings(mappings) | 346 | |
4417 | 347 | # NOTE(dosaboy): we use rvalue for key to allow multiple values to be | ||
4418 | 348 | # proposed for <port> since it may be a mac address which will differ | ||
4419 | 349 | # across units this allowing first-known-good to be chosen. | ||
4420 | 350 | _mappings = parse_mappings(mappings, key_rvalue=True) | ||
4421 | 305 | if not _mappings or list(_mappings.values()) == ['']: | 351 | if not _mappings or list(_mappings.values()) == ['']: |
4422 | 306 | if not mappings: | 352 | if not mappings: |
4423 | 307 | return {} | 353 | return {} |
4424 | 308 | 354 | ||
4425 | 309 | # For backwards-compatibility we need to support port-only provided in | 355 | # For backwards-compatibility we need to support port-only provided in |
4426 | 310 | # config. | 356 | # config. |
4435 | 311 | _mappings = {default_bridge: mappings.split()[0]} | 357 | _mappings = {mappings.split()[0]: default_bridge} |
4436 | 312 | 358 | ||
4437 | 313 | bridges = _mappings.keys() | 359 | ports = _mappings.keys() |
4430 | 314 | ports = _mappings.values() | ||
4431 | 315 | if len(set(bridges)) != len(bridges): | ||
4432 | 316 | raise Exception("It is not allowed to have more than one port " | ||
4433 | 317 | "configured on the same bridge") | ||
4434 | 318 | |||
4438 | 319 | if len(set(ports)) != len(ports): | 360 | if len(set(ports)) != len(ports): |
4439 | 320 | raise Exception("It is not allowed to have the same port configured " | 361 | raise Exception("It is not allowed to have the same port configured " |
4440 | 321 | "on more than one bridge") | 362 | "on more than one bridge") |
4441 | 322 | 363 | ||
4442 | === modified file 'hooks/charmhelpers/contrib/openstack/templating.py' | |||
4443 | --- hooks/charmhelpers/contrib/openstack/templating.py 2015-07-29 18:23:55 +0000 | |||
4444 | +++ hooks/charmhelpers/contrib/openstack/templating.py 2016-05-18 10:06:26 +0000 | |||
4445 | @@ -18,7 +18,7 @@ | |||
4446 | 18 | 18 | ||
4447 | 19 | import six | 19 | import six |
4448 | 20 | 20 | ||
4450 | 21 | from charmhelpers.fetch import apt_install | 21 | from charmhelpers.fetch import apt_install, apt_update |
4451 | 22 | from charmhelpers.core.hookenv import ( | 22 | from charmhelpers.core.hookenv import ( |
4452 | 23 | log, | 23 | log, |
4453 | 24 | ERROR, | 24 | ERROR, |
4454 | @@ -29,6 +29,7 @@ | |||
4455 | 29 | try: | 29 | try: |
4456 | 30 | from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions | 30 | from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions |
4457 | 31 | except ImportError: | 31 | except ImportError: |
4458 | 32 | apt_update(fatal=True) | ||
4459 | 32 | apt_install('python-jinja2', fatal=True) | 33 | apt_install('python-jinja2', fatal=True) |
4460 | 33 | from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions | 34 | from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions |
4461 | 34 | 35 | ||
4462 | @@ -112,7 +113,7 @@ | |||
4463 | 112 | 113 | ||
4464 | 113 | def complete_contexts(self): | 114 | def complete_contexts(self): |
4465 | 114 | ''' | 115 | ''' |
4467 | 115 | Return a list of interfaces that have atisfied contexts. | 116 | Return a list of interfaces that have satisfied contexts. |
4468 | 116 | ''' | 117 | ''' |
4469 | 117 | if self._complete_contexts: | 118 | if self._complete_contexts: |
4470 | 118 | return self._complete_contexts | 119 | return self._complete_contexts |
4471 | @@ -293,3 +294,30 @@ | |||
4472 | 293 | [interfaces.extend(i.complete_contexts()) | 294 | [interfaces.extend(i.complete_contexts()) |
4473 | 294 | for i in six.itervalues(self.templates)] | 295 | for i in six.itervalues(self.templates)] |
4474 | 295 | return interfaces | 296 | return interfaces |
4475 | 297 | |||
4476 | 298 | def get_incomplete_context_data(self, interfaces): | ||
4477 | 299 | ''' | ||
4478 | 300 | Return dictionary of relation status of interfaces and any missing | ||
4479 | 301 | required context data. Example: | ||
4480 | 302 | {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True}, | ||
4481 | 303 | 'zeromq-configuration': {'related': False}} | ||
4482 | 304 | ''' | ||
4483 | 305 | incomplete_context_data = {} | ||
4484 | 306 | |||
4485 | 307 | for i in six.itervalues(self.templates): | ||
4486 | 308 | for context in i.contexts: | ||
4487 | 309 | for interface in interfaces: | ||
4488 | 310 | related = False | ||
4489 | 311 | if interface in context.interfaces: | ||
4490 | 312 | related = context.get_related() | ||
4491 | 313 | missing_data = context.missing_data | ||
4492 | 314 | if missing_data: | ||
4493 | 315 | incomplete_context_data[interface] = {'missing_data': missing_data} | ||
4494 | 316 | if related: | ||
4495 | 317 | if incomplete_context_data.get(interface): | ||
4496 | 318 | incomplete_context_data[interface].update({'related': True}) | ||
4497 | 319 | else: | ||
4498 | 320 | incomplete_context_data[interface] = {'related': True} | ||
4499 | 321 | else: | ||
4500 | 322 | incomplete_context_data[interface] = {'related': False} | ||
4501 | 323 | return incomplete_context_data | ||
4502 | 296 | 324 | ||
4503 | === modified file 'hooks/charmhelpers/contrib/openstack/utils.py' | |||
4504 | --- hooks/charmhelpers/contrib/openstack/utils.py 2015-07-29 18:23:55 +0000 | |||
4505 | +++ hooks/charmhelpers/contrib/openstack/utils.py 2016-05-18 10:06:26 +0000 | |||
4506 | @@ -1,5 +1,3 @@ | |||
4507 | 1 | #!/usr/bin/python | ||
4508 | 2 | |||
4509 | 3 | # Copyright 2014-2015 Canonical Limited. | 1 | # Copyright 2014-2015 Canonical Limited. |
4510 | 4 | # | 2 | # |
4511 | 5 | # This file is part of charm-helpers. | 3 | # This file is part of charm-helpers. |
4512 | @@ -24,8 +22,14 @@ | |||
4513 | 24 | import json | 22 | import json |
4514 | 25 | import os | 23 | import os |
4515 | 26 | import sys | 24 | import sys |
4516 | 25 | import re | ||
4517 | 26 | import itertools | ||
4518 | 27 | import functools | ||
4519 | 27 | 28 | ||
4520 | 28 | import six | 29 | import six |
4521 | 30 | import tempfile | ||
4522 | 31 | import traceback | ||
4523 | 32 | import uuid | ||
4524 | 29 | import yaml | 33 | import yaml |
4525 | 30 | 34 | ||
4526 | 31 | from charmhelpers.contrib.network import ip | 35 | from charmhelpers.contrib.network import ip |
4527 | @@ -35,12 +39,18 @@ | |||
4528 | 35 | ) | 39 | ) |
4529 | 36 | 40 | ||
4530 | 37 | from charmhelpers.core.hookenv import ( | 41 | from charmhelpers.core.hookenv import ( |
4531 | 42 | action_fail, | ||
4532 | 43 | action_set, | ||
4533 | 38 | config, | 44 | config, |
4534 | 39 | log as juju_log, | 45 | log as juju_log, |
4535 | 40 | charm_dir, | 46 | charm_dir, |
4536 | 47 | DEBUG, | ||
4537 | 41 | INFO, | 48 | INFO, |
4538 | 49 | related_units, | ||
4539 | 42 | relation_ids, | 50 | relation_ids, |
4541 | 43 | relation_set | 51 | relation_set, |
4542 | 52 | status_set, | ||
4543 | 53 | hook_name | ||
4544 | 44 | ) | 54 | ) |
4545 | 45 | 55 | ||
4546 | 46 | from charmhelpers.contrib.storage.linux.lvm import ( | 56 | from charmhelpers.contrib.storage.linux.lvm import ( |
4547 | @@ -50,7 +60,9 @@ | |||
4548 | 50 | ) | 60 | ) |
4549 | 51 | 61 | ||
4550 | 52 | from charmhelpers.contrib.network.ip import ( | 62 | from charmhelpers.contrib.network.ip import ( |
4552 | 53 | get_ipv6_addr | 63 | get_ipv6_addr, |
4553 | 64 | is_ipv6, | ||
4554 | 65 | port_has_listener, | ||
4555 | 54 | ) | 66 | ) |
4556 | 55 | 67 | ||
4557 | 56 | from charmhelpers.contrib.python.packages import ( | 68 | from charmhelpers.contrib.python.packages import ( |
4558 | @@ -58,7 +70,15 @@ | |||
4559 | 58 | pip_install, | 70 | pip_install, |
4560 | 59 | ) | 71 | ) |
4561 | 60 | 72 | ||
4563 | 61 | from charmhelpers.core.host import lsb_release, mounts, umount | 73 | from charmhelpers.core.host import ( |
4564 | 74 | lsb_release, | ||
4565 | 75 | mounts, | ||
4566 | 76 | umount, | ||
4567 | 77 | service_running, | ||
4568 | 78 | service_pause, | ||
4569 | 79 | service_resume, | ||
4570 | 80 | restart_on_change_helper, | ||
4571 | 81 | ) | ||
4572 | 62 | from charmhelpers.fetch import apt_install, apt_cache, install_remote | 82 | from charmhelpers.fetch import apt_install, apt_cache, install_remote |
4573 | 63 | from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk | 83 | from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk |
4574 | 64 | from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device | 84 | from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device |
4575 | @@ -69,7 +89,6 @@ | |||
4576 | 69 | DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' | 89 | DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' |
4577 | 70 | 'restricted main multiverse universe') | 90 | 'restricted main multiverse universe') |
4578 | 71 | 91 | ||
4579 | 72 | |||
4580 | 73 | UBUNTU_OPENSTACK_RELEASE = OrderedDict([ | 92 | UBUNTU_OPENSTACK_RELEASE = OrderedDict([ |
4581 | 74 | ('oneiric', 'diablo'), | 93 | ('oneiric', 'diablo'), |
4582 | 75 | ('precise', 'essex'), | 94 | ('precise', 'essex'), |
4583 | @@ -80,6 +99,7 @@ | |||
4584 | 80 | ('utopic', 'juno'), | 99 | ('utopic', 'juno'), |
4585 | 81 | ('vivid', 'kilo'), | 100 | ('vivid', 'kilo'), |
4586 | 82 | ('wily', 'liberty'), | 101 | ('wily', 'liberty'), |
4587 | 102 | ('xenial', 'mitaka'), | ||
4588 | 83 | ]) | 103 | ]) |
4589 | 84 | 104 | ||
4590 | 85 | 105 | ||
4591 | @@ -93,31 +113,74 @@ | |||
4592 | 93 | ('2014.2', 'juno'), | 113 | ('2014.2', 'juno'), |
4593 | 94 | ('2015.1', 'kilo'), | 114 | ('2015.1', 'kilo'), |
4594 | 95 | ('2015.2', 'liberty'), | 115 | ('2015.2', 'liberty'), |
4595 | 116 | ('2016.1', 'mitaka'), | ||
4596 | 96 | ]) | 117 | ]) |
4597 | 97 | 118 | ||
4599 | 98 | # The ugly duckling | 119 | # The ugly duckling - must list releases oldest to newest |
4600 | 99 | SWIFT_CODENAMES = OrderedDict([ | 120 | SWIFT_CODENAMES = OrderedDict([ |
4620 | 100 | ('1.4.3', 'diablo'), | 121 | ('diablo', |
4621 | 101 | ('1.4.8', 'essex'), | 122 | ['1.4.3']), |
4622 | 102 | ('1.7.4', 'folsom'), | 123 | ('essex', |
4623 | 103 | ('1.8.0', 'grizzly'), | 124 | ['1.4.8']), |
4624 | 104 | ('1.7.7', 'grizzly'), | 125 | ('folsom', |
4625 | 105 | ('1.7.6', 'grizzly'), | 126 | ['1.7.4']), |
4626 | 106 | ('1.10.0', 'havana'), | 127 | ('grizzly', |
4627 | 107 | ('1.9.1', 'havana'), | 128 | ['1.7.6', '1.7.7', '1.8.0']), |
4628 | 108 | ('1.9.0', 'havana'), | 129 | ('havana', |
4629 | 109 | ('1.13.1', 'icehouse'), | 130 | ['1.9.0', '1.9.1', '1.10.0']), |
4630 | 110 | ('1.13.0', 'icehouse'), | 131 | ('icehouse', |
4631 | 111 | ('1.12.0', 'icehouse'), | 132 | ['1.11.0', '1.12.0', '1.13.0', '1.13.1']), |
4632 | 112 | ('1.11.0', 'icehouse'), | 133 | ('juno', |
4633 | 113 | ('2.0.0', 'juno'), | 134 | ['2.0.0', '2.1.0', '2.2.0']), |
4634 | 114 | ('2.1.0', 'juno'), | 135 | ('kilo', |
4635 | 115 | ('2.2.0', 'juno'), | 136 | ['2.2.1', '2.2.2']), |
4636 | 116 | ('2.2.1', 'kilo'), | 137 | ('liberty', |
4637 | 117 | ('2.2.2', 'kilo'), | 138 | ['2.3.0', '2.4.0', '2.5.0']), |
4638 | 118 | ('2.3.0', 'liberty'), | 139 | ('mitaka', |
4639 | 140 | ['2.5.0', '2.6.0', '2.7.0']), | ||
4640 | 119 | ]) | 141 | ]) |
4641 | 120 | 142 | ||
4642 | 143 | # >= Liberty version->codename mapping | ||
4643 | 144 | PACKAGE_CODENAMES = { | ||
4644 | 145 | 'nova-common': OrderedDict([ | ||
4645 | 146 | ('12.0', 'liberty'), | ||
4646 | 147 | ('13.0', 'mitaka'), | ||
4647 | 148 | ]), | ||
4648 | 149 | 'neutron-common': OrderedDict([ | ||
4649 | 150 | ('7.0', 'liberty'), | ||
4650 | 151 | ('8.0', 'mitaka'), | ||
4651 | 152 | ]), | ||
4652 | 153 | 'cinder-common': OrderedDict([ | ||
4653 | 154 | ('7.0', 'liberty'), | ||
4654 | 155 | ('8.0', 'mitaka'), | ||
4655 | 156 | ]), | ||
4656 | 157 | 'keystone': OrderedDict([ | ||
4657 | 158 | ('8.0', 'liberty'), | ||
4658 | 159 | ('8.1', 'liberty'), | ||
4659 | 160 | ('9.0', 'mitaka'), | ||
4660 | 161 | ]), | ||
4661 | 162 | 'horizon-common': OrderedDict([ | ||
4662 | 163 | ('8.0', 'liberty'), | ||
4663 | 164 | ('9.0', 'mitaka'), | ||
4664 | 165 | ]), | ||
4665 | 166 | 'ceilometer-common': OrderedDict([ | ||
4666 | 167 | ('5.0', 'liberty'), | ||
4667 | 168 | ('6.0', 'mitaka'), | ||
4668 | 169 | ]), | ||
4669 | 170 | 'heat-common': OrderedDict([ | ||
4670 | 171 | ('5.0', 'liberty'), | ||
4671 | 172 | ('6.0', 'mitaka'), | ||
4672 | 173 | ]), | ||
4673 | 174 | 'glance-common': OrderedDict([ | ||
4674 | 175 | ('11.0', 'liberty'), | ||
4675 | 176 | ('12.0', 'mitaka'), | ||
4676 | 177 | ]), | ||
4677 | 178 | 'openstack-dashboard': OrderedDict([ | ||
4678 | 179 | ('8.0', 'liberty'), | ||
4679 | 180 | ('9.0', 'mitaka'), | ||
4680 | 181 | ]), | ||
4681 | 182 | } | ||
4682 | 183 | |||
4683 | 121 | DEFAULT_LOOPBACK_SIZE = '5G' | 184 | DEFAULT_LOOPBACK_SIZE = '5G' |
4684 | 122 | 185 | ||
4685 | 123 | 186 | ||
4686 | @@ -167,9 +230,9 @@ | |||
4687 | 167 | error_out(e) | 230 | error_out(e) |
4688 | 168 | 231 | ||
4689 | 169 | 232 | ||
4691 | 170 | def get_os_version_codename(codename): | 233 | def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): |
4692 | 171 | '''Determine OpenStack version number from codename.''' | 234 | '''Determine OpenStack version number from codename.''' |
4694 | 172 | for k, v in six.iteritems(OPENSTACK_CODENAMES): | 235 | for k, v in six.iteritems(version_map): |
4695 | 173 | if v == codename: | 236 | if v == codename: |
4696 | 174 | return k | 237 | return k |
4697 | 175 | e = 'Could not derive OpenStack version for '\ | 238 | e = 'Could not derive OpenStack version for '\ |
4698 | @@ -177,6 +240,33 @@ | |||
4699 | 177 | error_out(e) | 240 | error_out(e) |
4700 | 178 | 241 | ||
4701 | 179 | 242 | ||
4702 | 243 | def get_os_version_codename_swift(codename): | ||
4703 | 244 | '''Determine OpenStack version number of swift from codename.''' | ||
4704 | 245 | for k, v in six.iteritems(SWIFT_CODENAMES): | ||
4705 | 246 | if k == codename: | ||
4706 | 247 | return v[-1] | ||
4707 | 248 | e = 'Could not derive swift version for '\ | ||
4708 | 249 | 'codename: %s' % codename | ||
4709 | 250 | error_out(e) | ||
4710 | 251 | |||
4711 | 252 | |||
4712 | 253 | def get_swift_codename(version): | ||
4713 | 254 | '''Determine OpenStack codename that corresponds to swift version.''' | ||
4714 | 255 | codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v] | ||
4715 | 256 | if len(codenames) > 1: | ||
4716 | 257 | # If more than one release codename contains this version we determine | ||
4717 | 258 | # the actual codename based on the highest available install source. | ||
4718 | 259 | for codename in reversed(codenames): | ||
4719 | 260 | releases = UBUNTU_OPENSTACK_RELEASE | ||
4720 | 261 | release = [k for k, v in six.iteritems(releases) if codename in v] | ||
4721 | 262 | ret = subprocess.check_output(['apt-cache', 'policy', 'swift']) | ||
4722 | 263 | if codename in ret or release[0] in ret: | ||
4723 | 264 | return codename | ||
4724 | 265 | elif len(codenames) == 1: | ||
4725 | 266 | return codenames[0] | ||
4726 | 267 | return None | ||
4727 | 268 | |||
4728 | 269 | |||
4729 | 180 | def get_os_codename_package(package, fatal=True): | 270 | def get_os_codename_package(package, fatal=True): |
4730 | 181 | '''Derive OpenStack release codename from an installed package.''' | 271 | '''Derive OpenStack release codename from an installed package.''' |
4731 | 182 | import apt_pkg as apt | 272 | import apt_pkg as apt |
4732 | @@ -201,20 +291,33 @@ | |||
4733 | 201 | error_out(e) | 291 | error_out(e) |
4734 | 202 | 292 | ||
4735 | 203 | vers = apt.upstream_version(pkg.current_ver.ver_str) | 293 | vers = apt.upstream_version(pkg.current_ver.ver_str) |
4750 | 204 | 294 | if 'swift' in pkg.name: | |
4751 | 205 | try: | 295 | # Fully x.y.z match for swift versions |
4752 | 206 | if 'swift' in pkg.name: | 296 | match = re.match('^(\d+)\.(\d+)\.(\d+)', vers) |
4753 | 207 | swift_vers = vers[:5] | 297 | else: |
4754 | 208 | if swift_vers not in SWIFT_CODENAMES: | 298 | # x.y match only for 20XX.X |
4755 | 209 | # Deal with 1.10.0 upward | 299 | # and ignore patch level for other packages |
4756 | 210 | swift_vers = vers[:6] | 300 | match = re.match('^(\d+)\.(\d+)', vers) |
4757 | 211 | return SWIFT_CODENAMES[swift_vers] | 301 | |
4758 | 212 | else: | 302 | if match: |
4759 | 213 | vers = vers[:6] | 303 | vers = match.group(0) |
4760 | 214 | return OPENSTACK_CODENAMES[vers] | 304 | |
4761 | 215 | except KeyError: | 305 | # >= Liberty independent project versions |
4762 | 216 | e = 'Could not determine OpenStack codename for version %s' % vers | 306 | if (package in PACKAGE_CODENAMES and |
4763 | 217 | error_out(e) | 307 | vers in PACKAGE_CODENAMES[package]): |
4764 | 308 | return PACKAGE_CODENAMES[package][vers] | ||
4765 | 309 | else: | ||
4766 | 310 | # < Liberty co-ordinated project versions | ||
4767 | 311 | try: | ||
4768 | 312 | if 'swift' in pkg.name: | ||
4769 | 313 | return get_swift_codename(vers) | ||
4770 | 314 | else: | ||
4771 | 315 | return OPENSTACK_CODENAMES[vers] | ||
4772 | 316 | except KeyError: | ||
4773 | 317 | if not fatal: | ||
4774 | 318 | return None | ||
4775 | 319 | e = 'Could not determine OpenStack codename for version %s' % vers | ||
4776 | 320 | error_out(e) | ||
4777 | 218 | 321 | ||
4778 | 219 | 322 | ||
4779 | 220 | def get_os_version_package(pkg, fatal=True): | 323 | def get_os_version_package(pkg, fatal=True): |
4780 | @@ -226,12 +329,14 @@ | |||
4781 | 226 | 329 | ||
4782 | 227 | if 'swift' in pkg: | 330 | if 'swift' in pkg: |
4783 | 228 | vers_map = SWIFT_CODENAMES | 331 | vers_map = SWIFT_CODENAMES |
4784 | 332 | for cname, version in six.iteritems(vers_map): | ||
4785 | 333 | if cname == codename: | ||
4786 | 334 | return version[-1] | ||
4787 | 229 | else: | 335 | else: |
4788 | 230 | vers_map = OPENSTACK_CODENAMES | 336 | vers_map = OPENSTACK_CODENAMES |
4793 | 231 | 337 | for version, cname in six.iteritems(vers_map): | |
4794 | 232 | for version, cname in six.iteritems(vers_map): | 338 | if cname == codename: |
4795 | 233 | if cname == codename: | 339 | return version |
4792 | 234 | return version | ||
4796 | 235 | # e = "Could not determine OpenStack version for package: %s" % pkg | 340 | # e = "Could not determine OpenStack version for package: %s" % pkg |
4797 | 236 | # error_out(e) | 341 | # error_out(e) |
4798 | 237 | 342 | ||
4799 | @@ -256,12 +361,42 @@ | |||
4800 | 256 | 361 | ||
4801 | 257 | 362 | ||
4802 | 258 | def import_key(keyid): | 363 | def import_key(keyid): |
4809 | 259 | cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \ | 364 | key = keyid.strip() |
4810 | 260 | "--recv-keys %s" % keyid | 365 | if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and |
4811 | 261 | try: | 366 | key.endswith('-----END PGP PUBLIC KEY BLOCK-----')): |
4812 | 262 | subprocess.check_call(cmd.split(' ')) | 367 | juju_log("PGP key found (looks like ASCII Armor format)", level=DEBUG) |
4813 | 263 | except subprocess.CalledProcessError: | 368 | juju_log("Importing ASCII Armor PGP key", level=DEBUG) |
4814 | 264 | error_out("Error importing repo key %s" % keyid) | 369 | with tempfile.NamedTemporaryFile() as keyfile: |
4815 | 370 | with open(keyfile.name, 'w') as fd: | ||
4816 | 371 | fd.write(key) | ||
4817 | 372 | fd.write("\n") | ||
4818 | 373 | |||
4819 | 374 | cmd = ['apt-key', 'add', keyfile.name] | ||
4820 | 375 | try: | ||
4821 | 376 | subprocess.check_call(cmd) | ||
4822 | 377 | except subprocess.CalledProcessError: | ||
4823 | 378 | error_out("Error importing PGP key '%s'" % key) | ||
4824 | 379 | else: | ||
4825 | 380 | juju_log("PGP key found (looks like Radix64 format)", level=DEBUG) | ||
4826 | 381 | juju_log("Importing PGP key from keyserver", level=DEBUG) | ||
4827 | 382 | cmd = ['apt-key', 'adv', '--keyserver', | ||
4828 | 383 | 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key] | ||
4829 | 384 | try: | ||
4830 | 385 | subprocess.check_call(cmd) | ||
4831 | 386 | except subprocess.CalledProcessError: | ||
4832 | 387 | error_out("Error importing PGP key '%s'" % key) | ||
4833 | 388 | |||
4834 | 389 | |||
4835 | 390 | def get_source_and_pgp_key(input): | ||
4836 | 391 | """Look for a pgp key ID or ascii-armor key in the given input.""" | ||
4837 | 392 | index = input.strip() | ||
4838 | 393 | index = input.rfind('|') | ||
4839 | 394 | if index < 0: | ||
4840 | 395 | return input, None | ||
4841 | 396 | |||
4842 | 397 | key = input[index + 1:].strip('|') | ||
4843 | 398 | source = input[:index] | ||
4844 | 399 | return source, key | ||
4845 | 265 | 400 | ||
4846 | 266 | 401 | ||
4847 | 267 | def configure_installation_source(rel): | 402 | def configure_installation_source(rel): |
4848 | @@ -273,16 +408,16 @@ | |||
4849 | 273 | with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: | 408 | with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: |
4850 | 274 | f.write(DISTRO_PROPOSED % ubuntu_rel) | 409 | f.write(DISTRO_PROPOSED % ubuntu_rel) |
4851 | 275 | elif rel[:4] == "ppa:": | 410 | elif rel[:4] == "ppa:": |
4853 | 276 | src = rel | 411 | src, key = get_source_and_pgp_key(rel) |
4854 | 412 | if key: | ||
4855 | 413 | import_key(key) | ||
4856 | 414 | |||
4857 | 277 | subprocess.check_call(["add-apt-repository", "-y", src]) | 415 | subprocess.check_call(["add-apt-repository", "-y", src]) |
4858 | 278 | elif rel[:3] == "deb": | 416 | elif rel[:3] == "deb": |
4863 | 279 | l = len(rel.split('|')) | 417 | src, key = get_source_and_pgp_key(rel) |
4864 | 280 | if l == 2: | 418 | if key: |
4861 | 281 | src, key = rel.split('|') | ||
4862 | 282 | juju_log("Importing PPA key from keyserver for %s" % src) | ||
4865 | 283 | import_key(key) | 419 | import_key(key) |
4868 | 284 | elif l == 1: | 420 | |
4867 | 285 | src = rel | ||
4869 | 286 | with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: | 421 | with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: |
4870 | 287 | f.write(src) | 422 | f.write(src) |
4871 | 288 | elif rel[:6] == 'cloud:': | 423 | elif rel[:6] == 'cloud:': |
4872 | @@ -327,6 +462,9 @@ | |||
4873 | 327 | 'liberty': 'trusty-updates/liberty', | 462 | 'liberty': 'trusty-updates/liberty', |
4874 | 328 | 'liberty/updates': 'trusty-updates/liberty', | 463 | 'liberty/updates': 'trusty-updates/liberty', |
4875 | 329 | 'liberty/proposed': 'trusty-proposed/liberty', | 464 | 'liberty/proposed': 'trusty-proposed/liberty', |
4876 | 465 | 'mitaka': 'trusty-updates/mitaka', | ||
4877 | 466 | 'mitaka/updates': 'trusty-updates/mitaka', | ||
4878 | 467 | 'mitaka/proposed': 'trusty-proposed/mitaka', | ||
4879 | 330 | } | 468 | } |
4880 | 331 | 469 | ||
4881 | 332 | try: | 470 | try: |
4882 | @@ -392,9 +530,18 @@ | |||
4883 | 392 | import apt_pkg as apt | 530 | import apt_pkg as apt |
4884 | 393 | src = config('openstack-origin') | 531 | src = config('openstack-origin') |
4885 | 394 | cur_vers = get_os_version_package(package) | 532 | cur_vers = get_os_version_package(package) |
4887 | 395 | available_vers = get_os_version_install_source(src) | 533 | if "swift" in package: |
4888 | 534 | codename = get_os_codename_install_source(src) | ||
4889 | 535 | avail_vers = get_os_version_codename_swift(codename) | ||
4890 | 536 | else: | ||
4891 | 537 | avail_vers = get_os_version_install_source(src) | ||
4892 | 396 | apt.init() | 538 | apt.init() |
4894 | 397 | return apt.version_compare(available_vers, cur_vers) == 1 | 539 | if "swift" in package: |
4895 | 540 | major_cur_vers = cur_vers.split('.', 1)[0] | ||
4896 | 541 | major_avail_vers = avail_vers.split('.', 1)[0] | ||
4897 | 542 | major_diff = apt.version_compare(major_avail_vers, major_cur_vers) | ||
4898 | 543 | return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0) | ||
4899 | 544 | return apt.version_compare(avail_vers, cur_vers) == 1 | ||
4900 | 398 | 545 | ||
4901 | 399 | 546 | ||
4902 | 400 | def ensure_block_device(block_device): | 547 | def ensure_block_device(block_device): |
4903 | @@ -469,6 +616,12 @@ | |||
4904 | 469 | relation_prefix=None): | 616 | relation_prefix=None): |
4905 | 470 | hosts = get_ipv6_addr(dynamic_only=False) | 617 | hosts = get_ipv6_addr(dynamic_only=False) |
4906 | 471 | 618 | ||
4907 | 619 | if config('vip'): | ||
4908 | 620 | vips = config('vip').split() | ||
4909 | 621 | for vip in vips: | ||
4910 | 622 | if vip and is_ipv6(vip): | ||
4911 | 623 | hosts.append(vip) | ||
4912 | 624 | |||
4913 | 472 | kwargs = {'database': database, | 625 | kwargs = {'database': database, |
4914 | 473 | 'username': database_user, | 626 | 'username': database_user, |
4915 | 474 | 'hostname': json.dumps(hosts)} | 627 | 'hostname': json.dumps(hosts)} |
4916 | @@ -517,7 +670,7 @@ | |||
4917 | 517 | return yaml.load(projects_yaml) | 670 | return yaml.load(projects_yaml) |
4918 | 518 | 671 | ||
4919 | 519 | 672 | ||
4921 | 520 | def git_clone_and_install(projects_yaml, core_project, depth=1): | 673 | def git_clone_and_install(projects_yaml, core_project): |
4922 | 521 | """ | 674 | """ |
4923 | 522 | Clone/install all specified OpenStack repositories. | 675 | Clone/install all specified OpenStack repositories. |
4924 | 523 | 676 | ||
4925 | @@ -567,6 +720,9 @@ | |||
4926 | 567 | for p in projects['repositories']: | 720 | for p in projects['repositories']: |
4927 | 568 | repo = p['repository'] | 721 | repo = p['repository'] |
4928 | 569 | branch = p['branch'] | 722 | branch = p['branch'] |
4929 | 723 | depth = '1' | ||
4930 | 724 | if 'depth' in p.keys(): | ||
4931 | 725 | depth = p['depth'] | ||
4932 | 570 | if p['name'] == 'requirements': | 726 | if p['name'] == 'requirements': |
4933 | 571 | repo_dir = _git_clone_and_install_single(repo, branch, depth, | 727 | repo_dir = _git_clone_and_install_single(repo, branch, depth, |
4934 | 572 | parent_dir, http_proxy, | 728 | parent_dir, http_proxy, |
4935 | @@ -611,19 +767,14 @@ | |||
4936 | 611 | """ | 767 | """ |
4937 | 612 | Clone and install a single git repository. | 768 | Clone and install a single git repository. |
4938 | 613 | """ | 769 | """ |
4939 | 614 | dest_dir = os.path.join(parent_dir, os.path.basename(repo)) | ||
4940 | 615 | |||
4941 | 616 | if not os.path.exists(parent_dir): | 770 | if not os.path.exists(parent_dir): |
4942 | 617 | juju_log('Directory already exists at {}. ' | 771 | juju_log('Directory already exists at {}. ' |
4943 | 618 | 'No need to create directory.'.format(parent_dir)) | 772 | 'No need to create directory.'.format(parent_dir)) |
4944 | 619 | os.mkdir(parent_dir) | 773 | os.mkdir(parent_dir) |
4945 | 620 | 774 | ||
4952 | 621 | if not os.path.exists(dest_dir): | 775 | juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) |
4953 | 622 | juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) | 776 | repo_dir = install_remote( |
4954 | 623 | repo_dir = install_remote(repo, dest=parent_dir, branch=branch, | 777 | repo, dest=parent_dir, branch=branch, depth=depth) |
4949 | 624 | depth=depth) | ||
4950 | 625 | else: | ||
4951 | 626 | repo_dir = dest_dir | ||
4955 | 627 | 778 | ||
4956 | 628 | venv = os.path.join(parent_dir, 'venv') | 779 | venv = os.path.join(parent_dir, 'venv') |
4957 | 629 | 780 | ||
4958 | @@ -704,3 +855,721 @@ | |||
4959 | 704 | return projects[key] | 855 | return projects[key] |
4960 | 705 | 856 | ||
4961 | 706 | return None | 857 | return None |
4962 | 858 | |||
4963 | 859 | |||
4964 | 860 | def os_workload_status(configs, required_interfaces, charm_func=None): | ||
4965 | 861 | """ | ||
4966 | 862 | Decorator to set workload status based on complete contexts | ||
4967 | 863 | """ | ||
4968 | 864 | def wrap(f): | ||
4969 | 865 | @wraps(f) | ||
4970 | 866 | def wrapped_f(*args, **kwargs): | ||
4971 | 867 | # Run the original function first | ||
4972 | 868 | f(*args, **kwargs) | ||
4973 | 869 | # Set workload status now that contexts have been | ||
4974 | 870 | # acted on | ||
4975 | 871 | set_os_workload_status(configs, required_interfaces, charm_func) | ||
4976 | 872 | return wrapped_f | ||
4977 | 873 | return wrap | ||
4978 | 874 | |||
4979 | 875 | |||
4980 | 876 | def set_os_workload_status(configs, required_interfaces, charm_func=None, | ||
4981 | 877 | services=None, ports=None): | ||
4982 | 878 | """Set the state of the workload status for the charm. | ||
4983 | 879 | |||
4984 | 880 | This calls _determine_os_workload_status() to get the new state, message | ||
4985 | 881 | and sets the status using status_set() | ||
4986 | 882 | |||
4987 | 883 | @param configs: a templating.OSConfigRenderer() object | ||
4988 | 884 | @param required_interfaces: {generic: [specific, specific2, ...]} | ||
4989 | 885 | @param charm_func: a callable function that returns state, message. The | ||
4990 | 886 | signature is charm_func(configs) -> (state, message) | ||
4991 | 887 | @param services: list of strings OR dictionary specifying services/ports | ||
4992 | 888 | @param ports: OPTIONAL list of port numbers. | ||
4993 | 889 | @returns state, message: the new workload status, user message | ||
4994 | 890 | """ | ||
4995 | 891 | state, message = _determine_os_workload_status( | ||
4996 | 892 | configs, required_interfaces, charm_func, services, ports) | ||
4997 | 893 | status_set(state, message) | ||
4998 | 894 | |||
4999 | 895 | |||
5000 | 896 | def _determine_os_workload_status( |
This item has failed automated testing! Results available here http:// juju-ci. vapour. ws:8080/ job/charm- bundle- test-lxc/ 2222/