Merge lp:~junaidali/charms/trusty/plumgrid-director/pg-restart into lp:~junaidali/charms/trusty/plumgrid-director/trunk
- Trusty Tahr (14.04)
- pg-restart
- Merge into trunk
Proposed by
Junaid Ali
Status: Merged
Merged at revision: 33
Proposed branch: lp:~junaidali/charms/trusty/plumgrid-director/pg-restart
Merge into: lp:~junaidali/charms/trusty/plumgrid-director/trunk
Diff against target:
9831 lines (+4429/-3430) 53 files modified
Makefile (+1/-1) bin/charm_helpers_sync.py (+253/-0) charm-helpers-sync.yaml (+6/-1) config.yaml (+8/-0) hooks/charmhelpers/contrib/amulet/deployment.py (+4/-2) hooks/charmhelpers/contrib/amulet/utils.py (+382/-86) hooks/charmhelpers/contrib/ansible/__init__.py (+0/-254) hooks/charmhelpers/contrib/benchmark/__init__.py (+0/-126) hooks/charmhelpers/contrib/charmhelpers/__init__.py (+0/-208) hooks/charmhelpers/contrib/charmsupport/__init__.py (+0/-15) hooks/charmhelpers/contrib/charmsupport/nrpe.py (+0/-360) hooks/charmhelpers/contrib/charmsupport/volumes.py (+0/-175) hooks/charmhelpers/contrib/database/mysql.py (+0/-412) hooks/charmhelpers/contrib/network/ip.py (+55/-23) hooks/charmhelpers/contrib/network/ovs/__init__.py (+6/-2) hooks/charmhelpers/contrib/network/ufw.py (+5/-6) hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+135/-14) hooks/charmhelpers/contrib/openstack/amulet/utils.py (+421/-13) hooks/charmhelpers/contrib/openstack/context.py (+318/-79) hooks/charmhelpers/contrib/openstack/ip.py (+35/-7) hooks/charmhelpers/contrib/openstack/neutron.py (+62/-21) hooks/charmhelpers/contrib/openstack/templating.py (+30/-2) hooks/charmhelpers/contrib/openstack/utils.py (+939/-70) hooks/charmhelpers/contrib/peerstorage/__init__.py (+0/-268) hooks/charmhelpers/contrib/python/packages.py (+35/-11) hooks/charmhelpers/contrib/saltstack/__init__.py (+0/-118) hooks/charmhelpers/contrib/ssl/__init__.py (+0/-94) hooks/charmhelpers/contrib/ssl/service.py (+0/-279) hooks/charmhelpers/contrib/storage/linux/ceph.py (+823/-61) hooks/charmhelpers/contrib/storage/linux/loopback.py (+10/-0) hooks/charmhelpers/contrib/storage/linux/utils.py (+8/-7) hooks/charmhelpers/contrib/templating/__init__.py (+0/-15) hooks/charmhelpers/contrib/templating/contexts.py (+0/-139) hooks/charmhelpers/contrib/templating/jinja.py (+0/-39) hooks/charmhelpers/contrib/templating/pyformat.py (+0/-29) hooks/charmhelpers/contrib/unison/__init__.py (+0/-313) hooks/charmhelpers/core/hookenv.py 
(+220/-13) hooks/charmhelpers/core/host.py (+298/-75) hooks/charmhelpers/core/hugepage.py (+71/-0) hooks/charmhelpers/core/kernel.py (+68/-0) hooks/charmhelpers/core/services/helpers.py (+30/-5) hooks/charmhelpers/core/strutils.py (+30/-0) hooks/charmhelpers/core/templating.py (+21/-8) hooks/charmhelpers/core/unitdata.py (+61/-17) hooks/charmhelpers/fetch/__init__.py (+18/-2) hooks/charmhelpers/fetch/archiveurl.py (+1/-1) hooks/charmhelpers/fetch/bzrurl.py (+22/-32) hooks/charmhelpers/fetch/giturl.py (+20/-23) hooks/pg_dir_hooks.py (+21/-0) hooks/pg_dir_utils.py (+3/-2) metadata.yaml (+2/-0) templates/kilo/nginx.conf (+5/-1) unit_tests/test_pg_dir_hooks.py (+2/-1) |
To merge this branch: bzr merge lp:~junaidali/charms/trusty/plumgrid-director/pg-restart
Related bugs:

| Reviewer | Review Type | Date Requested | Status |
|---|---|---|---|
| Junaid Ali | Pending | | |

Review via email: mp+295139@code.launchpad.net
Commit message
Description of the change
To post a comment you must log in.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'Makefile' | |||
2 | --- Makefile 2016-03-03 20:56:40 +0000 | |||
3 | +++ Makefile 2016-05-19 03:33:34 +0000 | |||
4 | @@ -4,7 +4,7 @@ | |||
5 | 4 | virtualenv: | 4 | virtualenv: |
6 | 5 | virtualenv .venv | 5 | virtualenv .venv |
7 | 6 | .venv/bin/pip install flake8 nose coverage mock pyyaml netifaces \ | 6 | .venv/bin/pip install flake8 nose coverage mock pyyaml netifaces \ |
9 | 7 | netaddr jinja2 | 7 | netaddr jinja2 pyflakes pep8 six pbr funcsigs psutil |
10 | 8 | 8 | ||
11 | 9 | lint: virtualenv | 9 | lint: virtualenv |
12 | 10 | .venv/bin/flake8 --exclude hooks/charmhelpers hooks unit_tests tests --ignore E402 | 10 | .venv/bin/flake8 --exclude hooks/charmhelpers hooks unit_tests tests --ignore E402 |
13 | 11 | 11 | ||
14 | === added directory 'bin' | |||
15 | === added file 'bin/charm_helpers_sync.py' | |||
16 | --- bin/charm_helpers_sync.py 1970-01-01 00:00:00 +0000 | |||
17 | +++ bin/charm_helpers_sync.py 2016-05-19 03:33:34 +0000 | |||
18 | @@ -0,0 +1,253 @@ | |||
19 | 1 | #!/usr/bin/python | ||
20 | 2 | |||
21 | 3 | # Copyright 2014-2015 Canonical Limited. | ||
22 | 4 | # | ||
23 | 5 | # This file is part of charm-helpers. | ||
24 | 6 | # | ||
25 | 7 | # charm-helpers is free software: you can redistribute it and/or modify | ||
26 | 8 | # it under the terms of the GNU Lesser General Public License version 3 as | ||
27 | 9 | # published by the Free Software Foundation. | ||
28 | 10 | # | ||
29 | 11 | # charm-helpers is distributed in the hope that it will be useful, | ||
30 | 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
31 | 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
32 | 14 | # GNU Lesser General Public License for more details. | ||
33 | 15 | # | ||
34 | 16 | # You should have received a copy of the GNU Lesser General Public License | ||
35 | 17 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | ||
36 | 18 | |||
37 | 19 | # Authors: | ||
38 | 20 | # Adam Gandelman <adamg@ubuntu.com> | ||
39 | 21 | |||
40 | 22 | import logging | ||
41 | 23 | import optparse | ||
42 | 24 | import os | ||
43 | 25 | import subprocess | ||
44 | 26 | import shutil | ||
45 | 27 | import sys | ||
46 | 28 | import tempfile | ||
47 | 29 | import yaml | ||
48 | 30 | from fnmatch import fnmatch | ||
49 | 31 | |||
50 | 32 | import six | ||
51 | 33 | |||
52 | 34 | CHARM_HELPERS_BRANCH = 'lp:charm-helpers' | ||
53 | 35 | |||
54 | 36 | |||
55 | 37 | def parse_config(conf_file): | ||
56 | 38 | if not os.path.isfile(conf_file): | ||
57 | 39 | logging.error('Invalid config file: %s.' % conf_file) | ||
58 | 40 | return False | ||
59 | 41 | return yaml.load(open(conf_file).read()) | ||
60 | 42 | |||
61 | 43 | |||
62 | 44 | def clone_helpers(work_dir, branch): | ||
63 | 45 | dest = os.path.join(work_dir, 'charm-helpers') | ||
64 | 46 | logging.info('Checking out %s to %s.' % (branch, dest)) | ||
65 | 47 | cmd = ['bzr', 'checkout', '--lightweight', branch, dest] | ||
66 | 48 | subprocess.check_call(cmd) | ||
67 | 49 | return dest | ||
68 | 50 | |||
69 | 51 | |||
70 | 52 | def _module_path(module): | ||
71 | 53 | return os.path.join(*module.split('.')) | ||
72 | 54 | |||
73 | 55 | |||
74 | 56 | def _src_path(src, module): | ||
75 | 57 | return os.path.join(src, 'charmhelpers', _module_path(module)) | ||
76 | 58 | |||
77 | 59 | |||
78 | 60 | def _dest_path(dest, module): | ||
79 | 61 | return os.path.join(dest, _module_path(module)) | ||
80 | 62 | |||
81 | 63 | |||
82 | 64 | def _is_pyfile(path): | ||
83 | 65 | return os.path.isfile(path + '.py') | ||
84 | 66 | |||
85 | 67 | |||
86 | 68 | def ensure_init(path): | ||
87 | 69 | ''' | ||
88 | 70 | ensure directories leading up to path are importable, omitting | ||
89 | 71 | parent directory, eg path='/hooks/helpers/foo'/: | ||
90 | 72 | hooks/ | ||
91 | 73 | hooks/helpers/__init__.py | ||
92 | 74 | hooks/helpers/foo/__init__.py | ||
93 | 75 | ''' | ||
94 | 76 | for d, dirs, files in os.walk(os.path.join(*path.split('/')[:2])): | ||
95 | 77 | _i = os.path.join(d, '__init__.py') | ||
96 | 78 | if not os.path.exists(_i): | ||
97 | 79 | logging.info('Adding missing __init__.py: %s' % _i) | ||
98 | 80 | open(_i, 'wb').close() | ||
99 | 81 | |||
100 | 82 | |||
101 | 83 | def sync_pyfile(src, dest): | ||
102 | 84 | src = src + '.py' | ||
103 | 85 | src_dir = os.path.dirname(src) | ||
104 | 86 | logging.info('Syncing pyfile: %s -> %s.' % (src, dest)) | ||
105 | 87 | if not os.path.exists(dest): | ||
106 | 88 | os.makedirs(dest) | ||
107 | 89 | shutil.copy(src, dest) | ||
108 | 90 | if os.path.isfile(os.path.join(src_dir, '__init__.py')): | ||
109 | 91 | shutil.copy(os.path.join(src_dir, '__init__.py'), | ||
110 | 92 | dest) | ||
111 | 93 | ensure_init(dest) | ||
112 | 94 | |||
113 | 95 | |||
114 | 96 | def get_filter(opts=None): | ||
115 | 97 | opts = opts or [] | ||
116 | 98 | if 'inc=*' in opts: | ||
117 | 99 | # do not filter any files, include everything | ||
118 | 100 | return None | ||
119 | 101 | |||
120 | 102 | def _filter(dir, ls): | ||
121 | 103 | incs = [opt.split('=').pop() for opt in opts if 'inc=' in opt] | ||
122 | 104 | _filter = [] | ||
123 | 105 | for f in ls: | ||
124 | 106 | _f = os.path.join(dir, f) | ||
125 | 107 | |||
126 | 108 | if not os.path.isdir(_f) and not _f.endswith('.py') and incs: | ||
127 | 109 | if True not in [fnmatch(_f, inc) for inc in incs]: | ||
128 | 110 | logging.debug('Not syncing %s, does not match include ' | ||
129 | 111 | 'filters (%s)' % (_f, incs)) | ||
130 | 112 | _filter.append(f) | ||
131 | 113 | else: | ||
132 | 114 | logging.debug('Including file, which matches include ' | ||
133 | 115 | 'filters (%s): %s' % (incs, _f)) | ||
134 | 116 | elif (os.path.isfile(_f) and not _f.endswith('.py')): | ||
135 | 117 | logging.debug('Not syncing file: %s' % f) | ||
136 | 118 | _filter.append(f) | ||
137 | 119 | elif (os.path.isdir(_f) and not | ||
138 | 120 | os.path.isfile(os.path.join(_f, '__init__.py'))): | ||
139 | 121 | logging.debug('Not syncing directory: %s' % f) | ||
140 | 122 | _filter.append(f) | ||
141 | 123 | return _filter | ||
142 | 124 | return _filter | ||
143 | 125 | |||
144 | 126 | |||
145 | 127 | def sync_directory(src, dest, opts=None): | ||
146 | 128 | if os.path.exists(dest): | ||
147 | 129 | logging.debug('Removing existing directory: %s' % dest) | ||
148 | 130 | shutil.rmtree(dest) | ||
149 | 131 | logging.info('Syncing directory: %s -> %s.' % (src, dest)) | ||
150 | 132 | |||
151 | 133 | shutil.copytree(src, dest, ignore=get_filter(opts)) | ||
152 | 134 | ensure_init(dest) | ||
153 | 135 | |||
154 | 136 | |||
155 | 137 | def sync(src, dest, module, opts=None): | ||
156 | 138 | |||
157 | 139 | # Sync charmhelpers/__init__.py for bootstrap code. | ||
158 | 140 | sync_pyfile(_src_path(src, '__init__'), dest) | ||
159 | 141 | |||
160 | 142 | # Sync other __init__.py files in the path leading to module. | ||
161 | 143 | m = [] | ||
162 | 144 | steps = module.split('.')[:-1] | ||
163 | 145 | while steps: | ||
164 | 146 | m.append(steps.pop(0)) | ||
165 | 147 | init = '.'.join(m + ['__init__']) | ||
166 | 148 | sync_pyfile(_src_path(src, init), | ||
167 | 149 | os.path.dirname(_dest_path(dest, init))) | ||
168 | 150 | |||
169 | 151 | # Sync the module, or maybe a .py file. | ||
170 | 152 | if os.path.isdir(_src_path(src, module)): | ||
171 | 153 | sync_directory(_src_path(src, module), _dest_path(dest, module), opts) | ||
172 | 154 | elif _is_pyfile(_src_path(src, module)): | ||
173 | 155 | sync_pyfile(_src_path(src, module), | ||
174 | 156 | os.path.dirname(_dest_path(dest, module))) | ||
175 | 157 | else: | ||
176 | 158 | logging.warn('Could not sync: %s. Neither a pyfile or directory, ' | ||
177 | 159 | 'does it even exist?' % module) | ||
178 | 160 | |||
179 | 161 | |||
180 | 162 | def parse_sync_options(options): | ||
181 | 163 | if not options: | ||
182 | 164 | return [] | ||
183 | 165 | return options.split(',') | ||
184 | 166 | |||
185 | 167 | |||
186 | 168 | def extract_options(inc, global_options=None): | ||
187 | 169 | global_options = global_options or [] | ||
188 | 170 | if global_options and isinstance(global_options, six.string_types): | ||
189 | 171 | global_options = [global_options] | ||
190 | 172 | if '|' not in inc: | ||
191 | 173 | return (inc, global_options) | ||
192 | 174 | inc, opts = inc.split('|') | ||
193 | 175 | return (inc, parse_sync_options(opts) + global_options) | ||
194 | 176 | |||
195 | 177 | |||
196 | 178 | def sync_helpers(include, src, dest, options=None): | ||
197 | 179 | if not os.path.isdir(dest): | ||
198 | 180 | os.makedirs(dest) | ||
199 | 181 | |||
200 | 182 | global_options = parse_sync_options(options) | ||
201 | 183 | |||
202 | 184 | for inc in include: | ||
203 | 185 | if isinstance(inc, str): | ||
204 | 186 | inc, opts = extract_options(inc, global_options) | ||
205 | 187 | sync(src, dest, inc, opts) | ||
206 | 188 | elif isinstance(inc, dict): | ||
207 | 189 | # could also do nested dicts here. | ||
208 | 190 | for k, v in six.iteritems(inc): | ||
209 | 191 | if isinstance(v, list): | ||
210 | 192 | for m in v: | ||
211 | 193 | inc, opts = extract_options(m, global_options) | ||
212 | 194 | sync(src, dest, '%s.%s' % (k, inc), opts) | ||
213 | 195 | |||
214 | 196 | if __name__ == '__main__': | ||
215 | 197 | parser = optparse.OptionParser() | ||
216 | 198 | parser.add_option('-c', '--config', action='store', dest='config', | ||
217 | 199 | default=None, help='helper config file') | ||
218 | 200 | parser.add_option('-D', '--debug', action='store_true', dest='debug', | ||
219 | 201 | default=False, help='debug') | ||
220 | 202 | parser.add_option('-b', '--branch', action='store', dest='branch', | ||
221 | 203 | help='charm-helpers bzr branch (overrides config)') | ||
222 | 204 | parser.add_option('-d', '--destination', action='store', dest='dest_dir', | ||
223 | 205 | help='sync destination dir (overrides config)') | ||
224 | 206 | (opts, args) = parser.parse_args() | ||
225 | 207 | |||
226 | 208 | if opts.debug: | ||
227 | 209 | logging.basicConfig(level=logging.DEBUG) | ||
228 | 210 | else: | ||
229 | 211 | logging.basicConfig(level=logging.INFO) | ||
230 | 212 | |||
231 | 213 | if opts.config: | ||
232 | 214 | logging.info('Loading charm helper config from %s.' % opts.config) | ||
233 | 215 | config = parse_config(opts.config) | ||
234 | 216 | if not config: | ||
235 | 217 | logging.error('Could not parse config from %s.' % opts.config) | ||
236 | 218 | sys.exit(1) | ||
237 | 219 | else: | ||
238 | 220 | config = {} | ||
239 | 221 | |||
240 | 222 | if 'branch' not in config: | ||
241 | 223 | config['branch'] = CHARM_HELPERS_BRANCH | ||
242 | 224 | if opts.branch: | ||
243 | 225 | config['branch'] = opts.branch | ||
244 | 226 | if opts.dest_dir: | ||
245 | 227 | config['destination'] = opts.dest_dir | ||
246 | 228 | |||
247 | 229 | if 'destination' not in config: | ||
248 | 230 | logging.error('No destination dir. specified as option or config.') | ||
249 | 231 | sys.exit(1) | ||
250 | 232 | |||
251 | 233 | if 'include' not in config: | ||
252 | 234 | if not args: | ||
253 | 235 | logging.error('No modules to sync specified as option or config.') | ||
254 | 236 | sys.exit(1) | ||
255 | 237 | config['include'] = [] | ||
256 | 238 | [config['include'].append(a) for a in args] | ||
257 | 239 | |||
258 | 240 | sync_options = None | ||
259 | 241 | if 'options' in config: | ||
260 | 242 | sync_options = config['options'] | ||
261 | 243 | tmpd = tempfile.mkdtemp() | ||
262 | 244 | try: | ||
263 | 245 | checkout = clone_helpers(tmpd, config['branch']) | ||
264 | 246 | sync_helpers(config['include'], checkout, config['destination'], | ||
265 | 247 | options=sync_options) | ||
266 | 248 | except Exception as e: | ||
267 | 249 | logging.error("Could not sync: %s" % e) | ||
268 | 250 | raise e | ||
269 | 251 | finally: | ||
270 | 252 | logging.debug('Cleaning up %s' % tmpd) | ||
271 | 253 | shutil.rmtree(tmpd) | ||
272 | 0 | 254 | ||
273 | === modified file 'charm-helpers-sync.yaml' | |||
274 | --- charm-helpers-sync.yaml 2015-07-29 18:07:31 +0000 | |||
275 | +++ charm-helpers-sync.yaml 2016-05-19 03:33:34 +0000 | |||
276 | @@ -3,5 +3,10 @@ | |||
277 | 3 | include: | 3 | include: |
278 | 4 | - core | 4 | - core |
279 | 5 | - fetch | 5 | - fetch |
281 | 6 | - contrib | 6 | - contrib.amulet |
282 | 7 | - contrib.hahelpers | ||
283 | 8 | - contrib.network | ||
284 | 9 | - contrib.openstack | ||
285 | 10 | - contrib.python | ||
286 | 11 | - contrib.storage | ||
287 | 7 | - payload | 12 | - payload |
288 | 8 | 13 | ||
289 | === modified file 'config.yaml' | |||
290 | --- config.yaml 2016-03-24 12:33:25 +0000 | |||
291 | +++ config.yaml 2016-05-19 03:33:34 +0000 | |||
292 | @@ -3,6 +3,14 @@ | |||
293 | 3 | default: 192.168.100.250 | 3 | default: 192.168.100.250 |
294 | 4 | type: string | 4 | type: string |
295 | 5 | description: IP address of the Director's Management interface. Same IP can be used to access PG Console. | 5 | description: IP address of the Director's Management interface. Same IP can be used to access PG Console. |
296 | 6 | plumgrid-username: | ||
297 | 7 | default: plumgrid | ||
298 | 8 | type: string | ||
299 | 9 | description: Username to access PLUMgrid Director | ||
300 | 10 | plumgrid-password: | ||
301 | 11 | default: plumgrid | ||
302 | 12 | type: string | ||
303 | 13 | description: Password to access PLUMgrid Director | ||
304 | 6 | lcm-ssh-key: | 14 | lcm-ssh-key: |
305 | 7 | default: 'null' | 15 | default: 'null' |
306 | 8 | type: string | 16 | type: string |
307 | 9 | 17 | ||
308 | === modified file 'hooks/charmhelpers/contrib/amulet/deployment.py' | |||
309 | --- hooks/charmhelpers/contrib/amulet/deployment.py 2015-07-29 18:07:31 +0000 | |||
310 | +++ hooks/charmhelpers/contrib/amulet/deployment.py 2016-05-19 03:33:34 +0000 | |||
311 | @@ -51,7 +51,8 @@ | |||
312 | 51 | if 'units' not in this_service: | 51 | if 'units' not in this_service: |
313 | 52 | this_service['units'] = 1 | 52 | this_service['units'] = 1 |
314 | 53 | 53 | ||
316 | 54 | self.d.add(this_service['name'], units=this_service['units']) | 54 | self.d.add(this_service['name'], units=this_service['units'], |
317 | 55 | constraints=this_service.get('constraints')) | ||
318 | 55 | 56 | ||
319 | 56 | for svc in other_services: | 57 | for svc in other_services: |
320 | 57 | if 'location' in svc: | 58 | if 'location' in svc: |
321 | @@ -64,7 +65,8 @@ | |||
322 | 64 | if 'units' not in svc: | 65 | if 'units' not in svc: |
323 | 65 | svc['units'] = 1 | 66 | svc['units'] = 1 |
324 | 66 | 67 | ||
326 | 67 | self.d.add(svc['name'], charm=branch_location, units=svc['units']) | 68 | self.d.add(svc['name'], charm=branch_location, units=svc['units'], |
327 | 69 | constraints=svc.get('constraints')) | ||
328 | 68 | 70 | ||
329 | 69 | def _add_relations(self, relations): | 71 | def _add_relations(self, relations): |
330 | 70 | """Add all of the relations for the services.""" | 72 | """Add all of the relations for the services.""" |
331 | 71 | 73 | ||
332 | === modified file 'hooks/charmhelpers/contrib/amulet/utils.py' | |||
333 | --- hooks/charmhelpers/contrib/amulet/utils.py 2015-07-29 18:07:31 +0000 | |||
334 | +++ hooks/charmhelpers/contrib/amulet/utils.py 2016-05-19 03:33:34 +0000 | |||
335 | @@ -14,17 +14,25 @@ | |||
336 | 14 | # You should have received a copy of the GNU Lesser General Public License | 14 | # You should have received a copy of the GNU Lesser General Public License |
337 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
338 | 16 | 16 | ||
339 | 17 | import amulet | ||
340 | 18 | import ConfigParser | ||
341 | 19 | import distro_info | ||
342 | 20 | import io | 17 | import io |
343 | 18 | import json | ||
344 | 21 | import logging | 19 | import logging |
345 | 22 | import os | 20 | import os |
346 | 23 | import re | 21 | import re |
348 | 24 | import six | 22 | import socket |
349 | 23 | import subprocess | ||
350 | 25 | import sys | 24 | import sys |
351 | 26 | import time | 25 | import time |
353 | 27 | import urlparse | 26 | import uuid |
354 | 27 | |||
355 | 28 | import amulet | ||
356 | 29 | import distro_info | ||
357 | 30 | import six | ||
358 | 31 | from six.moves import configparser | ||
359 | 32 | if six.PY3: | ||
360 | 33 | from urllib import parse as urlparse | ||
361 | 34 | else: | ||
362 | 35 | import urlparse | ||
363 | 28 | 36 | ||
364 | 29 | 37 | ||
365 | 30 | class AmuletUtils(object): | 38 | class AmuletUtils(object): |
366 | @@ -108,7 +116,7 @@ | |||
367 | 108 | # /!\ DEPRECATION WARNING (beisner): | 116 | # /!\ DEPRECATION WARNING (beisner): |
368 | 109 | # New and existing tests should be rewritten to use | 117 | # New and existing tests should be rewritten to use |
369 | 110 | # validate_services_by_name() as it is aware of init systems. | 118 | # validate_services_by_name() as it is aware of init systems. |
371 | 111 | self.log.warn('/!\\ DEPRECATION WARNING: use ' | 119 | self.log.warn('DEPRECATION WARNING: use ' |
372 | 112 | 'validate_services_by_name instead of validate_services ' | 120 | 'validate_services_by_name instead of validate_services ' |
373 | 113 | 'due to init system differences.') | 121 | 'due to init system differences.') |
374 | 114 | 122 | ||
375 | @@ -142,19 +150,23 @@ | |||
376 | 142 | 150 | ||
377 | 143 | for service_name in services_list: | 151 | for service_name in services_list: |
378 | 144 | if (self.ubuntu_releases.index(release) >= systemd_switch or | 152 | if (self.ubuntu_releases.index(release) >= systemd_switch or |
381 | 145 | service_name == "rabbitmq-server"): | 153 | service_name in ['rabbitmq-server', 'apache2']): |
382 | 146 | # init is systemd | 154 | # init is systemd (or regular sysv) |
383 | 147 | cmd = 'sudo service {} status'.format(service_name) | 155 | cmd = 'sudo service {} status'.format(service_name) |
384 | 156 | output, code = sentry_unit.run(cmd) | ||
385 | 157 | service_running = code == 0 | ||
386 | 148 | elif self.ubuntu_releases.index(release) < systemd_switch: | 158 | elif self.ubuntu_releases.index(release) < systemd_switch: |
387 | 149 | # init is upstart | 159 | # init is upstart |
388 | 150 | cmd = 'sudo status {}'.format(service_name) | 160 | cmd = 'sudo status {}'.format(service_name) |
389 | 161 | output, code = sentry_unit.run(cmd) | ||
390 | 162 | service_running = code == 0 and "start/running" in output | ||
391 | 151 | 163 | ||
392 | 152 | output, code = sentry_unit.run(cmd) | ||
393 | 153 | self.log.debug('{} `{}` returned ' | 164 | self.log.debug('{} `{}` returned ' |
394 | 154 | '{}'.format(sentry_unit.info['unit_name'], | 165 | '{}'.format(sentry_unit.info['unit_name'], |
395 | 155 | cmd, code)) | 166 | cmd, code)) |
398 | 156 | if code != 0: | 167 | if not service_running: |
399 | 157 | return "command `{}` returned {}".format(cmd, str(code)) | 168 | return u"command `{}` returned {} {}".format( |
400 | 169 | cmd, output, str(code)) | ||
401 | 158 | return None | 170 | return None |
402 | 159 | 171 | ||
403 | 160 | def _get_config(self, unit, filename): | 172 | def _get_config(self, unit, filename): |
404 | @@ -164,7 +176,7 @@ | |||
405 | 164 | # NOTE(beisner): by default, ConfigParser does not handle options | 176 | # NOTE(beisner): by default, ConfigParser does not handle options |
406 | 165 | # with no value, such as the flags used in the mysql my.cnf file. | 177 | # with no value, such as the flags used in the mysql my.cnf file. |
407 | 166 | # https://bugs.python.org/issue7005 | 178 | # https://bugs.python.org/issue7005 |
409 | 167 | config = ConfigParser.ConfigParser(allow_no_value=True) | 179 | config = configparser.ConfigParser(allow_no_value=True) |
410 | 168 | config.readfp(io.StringIO(file_contents)) | 180 | config.readfp(io.StringIO(file_contents)) |
411 | 169 | return config | 181 | return config |
412 | 170 | 182 | ||
413 | @@ -259,33 +271,52 @@ | |||
414 | 259 | """Get last modification time of directory.""" | 271 | """Get last modification time of directory.""" |
415 | 260 | return sentry_unit.directory_stat(directory)['mtime'] | 272 | return sentry_unit.directory_stat(directory)['mtime'] |
416 | 261 | 273 | ||
435 | 262 | def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False): | 274 | def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None): |
436 | 263 | """Get process' start time. | 275 | """Get start time of a process based on the last modification time |
437 | 264 | 276 | of the /proc/pid directory. | |
438 | 265 | Determine start time of the process based on the last modification | 277 | |
439 | 266 | time of the /proc/pid directory. If pgrep_full is True, the process | 278 | :sentry_unit: The sentry unit to check for the service on |
440 | 267 | name is matched against the full command line. | 279 | :service: service name to look for in process table |
441 | 268 | """ | 280 | :pgrep_full: [Deprecated] Use full command line search mode with pgrep |
442 | 269 | if pgrep_full: | 281 | :returns: epoch time of service process start |
443 | 270 | cmd = 'pgrep -o -f {}'.format(service) | 282 | :param commands: list of bash commands |
444 | 271 | else: | 283 | :param sentry_units: list of sentry unit pointers |
445 | 272 | cmd = 'pgrep -o {}'.format(service) | 284 | :returns: None if successful; Failure message otherwise |
446 | 273 | cmd = cmd + ' | grep -v pgrep || exit 0' | 285 | """ |
447 | 274 | cmd_out = sentry_unit.run(cmd) | 286 | if pgrep_full is not None: |
448 | 275 | self.log.debug('CMDout: ' + str(cmd_out)) | 287 | # /!\ DEPRECATION WARNING (beisner): |
449 | 276 | if cmd_out[0]: | 288 | # No longer implemented, as pidof is now used instead of pgrep. |
450 | 277 | self.log.debug('Pid for %s %s' % (service, str(cmd_out[0]))) | 289 | # https://bugs.launchpad.net/charm-helpers/+bug/1474030 |
451 | 278 | proc_dir = '/proc/{}'.format(cmd_out[0].strip()) | 290 | self.log.warn('DEPRECATION WARNING: pgrep_full bool is no ' |
452 | 279 | return self._get_dir_mtime(sentry_unit, proc_dir) | 291 | 'longer implemented re: lp 1474030.') |
453 | 292 | |||
454 | 293 | pid_list = self.get_process_id_list(sentry_unit, service) | ||
455 | 294 | pid = pid_list[0] | ||
456 | 295 | proc_dir = '/proc/{}'.format(pid) | ||
457 | 296 | self.log.debug('Pid for {} on {}: {}'.format( | ||
458 | 297 | service, sentry_unit.info['unit_name'], pid)) | ||
459 | 298 | |||
460 | 299 | return self._get_dir_mtime(sentry_unit, proc_dir) | ||
461 | 280 | 300 | ||
462 | 281 | def service_restarted(self, sentry_unit, service, filename, | 301 | def service_restarted(self, sentry_unit, service, filename, |
464 | 282 | pgrep_full=False, sleep_time=20): | 302 | pgrep_full=None, sleep_time=20): |
465 | 283 | """Check if service was restarted. | 303 | """Check if service was restarted. |
466 | 284 | 304 | ||
467 | 285 | Compare a service's start time vs a file's last modification time | 305 | Compare a service's start time vs a file's last modification time |
468 | 286 | (such as a config file for that service) to determine if the service | 306 | (such as a config file for that service) to determine if the service |
469 | 287 | has been restarted. | 307 | has been restarted. |
470 | 288 | """ | 308 | """ |
471 | 309 | # /!\ DEPRECATION WARNING (beisner): | ||
472 | 310 | # This method is prone to races in that no before-time is known. | ||
473 | 311 | # Use validate_service_config_changed instead. | ||
474 | 312 | |||
475 | 313 | # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now | ||
476 | 314 | # used instead of pgrep. pgrep_full is still passed through to ensure | ||
477 | 315 | # deprecation WARNS. lp1474030 | ||
478 | 316 | self.log.warn('DEPRECATION WARNING: use ' | ||
479 | 317 | 'validate_service_config_changed instead of ' | ||
480 | 318 | 'service_restarted due to known races.') | ||
481 | 319 | |||
482 | 289 | time.sleep(sleep_time) | 320 | time.sleep(sleep_time) |
483 | 290 | if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= | 321 | if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= |
484 | 291 | self._get_file_mtime(sentry_unit, filename)): | 322 | self._get_file_mtime(sentry_unit, filename)): |
485 | @@ -294,78 +325,122 @@ | |||
486 | 294 | return False | 325 | return False |
487 | 295 | 326 | ||
488 | 296 | def service_restarted_since(self, sentry_unit, mtime, service, | 327 | def service_restarted_since(self, sentry_unit, mtime, service, |
491 | 297 | pgrep_full=False, sleep_time=20, | 328 | pgrep_full=None, sleep_time=20, |
492 | 298 | retry_count=2): | 329 | retry_count=30, retry_sleep_time=10): |
493 | 299 | """Check if service was been started after a given time. | 330 | """Check if service was been started after a given time. |
494 | 300 | 331 | ||
495 | 301 | Args: | 332 | Args: |
496 | 302 | sentry_unit (sentry): The sentry unit to check for the service on | 333 | sentry_unit (sentry): The sentry unit to check for the service on |
497 | 303 | mtime (float): The epoch time to check against | 334 | mtime (float): The epoch time to check against |
498 | 304 | service (string): service name to look for in process table | 335 | service (string): service name to look for in process table |
502 | 305 | pgrep_full (boolean): Use full command line search mode with pgrep | 336 | pgrep_full: [Deprecated] Use full command line search mode with pgrep |
503 | 306 | sleep_time (int): Seconds to sleep before looking for process | 337 | sleep_time (int): Initial sleep time (s) before looking for file |
504 | 307 | retry_count (int): If service is not found, how many times to retry | 338 | retry_sleep_time (int): Time (s) to sleep between retries |
505 | 339 | retry_count (int): If file is not found, how many times to retry | ||
506 | 308 | 340 | ||
507 | 309 | Returns: | 341 | Returns: |
508 | 310 | bool: True if service found and its start time it newer than mtime, | 342 | bool: True if service found and its start time it newer than mtime, |
509 | 311 | False if service is older than mtime or if service was | 343 | False if service is older than mtime or if service was |
510 | 312 | not found. | 344 | not found. |
511 | 313 | """ | 345 | """ |
513 | 314 | self.log.debug('Checking %s restarted since %s' % (service, mtime)) | 346 | # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now |
514 | 347 | # used instead of pgrep. pgrep_full is still passed through to ensure | ||
515 | 348 | # deprecation WARNS. lp1474030 | ||
516 | 349 | |||
517 | 350 | unit_name = sentry_unit.info['unit_name'] | ||
518 | 351 | self.log.debug('Checking that %s service restarted since %s on ' | ||
519 | 352 | '%s' % (service, mtime, unit_name)) | ||
520 | 315 | time.sleep(sleep_time) | 353 | time.sleep(sleep_time) |
530 | 316 | proc_start_time = self._get_proc_start_time(sentry_unit, service, | 354 | proc_start_time = None |
531 | 317 | pgrep_full) | 355 | tries = 0 |
532 | 318 | while retry_count > 0 and not proc_start_time: | 356 | while tries <= retry_count and not proc_start_time: |
533 | 319 | self.log.debug('No pid file found for service %s, will retry %i ' | 357 | try: |
534 | 320 | 'more times' % (service, retry_count)) | 358 | proc_start_time = self._get_proc_start_time(sentry_unit, |
535 | 321 | time.sleep(30) | 359 | service, |
536 | 322 | proc_start_time = self._get_proc_start_time(sentry_unit, service, | 360 | pgrep_full) |
537 | 323 | pgrep_full) | 361 | self.log.debug('Attempt {} to get {} proc start time on {} ' |
538 | 324 | retry_count = retry_count - 1 | 362 | 'OK'.format(tries, service, unit_name)) |
539 | 363 | except IOError as e: | ||
540 | 364 | # NOTE(beisner) - race avoidance, proc may not exist yet. | ||
541 | 365 | # https://bugs.launchpad.net/charm-helpers/+bug/1474030 | ||
542 | 366 | self.log.debug('Attempt {} to get {} proc start time on {} ' | ||
543 | 367 | 'failed\n{}'.format(tries, service, | ||
544 | 368 | unit_name, e)) | ||
545 | 369 | time.sleep(retry_sleep_time) | ||
546 | 370 | tries += 1 | ||
547 | 325 | 371 | ||
548 | 326 | if not proc_start_time: | 372 | if not proc_start_time: |
549 | 327 | self.log.warn('No proc start time found, assuming service did ' | 373 | self.log.warn('No proc start time found, assuming service did ' |
550 | 328 | 'not start') | 374 | 'not start') |
551 | 329 | return False | 375 | return False |
552 | 330 | if proc_start_time >= mtime: | 376 | if proc_start_time >= mtime: |
555 | 331 | self.log.debug('proc start time is newer than provided mtime' | 377 | self.log.debug('Proc start time is newer than provided mtime' |
556 | 332 | '(%s >= %s)' % (proc_start_time, mtime)) | 378 | '(%s >= %s) on %s (OK)' % (proc_start_time, |
557 | 379 | mtime, unit_name)) | ||
558 | 333 | return True | 380 | return True |
559 | 334 | else: | 381 | else: |
563 | 335 | self.log.warn('proc start time (%s) is older than provided mtime ' | 382 | self.log.warn('Proc start time (%s) is older than provided mtime ' |
564 | 336 | '(%s), service did not restart' % (proc_start_time, | 383 | '(%s) on %s, service did not ' |
565 | 337 | mtime)) | 384 | 'restart' % (proc_start_time, mtime, unit_name)) |
566 | 338 | return False | 385 | return False |
567 | 339 | 386 | ||
568 | 340 | def config_updated_since(self, sentry_unit, filename, mtime, | 387 | def config_updated_since(self, sentry_unit, filename, mtime, |
570 | 341 | sleep_time=20): | 388 | sleep_time=20, retry_count=30, |
571 | 389 | retry_sleep_time=10): | ||
572 | 342 | """Check if file was modified after a given time. | 390 | """Check if file was modified after a given time. |
573 | 343 | 391 | ||
574 | 344 | Args: | 392 | Args: |
575 | 345 | sentry_unit (sentry): The sentry unit to check the file mtime on | 393 | sentry_unit (sentry): The sentry unit to check the file mtime on |
576 | 346 | filename (string): The file to check mtime of | 394 | filename (string): The file to check mtime of |
577 | 347 | mtime (float): The epoch time to check against | 395 | mtime (float): The epoch time to check against |
579 | 348 | sleep_time (int): Seconds to sleep before looking for process | 396 | sleep_time (int): Initial sleep time (s) before looking for file |
580 | 397 | retry_sleep_time (int): Time (s) to sleep between retries | ||
581 | 398 | retry_count (int): If file is not found, how many times to retry | ||
582 | 349 | 399 | ||
583 | 350 | Returns: | 400 | Returns: |
584 | 351 | bool: True if file was modified more recently than mtime, False if | 401 | bool: True if file was modified more recently than mtime, False if |
586 | 352 | file was modified before mtime, | 402 | file was modified before mtime, or if file not found. |
587 | 353 | """ | 403 | """ |
589 | 354 | self.log.debug('Checking %s updated since %s' % (filename, mtime)) | 404 | unit_name = sentry_unit.info['unit_name'] |
590 | 405 | self.log.debug('Checking that %s updated since %s on ' | ||
591 | 406 | '%s' % (filename, mtime, unit_name)) | ||
592 | 355 | time.sleep(sleep_time) | 407 | time.sleep(sleep_time) |
594 | 356 | file_mtime = self._get_file_mtime(sentry_unit, filename) | 408 | file_mtime = None |
595 | 409 | tries = 0 | ||
596 | 410 | while tries <= retry_count and not file_mtime: | ||
597 | 411 | try: | ||
598 | 412 | file_mtime = self._get_file_mtime(sentry_unit, filename) | ||
599 | 413 | self.log.debug('Attempt {} to get {} file mtime on {} ' | ||
600 | 414 | 'OK'.format(tries, filename, unit_name)) | ||
601 | 415 | except IOError as e: | ||
602 | 416 | # NOTE(beisner) - race avoidance, file may not exist yet. | ||
603 | 417 | # https://bugs.launchpad.net/charm-helpers/+bug/1474030 | ||
604 | 418 | self.log.debug('Attempt {} to get {} file mtime on {} ' | ||
605 | 419 | 'failed\n{}'.format(tries, filename, | ||
606 | 420 | unit_name, e)) | ||
607 | 421 | time.sleep(retry_sleep_time) | ||
608 | 422 | tries += 1 | ||
609 | 423 | |||
610 | 424 | if not file_mtime: | ||
611 | 425 | self.log.warn('Could not determine file mtime, assuming ' | ||
612 | 426 | 'file does not exist') | ||
613 | 427 | return False | ||
614 | 428 | |||
615 | 357 | if file_mtime >= mtime: | 429 | if file_mtime >= mtime: |
616 | 358 | self.log.debug('File mtime is newer than provided mtime ' | 430 | self.log.debug('File mtime is newer than provided mtime ' |
618 | 359 | '(%s >= %s)' % (file_mtime, mtime)) | 431 | '(%s >= %s) on %s (OK)' % (file_mtime, |
619 | 432 | mtime, unit_name)) | ||
620 | 360 | return True | 433 | return True |
621 | 361 | else: | 434 | else: |
624 | 362 | self.log.warn('File mtime %s is older than provided mtime %s' | 435 | self.log.warn('File mtime is older than provided mtime' |
625 | 363 | % (file_mtime, mtime)) | 436 | '(%s < on %s) on %s' % (file_mtime, |
626 | 437 | mtime, unit_name)) | ||
627 | 364 | return False | 438 | return False |
628 | 365 | 439 | ||
629 | 366 | def validate_service_config_changed(self, sentry_unit, mtime, service, | 440 | def validate_service_config_changed(self, sentry_unit, mtime, service, |
632 | 367 | filename, pgrep_full=False, | 441 | filename, pgrep_full=None, |
633 | 368 | sleep_time=20, retry_count=2): | 442 | sleep_time=20, retry_count=30, |
634 | 443 | retry_sleep_time=10): | ||
635 | 369 | """Check service and file were updated after mtime | 444 | """Check service and file were updated after mtime |
636 | 370 | 445 | ||
637 | 371 | Args: | 446 | Args: |
638 | @@ -373,9 +448,10 @@ | |||
639 | 373 | mtime (float): The epoch time to check against | 448 | mtime (float): The epoch time to check against |
640 | 374 | service (string): service name to look for in process table | 449 | service (string): service name to look for in process table |
641 | 375 | filename (string): The file to check mtime of | 450 | filename (string): The file to check mtime of |
644 | 376 | pgrep_full (boolean): Use full command line search mode with pgrep | 451 | pgrep_full: [Deprecated] Use full command line search mode with pgrep |
645 | 377 | sleep_time (int): Seconds to sleep before looking for process | 452 | sleep_time (int): Initial sleep in seconds to pass to test helpers |
646 | 378 | retry_count (int): If service is not found, how many times to retry | 453 | retry_count (int): If service is not found, how many times to retry |
647 | 454 | retry_sleep_time (int): Time in seconds to wait between retries | ||
648 | 379 | 455 | ||
649 | 380 | Typical Usage: | 456 | Typical Usage: |
650 | 381 | u = OpenStackAmuletUtils(ERROR) | 457 | u = OpenStackAmuletUtils(ERROR) |
651 | @@ -392,15 +468,27 @@ | |||
652 | 392 | mtime, False if service is older than mtime or if service was | 468 | mtime, False if service is older than mtime or if service was |
653 | 393 | not found or if filename was modified before mtime. | 469 | not found or if filename was modified before mtime. |
654 | 394 | """ | 470 | """ |
664 | 395 | self.log.debug('Checking %s restarted since %s' % (service, mtime)) | 471 | |
665 | 396 | time.sleep(sleep_time) | 472 | # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now |
666 | 397 | service_restart = self.service_restarted_since(sentry_unit, mtime, | 473 | # used instead of pgrep. pgrep_full is still passed through to ensure |
667 | 398 | service, | 474 | # deprecation WARNS. lp1474030 |
668 | 399 | pgrep_full=pgrep_full, | 475 | |
669 | 400 | sleep_time=0, | 476 | service_restart = self.service_restarted_since( |
670 | 401 | retry_count=retry_count) | 477 | sentry_unit, mtime, |
671 | 402 | config_update = self.config_updated_since(sentry_unit, filename, mtime, | 478 | service, |
672 | 403 | sleep_time=0) | 479 | pgrep_full=pgrep_full, |
673 | 480 | sleep_time=sleep_time, | ||
674 | 481 | retry_count=retry_count, | ||
675 | 482 | retry_sleep_time=retry_sleep_time) | ||
676 | 483 | |||
677 | 484 | config_update = self.config_updated_since( | ||
678 | 485 | sentry_unit, | ||
679 | 486 | filename, | ||
680 | 487 | mtime, | ||
681 | 488 | sleep_time=sleep_time, | ||
682 | 489 | retry_count=retry_count, | ||
683 | 490 | retry_sleep_time=retry_sleep_time) | ||
684 | 491 | |||
685 | 404 | return service_restart and config_update | 492 | return service_restart and config_update |
686 | 405 | 493 | ||
687 | 406 | def get_sentry_time(self, sentry_unit): | 494 | def get_sentry_time(self, sentry_unit): |
688 | @@ -418,7 +506,6 @@ | |||
689 | 418 | """Return a list of all Ubuntu releases in order of release.""" | 506 | """Return a list of all Ubuntu releases in order of release.""" |
690 | 419 | _d = distro_info.UbuntuDistroInfo() | 507 | _d = distro_info.UbuntuDistroInfo() |
691 | 420 | _release_list = _d.all | 508 | _release_list = _d.all |
692 | 421 | self.log.debug('Ubuntu release list: {}'.format(_release_list)) | ||
693 | 422 | return _release_list | 509 | return _release_list |
694 | 423 | 510 | ||
695 | 424 | def file_to_url(self, file_rel_path): | 511 | def file_to_url(self, file_rel_path): |
696 | @@ -450,15 +537,20 @@ | |||
697 | 450 | cmd, code, output)) | 537 | cmd, code, output)) |
698 | 451 | return None | 538 | return None |
699 | 452 | 539 | ||
701 | 453 | def get_process_id_list(self, sentry_unit, process_name): | 540 | def get_process_id_list(self, sentry_unit, process_name, |
702 | 541 | expect_success=True): | ||
703 | 454 | """Get a list of process ID(s) from a single sentry juju unit | 542 | """Get a list of process ID(s) from a single sentry juju unit |
704 | 455 | for a single process name. | 543 | for a single process name. |
705 | 456 | 544 | ||
707 | 457 | :param sentry_unit: Pointer to amulet sentry instance (juju unit) | 545 | :param sentry_unit: Amulet sentry instance (juju unit) |
708 | 458 | :param process_name: Process name | 546 | :param process_name: Process name |
709 | 547 | :param expect_success: If False, expect the PID to be missing, | ||
710 | 548 | raise if it is present. | ||
711 | 459 | :returns: List of process IDs | 549 | :returns: List of process IDs |
712 | 460 | """ | 550 | """ |
714 | 461 | cmd = 'pidof {}'.format(process_name) | 551 | cmd = 'pidof -x {}'.format(process_name) |
715 | 552 | if not expect_success: | ||
716 | 553 | cmd += " || exit 0 && exit 1" | ||
717 | 462 | output, code = sentry_unit.run(cmd) | 554 | output, code = sentry_unit.run(cmd) |
718 | 463 | if code != 0: | 555 | if code != 0: |
719 | 464 | msg = ('{} `{}` returned {} ' | 556 | msg = ('{} `{}` returned {} ' |
720 | @@ -467,14 +559,23 @@ | |||
721 | 467 | amulet.raise_status(amulet.FAIL, msg=msg) | 559 | amulet.raise_status(amulet.FAIL, msg=msg) |
722 | 468 | return str(output).split() | 560 | return str(output).split() |
723 | 469 | 561 | ||
725 | 470 | def get_unit_process_ids(self, unit_processes): | 562 | def get_unit_process_ids(self, unit_processes, expect_success=True): |
726 | 471 | """Construct a dict containing unit sentries, process names, and | 563 | """Construct a dict containing unit sentries, process names, and |
728 | 472 | process IDs.""" | 564 | process IDs. |
729 | 565 | |||
730 | 566 | :param unit_processes: A dictionary of Amulet sentry instance | ||
731 | 567 | to list of process names. | ||
732 | 568 | :param expect_success: if False expect the processes to not be | ||
733 | 569 | running, raise if they are. | ||
734 | 570 | :returns: Dictionary of Amulet sentry instance to dictionary | ||
735 | 571 | of process names to PIDs. | ||
736 | 572 | """ | ||
737 | 473 | pid_dict = {} | 573 | pid_dict = {} |
739 | 474 | for sentry_unit, process_list in unit_processes.iteritems(): | 574 | for sentry_unit, process_list in six.iteritems(unit_processes): |
740 | 475 | pid_dict[sentry_unit] = {} | 575 | pid_dict[sentry_unit] = {} |
741 | 476 | for process in process_list: | 576 | for process in process_list: |
743 | 477 | pids = self.get_process_id_list(sentry_unit, process) | 577 | pids = self.get_process_id_list( |
744 | 578 | sentry_unit, process, expect_success=expect_success) | ||
745 | 478 | pid_dict[sentry_unit].update({process: pids}) | 579 | pid_dict[sentry_unit].update({process: pids}) |
746 | 479 | return pid_dict | 580 | return pid_dict |
747 | 480 | 581 | ||
748 | @@ -488,7 +589,7 @@ | |||
749 | 488 | return ('Unit count mismatch. expected, actual: {}, ' | 589 | return ('Unit count mismatch. expected, actual: {}, ' |
750 | 489 | '{} '.format(len(expected), len(actual))) | 590 | '{} '.format(len(expected), len(actual))) |
751 | 490 | 591 | ||
753 | 491 | for (e_sentry, e_proc_names) in expected.iteritems(): | 592 | for (e_sentry, e_proc_names) in six.iteritems(expected): |
754 | 492 | e_sentry_name = e_sentry.info['unit_name'] | 593 | e_sentry_name = e_sentry.info['unit_name'] |
755 | 493 | if e_sentry in actual.keys(): | 594 | if e_sentry in actual.keys(): |
756 | 494 | a_proc_names = actual[e_sentry] | 595 | a_proc_names = actual[e_sentry] |
757 | @@ -500,22 +601,40 @@ | |||
758 | 500 | return ('Process name count mismatch. expected, actual: {}, ' | 601 | return ('Process name count mismatch. expected, actual: {}, ' |
759 | 501 | '{}'.format(len(expected), len(actual))) | 602 | '{}'.format(len(expected), len(actual))) |
760 | 502 | 603 | ||
762 | 503 | for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \ | 604 | for (e_proc_name, e_pids), (a_proc_name, a_pids) in \ |
763 | 504 | zip(e_proc_names.items(), a_proc_names.items()): | 605 | zip(e_proc_names.items(), a_proc_names.items()): |
764 | 505 | if e_proc_name != a_proc_name: | 606 | if e_proc_name != a_proc_name: |
765 | 506 | return ('Process name mismatch. expected, actual: {}, ' | 607 | return ('Process name mismatch. expected, actual: {}, ' |
766 | 507 | '{}'.format(e_proc_name, a_proc_name)) | 608 | '{}'.format(e_proc_name, a_proc_name)) |
767 | 508 | 609 | ||
768 | 509 | a_pids_length = len(a_pids) | 610 | a_pids_length = len(a_pids) |
771 | 510 | if e_pids_length != a_pids_length: | 611 | fail_msg = ('PID count mismatch. {} ({}) expected, actual: ' |
770 | 511 | return ('PID count mismatch. {} ({}) expected, actual: ' | ||
772 | 512 | '{}, {} ({})'.format(e_sentry_name, e_proc_name, | 612 | '{}, {} ({})'.format(e_sentry_name, e_proc_name, |
774 | 513 | e_pids_length, a_pids_length, | 613 | e_pids, a_pids_length, |
775 | 514 | a_pids)) | 614 | a_pids)) |
776 | 615 | |||
777 | 616 | # If expected is a list, ensure at least one PID quantity match | ||
778 | 617 | if isinstance(e_pids, list) and \ | ||
779 | 618 | a_pids_length not in e_pids: | ||
780 | 619 | return fail_msg | ||
781 | 620 | # If expected is not bool and not list, | ||
782 | 621 | # ensure PID quantities match | ||
783 | 622 | elif not isinstance(e_pids, bool) and \ | ||
784 | 623 | not isinstance(e_pids, list) and \ | ||
785 | 624 | a_pids_length != e_pids: | ||
786 | 625 | return fail_msg | ||
787 | 626 | # If expected is bool True, ensure 1 or more PIDs exist | ||
788 | 627 | elif isinstance(e_pids, bool) and \ | ||
789 | 628 | e_pids is True and a_pids_length < 1: | ||
790 | 629 | return fail_msg | ||
791 | 630 | # If expected is bool False, ensure 0 PIDs exist | ||
792 | 631 | elif isinstance(e_pids, bool) and \ | ||
793 | 632 | e_pids is False and a_pids_length != 0: | ||
794 | 633 | return fail_msg | ||
795 | 515 | else: | 634 | else: |
796 | 516 | self.log.debug('PID check OK: {} {} {}: ' | 635 | self.log.debug('PID check OK: {} {} {}: ' |
797 | 517 | '{}'.format(e_sentry_name, e_proc_name, | 636 | '{}'.format(e_sentry_name, e_proc_name, |
799 | 518 | e_pids_length, a_pids)) | 637 | e_pids, a_pids)) |
800 | 519 | return None | 638 | return None |
801 | 520 | 639 | ||
802 | 521 | def validate_list_of_identical_dicts(self, list_of_dicts): | 640 | def validate_list_of_identical_dicts(self, list_of_dicts): |
803 | @@ -531,3 +650,180 @@ | |||
804 | 531 | return 'Dicts within list are not identical' | 650 | return 'Dicts within list are not identical' |
805 | 532 | 651 | ||
806 | 533 | return None | 652 | return None |
807 | 653 | |||
808 | 654 | def validate_sectionless_conf(self, file_contents, expected): | ||
809 | 655 | """A crude conf parser. Useful to inspect configuration files which | ||
810 | 656 | do not have section headers (as would be necessary in order to use | ||
811 | 657 | the configparser). Such as openstack-dashboard or rabbitmq confs.""" | ||
812 | 658 | for line in file_contents.split('\n'): | ||
813 | 659 | if '=' in line: | ||
814 | 660 | args = line.split('=') | ||
815 | 661 | if len(args) <= 1: | ||
816 | 662 | continue | ||
817 | 663 | key = args[0].strip() | ||
818 | 664 | value = args[1].strip() | ||
819 | 665 | if key in expected.keys(): | ||
820 | 666 | if expected[key] != value: | ||
821 | 667 | msg = ('Config mismatch. Expected, actual: {}, ' | ||
822 | 668 | '{}'.format(expected[key], value)) | ||
823 | 669 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
824 | 670 | |||
825 | 671 | def get_unit_hostnames(self, units): | ||
826 | 672 | """Return a dict of juju unit names to hostnames.""" | ||
827 | 673 | host_names = {} | ||
828 | 674 | for unit in units: | ||
829 | 675 | host_names[unit.info['unit_name']] = \ | ||
830 | 676 | str(unit.file_contents('/etc/hostname').strip()) | ||
831 | 677 | self.log.debug('Unit host names: {}'.format(host_names)) | ||
832 | 678 | return host_names | ||
833 | 679 | |||
834 | 680 | def run_cmd_unit(self, sentry_unit, cmd): | ||
835 | 681 | """Run a command on a unit, return the output and exit code.""" | ||
836 | 682 | output, code = sentry_unit.run(cmd) | ||
837 | 683 | if code == 0: | ||
838 | 684 | self.log.debug('{} `{}` command returned {} ' | ||
839 | 685 | '(OK)'.format(sentry_unit.info['unit_name'], | ||
840 | 686 | cmd, code)) | ||
841 | 687 | else: | ||
842 | 688 | msg = ('{} `{}` command returned {} ' | ||
843 | 689 | '{}'.format(sentry_unit.info['unit_name'], | ||
844 | 690 | cmd, code, output)) | ||
845 | 691 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
846 | 692 | return str(output), code | ||
847 | 693 | |||
848 | 694 | def file_exists_on_unit(self, sentry_unit, file_name): | ||
849 | 695 | """Check if a file exists on a unit.""" | ||
850 | 696 | try: | ||
851 | 697 | sentry_unit.file_stat(file_name) | ||
852 | 698 | return True | ||
853 | 699 | except IOError: | ||
854 | 700 | return False | ||
855 | 701 | except Exception as e: | ||
856 | 702 | msg = 'Error checking file {}: {}'.format(file_name, e) | ||
857 | 703 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
858 | 704 | |||
859 | 705 | def file_contents_safe(self, sentry_unit, file_name, | ||
860 | 706 | max_wait=60, fatal=False): | ||
861 | 707 | """Get file contents from a sentry unit. Wrap amulet file_contents | ||
862 | 708 | with retry logic to address races where a file checks as existing, | ||
863 | 709 | but no longer exists by the time file_contents is called. | ||
864 | 710 | Return None if file not found. Optionally raise if fatal is True.""" | ||
865 | 711 | unit_name = sentry_unit.info['unit_name'] | ||
866 | 712 | file_contents = False | ||
867 | 713 | tries = 0 | ||
868 | 714 | while not file_contents and tries < (max_wait / 4): | ||
869 | 715 | try: | ||
870 | 716 | file_contents = sentry_unit.file_contents(file_name) | ||
871 | 717 | except IOError: | ||
872 | 718 | self.log.debug('Attempt {} to open file {} from {} ' | ||
873 | 719 | 'failed'.format(tries, file_name, | ||
874 | 720 | unit_name)) | ||
875 | 721 | time.sleep(4) | ||
876 | 722 | tries += 1 | ||
877 | 723 | |||
878 | 724 | if file_contents: | ||
879 | 725 | return file_contents | ||
880 | 726 | elif not fatal: | ||
881 | 727 | return None | ||
882 | 728 | elif fatal: | ||
883 | 729 | msg = 'Failed to get file contents from unit.' | ||
884 | 730 | amulet.raise_status(amulet.FAIL, msg) | ||
885 | 731 | |||
886 | 732 | def port_knock_tcp(self, host="localhost", port=22, timeout=15): | ||
887 | 733 | """Open a TCP socket to check for a listening sevice on a host. | ||
888 | 734 | |||
889 | 735 | :param host: host name or IP address, default to localhost | ||
890 | 736 | :param port: TCP port number, default to 22 | ||
891 | 737 | :param timeout: Connect timeout, default to 15 seconds | ||
892 | 738 | :returns: True if successful, False if connect failed | ||
893 | 739 | """ | ||
894 | 740 | |||
895 | 741 | # Resolve host name if possible | ||
896 | 742 | try: | ||
897 | 743 | connect_host = socket.gethostbyname(host) | ||
898 | 744 | host_human = "{} ({})".format(connect_host, host) | ||
899 | 745 | except socket.error as e: | ||
900 | 746 | self.log.warn('Unable to resolve address: ' | ||
901 | 747 | '{} ({}) Trying anyway!'.format(host, e)) | ||
902 | 748 | connect_host = host | ||
903 | 749 | host_human = connect_host | ||
904 | 750 | |||
905 | 751 | # Attempt socket connection | ||
906 | 752 | try: | ||
907 | 753 | knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) | ||
908 | 754 | knock.settimeout(timeout) | ||
909 | 755 | knock.connect((connect_host, port)) | ||
910 | 756 | knock.close() | ||
911 | 757 | self.log.debug('Socket connect OK for host ' | ||
912 | 758 | '{} on port {}.'.format(host_human, port)) | ||
913 | 759 | return True | ||
914 | 760 | except socket.error as e: | ||
915 | 761 | self.log.debug('Socket connect FAIL for' | ||
916 | 762 | ' {} port {} ({})'.format(host_human, port, e)) | ||
917 | 763 | return False | ||
918 | 764 | |||
919 | 765 | def port_knock_units(self, sentry_units, port=22, | ||
920 | 766 | timeout=15, expect_success=True): | ||
921 | 767 | """Open a TCP socket to check for a listening sevice on each | ||
922 | 768 | listed juju unit. | ||
923 | 769 | |||
924 | 770 | :param sentry_units: list of sentry unit pointers | ||
925 | 771 | :param port: TCP port number, default to 22 | ||
926 | 772 | :param timeout: Connect timeout, default to 15 seconds | ||
927 | 773 | :expect_success: True by default, set False to invert logic | ||
928 | 774 | :returns: None if successful, Failure message otherwise | ||
929 | 775 | """ | ||
930 | 776 | for unit in sentry_units: | ||
931 | 777 | host = unit.info['public-address'] | ||
932 | 778 | connected = self.port_knock_tcp(host, port, timeout) | ||
933 | 779 | if not connected and expect_success: | ||
934 | 780 | return 'Socket connect failed.' | ||
935 | 781 | elif connected and not expect_success: | ||
936 | 782 | return 'Socket connected unexpectedly.' | ||
937 | 783 | |||
938 | 784 | def get_uuid_epoch_stamp(self): | ||
939 | 785 | """Returns a stamp string based on uuid4 and epoch time. Useful in | ||
940 | 786 | generating test messages which need to be unique-ish.""" | ||
941 | 787 | return '[{}-{}]'.format(uuid.uuid4(), time.time()) | ||
942 | 788 | |||
943 | 789 | # amulet juju action helpers: | ||
944 | 790 | def run_action(self, unit_sentry, action, | ||
945 | 791 | _check_output=subprocess.check_output, | ||
946 | 792 | params=None): | ||
947 | 793 | """Run the named action on a given unit sentry. | ||
948 | 794 | |||
949 | 795 | params a dict of parameters to use | ||
950 | 796 | _check_output parameter is used for dependency injection. | ||
951 | 797 | |||
952 | 798 | @return action_id. | ||
953 | 799 | """ | ||
954 | 800 | unit_id = unit_sentry.info["unit_name"] | ||
955 | 801 | command = ["juju", "action", "do", "--format=json", unit_id, action] | ||
956 | 802 | if params is not None: | ||
957 | 803 | for key, value in params.iteritems(): | ||
958 | 804 | command.append("{}={}".format(key, value)) | ||
959 | 805 | self.log.info("Running command: %s\n" % " ".join(command)) | ||
960 | 806 | output = _check_output(command, universal_newlines=True) | ||
961 | 807 | data = json.loads(output) | ||
962 | 808 | action_id = data[u'Action queued with id'] | ||
963 | 809 | return action_id | ||
964 | 810 | |||
965 | 811 | def wait_on_action(self, action_id, _check_output=subprocess.check_output): | ||
966 | 812 | """Wait for a given action, returning if it completed or not. | ||
967 | 813 | |||
968 | 814 | _check_output parameter is used for dependency injection. | ||
969 | 815 | """ | ||
970 | 816 | command = ["juju", "action", "fetch", "--format=json", "--wait=0", | ||
971 | 817 | action_id] | ||
972 | 818 | output = _check_output(command, universal_newlines=True) | ||
973 | 819 | data = json.loads(output) | ||
974 | 820 | return data.get(u"status") == "completed" | ||
975 | 821 | |||
976 | 822 | def status_get(self, unit): | ||
977 | 823 | """Return the current service status of this unit.""" | ||
978 | 824 | raw_status, return_code = unit.run( | ||
979 | 825 | "status-get --format=json --include-data") | ||
980 | 826 | if return_code != 0: | ||
981 | 827 | return ("unknown", "") | ||
982 | 828 | status = json.loads(raw_status) | ||
983 | 829 | return (status["status"], status["message"]) | ||
984 | 534 | 830 | ||
985 | === removed directory 'hooks/charmhelpers/contrib/ansible' | |||
986 | === removed file 'hooks/charmhelpers/contrib/ansible/__init__.py' | |||
987 | --- hooks/charmhelpers/contrib/ansible/__init__.py 2015-07-29 18:07:31 +0000 | |||
988 | +++ hooks/charmhelpers/contrib/ansible/__init__.py 1970-01-01 00:00:00 +0000 | |||
989 | @@ -1,254 +0,0 @@ | |||
990 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
991 | 2 | # | ||
992 | 3 | # This file is part of charm-helpers. | ||
993 | 4 | # | ||
994 | 5 | # charm-helpers is free software: you can redistribute it and/or modify | ||
995 | 6 | # it under the terms of the GNU Lesser General Public License version 3 as | ||
996 | 7 | # published by the Free Software Foundation. | ||
997 | 8 | # | ||
998 | 9 | # charm-helpers is distributed in the hope that it will be useful, | ||
999 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
1000 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
1001 | 12 | # GNU Lesser General Public License for more details. | ||
1002 | 13 | # | ||
1003 | 14 | # You should have received a copy of the GNU Lesser General Public License | ||
1004 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | ||
1005 | 16 | |||
1006 | 17 | # Copyright 2013 Canonical Ltd. | ||
1007 | 18 | # | ||
1008 | 19 | # Authors: | ||
1009 | 20 | # Charm Helpers Developers <juju@lists.ubuntu.com> | ||
1010 | 21 | """Charm Helpers ansible - declare the state of your machines. | ||
1011 | 22 | |||
1012 | 23 | This helper enables you to declare your machine state, rather than | ||
1013 | 24 | program it procedurally (and have to test each change to your procedures). | ||
1014 | 25 | Your install hook can be as simple as:: | ||
1015 | 26 | |||
1016 | 27 | {{{ | ||
1017 | 28 | import charmhelpers.contrib.ansible | ||
1018 | 29 | |||
1019 | 30 | |||
1020 | 31 | def install(): | ||
1021 | 32 | charmhelpers.contrib.ansible.install_ansible_support() | ||
1022 | 33 | charmhelpers.contrib.ansible.apply_playbook('playbooks/install.yaml') | ||
1023 | 34 | }}} | ||
1024 | 35 | |||
1025 | 36 | and won't need to change (nor will its tests) when you change the machine | ||
1026 | 37 | state. | ||
1027 | 38 | |||
1028 | 39 | All of your juju config and relation-data are available as template | ||
1029 | 40 | variables within your playbooks and templates. An install playbook looks | ||
1030 | 41 | something like:: | ||
1031 | 42 | |||
1032 | 43 | {{{ | ||
1033 | 44 | --- | ||
1034 | 45 | - hosts: localhost | ||
1035 | 46 | user: root | ||
1036 | 47 | |||
1037 | 48 | tasks: | ||
1038 | 49 | - name: Add private repositories. | ||
1039 | 50 | template: | ||
1040 | 51 | src: ../templates/private-repositories.list.jinja2 | ||
1041 | 52 | dest: /etc/apt/sources.list.d/private.list | ||
1042 | 53 | |||
1043 | 54 | - name: Update the cache. | ||
1044 | 55 | apt: update_cache=yes | ||
1045 | 56 | |||
1046 | 57 | - name: Install dependencies. | ||
1047 | 58 | apt: pkg={{ item }} | ||
1048 | 59 | with_items: | ||
1049 | 60 | - python-mimeparse | ||
1050 | 61 | - python-webob | ||
1051 | 62 | - sunburnt | ||
1052 | 63 | |||
1053 | 64 | - name: Setup groups. | ||
1054 | 65 | group: name={{ item.name }} gid={{ item.gid }} | ||
1055 | 66 | with_items: | ||
1056 | 67 | - { name: 'deploy_user', gid: 1800 } | ||
1057 | 68 | - { name: 'service_user', gid: 1500 } | ||
1058 | 69 | |||
1059 | 70 | ... | ||
1060 | 71 | }}} | ||
1061 | 72 | |||
1062 | 73 | Read more online about `playbooks`_ and standard ansible `modules`_. | ||
1063 | 74 | |||
1064 | 75 | .. _playbooks: http://www.ansibleworks.com/docs/playbooks.html | ||
1065 | 76 | .. _modules: http://www.ansibleworks.com/docs/modules.html | ||
1066 | 77 | |||
1067 | 78 | A further feature os the ansible hooks is to provide a light weight "action" | ||
1068 | 79 | scripting tool. This is a decorator that you apply to a function, and that | ||
1069 | 80 | function can now receive cli args, and can pass extra args to the playbook. | ||
1070 | 81 | |||
1071 | 82 | e.g. | ||
1072 | 83 | |||
1073 | 84 | |||
1074 | 85 | @hooks.action() | ||
1075 | 86 | def some_action(amount, force="False"): | ||
1076 | 87 | "Usage: some-action AMOUNT [force=True]" # <-- shown on error | ||
1077 | 88 | # process the arguments | ||
1078 | 89 | # do some calls | ||
1079 | 90 | # return extra-vars to be passed to ansible-playbook | ||
1080 | 91 | return { | ||
1081 | 92 | 'amount': int(amount), | ||
1082 | 93 | 'type': force, | ||
1083 | 94 | } | ||
1084 | 95 | |||
1085 | 96 | You can now create a symlink to hooks.py that can be invoked like a hook, but | ||
1086 | 97 | with cli params: | ||
1087 | 98 | |||
1088 | 99 | # link actions/some-action to hooks/hooks.py | ||
1089 | 100 | |||
1090 | 101 | actions/some-action amount=10 force=true | ||
1091 | 102 | |||
1092 | 103 | """ | ||
1093 | 104 | import os | ||
1094 | 105 | import stat | ||
1095 | 106 | import subprocess | ||
1096 | 107 | import functools | ||
1097 | 108 | |||
1098 | 109 | import charmhelpers.contrib.templating.contexts | ||
1099 | 110 | import charmhelpers.core.host | ||
1100 | 111 | import charmhelpers.core.hookenv | ||
1101 | 112 | import charmhelpers.fetch | ||
1102 | 113 | |||
1103 | 114 | |||
1104 | 115 | charm_dir = os.environ.get('CHARM_DIR', '') | ||
1105 | 116 | ansible_hosts_path = '/etc/ansible/hosts' | ||
1106 | 117 | # Ansible will automatically include any vars in the following | ||
1107 | 118 | # file in its inventory when run locally. | ||
1108 | 119 | ansible_vars_path = '/etc/ansible/host_vars/localhost' | ||
1109 | 120 | |||
1110 | 121 | |||
1111 | 122 | def install_ansible_support(from_ppa=True, ppa_location='ppa:rquillo/ansible'): | ||
1112 | 123 | """Installs the ansible package. | ||
1113 | 124 | |||
1114 | 125 | By default it is installed from the `PPA`_ linked from | ||
1115 | 126 | the ansible `website`_ or from a ppa specified by a charm config.. | ||
1116 | 127 | |||
1117 | 128 | .. _PPA: https://launchpad.net/~rquillo/+archive/ansible | ||
1118 | 129 | .. _website: http://docs.ansible.com/intro_installation.html#latest-releases-via-apt-ubuntu | ||
1119 | 130 | |||
1120 | 131 | If from_ppa is empty, you must ensure that the package is available | ||
1121 | 132 | from a configured repository. | ||
1122 | 133 | """ | ||
1123 | 134 | if from_ppa: | ||
1124 | 135 | charmhelpers.fetch.add_source(ppa_location) | ||
1125 | 136 | charmhelpers.fetch.apt_update(fatal=True) | ||
1126 | 137 | charmhelpers.fetch.apt_install('ansible') | ||
1127 | 138 | with open(ansible_hosts_path, 'w+') as hosts_file: | ||
1128 | 139 | hosts_file.write('localhost ansible_connection=local') | ||
1129 | 140 | |||
1130 | 141 | |||
1131 | 142 | def apply_playbook(playbook, tags=None, extra_vars=None): | ||
1132 | 143 | tags = tags or [] | ||
1133 | 144 | tags = ",".join(tags) | ||
1134 | 145 | charmhelpers.contrib.templating.contexts.juju_state_to_yaml( | ||
1135 | 146 | ansible_vars_path, namespace_separator='__', | ||
1136 | 147 | allow_hyphens_in_keys=False, mode=(stat.S_IRUSR | stat.S_IWUSR)) | ||
1137 | 148 | |||
1138 | 149 | # we want ansible's log output to be unbuffered | ||
1139 | 150 | env = os.environ.copy() | ||
1140 | 151 | env['PYTHONUNBUFFERED'] = "1" | ||
1141 | 152 | call = [ | ||
1142 | 153 | 'ansible-playbook', | ||
1143 | 154 | '-c', | ||
1144 | 155 | 'local', | ||
1145 | 156 | playbook, | ||
1146 | 157 | ] | ||
1147 | 158 | if tags: | ||
1148 | 159 | call.extend(['--tags', '{}'.format(tags)]) | ||
1149 | 160 | if extra_vars: | ||
1150 | 161 | extra = ["%s=%s" % (k, v) for k, v in extra_vars.items()] | ||
1151 | 162 | call.extend(['--extra-vars', " ".join(extra)]) | ||
1152 | 163 | subprocess.check_call(call, env=env) | ||
1153 | 164 | |||
1154 | 165 | |||
1155 | 166 | class AnsibleHooks(charmhelpers.core.hookenv.Hooks): | ||
1156 | 167 | """Run a playbook with the hook-name as the tag. | ||
1157 | 168 | |||
1158 | 169 | This helper builds on the standard hookenv.Hooks helper, | ||
1159 | 170 | but additionally runs the playbook with the hook-name specified | ||
1160 | 171 | using --tags (ie. running all the tasks tagged with the hook-name). | ||
1161 | 172 | |||
1162 | 173 | Example:: | ||
1163 | 174 | |||
1164 | 175 | hooks = AnsibleHooks(playbook_path='playbooks/my_machine_state.yaml') | ||
1165 | 176 | |||
1166 | 177 | # All the tasks within my_machine_state.yaml tagged with 'install' | ||
1167 | 178 | # will be run automatically after do_custom_work() | ||
1168 | 179 | @hooks.hook() | ||
1169 | 180 | def install(): | ||
1170 | 181 | do_custom_work() | ||
1171 | 182 | |||
1172 | 183 | # For most of your hooks, you won't need to do anything other | ||
1173 | 184 | # than run the tagged tasks for the hook: | ||
1174 | 185 | @hooks.hook('config-changed', 'start', 'stop') | ||
1175 | 186 | def just_use_playbook(): | ||
1176 | 187 | pass | ||
1177 | 188 | |||
1178 | 189 | # As a convenience, you can avoid the above noop function by specifying | ||
1179 | 190 | # the hooks which are handled by ansible-only and they'll be registered | ||
1180 | 191 | # for you: | ||
1181 | 192 | # hooks = AnsibleHooks( | ||
1182 | 193 | # 'playbooks/my_machine_state.yaml', | ||
1183 | 194 | # default_hooks=['config-changed', 'start', 'stop']) | ||
1184 | 195 | |||
1185 | 196 | if __name__ == "__main__": | ||
1186 | 197 | # execute a hook based on the name the program is called by | ||
1187 | 198 | hooks.execute(sys.argv) | ||
1188 | 199 | |||
1189 | 200 | """ | ||
1190 | 201 | |||
1191 | 202 | def __init__(self, playbook_path, default_hooks=None): | ||
1192 | 203 | """Register any hooks handled by ansible.""" | ||
1193 | 204 | super(AnsibleHooks, self).__init__() | ||
1194 | 205 | |||
1195 | 206 | self._actions = {} | ||
1196 | 207 | self.playbook_path = playbook_path | ||
1197 | 208 | |||
1198 | 209 | default_hooks = default_hooks or [] | ||
1199 | 210 | |||
1200 | 211 | def noop(*args, **kwargs): | ||
1201 | 212 | pass | ||
1202 | 213 | |||
1203 | 214 | for hook in default_hooks: | ||
1204 | 215 | self.register(hook, noop) | ||
1205 | 216 | |||
1206 | 217 | def register_action(self, name, function): | ||
1207 | 218 | """Register a hook""" | ||
1208 | 219 | self._actions[name] = function | ||
1209 | 220 | |||
1210 | 221 | def execute(self, args): | ||
1211 | 222 | """Execute the hook followed by the playbook using the hook as tag.""" | ||
1212 | 223 | hook_name = os.path.basename(args[0]) | ||
1213 | 224 | extra_vars = None | ||
1214 | 225 | if hook_name in self._actions: | ||
1215 | 226 | extra_vars = self._actions[hook_name](args[1:]) | ||
1216 | 227 | else: | ||
1217 | 228 | super(AnsibleHooks, self).execute(args) | ||
1218 | 229 | |||
1219 | 230 | charmhelpers.contrib.ansible.apply_playbook( | ||
1220 | 231 | self.playbook_path, tags=[hook_name], extra_vars=extra_vars) | ||
1221 | 232 | |||
1222 | 233 | def action(self, *action_names): | ||
1223 | 234 | """Decorator, registering them as actions""" | ||
1224 | 235 | def action_wrapper(decorated): | ||
1225 | 236 | |||
1226 | 237 | @functools.wraps(decorated) | ||
1227 | 238 | def wrapper(argv): | ||
1228 | 239 | kwargs = dict(arg.split('=') for arg in argv) | ||
1229 | 240 | try: | ||
1230 | 241 | return decorated(**kwargs) | ||
1231 | 242 | except TypeError as e: | ||
1232 | 243 | if decorated.__doc__: | ||
1233 | 244 | e.args += (decorated.__doc__,) | ||
1234 | 245 | raise | ||
1235 | 246 | |||
1236 | 247 | self.register_action(decorated.__name__, wrapper) | ||
1237 | 248 | if '_' in decorated.__name__: | ||
1238 | 249 | self.register_action( | ||
1239 | 250 | decorated.__name__.replace('_', '-'), wrapper) | ||
1240 | 251 | |||
1241 | 252 | return wrapper | ||
1242 | 253 | |||
1243 | 254 | return action_wrapper | ||
1244 | 255 | 0 | ||
1245 | === removed directory 'hooks/charmhelpers/contrib/benchmark' | |||
1246 | === removed file 'hooks/charmhelpers/contrib/benchmark/__init__.py' | |||
1247 | --- hooks/charmhelpers/contrib/benchmark/__init__.py 2015-07-29 18:07:31 +0000 | |||
1248 | +++ hooks/charmhelpers/contrib/benchmark/__init__.py 1970-01-01 00:00:00 +0000 | |||
1249 | @@ -1,126 +0,0 @@ | |||
1250 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
1251 | 2 | # | ||
1252 | 3 | # This file is part of charm-helpers. | ||
1253 | 4 | # | ||
1254 | 5 | # charm-helpers is free software: you can redistribute it and/or modify | ||
1255 | 6 | # it under the terms of the GNU Lesser General Public License version 3 as | ||
1256 | 7 | # published by the Free Software Foundation. | ||
1257 | 8 | # | ||
1258 | 9 | # charm-helpers is distributed in the hope that it will be useful, | ||
1259 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
1260 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
1261 | 12 | # GNU Lesser General Public License for more details. | ||
1262 | 13 | # | ||
1263 | 14 | # You should have received a copy of the GNU Lesser General Public License | ||
1264 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | ||
1265 | 16 | |||
1266 | 17 | import subprocess | ||
1267 | 18 | import time | ||
1268 | 19 | import os | ||
1269 | 20 | from distutils.spawn import find_executable | ||
1270 | 21 | |||
1271 | 22 | from charmhelpers.core.hookenv import ( | ||
1272 | 23 | in_relation_hook, | ||
1273 | 24 | relation_ids, | ||
1274 | 25 | relation_set, | ||
1275 | 26 | relation_get, | ||
1276 | 27 | ) | ||
1277 | 28 | |||
1278 | 29 | |||
1279 | 30 | def action_set(key, val): | ||
1280 | 31 | if find_executable('action-set'): | ||
1281 | 32 | action_cmd = ['action-set'] | ||
1282 | 33 | |||
1283 | 34 | if isinstance(val, dict): | ||
1284 | 35 | for k, v in iter(val.items()): | ||
1285 | 36 | action_set('%s.%s' % (key, k), v) | ||
1286 | 37 | return True | ||
1287 | 38 | |||
1288 | 39 | action_cmd.append('%s=%s' % (key, val)) | ||
1289 | 40 | subprocess.check_call(action_cmd) | ||
1290 | 41 | return True | ||
1291 | 42 | return False | ||
1292 | 43 | |||
1293 | 44 | |||
1294 | 45 | class Benchmark(): | ||
1295 | 46 | """ | ||
1296 | 47 | Helper class for the `benchmark` interface. | ||
1297 | 48 | |||
1298 | 49 | :param list actions: Define the actions that are also benchmarks | ||
1299 | 50 | |||
1300 | 51 | From inside the benchmark-relation-changed hook, you would call | ||
1301 | 52 | Benchmark(['memory', 'cpu', 'disk', 'smoke', 'custom']) | ||
1302 | 53 | |||
1303 | 54 | Examples: | ||
1304 | 55 | |||
1305 | 56 | siege = Benchmark(['siege']) | ||
1306 | 57 | siege.start() | ||
1307 | 58 | [... run siege ...] | ||
1308 | 59 | # The higher the score, the better the benchmark | ||
1309 | 60 | siege.set_composite_score(16.70, 'trans/sec', 'desc') | ||
1310 | 61 | siege.finish() | ||
1311 | 62 | |||
1312 | 63 | |||
1313 | 64 | """ | ||
1314 | 65 | |||
1315 | 66 | BENCHMARK_CONF = '/etc/benchmark.conf' # Replaced in testing | ||
1316 | 67 | |||
1317 | 68 | required_keys = [ | ||
1318 | 69 | 'hostname', | ||
1319 | 70 | 'port', | ||
1320 | 71 | 'graphite_port', | ||
1321 | 72 | 'graphite_endpoint', | ||
1322 | 73 | 'api_port' | ||
1323 | 74 | ] | ||
1324 | 75 | |||
1325 | 76 | def __init__(self, benchmarks=None): | ||
1326 | 77 | if in_relation_hook(): | ||
1327 | 78 | if benchmarks is not None: | ||
1328 | 79 | for rid in sorted(relation_ids('benchmark')): | ||
1329 | 80 | relation_set(relation_id=rid, relation_settings={ | ||
1330 | 81 | 'benchmarks': ",".join(benchmarks) | ||
1331 | 82 | }) | ||
1332 | 83 | |||
1333 | 84 | # Check the relation data | ||
1334 | 85 | config = {} | ||
1335 | 86 | for key in self.required_keys: | ||
1336 | 87 | val = relation_get(key) | ||
1337 | 88 | if val is not None: | ||
1338 | 89 | config[key] = val | ||
1339 | 90 | else: | ||
1340 | 91 | # We don't have all of the required keys | ||
1341 | 92 | config = {} | ||
1342 | 93 | break | ||
1343 | 94 | |||
1344 | 95 | if len(config): | ||
1345 | 96 | with open(self.BENCHMARK_CONF, 'w') as f: | ||
1346 | 97 | for key, val in iter(config.items()): | ||
1347 | 98 | f.write("%s=%s\n" % (key, val)) | ||
1348 | 99 | |||
1349 | 100 | @staticmethod | ||
1350 | 101 | def start(): | ||
1351 | 102 | action_set('meta.start', time.strftime('%Y-%m-%dT%H:%M:%SZ')) | ||
1352 | 103 | |||
1353 | 104 | """ | ||
1354 | 105 | If the collectd charm is also installed, tell it to send a snapshot | ||
1355 | 106 | of the current profile data. | ||
1356 | 107 | """ | ||
1357 | 108 | COLLECT_PROFILE_DATA = '/usr/local/bin/collect-profile-data' | ||
1358 | 109 | if os.path.exists(COLLECT_PROFILE_DATA): | ||
1359 | 110 | subprocess.check_output([COLLECT_PROFILE_DATA]) | ||
1360 | 111 | |||
1361 | 112 | @staticmethod | ||
1362 | 113 | def finish(): | ||
1363 | 114 | action_set('meta.stop', time.strftime('%Y-%m-%dT%H:%M:%SZ')) | ||
1364 | 115 | |||
1365 | 116 | @staticmethod | ||
1366 | 117 | def set_composite_score(value, units, direction='asc'): | ||
1367 | 118 | """ | ||
1368 | 119 | Set the composite score for a benchmark run. This is a single number | ||
1369 | 120 | representative of the benchmark results. This could be the most | ||
1370 | 121 | important metric, or an amalgamation of metric scores. | ||
1371 | 122 | """ | ||
1372 | 123 | return action_set( | ||
1373 | 124 | "meta.composite", | ||
1374 | 125 | {'value': value, 'units': units, 'direction': direction} | ||
1375 | 126 | ) | ||
1376 | 127 | 0 | ||
1377 | === removed directory 'hooks/charmhelpers/contrib/charmhelpers' | |||
1378 | === removed file 'hooks/charmhelpers/contrib/charmhelpers/__init__.py' | |||
1379 | --- hooks/charmhelpers/contrib/charmhelpers/__init__.py 2015-07-29 18:07:31 +0000 | |||
1380 | +++ hooks/charmhelpers/contrib/charmhelpers/__init__.py 1970-01-01 00:00:00 +0000 | |||
1381 | @@ -1,208 +0,0 @@ | |||
1382 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
1383 | 2 | # | ||
1384 | 3 | # This file is part of charm-helpers. | ||
1385 | 4 | # | ||
1386 | 5 | # charm-helpers is free software: you can redistribute it and/or modify | ||
1387 | 6 | # it under the terms of the GNU Lesser General Public License version 3 as | ||
1388 | 7 | # published by the Free Software Foundation. | ||
1389 | 8 | # | ||
1390 | 9 | # charm-helpers is distributed in the hope that it will be useful, | ||
1391 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
1392 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
1393 | 12 | # GNU Lesser General Public License for more details. | ||
1394 | 13 | # | ||
1395 | 14 | # You should have received a copy of the GNU Lesser General Public License | ||
1396 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | ||
1397 | 16 | |||
1398 | 17 | # Copyright 2012 Canonical Ltd. This software is licensed under the | ||
1399 | 18 | # GNU Affero General Public License version 3 (see the file LICENSE). | ||
1400 | 19 | |||
1401 | 20 | import warnings | ||
1402 | 21 | warnings.warn("contrib.charmhelpers is deprecated", DeprecationWarning) # noqa | ||
1403 | 22 | |||
1404 | 23 | import operator | ||
1405 | 24 | import tempfile | ||
1406 | 25 | import time | ||
1407 | 26 | import yaml | ||
1408 | 27 | import subprocess | ||
1409 | 28 | |||
1410 | 29 | import six | ||
1411 | 30 | if six.PY3: | ||
1412 | 31 | from urllib.request import urlopen | ||
1413 | 32 | from urllib.error import (HTTPError, URLError) | ||
1414 | 33 | else: | ||
1415 | 34 | from urllib2 import (urlopen, HTTPError, URLError) | ||
1416 | 35 | |||
1417 | 36 | """Helper functions for writing Juju charms in Python.""" | ||
1418 | 37 | |||
1419 | 38 | __metaclass__ = type | ||
1420 | 39 | __all__ = [ | ||
1421 | 40 | # 'get_config', # core.hookenv.config() | ||
1422 | 41 | # 'log', # core.hookenv.log() | ||
1423 | 42 | # 'log_entry', # core.hookenv.log() | ||
1424 | 43 | # 'log_exit', # core.hookenv.log() | ||
1425 | 44 | # 'relation_get', # core.hookenv.relation_get() | ||
1426 | 45 | # 'relation_set', # core.hookenv.relation_set() | ||
1427 | 46 | # 'relation_ids', # core.hookenv.relation_ids() | ||
1428 | 47 | # 'relation_list', # core.hookenv.relation_units() | ||
1429 | 48 | # 'config_get', # core.hookenv.config() | ||
1430 | 49 | # 'unit_get', # core.hookenv.unit_get() | ||
1431 | 50 | # 'open_port', # core.hookenv.open_port() | ||
1432 | 51 | # 'close_port', # core.hookenv.close_port() | ||
1433 | 52 | # 'service_control', # core.host.service() | ||
1434 | 53 | 'unit_info', # client-side, NOT IMPLEMENTED | ||
1435 | 54 | 'wait_for_machine', # client-side, NOT IMPLEMENTED | ||
1436 | 55 | 'wait_for_page_contents', # client-side, NOT IMPLEMENTED | ||
1437 | 56 | 'wait_for_relation', # client-side, NOT IMPLEMENTED | ||
1438 | 57 | 'wait_for_unit', # client-side, NOT IMPLEMENTED | ||
1439 | 58 | ] | ||
1440 | 59 | |||
1441 | 60 | |||
1442 | 61 | SLEEP_AMOUNT = 0.1 | ||
1443 | 62 | |||
1444 | 63 | |||
1445 | 64 | # We create a juju_status Command here because it makes testing much, | ||
1446 | 65 | # much easier. | ||
1447 | 66 | def juju_status(): | ||
1448 | 67 | subprocess.check_call(['juju', 'status']) | ||
1449 | 68 | |||
1450 | 69 | # re-implemented as charmhelpers.fetch.configure_sources() | ||
1451 | 70 | # def configure_source(update=False): | ||
1452 | 71 | # source = config_get('source') | ||
1453 | 72 | # if ((source.startswith('ppa:') or | ||
1454 | 73 | # source.startswith('cloud:') or | ||
1455 | 74 | # source.startswith('http:'))): | ||
1456 | 75 | # run('add-apt-repository', source) | ||
1457 | 76 | # if source.startswith("http:"): | ||
1458 | 77 | # run('apt-key', 'import', config_get('key')) | ||
1459 | 78 | # if update: | ||
1460 | 79 | # run('apt-get', 'update') | ||
1461 | 80 | |||
1462 | 81 | |||
1463 | 82 | # DEPRECATED: client-side only | ||
1464 | 83 | def make_charm_config_file(charm_config): | ||
1465 | 84 | charm_config_file = tempfile.NamedTemporaryFile(mode='w+') | ||
1466 | 85 | charm_config_file.write(yaml.dump(charm_config)) | ||
1467 | 86 | charm_config_file.flush() | ||
1468 | 87 | # The NamedTemporaryFile instance is returned instead of just the name | ||
1469 | 88 | # because we want to take advantage of garbage collection-triggered | ||
1470 | 89 | # deletion of the temp file when it goes out of scope in the caller. | ||
1471 | 90 | return charm_config_file | ||
1472 | 91 | |||
1473 | 92 | |||
1474 | 93 | # DEPRECATED: client-side only | ||
1475 | 94 | def unit_info(service_name, item_name, data=None, unit=None): | ||
1476 | 95 | if data is None: | ||
1477 | 96 | data = yaml.safe_load(juju_status()) | ||
1478 | 97 | service = data['services'].get(service_name) | ||
1479 | 98 | if service is None: | ||
1480 | 99 | # XXX 2012-02-08 gmb: | ||
1481 | 100 | # This allows us to cope with the race condition that we | ||
1482 | 101 | # have between deploying a service and having it come up in | ||
1483 | 102 | # `juju status`. We could probably do with cleaning it up so | ||
1484 | 103 | # that it fails a bit more noisily after a while. | ||
1485 | 104 | return '' | ||
1486 | 105 | units = service['units'] | ||
1487 | 106 | if unit is not None: | ||
1488 | 107 | item = units[unit][item_name] | ||
1489 | 108 | else: | ||
1490 | 109 | # It might seem odd to sort the units here, but we do it to | ||
1491 | 110 | # ensure that when no unit is specified, the first unit for the | ||
1492 | 111 | # service (or at least the one with the lowest number) is the | ||
1493 | 112 | # one whose data gets returned. | ||
1494 | 113 | sorted_unit_names = sorted(units.keys()) | ||
1495 | 114 | item = units[sorted_unit_names[0]][item_name] | ||
1496 | 115 | return item | ||
1497 | 116 | |||
1498 | 117 | |||
1499 | 118 | # DEPRECATED: client-side only | ||
1500 | 119 | def get_machine_data(): | ||
1501 | 120 | return yaml.safe_load(juju_status())['machines'] | ||
1502 | 121 | |||
1503 | 122 | |||
1504 | 123 | # DEPRECATED: client-side only | ||
1505 | 124 | def wait_for_machine(num_machines=1, timeout=300): | ||
1506 | 125 | """Wait `timeout` seconds for `num_machines` machines to come up. | ||
1507 | 126 | |||
1508 | 127 | This wait_for... function can be called by other wait_for functions | ||
1509 | 128 | whose timeouts might be too short in situations where only a bare | ||
1510 | 129 | Juju setup has been bootstrapped. | ||
1511 | 130 | |||
1512 | 131 | :return: A tuple of (num_machines, time_taken). This is used for | ||
1513 | 132 | testing. | ||
1514 | 133 | """ | ||
1515 | 134 | # You may think this is a hack, and you'd be right. The easiest way | ||
1516 | 135 | # to tell what environment we're working in (LXC vs EC2) is to check | ||
1517 | 136 | # the dns-name of the first machine. If it's localhost we're in LXC | ||
1518 | 137 | # and we can just return here. | ||
1519 | 138 | if get_machine_data()[0]['dns-name'] == 'localhost': | ||
1520 | 139 | return 1, 0 | ||
1521 | 140 | start_time = time.time() | ||
1522 | 141 | while True: | ||
1523 | 142 | # Drop the first machine, since it's the Zookeeper and that's | ||
1524 | 143 | # not a machine that we need to wait for. This will only work | ||
1525 | 144 | # for EC2 environments, which is why we return early above if | ||
1526 | 145 | # we're in LXC. | ||
1527 | 146 | machine_data = get_machine_data() | ||
1528 | 147 | non_zookeeper_machines = [ | ||
1529 | 148 | machine_data[key] for key in list(machine_data.keys())[1:]] | ||
1530 | 149 | if len(non_zookeeper_machines) >= num_machines: | ||
1531 | 150 | all_machines_running = True | ||
1532 | 151 | for machine in non_zookeeper_machines: | ||
1533 | 152 | if machine.get('instance-state') != 'running': | ||
1534 | 153 | all_machines_running = False | ||
1535 | 154 | break | ||
1536 | 155 | if all_machines_running: | ||
1537 | 156 | break | ||
1538 | 157 | if time.time() - start_time >= timeout: | ||
1539 | 158 | raise RuntimeError('timeout waiting for service to start') | ||
1540 | 159 | time.sleep(SLEEP_AMOUNT) | ||
1541 | 160 | return num_machines, time.time() - start_time | ||
1542 | 161 | |||
1543 | 162 | |||
1544 | 163 | # DEPRECATED: client-side only | ||
1545 | 164 | def wait_for_unit(service_name, timeout=480): | ||
1546 | 165 | """Wait `timeout` seconds for a given service name to come up.""" | ||
1547 | 166 | wait_for_machine(num_machines=1) | ||
1548 | 167 | start_time = time.time() | ||
1549 | 168 | while True: | ||
1550 | 169 | state = unit_info(service_name, 'agent-state') | ||
1551 | 170 | if 'error' in state or state == 'started': | ||
1552 | 171 | break | ||
1553 | 172 | if time.time() - start_time >= timeout: | ||
1554 | 173 | raise RuntimeError('timeout waiting for service to start') | ||
1555 | 174 | time.sleep(SLEEP_AMOUNT) | ||
1556 | 175 | if state != 'started': | ||
1557 | 176 | raise RuntimeError('unit did not start, agent-state: ' + state) | ||
1558 | 177 | |||
1559 | 178 | |||
1560 | 179 | # DEPRECATED: client-side only | ||
1561 | 180 | def wait_for_relation(service_name, relation_name, timeout=120): | ||
1562 | 181 | """Wait `timeout` seconds for a given relation to come up.""" | ||
1563 | 182 | start_time = time.time() | ||
1564 | 183 | while True: | ||
1565 | 184 | relation = unit_info(service_name, 'relations').get(relation_name) | ||
1566 | 185 | if relation is not None and relation['state'] == 'up': | ||
1567 | 186 | break | ||
1568 | 187 | if time.time() - start_time >= timeout: | ||
1569 | 188 | raise RuntimeError('timeout waiting for relation to be up') | ||
1570 | 189 | time.sleep(SLEEP_AMOUNT) | ||
1571 | 190 | |||
1572 | 191 | |||
1573 | 192 | # DEPRECATED: client-side only | ||
1574 | 193 | def wait_for_page_contents(url, contents, timeout=120, validate=None): | ||
1575 | 194 | if validate is None: | ||
1576 | 195 | validate = operator.contains | ||
1577 | 196 | start_time = time.time() | ||
1578 | 197 | while True: | ||
1579 | 198 | try: | ||
1580 | 199 | stream = urlopen(url) | ||
1581 | 200 | except (HTTPError, URLError): | ||
1582 | 201 | pass | ||
1583 | 202 | else: | ||
1584 | 203 | page = stream.read() | ||
1585 | 204 | if validate(page, contents): | ||
1586 | 205 | return page | ||
1587 | 206 | if time.time() - start_time >= timeout: | ||
1588 | 207 | raise RuntimeError('timeout waiting for contents of ' + url) | ||
1589 | 208 | time.sleep(SLEEP_AMOUNT) | ||
1590 | 209 | 0 | ||
1591 | === removed directory 'hooks/charmhelpers/contrib/charmsupport' | |||
1592 | === removed file 'hooks/charmhelpers/contrib/charmsupport/__init__.py' | |||
1593 | --- hooks/charmhelpers/contrib/charmsupport/__init__.py 2015-07-29 18:07:31 +0000 | |||
1594 | +++ hooks/charmhelpers/contrib/charmsupport/__init__.py 1970-01-01 00:00:00 +0000 | |||
1595 | @@ -1,15 +0,0 @@ | |||
1596 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
1597 | 2 | # | ||
1598 | 3 | # This file is part of charm-helpers. | ||
1599 | 4 | # | ||
1600 | 5 | # charm-helpers is free software: you can redistribute it and/or modify | ||
1601 | 6 | # it under the terms of the GNU Lesser General Public License version 3 as | ||
1602 | 7 | # published by the Free Software Foundation. | ||
1603 | 8 | # | ||
1604 | 9 | # charm-helpers is distributed in the hope that it will be useful, | ||
1605 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
1606 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
1607 | 12 | # GNU Lesser General Public License for more details. | ||
1608 | 13 | # | ||
1609 | 14 | # You should have received a copy of the GNU Lesser General Public License | ||
1610 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | ||
1611 | 16 | 0 | ||
1612 | === removed file 'hooks/charmhelpers/contrib/charmsupport/nrpe.py' | |||
1613 | --- hooks/charmhelpers/contrib/charmsupport/nrpe.py 2015-07-29 18:07:31 +0000 | |||
1614 | +++ hooks/charmhelpers/contrib/charmsupport/nrpe.py 1970-01-01 00:00:00 +0000 | |||
1615 | @@ -1,360 +0,0 @@ | |||
1616 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
1617 | 2 | # | ||
1618 | 3 | # This file is part of charm-helpers. | ||
1619 | 4 | # | ||
1620 | 5 | # charm-helpers is free software: you can redistribute it and/or modify | ||
1621 | 6 | # it under the terms of the GNU Lesser General Public License version 3 as | ||
1622 | 7 | # published by the Free Software Foundation. | ||
1623 | 8 | # | ||
1624 | 9 | # charm-helpers is distributed in the hope that it will be useful, | ||
1625 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
1626 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
1627 | 12 | # GNU Lesser General Public License for more details. | ||
1628 | 13 | # | ||
1629 | 14 | # You should have received a copy of the GNU Lesser General Public License | ||
1630 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | ||
1631 | 16 | |||
1632 | 17 | """Compatibility with the nrpe-external-master charm""" | ||
1633 | 18 | # Copyright 2012 Canonical Ltd. | ||
1634 | 19 | # | ||
1635 | 20 | # Authors: | ||
1636 | 21 | # Matthew Wedgwood <matthew.wedgwood@canonical.com> | ||
1637 | 22 | |||
1638 | 23 | import subprocess | ||
1639 | 24 | import pwd | ||
1640 | 25 | import grp | ||
1641 | 26 | import os | ||
1642 | 27 | import glob | ||
1643 | 28 | import shutil | ||
1644 | 29 | import re | ||
1645 | 30 | import shlex | ||
1646 | 31 | import yaml | ||
1647 | 32 | |||
1648 | 33 | from charmhelpers.core.hookenv import ( | ||
1649 | 34 | config, | ||
1650 | 35 | local_unit, | ||
1651 | 36 | log, | ||
1652 | 37 | relation_ids, | ||
1653 | 38 | relation_set, | ||
1654 | 39 | relations_of_type, | ||
1655 | 40 | ) | ||
1656 | 41 | |||
1657 | 42 | from charmhelpers.core.host import service | ||
1658 | 43 | |||
1659 | 44 | # This module adds compatibility with the nrpe-external-master and plain nrpe | ||
1660 | 45 | # subordinate charms. To use it in your charm: | ||
1661 | 46 | # | ||
1662 | 47 | # 1. Update metadata.yaml | ||
1663 | 48 | # | ||
1664 | 49 | # provides: | ||
1665 | 50 | # (...) | ||
1666 | 51 | # nrpe-external-master: | ||
1667 | 52 | # interface: nrpe-external-master | ||
1668 | 53 | # scope: container | ||
1669 | 54 | # | ||
1670 | 55 | # and/or | ||
1671 | 56 | # | ||
1672 | 57 | # provides: | ||
1673 | 58 | # (...) | ||
1674 | 59 | # local-monitors: | ||
1675 | 60 | # interface: local-monitors | ||
1676 | 61 | # scope: container | ||
1677 | 62 | |||
1678 | 63 | # | ||
1679 | 64 | # 2. Add the following to config.yaml | ||
1680 | 65 | # | ||
1681 | 66 | # nagios_context: | ||
1682 | 67 | # default: "juju" | ||
1683 | 68 | # type: string | ||
1684 | 69 | # description: | | ||
1685 | 70 | # Used by the nrpe subordinate charms. | ||
1686 | 71 | # A string that will be prepended to instance name to set the host name | ||
1687 | 72 | # in nagios. So for instance the hostname would be something like: | ||
1688 | 73 | # juju-myservice-0 | ||
1689 | 74 | # If you're running multiple environments with the same services in them | ||
1690 | 75 | # this allows you to differentiate between them. | ||
1691 | 76 | # nagios_servicegroups: | ||
1692 | 77 | # default: "" | ||
1693 | 78 | # type: string | ||
1694 | 79 | # description: | | ||
1695 | 80 | # A comma-separated list of nagios servicegroups. | ||
1696 | 81 | # If left empty, the nagios_context will be used as the servicegroup | ||
1697 | 82 | # | ||
1698 | 83 | # 3. Add custom checks (Nagios plugins) to files/nrpe-external-master | ||
1699 | 84 | # | ||
1700 | 85 | # 4. Update your hooks.py with something like this: | ||
1701 | 86 | # | ||
1702 | 87 | # from charmsupport.nrpe import NRPE | ||
1703 | 88 | # (...) | ||
1704 | 89 | # def update_nrpe_config(): | ||
1705 | 90 | # nrpe_compat = NRPE() | ||
1706 | 91 | # nrpe_compat.add_check( | ||
1707 | 92 | # shortname = "myservice", | ||
1708 | 93 | # description = "Check MyService", | ||
1709 | 94 | # check_cmd = "check_http -w 2 -c 10 http://localhost" | ||
1710 | 95 | # ) | ||
1711 | 96 | # nrpe_compat.add_check( | ||
1712 | 97 | # "myservice_other", | ||
1713 | 98 | # "Check for widget failures", | ||
1714 | 99 | # check_cmd = "/srv/myapp/scripts/widget_check" | ||
1715 | 100 | # ) | ||
1716 | 101 | # nrpe_compat.write() | ||
1717 | 102 | # | ||
1718 | 103 | # def config_changed(): | ||
1719 | 104 | # (...) | ||
1720 | 105 | # update_nrpe_config() | ||
1721 | 106 | # | ||
1722 | 107 | # def nrpe_external_master_relation_changed(): | ||
1723 | 108 | # update_nrpe_config() | ||
1724 | 109 | # | ||
1725 | 110 | # def local_monitors_relation_changed(): | ||
1726 | 111 | # update_nrpe_config() | ||
1727 | 112 | # | ||
1728 | 113 | # 5. ln -s hooks.py nrpe-external-master-relation-changed | ||
1729 | 114 | # ln -s hooks.py local-monitors-relation-changed | ||
1730 | 115 | |||
1731 | 116 | |||
1732 | 117 | class CheckException(Exception): | ||
1733 | 118 | pass | ||
1734 | 119 | |||
1735 | 120 | |||
1736 | 121 | class Check(object): | ||
1737 | 122 | shortname_re = '[A-Za-z0-9-_]+$' | ||
1738 | 123 | service_template = (""" | ||
1739 | 124 | #--------------------------------------------------- | ||
1740 | 125 | # This file is Juju managed | ||
1741 | 126 | #--------------------------------------------------- | ||
1742 | 127 | define service {{ | ||
1743 | 128 | use active-service | ||
1744 | 129 | host_name {nagios_hostname} | ||
1745 | 130 | service_description {nagios_hostname}[{shortname}] """ | ||
1746 | 131 | """{description} | ||
1747 | 132 | check_command check_nrpe!{command} | ||
1748 | 133 | servicegroups {nagios_servicegroup} | ||
1749 | 134 | }} | ||
1750 | 135 | """) | ||
1751 | 136 | |||
1752 | 137 | def __init__(self, shortname, description, check_cmd): | ||
1753 | 138 | super(Check, self).__init__() | ||
1754 | 139 | # XXX: could be better to calculate this from the service name | ||
1755 | 140 | if not re.match(self.shortname_re, shortname): | ||
1756 | 141 | raise CheckException("shortname must match {}".format( | ||
1757 | 142 | Check.shortname_re)) | ||
1758 | 143 | self.shortname = shortname | ||
1759 | 144 | self.command = "check_{}".format(shortname) | ||
1760 | 145 | # Note: a set of invalid characters is defined by the | ||
1761 | 146 | # Nagios server config | ||
1762 | 147 | # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()= | ||
1763 | 148 | self.description = description | ||
1764 | 149 | self.check_cmd = self._locate_cmd(check_cmd) | ||
1765 | 150 | |||
1766 | 151 | def _locate_cmd(self, check_cmd): | ||
1767 | 152 | search_path = ( | ||
1768 | 153 | '/usr/lib/nagios/plugins', | ||
1769 | 154 | '/usr/local/lib/nagios/plugins', | ||
1770 | 155 | ) | ||
1771 | 156 | parts = shlex.split(check_cmd) | ||
1772 | 157 | for path in search_path: | ||
1773 | 158 | if os.path.exists(os.path.join(path, parts[0])): | ||
1774 | 159 | command = os.path.join(path, parts[0]) | ||
1775 | 160 | if len(parts) > 1: | ||
1776 | 161 | command += " " + " ".join(parts[1:]) | ||
1777 | 162 | return command | ||
1778 | 163 | log('Check command not found: {}'.format(parts[0])) | ||
1779 | 164 | return '' | ||
1780 | 165 | |||
1781 | 166 | def write(self, nagios_context, hostname, nagios_servicegroups): | ||
1782 | 167 | nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format( | ||
1783 | 168 | self.command) | ||
1784 | 169 | with open(nrpe_check_file, 'w') as nrpe_check_config: | ||
1785 | 170 | nrpe_check_config.write("# check {}\n".format(self.shortname)) | ||
1786 | 171 | nrpe_check_config.write("command[{}]={}\n".format( | ||
1787 | 172 | self.command, self.check_cmd)) | ||
1788 | 173 | |||
1789 | 174 | if not os.path.exists(NRPE.nagios_exportdir): | ||
1790 | 175 | log('Not writing service config as {} is not accessible'.format( | ||
1791 | 176 | NRPE.nagios_exportdir)) | ||
1792 | 177 | else: | ||
1793 | 178 | self.write_service_config(nagios_context, hostname, | ||
1794 | 179 | nagios_servicegroups) | ||
1795 | 180 | |||
1796 | 181 | def write_service_config(self, nagios_context, hostname, | ||
1797 | 182 | nagios_servicegroups): | ||
1798 | 183 | for f in os.listdir(NRPE.nagios_exportdir): | ||
1799 | 184 | if re.search('.*{}.cfg'.format(self.command), f): | ||
1800 | 185 | os.remove(os.path.join(NRPE.nagios_exportdir, f)) | ||
1801 | 186 | |||
1802 | 187 | templ_vars = { | ||
1803 | 188 | 'nagios_hostname': hostname, | ||
1804 | 189 | 'nagios_servicegroup': nagios_servicegroups, | ||
1805 | 190 | 'description': self.description, | ||
1806 | 191 | 'shortname': self.shortname, | ||
1807 | 192 | 'command': self.command, | ||
1808 | 193 | } | ||
1809 | 194 | nrpe_service_text = Check.service_template.format(**templ_vars) | ||
1810 | 195 | nrpe_service_file = '{}/service__{}_{}.cfg'.format( | ||
1811 | 196 | NRPE.nagios_exportdir, hostname, self.command) | ||
1812 | 197 | with open(nrpe_service_file, 'w') as nrpe_service_config: | ||
1813 | 198 | nrpe_service_config.write(str(nrpe_service_text)) | ||
1814 | 199 | |||
1815 | 200 | def run(self): | ||
1816 | 201 | subprocess.call(self.check_cmd) | ||
1817 | 202 | |||
1818 | 203 | |||
1819 | 204 | class NRPE(object): | ||
1820 | 205 | nagios_logdir = '/var/log/nagios' | ||
1821 | 206 | nagios_exportdir = '/var/lib/nagios/export' | ||
1822 | 207 | nrpe_confdir = '/etc/nagios/nrpe.d' | ||
1823 | 208 | |||
1824 | 209 | def __init__(self, hostname=None): | ||
1825 | 210 | super(NRPE, self).__init__() | ||
1826 | 211 | self.config = config() | ||
1827 | 212 | self.nagios_context = self.config['nagios_context'] | ||
1828 | 213 | if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']: | ||
1829 | 214 | self.nagios_servicegroups = self.config['nagios_servicegroups'] | ||
1830 | 215 | else: | ||
1831 | 216 | self.nagios_servicegroups = self.nagios_context | ||
1832 | 217 | self.unit_name = local_unit().replace('/', '-') | ||
1833 | 218 | if hostname: | ||
1834 | 219 | self.hostname = hostname | ||
1835 | 220 | else: | ||
1836 | 221 | self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) | ||
1837 | 222 | self.checks = [] | ||
1838 | 223 | |||
1839 | 224 | def add_check(self, *args, **kwargs): | ||
1840 | 225 | self.checks.append(Check(*args, **kwargs)) | ||
1841 | 226 | |||
1842 | 227 | def write(self): | ||
1843 | 228 | try: | ||
1844 | 229 | nagios_uid = pwd.getpwnam('nagios').pw_uid | ||
1845 | 230 | nagios_gid = grp.getgrnam('nagios').gr_gid | ||
1846 | 231 | except: | ||
1847 | 232 | log("Nagios user not set up, nrpe checks not updated") | ||
1848 | 233 | return | ||
1849 | 234 | |||
1850 | 235 | if not os.path.exists(NRPE.nagios_logdir): | ||
1851 | 236 | os.mkdir(NRPE.nagios_logdir) | ||
1852 | 237 | os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid) | ||
1853 | 238 | |||
1854 | 239 | nrpe_monitors = {} | ||
1855 | 240 | monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}} | ||
1856 | 241 | for nrpecheck in self.checks: | ||
1857 | 242 | nrpecheck.write(self.nagios_context, self.hostname, | ||
1858 | 243 | self.nagios_servicegroups) | ||
1859 | 244 | nrpe_monitors[nrpecheck.shortname] = { | ||
1860 | 245 | "command": nrpecheck.command, | ||
1861 | 246 | } | ||
1862 | 247 | |||
1863 | 248 | service('restart', 'nagios-nrpe-server') | ||
1864 | 249 | |||
1865 | 250 | monitor_ids = relation_ids("local-monitors") + \ | ||
1866 | 251 | relation_ids("nrpe-external-master") | ||
1867 | 252 | for rid in monitor_ids: | ||
1868 | 253 | relation_set(relation_id=rid, monitors=yaml.dump(monitors)) | ||
1869 | 254 | |||
1870 | 255 | |||
1871 | 256 | def get_nagios_hostcontext(relation_name='nrpe-external-master'): | ||
1872 | 257 | """ | ||
1873 | 258 | Query relation with nrpe subordinate, return the nagios_host_context | ||
1874 | 259 | |||
1875 | 260 | :param str relation_name: Name of relation nrpe sub joined to | ||
1876 | 261 | """ | ||
1877 | 262 | for rel in relations_of_type(relation_name): | ||
1878 | 263 | if 'nagios_hostname' in rel: | ||
1879 | 264 | return rel['nagios_host_context'] | ||
1880 | 265 | |||
1881 | 266 | |||
1882 | 267 | def get_nagios_hostname(relation_name='nrpe-external-master'): | ||
1883 | 268 | """ | ||
1884 | 269 | Query relation with nrpe subordinate, return the nagios_hostname | ||
1885 | 270 | |||
1886 | 271 | :param str relation_name: Name of relation nrpe sub joined to | ||
1887 | 272 | """ | ||
1888 | 273 | for rel in relations_of_type(relation_name): | ||
1889 | 274 | if 'nagios_hostname' in rel: | ||
1890 | 275 | return rel['nagios_hostname'] | ||
1891 | 276 | |||
1892 | 277 | |||
1893 | 278 | def get_nagios_unit_name(relation_name='nrpe-external-master'): | ||
1894 | 279 | """ | ||
1895 | 280 | Return the nagios unit name prepended with host_context if needed | ||
1896 | 281 | |||
1897 | 282 | :param str relation_name: Name of relation nrpe sub joined to | ||
1898 | 283 | """ | ||
1899 | 284 | host_context = get_nagios_hostcontext(relation_name) | ||
1900 | 285 | if host_context: | ||
1901 | 286 | unit = "%s:%s" % (host_context, local_unit()) | ||
1902 | 287 | else: | ||
1903 | 288 | unit = local_unit() | ||
1904 | 289 | return unit | ||
1905 | 290 | |||
1906 | 291 | |||
1907 | 292 | def add_init_service_checks(nrpe, services, unit_name): | ||
1908 | 293 | """ | ||
1909 | 294 | Add checks for each service in list | ||
1910 | 295 | |||
1911 | 296 | :param NRPE nrpe: NRPE object to add check to | ||
1912 | 297 | :param list services: List of services to check | ||
1913 | 298 | :param str unit_name: Unit name to use in check description | ||
1914 | 299 | """ | ||
1915 | 300 | for svc in services: | ||
1916 | 301 | upstart_init = '/etc/init/%s.conf' % svc | ||
1917 | 302 | sysv_init = '/etc/init.d/%s' % svc | ||
1918 | 303 | if os.path.exists(upstart_init): | ||
1919 | 304 | nrpe.add_check( | ||
1920 | 305 | shortname=svc, | ||
1921 | 306 | description='process check {%s}' % unit_name, | ||
1922 | 307 | check_cmd='check_upstart_job %s' % svc | ||
1923 | 308 | ) | ||
1924 | 309 | elif os.path.exists(sysv_init): | ||
1925 | 310 | cronpath = '/etc/cron.d/nagios-service-check-%s' % svc | ||
1926 | 311 | cron_file = ('*/5 * * * * root ' | ||
1927 | 312 | '/usr/local/lib/nagios/plugins/check_exit_status.pl ' | ||
1928 | 313 | '-s /etc/init.d/%s status > ' | ||
1929 | 314 | '/var/lib/nagios/service-check-%s.txt\n' % (svc, | ||
1930 | 315 | svc) | ||
1931 | 316 | ) | ||
1932 | 317 | f = open(cronpath, 'w') | ||
1933 | 318 | f.write(cron_file) | ||
1934 | 319 | f.close() | ||
1935 | 320 | nrpe.add_check( | ||
1936 | 321 | shortname=svc, | ||
1937 | 322 | description='process check {%s}' % unit_name, | ||
1938 | 323 | check_cmd='check_status_file.py -f ' | ||
1939 | 324 | '/var/lib/nagios/service-check-%s.txt' % svc, | ||
1940 | 325 | ) | ||
1941 | 326 | |||
1942 | 327 | |||
1943 | 328 | def copy_nrpe_checks(): | ||
1944 | 329 | """ | ||
1945 | 330 | Copy the nrpe checks into place | ||
1946 | 331 | |||
1947 | 332 | """ | ||
1948 | 333 | NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' | ||
1949 | 334 | nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks', | ||
1950 | 335 | 'charmhelpers', 'contrib', 'openstack', | ||
1951 | 336 | 'files') | ||
1952 | 337 | |||
1953 | 338 | if not os.path.exists(NAGIOS_PLUGINS): | ||
1954 | 339 | os.makedirs(NAGIOS_PLUGINS) | ||
1955 | 340 | for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")): | ||
1956 | 341 | if os.path.isfile(fname): | ||
1957 | 342 | shutil.copy2(fname, | ||
1958 | 343 | os.path.join(NAGIOS_PLUGINS, os.path.basename(fname))) | ||
1959 | 344 | |||
1960 | 345 | |||
1961 | 346 | def add_haproxy_checks(nrpe, unit_name): | ||
1962 | 347 | """ | ||
1963 | 348 | Add checks for each service in list | ||
1964 | 349 | |||
1965 | 350 | :param NRPE nrpe: NRPE object to add check to | ||
1966 | 351 | :param str unit_name: Unit name to use in check description | ||
1967 | 352 | """ | ||
1968 | 353 | nrpe.add_check( | ||
1969 | 354 | shortname='haproxy_servers', | ||
1970 | 355 | description='Check HAProxy {%s}' % unit_name, | ||
1971 | 356 | check_cmd='check_haproxy.sh') | ||
1972 | 357 | nrpe.add_check( | ||
1973 | 358 | shortname='haproxy_queue', | ||
1974 | 359 | description='Check HAProxy queue depth {%s}' % unit_name, | ||
1975 | 360 | check_cmd='check_haproxy_queue_depth.sh') | ||
1976 | 361 | 0 | ||
1977 | === removed file 'hooks/charmhelpers/contrib/charmsupport/volumes.py' | |||
1978 | --- hooks/charmhelpers/contrib/charmsupport/volumes.py 2015-07-29 18:07:31 +0000 | |||
1979 | +++ hooks/charmhelpers/contrib/charmsupport/volumes.py 1970-01-01 00:00:00 +0000 | |||
1980 | @@ -1,175 +0,0 @@ | |||
1981 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
1982 | 2 | # | ||
1983 | 3 | # This file is part of charm-helpers. | ||
1984 | 4 | # | ||
1985 | 5 | # charm-helpers is free software: you can redistribute it and/or modify | ||
1986 | 6 | # it under the terms of the GNU Lesser General Public License version 3 as | ||
1987 | 7 | # published by the Free Software Foundation. | ||
1988 | 8 | # | ||
1989 | 9 | # charm-helpers is distributed in the hope that it will be useful, | ||
1990 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
1991 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
1992 | 12 | # GNU Lesser General Public License for more details. | ||
1993 | 13 | # | ||
1994 | 14 | # You should have received a copy of the GNU Lesser General Public License | ||
1995 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | ||
1996 | 16 | |||
1997 | 17 | ''' | ||
1998 | 18 | Functions for managing volumes in juju units. One volume is supported per unit. | ||
1999 | 19 | Subordinates may have their own storage, provided it is on its own partition. | ||
2000 | 20 | |||
2001 | 21 | Configuration stanzas:: | ||
2002 | 22 | |||
2003 | 23 | volume-ephemeral: | ||
2004 | 24 | type: boolean | ||
2005 | 25 | default: true | ||
2006 | 26 | description: > | ||
2007 | 27 | If false, a volume is mounted as sepecified in "volume-map" | ||
2008 | 28 | If true, ephemeral storage will be used, meaning that log data | ||
2009 | 29 | will only exist as long as the machine. YOU HAVE BEEN WARNED. | ||
2010 | 30 | volume-map: | ||
2011 | 31 | type: string | ||
2012 | 32 | default: {} | ||
2013 | 33 | description: > | ||
2014 | 34 | YAML map of units to device names, e.g: | ||
2015 | 35 | "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }" | ||
2016 | 36 | Service units will raise a configure-error if volume-ephemeral | ||
2017 | 37 | is 'true' and no volume-map value is set. Use 'juju set' to set a | ||
2018 | 38 | value and 'juju resolved' to complete configuration. | ||
2019 | 39 | |||
2020 | 40 | Usage:: | ||
2021 | 41 | |||
2022 | 42 | from charmsupport.volumes import configure_volume, VolumeConfigurationError | ||
2023 | 43 | from charmsupport.hookenv import log, ERROR | ||
2024 | 44 | def post_mount_hook(): | ||
2025 | 45 | stop_service('myservice') | ||
2026 | 46 | def post_mount_hook(): | ||
2027 | 47 | start_service('myservice') | ||
2028 | 48 | |||
2029 | 49 | if __name__ == '__main__': | ||
2030 | 50 | try: | ||
2031 | 51 | configure_volume(before_change=pre_mount_hook, | ||
2032 | 52 | after_change=post_mount_hook) | ||
2033 | 53 | except VolumeConfigurationError: | ||
2034 | 54 | log('Storage could not be configured', ERROR) | ||
2035 | 55 | |||
2036 | 56 | ''' | ||
2037 | 57 | |||
2038 | 58 | # XXX: Known limitations | ||
2039 | 59 | # - fstab is neither consulted nor updated | ||
2040 | 60 | |||
2041 | 61 | import os | ||
2042 | 62 | from charmhelpers.core import hookenv | ||
2043 | 63 | from charmhelpers.core import host | ||
2044 | 64 | import yaml | ||
2045 | 65 | |||
2046 | 66 | |||
2047 | 67 | MOUNT_BASE = '/srv/juju/volumes' | ||
2048 | 68 | |||
2049 | 69 | |||
2050 | 70 | class VolumeConfigurationError(Exception): | ||
2051 | 71 | '''Volume configuration data is missing or invalid''' | ||
2052 | 72 | pass | ||
2053 | 73 | |||
2054 | 74 | |||
2055 | 75 | def get_config(): | ||
2056 | 76 | '''Gather and sanity-check volume configuration data''' | ||
2057 | 77 | volume_config = {} | ||
2058 | 78 | config = hookenv.config() | ||
2059 | 79 | |||
2060 | 80 | errors = False | ||
2061 | 81 | |||
2062 | 82 | if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'): | ||
2063 | 83 | volume_config['ephemeral'] = True | ||
2064 | 84 | else: | ||
2065 | 85 | volume_config['ephemeral'] = False | ||
2066 | 86 | |||
2067 | 87 | try: | ||
2068 | 88 | volume_map = yaml.safe_load(config.get('volume-map', '{}')) | ||
2069 | 89 | except yaml.YAMLError as e: | ||
2070 | 90 | hookenv.log("Error parsing YAML volume-map: {}".format(e), | ||
2071 | 91 | hookenv.ERROR) | ||
2072 | 92 | errors = True | ||
2073 | 93 | if volume_map is None: | ||
2074 | 94 | # probably an empty string | ||
2075 | 95 | volume_map = {} | ||
2076 | 96 | elif not isinstance(volume_map, dict): | ||
2077 | 97 | hookenv.log("Volume-map should be a dictionary, not {}".format( | ||
2078 | 98 | type(volume_map))) | ||
2079 | 99 | errors = True | ||
2080 | 100 | |||
2081 | 101 | volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME']) | ||
2082 | 102 | if volume_config['device'] and volume_config['ephemeral']: | ||
2083 | 103 | # asked for ephemeral storage but also defined a volume ID | ||
2084 | 104 | hookenv.log('A volume is defined for this unit, but ephemeral ' | ||
2085 | 105 | 'storage was requested', hookenv.ERROR) | ||
2086 | 106 | errors = True | ||
2087 | 107 | elif not volume_config['device'] and not volume_config['ephemeral']: | ||
2088 | 108 | # asked for permanent storage but did not define volume ID | ||
2089 | 109 | hookenv.log('Ephemeral storage was requested, but there is no volume ' | ||
2090 | 110 | 'defined for this unit.', hookenv.ERROR) | ||
2091 | 111 | errors = True | ||
2092 | 112 | |||
2093 | 113 | unit_mount_name = hookenv.local_unit().replace('/', '-') | ||
2094 | 114 | volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name) | ||
2095 | 115 | |||
2096 | 116 | if errors: | ||
2097 | 117 | return None | ||
2098 | 118 | return volume_config | ||
2099 | 119 | |||
2100 | 120 | |||
2101 | 121 | def mount_volume(config): | ||
2102 | 122 | if os.path.exists(config['mountpoint']): | ||
2103 | 123 | if not os.path.isdir(config['mountpoint']): | ||
2104 | 124 | hookenv.log('Not a directory: {}'.format(config['mountpoint'])) | ||
2105 | 125 | raise VolumeConfigurationError() | ||
2106 | 126 | else: | ||
2107 | 127 | host.mkdir(config['mountpoint']) | ||
2108 | 128 | if os.path.ismount(config['mountpoint']): | ||
2109 | 129 | unmount_volume(config) | ||
2110 | 130 | if not host.mount(config['device'], config['mountpoint'], persist=True): | ||
2111 | 131 | raise VolumeConfigurationError() | ||
2112 | 132 | |||
2113 | 133 | |||
2114 | 134 | def unmount_volume(config): | ||
2115 | 135 | if os.path.ismount(config['mountpoint']): | ||
2116 | 136 | if not host.umount(config['mountpoint'], persist=True): | ||
2117 | 137 | raise VolumeConfigurationError() | ||
2118 | 138 | |||
2119 | 139 | |||
2120 | 140 | def managed_mounts(): | ||
2121 | 141 | '''List of all mounted managed volumes''' | ||
2122 | 142 | return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts()) | ||
2123 | 143 | |||
2124 | 144 | |||
2125 | 145 | def configure_volume(before_change=lambda: None, after_change=lambda: None): | ||
2126 | 146 | '''Set up storage (or don't) according to the charm's volume configuration. | ||
2127 | 147 | Returns the mount point or "ephemeral". before_change and after_change | ||
2128 | 148 | are optional functions to be called if the volume configuration changes. | ||
2129 | 149 | ''' | ||
2130 | 150 | |||
2131 | 151 | config = get_config() | ||
2132 | 152 | if not config: | ||
2133 | 153 | hookenv.log('Failed to read volume configuration', hookenv.CRITICAL) | ||
2134 | 154 | raise VolumeConfigurationError() | ||
2135 | 155 | |||
2136 | 156 | if config['ephemeral']: | ||
2137 | 157 | if os.path.ismount(config['mountpoint']): | ||
2138 | 158 | before_change() | ||
2139 | 159 | unmount_volume(config) | ||
2140 | 160 | after_change() | ||
2141 | 161 | return 'ephemeral' | ||
2142 | 162 | else: | ||
2143 | 163 | # persistent storage | ||
2144 | 164 | if os.path.ismount(config['mountpoint']): | ||
2145 | 165 | mounts = dict(managed_mounts()) | ||
2146 | 166 | if mounts.get(config['mountpoint']) != config['device']: | ||
2147 | 167 | before_change() | ||
2148 | 168 | unmount_volume(config) | ||
2149 | 169 | mount_volume(config) | ||
2150 | 170 | after_change() | ||
2151 | 171 | else: | ||
2152 | 172 | before_change() | ||
2153 | 173 | mount_volume(config) | ||
2154 | 174 | after_change() | ||
2155 | 175 | return config['mountpoint'] | ||
2156 | 176 | 0 | ||
2157 | === removed directory 'hooks/charmhelpers/contrib/database' | |||
2158 | === removed file 'hooks/charmhelpers/contrib/database/__init__.py' | |||
2159 | === removed file 'hooks/charmhelpers/contrib/database/mysql.py' | |||
2160 | --- hooks/charmhelpers/contrib/database/mysql.py 2015-07-29 18:07:31 +0000 | |||
2161 | +++ hooks/charmhelpers/contrib/database/mysql.py 1970-01-01 00:00:00 +0000 | |||
2162 | @@ -1,412 +0,0 @@ | |||
2163 | 1 | """Helper for working with a MySQL database""" | ||
2164 | 2 | import json | ||
2165 | 3 | import re | ||
2166 | 4 | import sys | ||
2167 | 5 | import platform | ||
2168 | 6 | import os | ||
2169 | 7 | import glob | ||
2170 | 8 | |||
2171 | 9 | # from string import upper | ||
2172 | 10 | |||
2173 | 11 | from charmhelpers.core.host import ( | ||
2174 | 12 | mkdir, | ||
2175 | 13 | pwgen, | ||
2176 | 14 | write_file | ||
2177 | 15 | ) | ||
2178 | 16 | from charmhelpers.core.hookenv import ( | ||
2179 | 17 | config as config_get, | ||
2180 | 18 | relation_get, | ||
2181 | 19 | related_units, | ||
2182 | 20 | unit_get, | ||
2183 | 21 | log, | ||
2184 | 22 | DEBUG, | ||
2185 | 23 | INFO, | ||
2186 | 24 | WARNING, | ||
2187 | 25 | ) | ||
2188 | 26 | from charmhelpers.fetch import ( | ||
2189 | 27 | apt_install, | ||
2190 | 28 | apt_update, | ||
2191 | 29 | filter_installed_packages, | ||
2192 | 30 | ) | ||
2193 | 31 | from charmhelpers.contrib.peerstorage import ( | ||
2194 | 32 | peer_store, | ||
2195 | 33 | peer_retrieve, | ||
2196 | 34 | ) | ||
2197 | 35 | from charmhelpers.contrib.network.ip import get_host_ip | ||
2198 | 36 | |||
2199 | 37 | try: | ||
2200 | 38 | import MySQLdb | ||
2201 | 39 | except ImportError: | ||
2202 | 40 | apt_update(fatal=True) | ||
2203 | 41 | apt_install(filter_installed_packages(['python-mysqldb']), fatal=True) | ||
2204 | 42 | import MySQLdb | ||
2205 | 43 | |||
2206 | 44 | |||
2207 | 45 | class MySQLHelper(object): | ||
2208 | 46 | |||
2209 | 47 | def __init__(self, rpasswdf_template, upasswdf_template, host='localhost', | ||
2210 | 48 | migrate_passwd_to_peer_relation=True, | ||
2211 | 49 | delete_ondisk_passwd_file=True): | ||
2212 | 50 | self.host = host | ||
2213 | 51 | # Password file path templates | ||
2214 | 52 | self.root_passwd_file_template = rpasswdf_template | ||
2215 | 53 | self.user_passwd_file_template = upasswdf_template | ||
2216 | 54 | |||
2217 | 55 | self.migrate_passwd_to_peer_relation = migrate_passwd_to_peer_relation | ||
2218 | 56 | # If we migrate we have the option to delete local copy of root passwd | ||
2219 | 57 | self.delete_ondisk_passwd_file = delete_ondisk_passwd_file | ||
2220 | 58 | |||
2221 | 59 | def connect(self, user='root', password=None): | ||
2222 | 60 | log("Opening db connection for %s@%s" % (user, self.host), level=DEBUG) | ||
2223 | 61 | self.connection = MySQLdb.connect(user=user, host=self.host, | ||
2224 | 62 | passwd=password) | ||
2225 | 63 | |||
2226 | 64 | def database_exists(self, db_name): | ||
2227 | 65 | cursor = self.connection.cursor() | ||
2228 | 66 | try: | ||
2229 | 67 | cursor.execute("SHOW DATABASES") | ||
2230 | 68 | databases = [i[0] for i in cursor.fetchall()] | ||
2231 | 69 | finally: | ||
2232 | 70 | cursor.close() | ||
2233 | 71 | |||
2234 | 72 | return db_name in databases | ||
2235 | 73 | |||
2236 | 74 | def create_database(self, db_name): | ||
2237 | 75 | cursor = self.connection.cursor() | ||
2238 | 76 | try: | ||
2239 | 77 | cursor.execute("CREATE DATABASE {} CHARACTER SET UTF8" | ||
2240 | 78 | .format(db_name)) | ||
2241 | 79 | finally: | ||
2242 | 80 | cursor.close() | ||
2243 | 81 | |||
2244 | 82 | def grant_exists(self, db_name, db_user, remote_ip): | ||
2245 | 83 | cursor = self.connection.cursor() | ||
2246 | 84 | priv_string = "GRANT ALL PRIVILEGES ON `{}`.* " \ | ||
2247 | 85 | "TO '{}'@'{}'".format(db_name, db_user, remote_ip) | ||
2248 | 86 | try: | ||
2249 | 87 | cursor.execute("SHOW GRANTS for '{}'@'{}'".format(db_user, | ||
2250 | 88 | remote_ip)) | ||
2251 | 89 | grants = [i[0] for i in cursor.fetchall()] | ||
2252 | 90 | except MySQLdb.OperationalError: | ||
2253 | 91 | return False | ||
2254 | 92 | finally: | ||
2255 | 93 | cursor.close() | ||
2256 | 94 | |||
2257 | 95 | # TODO: review for different grants | ||
2258 | 96 | return priv_string in grants | ||
2259 | 97 | |||
2260 | 98 | def create_grant(self, db_name, db_user, remote_ip, password): | ||
2261 | 99 | cursor = self.connection.cursor() | ||
2262 | 100 | try: | ||
2263 | 101 | # TODO: review for different grants | ||
2264 | 102 | cursor.execute("GRANT ALL PRIVILEGES ON {}.* TO '{}'@'{}' " | ||
2265 | 103 | "IDENTIFIED BY '{}'".format(db_name, | ||
2266 | 104 | db_user, | ||
2267 | 105 | remote_ip, | ||
2268 | 106 | password)) | ||
2269 | 107 | finally: | ||
2270 | 108 | cursor.close() | ||
2271 | 109 | |||
2272 | 110 | def create_admin_grant(self, db_user, remote_ip, password): | ||
2273 | 111 | cursor = self.connection.cursor() | ||
2274 | 112 | try: | ||
2275 | 113 | cursor.execute("GRANT ALL PRIVILEGES ON *.* TO '{}'@'{}' " | ||
2276 | 114 | "IDENTIFIED BY '{}'".format(db_user, | ||
2277 | 115 | remote_ip, | ||
2278 | 116 | password)) | ||
2279 | 117 | finally: | ||
2280 | 118 | cursor.close() | ||
2281 | 119 | |||
2282 | 120 | def cleanup_grant(self, db_user, remote_ip): | ||
2283 | 121 | cursor = self.connection.cursor() | ||
2284 | 122 | try: | ||
2285 | 123 | cursor.execute("DROP FROM mysql.user WHERE user='{}' " | ||
2286 | 124 | "AND HOST='{}'".format(db_user, | ||
2287 | 125 | remote_ip)) | ||
2288 | 126 | finally: | ||
2289 | 127 | cursor.close() | ||
2290 | 128 | |||
2291 | 129 | def execute(self, sql): | ||
2292 | 130 | """Execute arbitary SQL against the database.""" | ||
2293 | 131 | cursor = self.connection.cursor() | ||
2294 | 132 | try: | ||
2295 | 133 | cursor.execute(sql) | ||
2296 | 134 | finally: | ||
2297 | 135 | cursor.close() | ||
2298 | 136 | |||
2299 | 137 | def migrate_passwords_to_peer_relation(self, excludes=None): | ||
2300 | 138 | """Migrate any passwords storage on disk to cluster peer relation.""" | ||
2301 | 139 | dirname = os.path.dirname(self.root_passwd_file_template) | ||
2302 | 140 | path = os.path.join(dirname, '*.passwd') | ||
2303 | 141 | for f in glob.glob(path): | ||
2304 | 142 | if excludes and f in excludes: | ||
2305 | 143 | log("Excluding %s from peer migration" % (f), level=DEBUG) | ||
2306 | 144 | continue | ||
2307 | 145 | |||
2308 | 146 | key = os.path.basename(f) | ||
2309 | 147 | with open(f, 'r') as passwd: | ||
2310 | 148 | _value = passwd.read().strip() | ||
2311 | 149 | |||
2312 | 150 | try: | ||
2313 | 151 | peer_store(key, _value) | ||
2314 | 152 | |||
2315 | 153 | if self.delete_ondisk_passwd_file: | ||
2316 | 154 | os.unlink(f) | ||
2317 | 155 | except ValueError: | ||
2318 | 156 | # NOTE cluster relation not yet ready - skip for now | ||
2319 | 157 | pass | ||
2320 | 158 | |||
2321 | 159 | def get_mysql_password_on_disk(self, username=None, password=None): | ||
2322 | 160 | """Retrieve, generate or store a mysql password for the provided | ||
2323 | 161 | username on disk.""" | ||
2324 | 162 | if username: | ||
2325 | 163 | template = self.user_passwd_file_template | ||
2326 | 164 | passwd_file = template.format(username) | ||
2327 | 165 | else: | ||
2328 | 166 | passwd_file = self.root_passwd_file_template | ||
2329 | 167 | |||
2330 | 168 | _password = None | ||
2331 | 169 | if os.path.exists(passwd_file): | ||
2332 | 170 | log("Using existing password file '%s'" % passwd_file, level=DEBUG) | ||
2333 | 171 | with open(passwd_file, 'r') as passwd: | ||
2334 | 172 | _password = passwd.read().strip() | ||
2335 | 173 | else: | ||
2336 | 174 | log("Generating new password file '%s'" % passwd_file, level=DEBUG) | ||
2337 | 175 | if not os.path.isdir(os.path.dirname(passwd_file)): | ||
2338 | 176 | # NOTE: need to ensure this is not mysql root dir (which needs | ||
2339 | 177 | # to be mysql readable) | ||
2340 | 178 | mkdir(os.path.dirname(passwd_file), owner='root', group='root', | ||
2341 | 179 | perms=0o770) | ||
2342 | 180 | # Force permissions - for some reason the chmod in makedirs | ||
2343 | 181 | # fails | ||
2344 | 182 | os.chmod(os.path.dirname(passwd_file), 0o770) | ||
2345 | 183 | |||
2346 | 184 | _password = password or pwgen(length=32) | ||
2347 | 185 | write_file(passwd_file, _password, owner='root', group='root', | ||
2348 | 186 | perms=0o660) | ||
2349 | 187 | |||
2350 | 188 | return _password | ||
2351 | 189 | |||
2352 | 190 | def passwd_keys(self, username): | ||
2353 | 191 | """Generator to return keys used to store passwords in peer store. | ||
2354 | 192 | |||
2355 | 193 | NOTE: we support both legacy and new format to support mysql | ||
2356 | 194 | charm prior to refactor. This is necessary to avoid LP 1451890. | ||
2357 | 195 | """ | ||
2358 | 196 | keys = [] | ||
2359 | 197 | if username == 'mysql': | ||
2360 | 198 | log("Bad username '%s'" % (username), level=WARNING) | ||
2361 | 199 | |||
2362 | 200 | if username: | ||
2363 | 201 | # IMPORTANT: *newer* format must be returned first | ||
2364 | 202 | keys.append('mysql-%s.passwd' % (username)) | ||
2365 | 203 | keys.append('%s.passwd' % (username)) | ||
2366 | 204 | else: | ||
2367 | 205 | keys.append('mysql.passwd') | ||
2368 | 206 | |||
2369 | 207 | for key in keys: | ||
2370 | 208 | yield key | ||
2371 | 209 | |||
2372 | 210 | def get_mysql_password(self, username=None, password=None): | ||
2373 | 211 | """Retrieve, generate or store a mysql password for the provided | ||
2374 | 212 | username using peer relation cluster.""" | ||
2375 | 213 | excludes = [] | ||
2376 | 214 | |||
2377 | 215 | # First check peer relation. | ||
2378 | 216 | try: | ||
2379 | 217 | for key in self.passwd_keys(username): | ||
2380 | 218 | _password = peer_retrieve(key) | ||
2381 | 219 | if _password: | ||
2382 | 220 | break | ||
2383 | 221 | |||
2384 | 222 | # If root password available don't update peer relation from local | ||
2385 | 223 | if _password and not username: | ||
2386 | 224 | excludes.append(self.root_passwd_file_template) | ||
2387 | 225 | |||
2388 | 226 | except ValueError: | ||
2389 | 227 | # cluster relation is not yet started; use on-disk | ||
2390 | 228 | _password = None | ||
2391 | 229 | |||
2392 | 230 | # If none available, generate new one | ||
2393 | 231 | if not _password: | ||
2394 | 232 | _password = self.get_mysql_password_on_disk(username, password) | ||
2395 | 233 | |||
2396 | 234 | # Put on wire if required | ||
2397 | 235 | if self.migrate_passwd_to_peer_relation: | ||
2398 | 236 | self.migrate_passwords_to_peer_relation(excludes=excludes) | ||
2399 | 237 | |||
2400 | 238 | return _password | ||
2401 | 239 | |||
2402 | 240 | def get_mysql_root_password(self, password=None): | ||
2403 | 241 | """Retrieve or generate mysql root password for service units.""" | ||
2404 | 242 | return self.get_mysql_password(username=None, password=password) | ||
2405 | 243 | |||
2406 | 244 | def normalize_address(self, hostname): | ||
2407 | 245 | """Ensure that address returned is an IP address (i.e. not fqdn)""" | ||
2408 | 246 | if config_get('prefer-ipv6'): | ||
2409 | 247 | # TODO: add support for ipv6 dns | ||
2410 | 248 | return hostname | ||
2411 | 249 | |||
2412 | 250 | if hostname != unit_get('private-address'): | ||
2413 | 251 | return get_host_ip(hostname, fallback=hostname) | ||
2414 | 252 | |||
2415 | 253 | # Otherwise assume localhost | ||
2416 | 254 | return '127.0.0.1' | ||
2417 | 255 | |||
2418 | 256 | def get_allowed_units(self, database, username, relation_id=None): | ||
2419 | 257 | """Get list of units with access grants for database with username. | ||
2420 | 258 | |||
2421 | 259 | This is typically used to provide shared-db relations with a list of | ||
2422 | 260 | which units have been granted access to the given database. | ||
2423 | 261 | """ | ||
2424 | 262 | self.connect(password=self.get_mysql_root_password()) | ||
2425 | 263 | allowed_units = set() | ||
2426 | 264 | for unit in related_units(relation_id): | ||
2427 | 265 | settings = relation_get(rid=relation_id, unit=unit) | ||
2428 | 266 | # First check for setting with prefix, then without | ||
2429 | 267 | for attr in ["%s_hostname" % (database), 'hostname']: | ||
2430 | 268 | hosts = settings.get(attr, None) | ||
2431 | 269 | if hosts: | ||
2432 | 270 | break | ||
2433 | 271 | |||
2434 | 272 | if hosts: | ||
2435 | 273 | # hostname can be json-encoded list of hostnames | ||
2436 | 274 | try: | ||
2437 | 275 | hosts = json.loads(hosts) | ||
2438 | 276 | except ValueError: | ||
2439 | 277 | hosts = [hosts] | ||
2440 | 278 | else: | ||
2441 | 279 | hosts = [settings['private-address']] | ||
2442 | 280 | |||
2443 | 281 | if hosts: | ||
2444 | 282 | for host in hosts: | ||
2445 | 283 | host = self.normalize_address(host) | ||
2446 | 284 | if self.grant_exists(database, username, host): | ||
2447 | 285 | log("Grant exists for host '%s' on db '%s'" % | ||
2448 | 286 | (host, database), level=DEBUG) | ||
2449 | 287 | if unit not in allowed_units: | ||
2450 | 288 | allowed_units.add(unit) | ||
2451 | 289 | else: | ||
2452 | 290 | log("Grant does NOT exist for host '%s' on db '%s'" % | ||
2453 | 291 | (host, database), level=DEBUG) | ||
2454 | 292 | else: | ||
2455 | 293 | log("No hosts found for grant check", level=INFO) | ||
2456 | 294 | |||
2457 | 295 | return allowed_units | ||
2458 | 296 | |||
2459 | 297 | def configure_db(self, hostname, database, username, admin=False): | ||
2460 | 298 | """Configure access to database for username from hostname.""" | ||
2461 | 299 | self.connect(password=self.get_mysql_root_password()) | ||
2462 | 300 | if not self.database_exists(database): | ||
2463 | 301 | self.create_database(database) | ||
2464 | 302 | |||
2465 | 303 | remote_ip = self.normalize_address(hostname) | ||
2466 | 304 | password = self.get_mysql_password(username) | ||
2467 | 305 | if not self.grant_exists(database, username, remote_ip): | ||
2468 | 306 | if not admin: | ||
2469 | 307 | self.create_grant(database, username, remote_ip, password) | ||
2470 | 308 | else: | ||
2471 | 309 | self.create_admin_grant(username, remote_ip, password) | ||
2472 | 310 | |||
2473 | 311 | return password | ||
2474 | 312 | |||
2475 | 313 | |||
2476 | 314 | class PerconaClusterHelper(object): | ||
2477 | 315 | |||
2478 | 316 | # Going for the biggest page size to avoid wasted bytes. | ||
2479 | 317 | # InnoDB page size is 16MB | ||
2480 | 318 | |||
2481 | 319 | DEFAULT_PAGE_SIZE = 16 * 1024 * 1024 | ||
2482 | 320 | DEFAULT_INNODB_BUFFER_FACTOR = 0.50 | ||
2483 | 321 | |||
2484 | 322 | def human_to_bytes(self, human): | ||
2485 | 323 | """Convert human readable configuration options to bytes.""" | ||
2486 | 324 | num_re = re.compile('^[0-9]+$') | ||
2487 | 325 | if num_re.match(human): | ||
2488 | 326 | return human | ||
2489 | 327 | |||
2490 | 328 | factors = { | ||
2491 | 329 | 'K': 1024, | ||
2492 | 330 | 'M': 1048576, | ||
2493 | 331 | 'G': 1073741824, | ||
2494 | 332 | 'T': 1099511627776 | ||
2495 | 333 | } | ||
2496 | 334 | modifier = human[-1] | ||
2497 | 335 | if modifier in factors: | ||
2498 | 336 | return int(human[:-1]) * factors[modifier] | ||
2499 | 337 | |||
2500 | 338 | if modifier == '%': | ||
2501 | 339 | total_ram = self.human_to_bytes(self.get_mem_total()) | ||
2502 | 340 | if self.is_32bit_system() and total_ram > self.sys_mem_limit(): | ||
2503 | 341 | total_ram = self.sys_mem_limit() | ||
2504 | 342 | factor = int(human[:-1]) * 0.01 | ||
2505 | 343 | pctram = total_ram * factor | ||
2506 | 344 | return int(pctram - (pctram % self.DEFAULT_PAGE_SIZE)) | ||
2507 | 345 | |||
2508 | 346 | raise ValueError("Can only convert K,M,G, or T") | ||
2509 | 347 | |||
2510 | 348 | def is_32bit_system(self): | ||
2511 | 349 | """Determine whether system is 32 or 64 bit.""" | ||
2512 | 350 | try: | ||
2513 | 351 | return sys.maxsize < 2 ** 32 | ||
2514 | 352 | except OverflowError: | ||
2515 | 353 | return False | ||
2516 | 354 | |||
2517 | 355 | def sys_mem_limit(self): | ||
2518 | 356 | """Determine the default memory limit for the current service unit.""" | ||
2519 | 357 | if platform.machine() in ['armv7l']: | ||
2520 | 358 | _mem_limit = self.human_to_bytes('2700M') # experimentally determined | ||
2521 | 359 | else: | ||
2522 | 360 | # Limit for x86 based 32bit systems | ||
2523 | 361 | _mem_limit = self.human_to_bytes('4G') | ||
2524 | 362 | |||
2525 | 363 | return _mem_limit | ||
2526 | 364 | |||
2527 | 365 | def get_mem_total(self): | ||
2528 | 366 | """Calculate the total memory in the current service unit.""" | ||
2529 | 367 | with open('/proc/meminfo') as meminfo_file: | ||
2530 | 368 | for line in meminfo_file: | ||
2531 | 369 | key, mem = line.split(':', 2) | ||
2532 | 370 | if key == 'MemTotal': | ||
2533 | 371 | mtot, modifier = mem.strip().split(' ') | ||
2534 | 372 | return '%s%s' % (mtot, modifier[0].upper()) | ||
2535 | 373 | |||
2536 | 374 | def parse_config(self): | ||
2537 | 375 | """Parse charm configuration and calculate values for config files.""" | ||
2538 | 376 | config = config_get() | ||
2539 | 377 | mysql_config = {} | ||
2540 | 378 | if 'max-connections' in config: | ||
2541 | 379 | mysql_config['max_connections'] = config['max-connections'] | ||
2542 | 380 | |||
2543 | 381 | if 'wait-timeout' in config: | ||
2544 | 382 | mysql_config['wait_timeout'] = config['wait-timeout'] | ||
2545 | 383 | |||
2546 | 384 | if 'innodb-flush-log-at-trx-commit' in config: | ||
2547 | 385 | mysql_config['innodb_flush_log_at_trx_commit'] = config['innodb-flush-log-at-trx-commit'] | ||
2548 | 386 | |||
2549 | 387 | # Set a sane default key_buffer size | ||
2550 | 388 | mysql_config['key_buffer'] = self.human_to_bytes('32M') | ||
2551 | 389 | total_memory = self.human_to_bytes(self.get_mem_total()) | ||
2552 | 390 | |||
2553 | 391 | dataset_bytes = config.get('dataset-size', None) | ||
2554 | 392 | innodb_buffer_pool_size = config.get('innodb-buffer-pool-size', None) | ||
2555 | 393 | |||
2556 | 394 | if innodb_buffer_pool_size: | ||
2557 | 395 | innodb_buffer_pool_size = self.human_to_bytes( | ||
2558 | 396 | innodb_buffer_pool_size) | ||
2559 | 397 | elif dataset_bytes: | ||
2560 | 398 | log("Option 'dataset-size' has been deprecated, please use" | ||
2561 | 399 | "innodb_buffer_pool_size option instead", level="WARN") | ||
2562 | 400 | innodb_buffer_pool_size = self.human_to_bytes( | ||
2563 | 401 | dataset_bytes) | ||
2564 | 402 | else: | ||
2565 | 403 | innodb_buffer_pool_size = int( | ||
2566 | 404 | total_memory * self.DEFAULT_INNODB_BUFFER_FACTOR) | ||
2567 | 405 | |||
2568 | 406 | if innodb_buffer_pool_size > total_memory: | ||
2569 | 407 | log("innodb_buffer_pool_size; {} is greater than system available memory:{}".format( | ||
2570 | 408 | innodb_buffer_pool_size, | ||
2571 | 409 | total_memory), level='WARN') | ||
2572 | 410 | |||
2573 | 411 | mysql_config['innodb_buffer_pool_size'] = innodb_buffer_pool_size | ||
2574 | 412 | return mysql_config | ||
2575 | 413 | 0 | ||
2576 | === modified file 'hooks/charmhelpers/contrib/network/ip.py' | |||
2577 | --- hooks/charmhelpers/contrib/network/ip.py 2015-05-19 21:31:00 +0000 | |||
2578 | +++ hooks/charmhelpers/contrib/network/ip.py 2016-05-19 03:33:34 +0000 | |||
2579 | @@ -23,7 +23,7 @@ | |||
2580 | 23 | from functools import partial | 23 | from functools import partial |
2581 | 24 | 24 | ||
2582 | 25 | from charmhelpers.core.hookenv import unit_get | 25 | from charmhelpers.core.hookenv import unit_get |
2584 | 26 | from charmhelpers.fetch import apt_install | 26 | from charmhelpers.fetch import apt_install, apt_update |
2585 | 27 | from charmhelpers.core.hookenv import ( | 27 | from charmhelpers.core.hookenv import ( |
2586 | 28 | log, | 28 | log, |
2587 | 29 | WARNING, | 29 | WARNING, |
2588 | @@ -32,13 +32,15 @@ | |||
2589 | 32 | try: | 32 | try: |
2590 | 33 | import netifaces | 33 | import netifaces |
2591 | 34 | except ImportError: | 34 | except ImportError: |
2593 | 35 | apt_install('python-netifaces') | 35 | apt_update(fatal=True) |
2594 | 36 | apt_install('python-netifaces', fatal=True) | ||
2595 | 36 | import netifaces | 37 | import netifaces |
2596 | 37 | 38 | ||
2597 | 38 | try: | 39 | try: |
2598 | 39 | import netaddr | 40 | import netaddr |
2599 | 40 | except ImportError: | 41 | except ImportError: |
2601 | 41 | apt_install('python-netaddr') | 42 | apt_update(fatal=True) |
2602 | 43 | apt_install('python-netaddr', fatal=True) | ||
2603 | 42 | import netaddr | 44 | import netaddr |
2604 | 43 | 45 | ||
2605 | 44 | 46 | ||
2606 | @@ -51,7 +53,7 @@ | |||
2607 | 51 | 53 | ||
2608 | 52 | 54 | ||
2609 | 53 | def no_ip_found_error_out(network): | 55 | def no_ip_found_error_out(network): |
2611 | 54 | errmsg = ("No IP address found in network: %s" % network) | 56 | errmsg = ("No IP address found in network(s): %s" % network) |
2612 | 55 | raise ValueError(errmsg) | 57 | raise ValueError(errmsg) |
2613 | 56 | 58 | ||
2614 | 57 | 59 | ||
2615 | @@ -59,7 +61,7 @@ | |||
2616 | 59 | """Get an IPv4 or IPv6 address within the network from the host. | 61 | """Get an IPv4 or IPv6 address within the network from the host. |
2617 | 60 | 62 | ||
2618 | 61 | :param network (str): CIDR presentation format. For example, | 63 | :param network (str): CIDR presentation format. For example, |
2620 | 62 | '192.168.1.0/24'. | 64 | '192.168.1.0/24'. Supports multiple networks as a space-delimited list. |
2621 | 63 | :param fallback (str): If no address is found, return fallback. | 65 | :param fallback (str): If no address is found, return fallback. |
2622 | 64 | :param fatal (boolean): If no address is found, fallback is not | 66 | :param fatal (boolean): If no address is found, fallback is not |
2623 | 65 | set and fatal is True then exit(1). | 67 | set and fatal is True then exit(1). |
2624 | @@ -73,24 +75,26 @@ | |||
2625 | 73 | else: | 75 | else: |
2626 | 74 | return None | 76 | return None |
2627 | 75 | 77 | ||
2638 | 76 | _validate_cidr(network) | 78 | networks = network.split() or [network] |
2639 | 77 | network = netaddr.IPNetwork(network) | 79 | for network in networks: |
2640 | 78 | for iface in netifaces.interfaces(): | 80 | _validate_cidr(network) |
2641 | 79 | addresses = netifaces.ifaddresses(iface) | 81 | network = netaddr.IPNetwork(network) |
2642 | 80 | if network.version == 4 and netifaces.AF_INET in addresses: | 82 | for iface in netifaces.interfaces(): |
2643 | 81 | addr = addresses[netifaces.AF_INET][0]['addr'] | 83 | addresses = netifaces.ifaddresses(iface) |
2644 | 82 | netmask = addresses[netifaces.AF_INET][0]['netmask'] | 84 | if network.version == 4 and netifaces.AF_INET in addresses: |
2645 | 83 | cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) | 85 | addr = addresses[netifaces.AF_INET][0]['addr'] |
2646 | 84 | if cidr in network: | 86 | netmask = addresses[netifaces.AF_INET][0]['netmask'] |
2647 | 85 | return str(cidr.ip) | 87 | cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) |
2648 | 88 | if cidr in network: | ||
2649 | 89 | return str(cidr.ip) | ||
2650 | 86 | 90 | ||
2658 | 87 | if network.version == 6 and netifaces.AF_INET6 in addresses: | 91 | if network.version == 6 and netifaces.AF_INET6 in addresses: |
2659 | 88 | for addr in addresses[netifaces.AF_INET6]: | 92 | for addr in addresses[netifaces.AF_INET6]: |
2660 | 89 | if not addr['addr'].startswith('fe80'): | 93 | if not addr['addr'].startswith('fe80'): |
2661 | 90 | cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], | 94 | cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], |
2662 | 91 | addr['netmask'])) | 95 | addr['netmask'])) |
2663 | 92 | if cidr in network: | 96 | if cidr in network: |
2664 | 93 | return str(cidr.ip) | 97 | return str(cidr.ip) |
2665 | 94 | 98 | ||
2666 | 95 | if fallback is not None: | 99 | if fallback is not None: |
2667 | 96 | return fallback | 100 | return fallback |
2668 | @@ -187,6 +191,15 @@ | |||
2669 | 187 | get_netmask_for_address = partial(_get_for_address, key='netmask') | 191 | get_netmask_for_address = partial(_get_for_address, key='netmask') |
2670 | 188 | 192 | ||
2671 | 189 | 193 | ||
2672 | 194 | def resolve_network_cidr(ip_address): | ||
2673 | 195 | ''' | ||
2674 | 196 | Resolves the full address cidr of an ip_address based on | ||
2675 | 197 | configured network interfaces | ||
2676 | 198 | ''' | ||
2677 | 199 | netmask = get_netmask_for_address(ip_address) | ||
2678 | 200 | return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr) | ||
2679 | 201 | |||
2680 | 202 | |||
2681 | 190 | def format_ipv6_addr(address): | 203 | def format_ipv6_addr(address): |
2682 | 191 | """If address is IPv6, wrap it in '[]' otherwise return None. | 204 | """If address is IPv6, wrap it in '[]' otherwise return None. |
2683 | 192 | 205 | ||
2684 | @@ -435,8 +448,12 @@ | |||
2685 | 435 | 448 | ||
2686 | 436 | rev = dns.reversename.from_address(address) | 449 | rev = dns.reversename.from_address(address) |
2687 | 437 | result = ns_query(rev) | 450 | result = ns_query(rev) |
2688 | 451 | |||
2689 | 438 | if not result: | 452 | if not result: |
2691 | 439 | return None | 453 | try: |
2692 | 454 | result = socket.gethostbyaddr(address)[0] | ||
2693 | 455 | except: | ||
2694 | 456 | return None | ||
2695 | 440 | else: | 457 | else: |
2696 | 441 | result = address | 458 | result = address |
2697 | 442 | 459 | ||
2698 | @@ -448,3 +465,18 @@ | |||
2699 | 448 | return result | 465 | return result |
2700 | 449 | else: | 466 | else: |
2701 | 450 | return result.split('.')[0] | 467 | return result.split('.')[0] |
2702 | 468 | |||
2703 | 469 | |||
2704 | 470 | def port_has_listener(address, port): | ||
2705 | 471 | """ | ||
2706 | 472 | Returns True if the address:port is open and being listened to, | ||
2707 | 473 | else False. | ||
2708 | 474 | |||
2709 | 475 | @param address: an IP address or hostname | ||
2710 | 476 | @param port: integer port | ||
2711 | 477 | |||
2712 | 478 | Note calls 'zc' via a subprocess shell | ||
2713 | 479 | """ | ||
2714 | 480 | cmd = ['nc', '-z', address, str(port)] | ||
2715 | 481 | result = subprocess.call(cmd) | ||
2716 | 482 | return not(bool(result)) | ||
2717 | 451 | 483 | ||
2718 | === modified file 'hooks/charmhelpers/contrib/network/ovs/__init__.py' | |||
2719 | --- hooks/charmhelpers/contrib/network/ovs/__init__.py 2015-05-19 21:31:00 +0000 | |||
2720 | +++ hooks/charmhelpers/contrib/network/ovs/__init__.py 2016-05-19 03:33:34 +0000 | |||
2721 | @@ -25,10 +25,14 @@ | |||
2722 | 25 | ) | 25 | ) |
2723 | 26 | 26 | ||
2724 | 27 | 27 | ||
2726 | 28 | def add_bridge(name): | 28 | def add_bridge(name, datapath_type=None): |
2727 | 29 | ''' Add the named bridge to openvswitch ''' | 29 | ''' Add the named bridge to openvswitch ''' |
2728 | 30 | log('Creating bridge {}'.format(name)) | 30 | log('Creating bridge {}'.format(name)) |
2730 | 31 | subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-br", name]) | 31 | cmd = ["ovs-vsctl", "--", "--may-exist", "add-br", name] |
2731 | 32 | if datapath_type is not None: | ||
2732 | 33 | cmd += ['--', 'set', 'bridge', name, | ||
2733 | 34 | 'datapath_type={}'.format(datapath_type)] | ||
2734 | 35 | subprocess.check_call(cmd) | ||
2735 | 32 | 36 | ||
2736 | 33 | 37 | ||
2737 | 34 | def del_bridge(name): | 38 | def del_bridge(name): |
2738 | 35 | 39 | ||
2739 | === modified file 'hooks/charmhelpers/contrib/network/ufw.py' | |||
2740 | --- hooks/charmhelpers/contrib/network/ufw.py 2015-07-29 18:07:31 +0000 | |||
2741 | +++ hooks/charmhelpers/contrib/network/ufw.py 2016-05-19 03:33:34 +0000 | |||
2742 | @@ -40,7 +40,9 @@ | |||
2743 | 40 | import re | 40 | import re |
2744 | 41 | import os | 41 | import os |
2745 | 42 | import subprocess | 42 | import subprocess |
2746 | 43 | |||
2747 | 43 | from charmhelpers.core import hookenv | 44 | from charmhelpers.core import hookenv |
2748 | 45 | from charmhelpers.core.kernel import modprobe, is_module_loaded | ||
2749 | 44 | 46 | ||
2750 | 45 | __author__ = "Felipe Reyes <felipe.reyes@canonical.com>" | 47 | __author__ = "Felipe Reyes <felipe.reyes@canonical.com>" |
2751 | 46 | 48 | ||
2752 | @@ -82,14 +84,11 @@ | |||
2753 | 82 | # do we have IPv6 in the machine? | 84 | # do we have IPv6 in the machine? |
2754 | 83 | if os.path.isdir('/proc/sys/net/ipv6'): | 85 | if os.path.isdir('/proc/sys/net/ipv6'): |
2755 | 84 | # is ip6tables kernel module loaded? | 86 | # is ip6tables kernel module loaded? |
2759 | 85 | lsmod = subprocess.check_output(['lsmod'], universal_newlines=True) | 87 | if not is_module_loaded('ip6_tables'): |
2757 | 86 | matches = re.findall('^ip6_tables[ ]+', lsmod, re.M) | ||
2758 | 87 | if len(matches) == 0: | ||
2760 | 88 | # ip6tables support isn't complete, let's try to load it | 88 | # ip6tables support isn't complete, let's try to load it |
2761 | 89 | try: | 89 | try: |
2765 | 90 | subprocess.check_output(['modprobe', 'ip6_tables'], | 90 | modprobe('ip6_tables') |
2766 | 91 | universal_newlines=True) | 91 | # great, we can load the module |
2764 | 92 | # great, we could load the module | ||
2767 | 93 | return True | 92 | return True |
2768 | 94 | except subprocess.CalledProcessError as ex: | 93 | except subprocess.CalledProcessError as ex: |
2769 | 95 | hookenv.log("Couldn't load ip6_tables module: %s" % ex.output, | 94 | hookenv.log("Couldn't load ip6_tables module: %s" % ex.output, |
2770 | 96 | 95 | ||
2771 | === modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py' | |||
2772 | --- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-07-29 18:07:31 +0000 | |||
2773 | +++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2016-05-19 03:33:34 +0000 | |||
2774 | @@ -14,12 +14,18 @@ | |||
2775 | 14 | # You should have received a copy of the GNU Lesser General Public License | 14 | # You should have received a copy of the GNU Lesser General Public License |
2776 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
2777 | 16 | 16 | ||
2778 | 17 | import logging | ||
2779 | 18 | import re | ||
2780 | 19 | import sys | ||
2781 | 17 | import six | 20 | import six |
2782 | 18 | from collections import OrderedDict | 21 | from collections import OrderedDict |
2783 | 19 | from charmhelpers.contrib.amulet.deployment import ( | 22 | from charmhelpers.contrib.amulet.deployment import ( |
2784 | 20 | AmuletDeployment | 23 | AmuletDeployment |
2785 | 21 | ) | 24 | ) |
2786 | 22 | 25 | ||
2787 | 26 | DEBUG = logging.DEBUG | ||
2788 | 27 | ERROR = logging.ERROR | ||
2789 | 28 | |||
2790 | 23 | 29 | ||
2791 | 24 | class OpenStackAmuletDeployment(AmuletDeployment): | 30 | class OpenStackAmuletDeployment(AmuletDeployment): |
2792 | 25 | """OpenStack amulet deployment. | 31 | """OpenStack amulet deployment. |
2793 | @@ -28,9 +34,12 @@ | |||
2794 | 28 | that is specifically for use by OpenStack charms. | 34 | that is specifically for use by OpenStack charms. |
2795 | 29 | """ | 35 | """ |
2796 | 30 | 36 | ||
2798 | 31 | def __init__(self, series=None, openstack=None, source=None, stable=True): | 37 | def __init__(self, series=None, openstack=None, source=None, |
2799 | 38 | stable=True, log_level=DEBUG): | ||
2800 | 32 | """Initialize the deployment environment.""" | 39 | """Initialize the deployment environment.""" |
2801 | 33 | super(OpenStackAmuletDeployment, self).__init__(series) | 40 | super(OpenStackAmuletDeployment, self).__init__(series) |
2802 | 41 | self.log = self.get_logger(level=log_level) | ||
2803 | 42 | self.log.info('OpenStackAmuletDeployment: init') | ||
2804 | 34 | self.openstack = openstack | 43 | self.openstack = openstack |
2805 | 35 | self.source = source | 44 | self.source = source |
2806 | 36 | self.stable = stable | 45 | self.stable = stable |
2807 | @@ -38,26 +47,55 @@ | |||
2808 | 38 | # out. | 47 | # out. |
2809 | 39 | self.current_next = "trusty" | 48 | self.current_next = "trusty" |
2810 | 40 | 49 | ||
2811 | 50 | def get_logger(self, name="deployment-logger", level=logging.DEBUG): | ||
2812 | 51 | """Get a logger object that will log to stdout.""" | ||
2813 | 52 | log = logging | ||
2814 | 53 | logger = log.getLogger(name) | ||
2815 | 54 | fmt = log.Formatter("%(asctime)s %(funcName)s " | ||
2816 | 55 | "%(levelname)s: %(message)s") | ||
2817 | 56 | |||
2818 | 57 | handler = log.StreamHandler(stream=sys.stdout) | ||
2819 | 58 | handler.setLevel(level) | ||
2820 | 59 | handler.setFormatter(fmt) | ||
2821 | 60 | |||
2822 | 61 | logger.addHandler(handler) | ||
2823 | 62 | logger.setLevel(level) | ||
2824 | 63 | |||
2825 | 64 | return logger | ||
2826 | 65 | |||
2827 | 41 | def _determine_branch_locations(self, other_services): | 66 | def _determine_branch_locations(self, other_services): |
2828 | 42 | """Determine the branch locations for the other services. | 67 | """Determine the branch locations for the other services. |
2829 | 43 | 68 | ||
2830 | 44 | Determine if the local branch being tested is derived from its | 69 | Determine if the local branch being tested is derived from its |
2831 | 45 | stable or next (dev) branch, and based on this, use the corresonding | 70 | stable or next (dev) branch, and based on this, use the corresonding |
2832 | 46 | stable or next branches for the other_services.""" | 71 | stable or next branches for the other_services.""" |
2834 | 47 | base_charms = ['mysql', 'mongodb'] | 72 | |
2835 | 73 | self.log.info('OpenStackAmuletDeployment: determine branch locations') | ||
2836 | 74 | |||
2837 | 75 | # Charms outside the lp:~openstack-charmers namespace | ||
2838 | 76 | base_charms = ['mysql', 'mongodb', 'nrpe'] | ||
2839 | 77 | |||
2840 | 78 | # Force these charms to current series even when using an older series. | ||
2841 | 79 | # ie. Use trusty/nrpe even when series is precise, as the P charm | ||
2842 | 80 | # does not possess the necessary external master config and hooks. | ||
2843 | 81 | force_series_current = ['nrpe'] | ||
2844 | 48 | 82 | ||
2845 | 49 | if self.series in ['precise', 'trusty']: | 83 | if self.series in ['precise', 'trusty']: |
2846 | 50 | base_series = self.series | 84 | base_series = self.series |
2847 | 51 | else: | 85 | else: |
2848 | 52 | base_series = self.current_next | 86 | base_series = self.current_next |
2849 | 53 | 87 | ||
2852 | 54 | if self.stable: | 88 | for svc in other_services: |
2853 | 55 | for svc in other_services: | 89 | if svc['name'] in force_series_current: |
2854 | 90 | base_series = self.current_next | ||
2855 | 91 | # If a location has been explicitly set, use it | ||
2856 | 92 | if svc.get('location'): | ||
2857 | 93 | continue | ||
2858 | 94 | if self.stable: | ||
2859 | 56 | temp = 'lp:charms/{}/{}' | 95 | temp = 'lp:charms/{}/{}' |
2860 | 57 | svc['location'] = temp.format(base_series, | 96 | svc['location'] = temp.format(base_series, |
2861 | 58 | svc['name']) | 97 | svc['name']) |
2864 | 59 | else: | 98 | else: |
2863 | 60 | for svc in other_services: | ||
2865 | 61 | if svc['name'] in base_charms: | 99 | if svc['name'] in base_charms: |
2866 | 62 | temp = 'lp:charms/{}/{}' | 100 | temp = 'lp:charms/{}/{}' |
2867 | 63 | svc['location'] = temp.format(base_series, | 101 | svc['location'] = temp.format(base_series, |
2868 | @@ -66,10 +104,13 @@ | |||
2869 | 66 | temp = 'lp:~openstack-charmers/charms/{}/{}/next' | 104 | temp = 'lp:~openstack-charmers/charms/{}/{}/next' |
2870 | 67 | svc['location'] = temp.format(self.current_next, | 105 | svc['location'] = temp.format(self.current_next, |
2871 | 68 | svc['name']) | 106 | svc['name']) |
2872 | 107 | |||
2873 | 69 | return other_services | 108 | return other_services |
2874 | 70 | 109 | ||
2875 | 71 | def _add_services(self, this_service, other_services): | 110 | def _add_services(self, this_service, other_services): |
2876 | 72 | """Add services to the deployment and set openstack-origin/source.""" | 111 | """Add services to the deployment and set openstack-origin/source.""" |
2877 | 112 | self.log.info('OpenStackAmuletDeployment: adding services') | ||
2878 | 113 | |||
2879 | 73 | other_services = self._determine_branch_locations(other_services) | 114 | other_services = self._determine_branch_locations(other_services) |
2880 | 74 | 115 | ||
2881 | 75 | super(OpenStackAmuletDeployment, self)._add_services(this_service, | 116 | super(OpenStackAmuletDeployment, self)._add_services(this_service, |
2882 | @@ -77,29 +118,105 @@ | |||
2883 | 77 | 118 | ||
2884 | 78 | services = other_services | 119 | services = other_services |
2885 | 79 | services.append(this_service) | 120 | services.append(this_service) |
2886 | 121 | |||
2887 | 122 | # Charms which should use the source config option | ||
2888 | 80 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', | 123 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', |
2893 | 81 | 'ceph-osd', 'ceph-radosgw'] | 124 | 'ceph-osd', 'ceph-radosgw', 'ceph-mon'] |
2894 | 82 | # Most OpenStack subordinate charms do not expose an origin option | 125 | |
2895 | 83 | # as that is controlled by the principle. | 126 | # Charms which can not use openstack-origin, ie. many subordinates |
2896 | 84 | ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch'] | 127 | no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', |
2897 | 128 | 'openvswitch-odl', 'neutron-api-odl', 'odl-controller', | ||
2898 | 129 | 'cinder-backup', 'nexentaedge-data', | ||
2899 | 130 | 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', | ||
2900 | 131 | 'cinder-nexentaedge', 'nexentaedge-mgmt'] | ||
2901 | 85 | 132 | ||
2902 | 86 | if self.openstack: | 133 | if self.openstack: |
2903 | 87 | for svc in services: | 134 | for svc in services: |
2905 | 88 | if svc['name'] not in use_source + ignore: | 135 | if svc['name'] not in use_source + no_origin: |
2906 | 89 | config = {'openstack-origin': self.openstack} | 136 | config = {'openstack-origin': self.openstack} |
2907 | 90 | self.d.configure(svc['name'], config) | 137 | self.d.configure(svc['name'], config) |
2908 | 91 | 138 | ||
2909 | 92 | if self.source: | 139 | if self.source: |
2910 | 93 | for svc in services: | 140 | for svc in services: |
2912 | 94 | if svc['name'] in use_source and svc['name'] not in ignore: | 141 | if svc['name'] in use_source and svc['name'] not in no_origin: |
2913 | 95 | config = {'source': self.source} | 142 | config = {'source': self.source} |
2914 | 96 | self.d.configure(svc['name'], config) | 143 | self.d.configure(svc['name'], config) |
2915 | 97 | 144 | ||
2916 | 98 | def _configure_services(self, configs): | 145 | def _configure_services(self, configs): |
2917 | 99 | """Configure all of the services.""" | 146 | """Configure all of the services.""" |
2918 | 147 | self.log.info('OpenStackAmuletDeployment: configure services') | ||
2919 | 100 | for service, config in six.iteritems(configs): | 148 | for service, config in six.iteritems(configs): |
2920 | 101 | self.d.configure(service, config) | 149 | self.d.configure(service, config) |
2921 | 102 | 150 | ||
2922 | 151 | def _auto_wait_for_status(self, message=None, exclude_services=None, | ||
2923 | 152 | include_only=None, timeout=1800): | ||
2924 | 153 | """Wait for all units to have a specific extended status, except | ||
2925 | 154 | for any defined as excluded. Unless specified via message, any | ||
2926 | 155 | status containing any case of 'ready' will be considered a match. | ||
2927 | 156 | |||
2928 | 157 | Examples of message usage: | ||
2929 | 158 | |||
2930 | 159 | Wait for all unit status to CONTAIN any case of 'ready' or 'ok': | ||
2931 | 160 | message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE) | ||
2932 | 161 | |||
2933 | 162 | Wait for all units to reach this status (exact match): | ||
2934 | 163 | message = re.compile('^Unit is ready and clustered$') | ||
2935 | 164 | |||
2936 | 165 | Wait for all units to reach any one of these (exact match): | ||
2937 | 166 | message = re.compile('Unit is ready|OK|Ready') | ||
2938 | 167 | |||
2939 | 168 | Wait for at least one unit to reach this status (exact match): | ||
2940 | 169 | message = {'ready'} | ||
2941 | 170 | |||
2942 | 171 | See Amulet's sentry.wait_for_messages() for message usage detail. | ||
2943 | 172 | https://github.com/juju/amulet/blob/master/amulet/sentry.py | ||
2944 | 173 | |||
2945 | 174 | :param message: Expected status match | ||
2946 | 175 | :param exclude_services: List of juju service names to ignore, | ||
2947 | 176 | not to be used in conjuction with include_only. | ||
2948 | 177 | :param include_only: List of juju service names to exclusively check, | ||
2949 | 178 | not to be used in conjuction with exclude_services. | ||
2950 | 179 | :param timeout: Maximum time in seconds to wait for status match | ||
2951 | 180 | :returns: None. Raises if timeout is hit. | ||
2952 | 181 | """ | ||
2953 | 182 | self.log.info('Waiting for extended status on units...') | ||
2954 | 183 | |||
2955 | 184 | all_services = self.d.services.keys() | ||
2956 | 185 | |||
2957 | 186 | if exclude_services and include_only: | ||
2958 | 187 | raise ValueError('exclude_services can not be used ' | ||
2959 | 188 | 'with include_only') | ||
2960 | 189 | |||
2961 | 190 | if message: | ||
2962 | 191 | if isinstance(message, re._pattern_type): | ||
2963 | 192 | match = message.pattern | ||
2964 | 193 | else: | ||
2965 | 194 | match = message | ||
2966 | 195 | |||
2967 | 196 | self.log.debug('Custom extended status wait match: ' | ||
2968 | 197 | '{}'.format(match)) | ||
2969 | 198 | else: | ||
2970 | 199 | self.log.debug('Default extended status wait match: contains ' | ||
2971 | 200 | 'READY (case-insensitive)') | ||
2972 | 201 | message = re.compile('.*ready.*', re.IGNORECASE) | ||
2973 | 202 | |||
2974 | 203 | if exclude_services: | ||
2975 | 204 | self.log.debug('Excluding services from extended status match: ' | ||
2976 | 205 | '{}'.format(exclude_services)) | ||
2977 | 206 | else: | ||
2978 | 207 | exclude_services = [] | ||
2979 | 208 | |||
2980 | 209 | if include_only: | ||
2981 | 210 | services = include_only | ||
2982 | 211 | else: | ||
2983 | 212 | services = list(set(all_services) - set(exclude_services)) | ||
2984 | 213 | |||
2985 | 214 | self.log.debug('Waiting up to {}s for extended status on services: ' | ||
2986 | 215 | '{}'.format(timeout, services)) | ||
2987 | 216 | service_messages = {service: message for service in services} | ||
2988 | 217 | self.d.sentry.wait_for_messages(service_messages, timeout=timeout) | ||
2989 | 218 | self.log.info('OK') | ||
2990 | 219 | |||
2991 | 103 | def _get_openstack_release(self): | 220 | def _get_openstack_release(self): |
2992 | 104 | """Get openstack release. | 221 | """Get openstack release. |
2993 | 105 | 222 | ||
2994 | @@ -111,7 +228,8 @@ | |||
2995 | 111 | self.precise_havana, self.precise_icehouse, | 228 | self.precise_havana, self.precise_icehouse, |
2996 | 112 | self.trusty_icehouse, self.trusty_juno, self.utopic_juno, | 229 | self.trusty_icehouse, self.trusty_juno, self.utopic_juno, |
2997 | 113 | self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, | 230 | self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, |
2999 | 114 | self.wily_liberty) = range(12) | 231 | self.wily_liberty, self.trusty_mitaka, |
3000 | 232 | self.xenial_mitaka) = range(14) | ||
3001 | 115 | 233 | ||
3002 | 116 | releases = { | 234 | releases = { |
3003 | 117 | ('precise', None): self.precise_essex, | 235 | ('precise', None): self.precise_essex, |
3004 | @@ -123,9 +241,11 @@ | |||
3005 | 123 | ('trusty', 'cloud:trusty-juno'): self.trusty_juno, | 241 | ('trusty', 'cloud:trusty-juno'): self.trusty_juno, |
3006 | 124 | ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, | 242 | ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, |
3007 | 125 | ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, | 243 | ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, |
3008 | 244 | ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, | ||
3009 | 126 | ('utopic', None): self.utopic_juno, | 245 | ('utopic', None): self.utopic_juno, |
3010 | 127 | ('vivid', None): self.vivid_kilo, | 246 | ('vivid', None): self.vivid_kilo, |
3012 | 128 | ('wily', None): self.wily_liberty} | 247 | ('wily', None): self.wily_liberty, |
3013 | 248 | ('xenial', None): self.xenial_mitaka} | ||
3014 | 129 | return releases[(self.series, self.openstack)] | 249 | return releases[(self.series, self.openstack)] |
3015 | 130 | 250 | ||
3016 | 131 | def _get_openstack_release_string(self): | 251 | def _get_openstack_release_string(self): |
3017 | @@ -142,6 +262,7 @@ | |||
3018 | 142 | ('utopic', 'juno'), | 262 | ('utopic', 'juno'), |
3019 | 143 | ('vivid', 'kilo'), | 263 | ('vivid', 'kilo'), |
3020 | 144 | ('wily', 'liberty'), | 264 | ('wily', 'liberty'), |
3021 | 265 | ('xenial', 'mitaka'), | ||
3022 | 145 | ]) | 266 | ]) |
3023 | 146 | if self.openstack: | 267 | if self.openstack: |
3024 | 147 | os_origin = self.openstack.split(':')[1] | 268 | os_origin = self.openstack.split(':')[1] |
3025 | 148 | 269 | ||
3026 | === modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py' | |||
3027 | --- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-07-29 18:07:31 +0000 | |||
3028 | +++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2016-05-19 03:33:34 +0000 | |||
3029 | @@ -18,6 +18,7 @@ | |||
3030 | 18 | import json | 18 | import json |
3031 | 19 | import logging | 19 | import logging |
3032 | 20 | import os | 20 | import os |
3033 | 21 | import re | ||
3034 | 21 | import six | 22 | import six |
3035 | 22 | import time | 23 | import time |
3036 | 23 | import urllib | 24 | import urllib |
3037 | @@ -26,7 +27,12 @@ | |||
3038 | 26 | import glanceclient.v1.client as glance_client | 27 | import glanceclient.v1.client as glance_client |
3039 | 27 | import heatclient.v1.client as heat_client | 28 | import heatclient.v1.client as heat_client |
3040 | 28 | import keystoneclient.v2_0 as keystone_client | 29 | import keystoneclient.v2_0 as keystone_client |
3042 | 29 | import novaclient.v1_1.client as nova_client | 30 | from keystoneclient.auth.identity import v3 as keystone_id_v3 |
3043 | 31 | from keystoneclient import session as keystone_session | ||
3044 | 32 | from keystoneclient.v3 import client as keystone_client_v3 | ||
3045 | 33 | |||
3046 | 34 | import novaclient.client as nova_client | ||
3047 | 35 | import pika | ||
3048 | 30 | import swiftclient | 36 | import swiftclient |
3049 | 31 | 37 | ||
3050 | 32 | from charmhelpers.contrib.amulet.utils import ( | 38 | from charmhelpers.contrib.amulet.utils import ( |
3051 | @@ -36,6 +42,8 @@ | |||
3052 | 36 | DEBUG = logging.DEBUG | 42 | DEBUG = logging.DEBUG |
3053 | 37 | ERROR = logging.ERROR | 43 | ERROR = logging.ERROR |
3054 | 38 | 44 | ||
3055 | 45 | NOVA_CLIENT_VERSION = "2" | ||
3056 | 46 | |||
3057 | 39 | 47 | ||
3058 | 40 | class OpenStackAmuletUtils(AmuletUtils): | 48 | class OpenStackAmuletUtils(AmuletUtils): |
3059 | 41 | """OpenStack amulet utilities. | 49 | """OpenStack amulet utilities. |
3060 | @@ -137,7 +145,7 @@ | |||
3061 | 137 | return "role {} does not exist".format(e['name']) | 145 | return "role {} does not exist".format(e['name']) |
3062 | 138 | return ret | 146 | return ret |
3063 | 139 | 147 | ||
3065 | 140 | def validate_user_data(self, expected, actual): | 148 | def validate_user_data(self, expected, actual, api_version=None): |
3066 | 141 | """Validate user data. | 149 | """Validate user data. |
3067 | 142 | 150 | ||
3068 | 143 | Validate a list of actual user data vs a list of expected user | 151 | Validate a list of actual user data vs a list of expected user |
3069 | @@ -148,10 +156,15 @@ | |||
3070 | 148 | for e in expected: | 156 | for e in expected: |
3071 | 149 | found = False | 157 | found = False |
3072 | 150 | for act in actual: | 158 | for act in actual: |
3077 | 151 | a = {'enabled': act.enabled, 'name': act.name, | 159 | if e['name'] == act.name: |
3078 | 152 | 'email': act.email, 'tenantId': act.tenantId, | 160 | a = {'enabled': act.enabled, 'name': act.name, |
3079 | 153 | 'id': act.id} | 161 | 'email': act.email, 'id': act.id} |
3080 | 154 | if e['name'] == a['name']: | 162 | if api_version == 3: |
3081 | 163 | a['default_project_id'] = getattr(act, | ||
3082 | 164 | 'default_project_id', | ||
3083 | 165 | 'none') | ||
3084 | 166 | else: | ||
3085 | 167 | a['tenantId'] = act.tenantId | ||
3086 | 155 | found = True | 168 | found = True |
3087 | 156 | ret = self._validate_dict_data(e, a) | 169 | ret = self._validate_dict_data(e, a) |
3088 | 157 | if ret: | 170 | if ret: |
3089 | @@ -186,15 +199,30 @@ | |||
3090 | 186 | return cinder_client.Client(username, password, tenant, ept) | 199 | return cinder_client.Client(username, password, tenant, ept) |
3091 | 187 | 200 | ||
3092 | 188 | def authenticate_keystone_admin(self, keystone_sentry, user, password, | 201 | def authenticate_keystone_admin(self, keystone_sentry, user, password, |
3094 | 189 | tenant): | 202 | tenant=None, api_version=None, |
3095 | 203 | keystone_ip=None): | ||
3096 | 190 | """Authenticates admin user with the keystone admin endpoint.""" | 204 | """Authenticates admin user with the keystone admin endpoint.""" |
3097 | 191 | self.log.debug('Authenticating keystone admin...') | 205 | self.log.debug('Authenticating keystone admin...') |
3098 | 192 | unit = keystone_sentry | 206 | unit = keystone_sentry |
3104 | 193 | service_ip = unit.relation('shared-db', | 207 | if not keystone_ip: |
3105 | 194 | 'mysql:shared-db')['private-address'] | 208 | keystone_ip = unit.relation('shared-db', |
3106 | 195 | ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) | 209 | 'mysql:shared-db')['private-address'] |
3107 | 196 | return keystone_client.Client(username=user, password=password, | 210 | base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8')) |
3108 | 197 | tenant_name=tenant, auth_url=ep) | 211 | if not api_version or api_version == 2: |
3109 | 212 | ep = base_ep + "/v2.0" | ||
3110 | 213 | return keystone_client.Client(username=user, password=password, | ||
3111 | 214 | tenant_name=tenant, auth_url=ep) | ||
3112 | 215 | else: | ||
3113 | 216 | ep = base_ep + "/v3" | ||
3114 | 217 | auth = keystone_id_v3.Password( | ||
3115 | 218 | user_domain_name='admin_domain', | ||
3116 | 219 | username=user, | ||
3117 | 220 | password=password, | ||
3118 | 221 | domain_name='admin_domain', | ||
3119 | 222 | auth_url=ep, | ||
3120 | 223 | ) | ||
3121 | 224 | sess = keystone_session.Session(auth=auth) | ||
3122 | 225 | return keystone_client_v3.Client(session=sess) | ||
3123 | 198 | 226 | ||
3124 | 199 | def authenticate_keystone_user(self, keystone, user, password, tenant): | 227 | def authenticate_keystone_user(self, keystone, user, password, tenant): |
3125 | 200 | """Authenticates a regular user with the keystone public endpoint.""" | 228 | """Authenticates a regular user with the keystone public endpoint.""" |
3126 | @@ -223,7 +251,8 @@ | |||
3127 | 223 | self.log.debug('Authenticating nova user ({})...'.format(user)) | 251 | self.log.debug('Authenticating nova user ({})...'.format(user)) |
3128 | 224 | ep = keystone.service_catalog.url_for(service_type='identity', | 252 | ep = keystone.service_catalog.url_for(service_type='identity', |
3129 | 225 | endpoint_type='publicURL') | 253 | endpoint_type='publicURL') |
3131 | 226 | return nova_client.Client(username=user, api_key=password, | 254 | return nova_client.Client(NOVA_CLIENT_VERSION, |
3132 | 255 | username=user, api_key=password, | ||
3133 | 227 | project_id=tenant, auth_url=ep) | 256 | project_id=tenant, auth_url=ep) |
3134 | 228 | 257 | ||
3135 | 229 | def authenticate_swift_user(self, keystone, user, password, tenant): | 258 | def authenticate_swift_user(self, keystone, user, password, tenant): |
3136 | @@ -602,3 +631,382 @@ | |||
3137 | 602 | self.log.debug('Ceph {} samples (OK): ' | 631 | self.log.debug('Ceph {} samples (OK): ' |
3138 | 603 | '{}'.format(sample_type, samples)) | 632 | '{}'.format(sample_type, samples)) |
3139 | 604 | return None | 633 | return None |
3140 | 634 | |||
3141 | 635 | # rabbitmq/amqp specific helpers: | ||
3142 | 636 | |||
3143 | 637 | def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200): | ||
3144 | 638 | """Wait for rmq units extended status to show cluster readiness, | ||
3145 | 639 | after an optional initial sleep period. Initial sleep is likely | ||
3146 | 640 | necessary to be effective following a config change, as status | ||
3147 | 641 | message may not instantly update to non-ready.""" | ||
3148 | 642 | |||
3149 | 643 | if init_sleep: | ||
3150 | 644 | time.sleep(init_sleep) | ||
3151 | 645 | |||
3152 | 646 | message = re.compile('^Unit is ready and clustered$') | ||
3153 | 647 | deployment._auto_wait_for_status(message=message, | ||
3154 | 648 | timeout=timeout, | ||
3155 | 649 | include_only=['rabbitmq-server']) | ||
3156 | 650 | |||
3157 | 651 | def add_rmq_test_user(self, sentry_units, | ||
3158 | 652 | username="testuser1", password="changeme"): | ||
3159 | 653 | """Add a test user via the first rmq juju unit, check connection as | ||
3160 | 654 | the new user against all sentry units. | ||
3161 | 655 | |||
3162 | 656 | :param sentry_units: list of sentry unit pointers | ||
3163 | 657 | :param username: amqp user name, default to testuser1 | ||
3164 | 658 | :param password: amqp user password | ||
3165 | 659 | :returns: None if successful. Raise on error. | ||
3166 | 660 | """ | ||
3167 | 661 | self.log.debug('Adding rmq user ({})...'.format(username)) | ||
3168 | 662 | |||
3169 | 663 | # Check that user does not already exist | ||
3170 | 664 | cmd_user_list = 'rabbitmqctl list_users' | ||
3171 | 665 | output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) | ||
3172 | 666 | if username in output: | ||
3173 | 667 | self.log.warning('User ({}) already exists, returning ' | ||
3174 | 668 | 'gracefully.'.format(username)) | ||
3175 | 669 | return | ||
3176 | 670 | |||
3177 | 671 | perms = '".*" ".*" ".*"' | ||
3178 | 672 | cmds = ['rabbitmqctl add_user {} {}'.format(username, password), | ||
3179 | 673 | 'rabbitmqctl set_permissions {} {}'.format(username, perms)] | ||
3180 | 674 | |||
3181 | 675 | # Add user via first unit | ||
3182 | 676 | for cmd in cmds: | ||
3183 | 677 | output, _ = self.run_cmd_unit(sentry_units[0], cmd) | ||
3184 | 678 | |||
3185 | 679 | # Check connection against the other sentry_units | ||
3186 | 680 | self.log.debug('Checking user connect against units...') | ||
3187 | 681 | for sentry_unit in sentry_units: | ||
3188 | 682 | connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, | ||
3189 | 683 | username=username, | ||
3190 | 684 | password=password) | ||
3191 | 685 | connection.close() | ||
3192 | 686 | |||
3193 | 687 | def delete_rmq_test_user(self, sentry_units, username="testuser1"): | ||
3194 | 688 | """Delete a rabbitmq user via the first rmq juju unit. | ||
3195 | 689 | |||
3196 | 690 | :param sentry_units: list of sentry unit pointers | ||
3197 | 691 | :param username: amqp user name, default to testuser1 | ||
3198 | 692 | :param password: amqp user password | ||
3199 | 693 | :returns: None if successful or no such user. | ||
3200 | 694 | """ | ||
3201 | 695 | self.log.debug('Deleting rmq user ({})...'.format(username)) | ||
3202 | 696 | |||
3203 | 697 | # Check that the user exists | ||
3204 | 698 | cmd_user_list = 'rabbitmqctl list_users' | ||
3205 | 699 | output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) | ||
3206 | 700 | |||
3207 | 701 | if username not in output: | ||
3208 | 702 | self.log.warning('User ({}) does not exist, returning ' | ||
3209 | 703 | 'gracefully.'.format(username)) | ||
3210 | 704 | return | ||
3211 | 705 | |||
3212 | 706 | # Delete the user | ||
3213 | 707 | cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) | ||
3214 | 708 | output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) | ||
3215 | 709 | |||
3216 | 710 | def get_rmq_cluster_status(self, sentry_unit): | ||
3217 | 711 | """Execute rabbitmq cluster status command on a unit and return | ||
3218 | 712 | the full output. | ||
3219 | 713 | |||
3220 | 714 | :param unit: sentry unit | ||
3221 | 715 | :returns: String containing console output of cluster status command | ||
3222 | 716 | """ | ||
3223 | 717 | cmd = 'rabbitmqctl cluster_status' | ||
3224 | 718 | output, _ = self.run_cmd_unit(sentry_unit, cmd) | ||
3225 | 719 | self.log.debug('{} cluster_status:\n{}'.format( | ||
3226 | 720 | sentry_unit.info['unit_name'], output)) | ||
3227 | 721 | return str(output) | ||
3228 | 722 | |||
3229 | 723 | def get_rmq_cluster_running_nodes(self, sentry_unit): | ||
3230 | 724 | """Parse rabbitmqctl cluster_status output string, return list of | ||
3231 | 725 | running rabbitmq cluster nodes. | ||
3232 | 726 | |||
3233 | 727 | :param unit: sentry unit | ||
3234 | 728 | :returns: List containing node names of running nodes | ||
3235 | 729 | """ | ||
3236 | 730 | # NOTE(beisner): rabbitmqctl cluster_status output is not | ||
3237 | 731 | # json-parsable, do string chop foo, then json.loads that. | ||
3238 | 732 | str_stat = self.get_rmq_cluster_status(sentry_unit) | ||
3239 | 733 | if 'running_nodes' in str_stat: | ||
3240 | 734 | pos_start = str_stat.find("{running_nodes,") + 15 | ||
3241 | 735 | pos_end = str_stat.find("]},", pos_start) + 1 | ||
3242 | 736 | str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') | ||
3243 | 737 | run_nodes = json.loads(str_run_nodes) | ||
3244 | 738 | return run_nodes | ||
3245 | 739 | else: | ||
3246 | 740 | return [] | ||
3247 | 741 | |||
3248 | 742 | def validate_rmq_cluster_running_nodes(self, sentry_units): | ||
3249 | 743 | """Check that all rmq unit hostnames are represented in the | ||
3250 | 744 | cluster_status output of all units. | ||
3251 | 745 | |||
3252 | 746 | :param host_names: dict of juju unit names to host names | ||
3253 | 747 | :param units: list of sentry unit pointers (all rmq units) | ||
3254 | 748 | :returns: None if successful, otherwise return error message | ||
3255 | 749 | """ | ||
3256 | 750 | host_names = self.get_unit_hostnames(sentry_units) | ||
3257 | 751 | errors = [] | ||
3258 | 752 | |||
3259 | 753 | # Query every unit for cluster_status running nodes | ||
3260 | 754 | for query_unit in sentry_units: | ||
3261 | 755 | query_unit_name = query_unit.info['unit_name'] | ||
3262 | 756 | running_nodes = self.get_rmq_cluster_running_nodes(query_unit) | ||
3263 | 757 | |||
3264 | 758 | # Confirm that every unit is represented in the queried unit's | ||
3265 | 759 | # cluster_status running nodes output. | ||
3266 | 760 | for validate_unit in sentry_units: | ||
3267 | 761 | val_host_name = host_names[validate_unit.info['unit_name']] | ||
3268 | 762 | val_node_name = 'rabbit@{}'.format(val_host_name) | ||
3269 | 763 | |||
3270 | 764 | if val_node_name not in running_nodes: | ||
3271 | 765 | errors.append('Cluster member check failed on {}: {} not ' | ||
3272 | 766 | 'in {}\n'.format(query_unit_name, | ||
3273 | 767 | val_node_name, | ||
3274 | 768 | running_nodes)) | ||
3275 | 769 | if errors: | ||
3276 | 770 | return ''.join(errors) | ||
3277 | 771 | |||
3278 | 772 | def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): | ||
3279 | 773 | """Check a single juju rmq unit for ssl and port in the config file.""" | ||
3280 | 774 | host = sentry_unit.info['public-address'] | ||
3281 | 775 | unit_name = sentry_unit.info['unit_name'] | ||
3282 | 776 | |||
3283 | 777 | conf_file = '/etc/rabbitmq/rabbitmq.config' | ||
3284 | 778 | conf_contents = str(self.file_contents_safe(sentry_unit, | ||
3285 | 779 | conf_file, max_wait=16)) | ||
3286 | 780 | # Checks | ||
3287 | 781 | conf_ssl = 'ssl' in conf_contents | ||
3288 | 782 | conf_port = str(port) in conf_contents | ||
3289 | 783 | |||
3290 | 784 | # Port explicitly checked in config | ||
3291 | 785 | if port and conf_port and conf_ssl: | ||
3292 | 786 | self.log.debug('SSL is enabled @{}:{} ' | ||
3293 | 787 | '({})'.format(host, port, unit_name)) | ||
3294 | 788 | return True | ||
3295 | 789 | elif port and not conf_port and conf_ssl: | ||
3296 | 790 | self.log.debug('SSL is enabled @{} but not on port {} ' | ||
3297 | 791 | '({})'.format(host, port, unit_name)) | ||
3298 | 792 | return False | ||
3299 | 793 | # Port not checked (useful when checking that ssl is disabled) | ||
3300 | 794 | elif not port and conf_ssl: | ||
3301 | 795 | self.log.debug('SSL is enabled @{}:{} ' | ||
3302 | 796 | '({})'.format(host, port, unit_name)) | ||
3303 | 797 | return True | ||
3304 | 798 | elif not conf_ssl: | ||
3305 | 799 | self.log.debug('SSL not enabled @{}:{} ' | ||
3306 | 800 | '({})'.format(host, port, unit_name)) | ||
3307 | 801 | return False | ||
3308 | 802 | else: | ||
3309 | 803 | msg = ('Unknown condition when checking SSL status @{}:{} ' | ||
3310 | 804 | '({})'.format(host, port, unit_name)) | ||
3311 | 805 | amulet.raise_status(amulet.FAIL, msg) | ||
3312 | 806 | |||
3313 | 807 | def validate_rmq_ssl_enabled_units(self, sentry_units, port=None): | ||
3314 | 808 | """Check that ssl is enabled on rmq juju sentry units. | ||
3315 | 809 | |||
3316 | 810 | :param sentry_units: list of all rmq sentry units | ||
3317 | 811 | :param port: optional ssl port override to validate | ||
3318 | 812 | :returns: None if successful, otherwise return error message | ||
3319 | 813 | """ | ||
3320 | 814 | for sentry_unit in sentry_units: | ||
3321 | 815 | if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port): | ||
3322 | 816 | return ('Unexpected condition: ssl is disabled on unit ' | ||
3323 | 817 | '({})'.format(sentry_unit.info['unit_name'])) | ||
3324 | 818 | return None | ||
3325 | 819 | |||
3326 | 820 | def validate_rmq_ssl_disabled_units(self, sentry_units): | ||
3327 | 821 | """Check that ssl is enabled on listed rmq juju sentry units. | ||
3328 | 822 | |||
3329 | 823 | :param sentry_units: list of all rmq sentry units | ||
3330 | 824 | :returns: True if successful. Raise on error. | ||
3331 | 825 | """ | ||
3332 | 826 | for sentry_unit in sentry_units: | ||
3333 | 827 | if self.rmq_ssl_is_enabled_on_unit(sentry_unit): | ||
3334 | 828 | return ('Unexpected condition: ssl is enabled on unit ' | ||
3335 | 829 | '({})'.format(sentry_unit.info['unit_name'])) | ||
3336 | 830 | return None | ||
3337 | 831 | |||
3338 | 832 | def configure_rmq_ssl_on(self, sentry_units, deployment, | ||
3339 | 833 | port=None, max_wait=60): | ||
3340 | 834 | """Turn ssl charm config option on, with optional non-default | ||
3341 | 835 | ssl port specification. Confirm that it is enabled on every | ||
3342 | 836 | unit. | ||
3343 | 837 | |||
3344 | 838 | :param sentry_units: list of sentry units | ||
3345 | 839 | :param deployment: amulet deployment object pointer | ||
3346 | 840 | :param port: amqp port, use defaults if None | ||
3347 | 841 | :param max_wait: maximum time to wait in seconds to confirm | ||
3348 | 842 | :returns: None if successful. Raise on error. | ||
3349 | 843 | """ | ||
3350 | 844 | self.log.debug('Setting ssl charm config option: on') | ||
3351 | 845 | |||
3352 | 846 | # Enable RMQ SSL | ||
3353 | 847 | config = {'ssl': 'on'} | ||
3354 | 848 | if port: | ||
3355 | 849 | config['ssl_port'] = port | ||
3356 | 850 | |||
3357 | 851 | deployment.d.configure('rabbitmq-server', config) | ||
3358 | 852 | |||
3359 | 853 | # Wait for unit status | ||
3360 | 854 | self.rmq_wait_for_cluster(deployment) | ||
3361 | 855 | |||
3362 | 856 | # Confirm | ||
3363 | 857 | tries = 0 | ||
3364 | 858 | ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) | ||
3365 | 859 | while ret and tries < (max_wait / 4): | ||
3366 | 860 | time.sleep(4) | ||
3367 | 861 | self.log.debug('Attempt {}: {}'.format(tries, ret)) | ||
3368 | 862 | ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) | ||
3369 | 863 | tries += 1 | ||
3370 | 864 | |||
3371 | 865 | if ret: | ||
3372 | 866 | amulet.raise_status(amulet.FAIL, ret) | ||
3373 | 867 | |||
3374 | 868 | def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): | ||
3375 | 869 | """Turn ssl charm config option off, confirm that it is disabled | ||
3376 | 870 | on every unit. | ||
3377 | 871 | |||
3378 | 872 | :param sentry_units: list of sentry units | ||
3379 | 873 | :param deployment: amulet deployment object pointer | ||
3380 | 874 | :param max_wait: maximum time to wait in seconds to confirm | ||
3381 | 875 | :returns: None if successful. Raise on error. | ||
3382 | 876 | """ | ||
3383 | 877 | self.log.debug('Setting ssl charm config option: off') | ||
3384 | 878 | |||
3385 | 879 | # Disable RMQ SSL | ||
3386 | 880 | config = {'ssl': 'off'} | ||
3387 | 881 | deployment.d.configure('rabbitmq-server', config) | ||
3388 | 882 | |||
3389 | 883 | # Wait for unit status | ||
3390 | 884 | self.rmq_wait_for_cluster(deployment) | ||
3391 | 885 | |||
3392 | 886 | # Confirm | ||
3393 | 887 | tries = 0 | ||
3394 | 888 | ret = self.validate_rmq_ssl_disabled_units(sentry_units) | ||
3395 | 889 | while ret and tries < (max_wait / 4): | ||
3396 | 890 | time.sleep(4) | ||
3397 | 891 | self.log.debug('Attempt {}: {}'.format(tries, ret)) | ||
3398 | 892 | ret = self.validate_rmq_ssl_disabled_units(sentry_units) | ||
3399 | 893 | tries += 1 | ||
3400 | 894 | |||
3401 | 895 | if ret: | ||
3402 | 896 | amulet.raise_status(amulet.FAIL, ret) | ||
3403 | 897 | |||
3404 | 898 | def connect_amqp_by_unit(self, sentry_unit, ssl=False, | ||
3405 | 899 | port=None, fatal=True, | ||
3406 | 900 | username="testuser1", password="changeme"): | ||
3407 | 901 | """Establish and return a pika amqp connection to the rabbitmq service | ||
3408 | 902 | running on a rmq juju unit. | ||
3409 | 903 | |||
3410 | 904 | :param sentry_unit: sentry unit pointer | ||
3411 | 905 | :param ssl: boolean, default to False | ||
3412 | 906 | :param port: amqp port, use defaults if None | ||
3413 | 907 | :param fatal: boolean, default to True (raises on connect error) | ||
3414 | 908 | :param username: amqp user name, default to testuser1 | ||
3415 | 909 | :param password: amqp user password | ||
3416 | 910 | :returns: pika amqp connection pointer or None if failed and non-fatal | ||
3417 | 911 | """ | ||
3418 | 912 | host = sentry_unit.info['public-address'] | ||
3419 | 913 | unit_name = sentry_unit.info['unit_name'] | ||
3420 | 914 | |||
3421 | 915 | # Default port logic if port is not specified | ||
3422 | 916 | if ssl and not port: | ||
3423 | 917 | port = 5671 | ||
3424 | 918 | elif not ssl and not port: | ||
3425 | 919 | port = 5672 | ||
3426 | 920 | |||
3427 | 921 | self.log.debug('Connecting to amqp on {}:{} ({}) as ' | ||
3428 | 922 | '{}...'.format(host, port, unit_name, username)) | ||
3429 | 923 | |||
3430 | 924 | try: | ||
3431 | 925 | credentials = pika.PlainCredentials(username, password) | ||
3432 | 926 | parameters = pika.ConnectionParameters(host=host, port=port, | ||
3433 | 927 | credentials=credentials, | ||
3434 | 928 | ssl=ssl, | ||
3435 | 929 | connection_attempts=3, | ||
3436 | 930 | retry_delay=5, | ||
3437 | 931 | socket_timeout=1) | ||
3438 | 932 | connection = pika.BlockingConnection(parameters) | ||
3439 | 933 | assert connection.server_properties['product'] == 'RabbitMQ' | ||
3440 | 934 | self.log.debug('Connect OK') | ||
3441 | 935 | return connection | ||
3442 | 936 | except Exception as e: | ||
3443 | 937 | msg = ('amqp connection failed to {}:{} as ' | ||
3444 | 938 | '{} ({})'.format(host, port, username, str(e))) | ||
3445 | 939 | if fatal: | ||
3446 | 940 | amulet.raise_status(amulet.FAIL, msg) | ||
3447 | 941 | else: | ||
3448 | 942 | self.log.warn(msg) | ||
3449 | 943 | return None | ||
3450 | 944 | |||
3451 | 945 | def publish_amqp_message_by_unit(self, sentry_unit, message, | ||
3452 | 946 | queue="test", ssl=False, | ||
3453 | 947 | username="testuser1", | ||
3454 | 948 | password="changeme", | ||
3455 | 949 | port=None): | ||
3456 | 950 | """Publish an amqp message to a rmq juju unit. | ||
3457 | 951 | |||
3458 | 952 | :param sentry_unit: sentry unit pointer | ||
3459 | 953 | :param message: amqp message string | ||
3460 | 954 | :param queue: message queue, default to test | ||
3461 | 955 | :param username: amqp user name, default to testuser1 | ||
3462 | 956 | :param password: amqp user password | ||
3463 | 957 | :param ssl: boolean, default to False | ||
3464 | 958 | :param port: amqp port, use defaults if None | ||
3465 | 959 | :returns: None. Raises exception if publish failed. | ||
3466 | 960 | """ | ||
3467 | 961 | self.log.debug('Publishing message to {} queue:\n{}'.format(queue, | ||
3468 | 962 | message)) | ||
3469 | 963 | connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, | ||
3470 | 964 | port=port, | ||
3471 | 965 | username=username, | ||
3472 | 966 | password=password) | ||
3473 | 967 | |||
3474 | 968 | # NOTE(beisner): extra debug here re: pika hang potential: | ||
3475 | 969 | # https://github.com/pika/pika/issues/297 | ||
3476 | 970 | # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw | ||
3477 | 971 | self.log.debug('Defining channel...') | ||
3478 | 972 | channel = connection.channel() | ||
3479 | 973 | self.log.debug('Declaring queue...') | ||
3480 | 974 | channel.queue_declare(queue=queue, auto_delete=False, durable=True) | ||
3481 | 975 | self.log.debug('Publishing message...') | ||
3482 | 976 | channel.basic_publish(exchange='', routing_key=queue, body=message) | ||
3483 | 977 | self.log.debug('Closing channel...') | ||
3484 | 978 | channel.close() | ||
3485 | 979 | self.log.debug('Closing connection...') | ||
3486 | 980 | connection.close() | ||
3487 | 981 | |||
3488 | 982 | def get_amqp_message_by_unit(self, sentry_unit, queue="test", | ||
3489 | 983 | username="testuser1", | ||
3490 | 984 | password="changeme", | ||
3491 | 985 | ssl=False, port=None): | ||
3492 | 986 | """Get an amqp message from a rmq juju unit. | ||
3493 | 987 | |||
3494 | 988 | :param sentry_unit: sentry unit pointer | ||
3495 | 989 | :param queue: message queue, default to test | ||
3496 | 990 | :param username: amqp user name, default to testuser1 | ||
3497 | 991 | :param password: amqp user password | ||
3498 | 992 | :param ssl: boolean, default to False | ||
3499 | 993 | :param port: amqp port, use defaults if None | ||
3500 | 994 | :returns: amqp message body as string. Raise if get fails. | ||
3501 | 995 | """ | ||
3502 | 996 | connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, | ||
3503 | 997 | port=port, | ||
3504 | 998 | username=username, | ||
3505 | 999 | password=password) | ||
3506 | 1000 | channel = connection.channel() | ||
3507 | 1001 | method_frame, _, body = channel.basic_get(queue) | ||
3508 | 1002 | |||
3509 | 1003 | if method_frame: | ||
3510 | 1004 | self.log.debug('Retreived message from {} queue:\n{}'.format(queue, | ||
3511 | 1005 | body)) | ||
3512 | 1006 | channel.basic_ack(method_frame.delivery_tag) | ||
3513 | 1007 | channel.close() | ||
3514 | 1008 | connection.close() | ||
3515 | 1009 | return body | ||
3516 | 1010 | else: | ||
3517 | 1011 | msg = 'No message retrieved.' | ||
3518 | 1012 | amulet.raise_status(amulet.FAIL, msg) | ||
3519 | 605 | 1013 | ||
3520 | === modified file 'hooks/charmhelpers/contrib/openstack/context.py' | |||
3521 | --- hooks/charmhelpers/contrib/openstack/context.py 2015-07-29 18:07:31 +0000 | |||
3522 | +++ hooks/charmhelpers/contrib/openstack/context.py 2016-05-19 03:33:34 +0000 | |||
3523 | @@ -14,12 +14,13 @@ | |||
3524 | 14 | # You should have received a copy of the GNU Lesser General Public License | 14 | # You should have received a copy of the GNU Lesser General Public License |
3525 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
3526 | 16 | 16 | ||
3527 | 17 | import glob | ||
3528 | 17 | import json | 18 | import json |
3529 | 18 | import os | 19 | import os |
3530 | 19 | import re | 20 | import re |
3531 | 20 | import time | 21 | import time |
3532 | 21 | from base64 import b64decode | 22 | from base64 import b64decode |
3534 | 22 | from subprocess import check_call | 23 | from subprocess import check_call, CalledProcessError |
3535 | 23 | 24 | ||
3536 | 24 | import six | 25 | import six |
3537 | 25 | import yaml | 26 | import yaml |
3538 | @@ -44,16 +45,20 @@ | |||
3539 | 44 | INFO, | 45 | INFO, |
3540 | 45 | WARNING, | 46 | WARNING, |
3541 | 46 | ERROR, | 47 | ERROR, |
3542 | 48 | status_set, | ||
3543 | 47 | ) | 49 | ) |
3544 | 48 | 50 | ||
3545 | 49 | from charmhelpers.core.sysctl import create as sysctl_create | 51 | from charmhelpers.core.sysctl import create as sysctl_create |
3546 | 50 | from charmhelpers.core.strutils import bool_from_string | 52 | from charmhelpers.core.strutils import bool_from_string |
3547 | 51 | 53 | ||
3548 | 52 | from charmhelpers.core.host import ( | 54 | from charmhelpers.core.host import ( |
3549 | 55 | get_bond_master, | ||
3550 | 56 | is_phy_iface, | ||
3551 | 53 | list_nics, | 57 | list_nics, |
3552 | 54 | get_nic_hwaddr, | 58 | get_nic_hwaddr, |
3553 | 55 | mkdir, | 59 | mkdir, |
3554 | 56 | write_file, | 60 | write_file, |
3555 | 61 | pwgen, | ||
3556 | 57 | ) | 62 | ) |
3557 | 58 | from charmhelpers.contrib.hahelpers.cluster import ( | 63 | from charmhelpers.contrib.hahelpers.cluster import ( |
3558 | 59 | determine_apache_port, | 64 | determine_apache_port, |
3559 | @@ -84,6 +89,14 @@ | |||
3560 | 84 | is_bridge_member, | 89 | is_bridge_member, |
3561 | 85 | ) | 90 | ) |
3562 | 86 | from charmhelpers.contrib.openstack.utils import get_host_ip | 91 | from charmhelpers.contrib.openstack.utils import get_host_ip |
3563 | 92 | from charmhelpers.core.unitdata import kv | ||
3564 | 93 | |||
3565 | 94 | try: | ||
3566 | 95 | import psutil | ||
3567 | 96 | except ImportError: | ||
3568 | 97 | apt_install('python-psutil', fatal=True) | ||
3569 | 98 | import psutil | ||
3570 | 99 | |||
3571 | 87 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' | 100 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' |
3572 | 88 | ADDRESS_TYPES = ['admin', 'internal', 'public'] | 101 | ADDRESS_TYPES = ['admin', 'internal', 'public'] |
3573 | 89 | 102 | ||
3574 | @@ -192,10 +205,50 @@ | |||
3575 | 192 | class OSContextGenerator(object): | 205 | class OSContextGenerator(object): |
3576 | 193 | """Base class for all context generators.""" | 206 | """Base class for all context generators.""" |
3577 | 194 | interfaces = [] | 207 | interfaces = [] |
3578 | 208 | related = False | ||
3579 | 209 | complete = False | ||
3580 | 210 | missing_data = [] | ||
3581 | 195 | 211 | ||
3582 | 196 | def __call__(self): | 212 | def __call__(self): |
3583 | 197 | raise NotImplementedError | 213 | raise NotImplementedError |
3584 | 198 | 214 | ||
3585 | 215 | def context_complete(self, ctxt): | ||
3586 | 216 | """Check for missing data for the required context data. | ||
3587 | 217 | Set self.missing_data if it exists and return False. | ||
3588 | 218 | Set self.complete if no missing data and return True. | ||
3589 | 219 | """ | ||
3590 | 220 | # Fresh start | ||
3591 | 221 | self.complete = False | ||
3592 | 222 | self.missing_data = [] | ||
3593 | 223 | for k, v in six.iteritems(ctxt): | ||
3594 | 224 | if v is None or v == '': | ||
3595 | 225 | if k not in self.missing_data: | ||
3596 | 226 | self.missing_data.append(k) | ||
3597 | 227 | |||
3598 | 228 | if self.missing_data: | ||
3599 | 229 | self.complete = False | ||
3600 | 230 | log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO) | ||
3601 | 231 | else: | ||
3602 | 232 | self.complete = True | ||
3603 | 233 | return self.complete | ||
3604 | 234 | |||
3605 | 235 | def get_related(self): | ||
3606 | 236 | """Check if any of the context interfaces have relation ids. | ||
3607 | 237 | Set self.related and return True if one of the interfaces | ||
3608 | 238 | has relation ids. | ||
3609 | 239 | """ | ||
3610 | 240 | # Fresh start | ||
3611 | 241 | self.related = False | ||
3612 | 242 | try: | ||
3613 | 243 | for interface in self.interfaces: | ||
3614 | 244 | if relation_ids(interface): | ||
3615 | 245 | self.related = True | ||
3616 | 246 | return self.related | ||
3617 | 247 | except AttributeError as e: | ||
3618 | 248 | log("{} {}" | ||
3619 | 249 | "".format(self, e), 'INFO') | ||
3620 | 250 | return self.related | ||
3621 | 251 | |||
3622 | 199 | 252 | ||
3623 | 200 | class SharedDBContext(OSContextGenerator): | 253 | class SharedDBContext(OSContextGenerator): |
3624 | 201 | interfaces = ['shared-db'] | 254 | interfaces = ['shared-db'] |
3625 | @@ -211,6 +264,7 @@ | |||
3626 | 211 | self.database = database | 264 | self.database = database |
3627 | 212 | self.user = user | 265 | self.user = user |
3628 | 213 | self.ssl_dir = ssl_dir | 266 | self.ssl_dir = ssl_dir |
3629 | 267 | self.rel_name = self.interfaces[0] | ||
3630 | 214 | 268 | ||
3631 | 215 | def __call__(self): | 269 | def __call__(self): |
3632 | 216 | self.database = self.database or config('database') | 270 | self.database = self.database or config('database') |
3633 | @@ -244,6 +298,7 @@ | |||
3634 | 244 | password_setting = self.relation_prefix + '_password' | 298 | password_setting = self.relation_prefix + '_password' |
3635 | 245 | 299 | ||
3636 | 246 | for rid in relation_ids(self.interfaces[0]): | 300 | for rid in relation_ids(self.interfaces[0]): |
3637 | 301 | self.related = True | ||
3638 | 247 | for unit in related_units(rid): | 302 | for unit in related_units(rid): |
3639 | 248 | rdata = relation_get(rid=rid, unit=unit) | 303 | rdata = relation_get(rid=rid, unit=unit) |
3640 | 249 | host = rdata.get('db_host') | 304 | host = rdata.get('db_host') |
3641 | @@ -255,7 +310,7 @@ | |||
3642 | 255 | 'database_password': rdata.get(password_setting), | 310 | 'database_password': rdata.get(password_setting), |
3643 | 256 | 'database_type': 'mysql' | 311 | 'database_type': 'mysql' |
3644 | 257 | } | 312 | } |
3646 | 258 | if context_complete(ctxt): | 313 | if self.context_complete(ctxt): |
3647 | 259 | db_ssl(rdata, ctxt, self.ssl_dir) | 314 | db_ssl(rdata, ctxt, self.ssl_dir) |
3648 | 260 | return ctxt | 315 | return ctxt |
3649 | 261 | return {} | 316 | return {} |
3650 | @@ -276,6 +331,7 @@ | |||
3651 | 276 | 331 | ||
3652 | 277 | ctxt = {} | 332 | ctxt = {} |
3653 | 278 | for rid in relation_ids(self.interfaces[0]): | 333 | for rid in relation_ids(self.interfaces[0]): |
3654 | 334 | self.related = True | ||
3655 | 279 | for unit in related_units(rid): | 335 | for unit in related_units(rid): |
3656 | 280 | rel_host = relation_get('host', rid=rid, unit=unit) | 336 | rel_host = relation_get('host', rid=rid, unit=unit) |
3657 | 281 | rel_user = relation_get('user', rid=rid, unit=unit) | 337 | rel_user = relation_get('user', rid=rid, unit=unit) |
3658 | @@ -285,7 +341,7 @@ | |||
3659 | 285 | 'database_user': rel_user, | 341 | 'database_user': rel_user, |
3660 | 286 | 'database_password': rel_passwd, | 342 | 'database_password': rel_passwd, |
3661 | 287 | 'database_type': 'postgresql'} | 343 | 'database_type': 'postgresql'} |
3663 | 288 | if context_complete(ctxt): | 344 | if self.context_complete(ctxt): |
3664 | 289 | return ctxt | 345 | return ctxt |
3665 | 290 | 346 | ||
3666 | 291 | return {} | 347 | return {} |
3667 | @@ -346,6 +402,7 @@ | |||
3668 | 346 | ctxt['signing_dir'] = cachedir | 402 | ctxt['signing_dir'] = cachedir |
3669 | 347 | 403 | ||
3670 | 348 | for rid in relation_ids(self.rel_name): | 404 | for rid in relation_ids(self.rel_name): |
3671 | 405 | self.related = True | ||
3672 | 349 | for unit in related_units(rid): | 406 | for unit in related_units(rid): |
3673 | 350 | rdata = relation_get(rid=rid, unit=unit) | 407 | rdata = relation_get(rid=rid, unit=unit) |
3674 | 351 | serv_host = rdata.get('service_host') | 408 | serv_host = rdata.get('service_host') |
3675 | @@ -354,6 +411,7 @@ | |||
3676 | 354 | auth_host = format_ipv6_addr(auth_host) or auth_host | 411 | auth_host = format_ipv6_addr(auth_host) or auth_host |
3677 | 355 | svc_protocol = rdata.get('service_protocol') or 'http' | 412 | svc_protocol = rdata.get('service_protocol') or 'http' |
3678 | 356 | auth_protocol = rdata.get('auth_protocol') or 'http' | 413 | auth_protocol = rdata.get('auth_protocol') or 'http' |
3679 | 414 | api_version = rdata.get('api_version') or '2.0' | ||
3680 | 357 | ctxt.update({'service_port': rdata.get('service_port'), | 415 | ctxt.update({'service_port': rdata.get('service_port'), |
3681 | 358 | 'service_host': serv_host, | 416 | 'service_host': serv_host, |
3682 | 359 | 'auth_host': auth_host, | 417 | 'auth_host': auth_host, |
3683 | @@ -362,9 +420,10 @@ | |||
3684 | 362 | 'admin_user': rdata.get('service_username'), | 420 | 'admin_user': rdata.get('service_username'), |
3685 | 363 | 'admin_password': rdata.get('service_password'), | 421 | 'admin_password': rdata.get('service_password'), |
3686 | 364 | 'service_protocol': svc_protocol, | 422 | 'service_protocol': svc_protocol, |
3688 | 365 | 'auth_protocol': auth_protocol}) | 423 | 'auth_protocol': auth_protocol, |
3689 | 424 | 'api_version': api_version}) | ||
3690 | 366 | 425 | ||
3692 | 367 | if context_complete(ctxt): | 426 | if self.context_complete(ctxt): |
3693 | 368 | # NOTE(jamespage) this is required for >= icehouse | 427 | # NOTE(jamespage) this is required for >= icehouse |
3694 | 369 | # so a missing value just indicates keystone needs | 428 | # so a missing value just indicates keystone needs |
3695 | 370 | # upgrading | 429 | # upgrading |
3696 | @@ -403,6 +462,7 @@ | |||
3697 | 403 | ctxt = {} | 462 | ctxt = {} |
3698 | 404 | for rid in relation_ids(self.rel_name): | 463 | for rid in relation_ids(self.rel_name): |
3699 | 405 | ha_vip_only = False | 464 | ha_vip_only = False |
3700 | 465 | self.related = True | ||
3701 | 406 | for unit in related_units(rid): | 466 | for unit in related_units(rid): |
3702 | 407 | if relation_get('clustered', rid=rid, unit=unit): | 467 | if relation_get('clustered', rid=rid, unit=unit): |
3703 | 408 | ctxt['clustered'] = True | 468 | ctxt['clustered'] = True |
3704 | @@ -435,7 +495,7 @@ | |||
3705 | 435 | ha_vip_only = relation_get('ha-vip-only', | 495 | ha_vip_only = relation_get('ha-vip-only', |
3706 | 436 | rid=rid, unit=unit) is not None | 496 | rid=rid, unit=unit) is not None |
3707 | 437 | 497 | ||
3709 | 438 | if context_complete(ctxt): | 498 | if self.context_complete(ctxt): |
3710 | 439 | if 'rabbit_ssl_ca' in ctxt: | 499 | if 'rabbit_ssl_ca' in ctxt: |
3711 | 440 | if not self.ssl_dir: | 500 | if not self.ssl_dir: |
3712 | 441 | log("Charm not setup for ssl support but ssl ca " | 501 | log("Charm not setup for ssl support but ssl ca " |
3713 | @@ -467,7 +527,7 @@ | |||
3714 | 467 | ctxt['oslo_messaging_flags'] = config_flags_parser( | 527 | ctxt['oslo_messaging_flags'] = config_flags_parser( |
3715 | 468 | oslo_messaging_flags) | 528 | oslo_messaging_flags) |
3716 | 469 | 529 | ||
3718 | 470 | if not context_complete(ctxt): | 530 | if not self.complete: |
3719 | 471 | return {} | 531 | return {} |
3720 | 472 | 532 | ||
3721 | 473 | return ctxt | 533 | return ctxt |
3722 | @@ -483,13 +543,15 @@ | |||
3723 | 483 | 543 | ||
3724 | 484 | log('Generating template context for ceph', level=DEBUG) | 544 | log('Generating template context for ceph', level=DEBUG) |
3725 | 485 | mon_hosts = [] | 545 | mon_hosts = [] |
3729 | 486 | auth = None | 546 | ctxt = { |
3730 | 487 | key = None | 547 | 'use_syslog': str(config('use-syslog')).lower() |
3731 | 488 | use_syslog = str(config('use-syslog')).lower() | 548 | } |
3732 | 489 | for rid in relation_ids('ceph'): | 549 | for rid in relation_ids('ceph'): |
3733 | 490 | for unit in related_units(rid): | 550 | for unit in related_units(rid): |
3736 | 491 | auth = relation_get('auth', rid=rid, unit=unit) | 551 | if not ctxt.get('auth'): |
3737 | 492 | key = relation_get('key', rid=rid, unit=unit) | 552 | ctxt['auth'] = relation_get('auth', rid=rid, unit=unit) |
3738 | 553 | if not ctxt.get('key'): | ||
3739 | 554 | ctxt['key'] = relation_get('key', rid=rid, unit=unit) | ||
3740 | 493 | ceph_pub_addr = relation_get('ceph-public-address', rid=rid, | 555 | ceph_pub_addr = relation_get('ceph-public-address', rid=rid, |
3741 | 494 | unit=unit) | 556 | unit=unit) |
3742 | 495 | unit_priv_addr = relation_get('private-address', rid=rid, | 557 | unit_priv_addr = relation_get('private-address', rid=rid, |
3743 | @@ -498,15 +560,12 @@ | |||
3744 | 498 | ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr | 560 | ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr |
3745 | 499 | mon_hosts.append(ceph_addr) | 561 | mon_hosts.append(ceph_addr) |
3746 | 500 | 562 | ||
3751 | 501 | ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)), | 563 | ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts)) |
3748 | 502 | 'auth': auth, | ||
3749 | 503 | 'key': key, | ||
3750 | 504 | 'use_syslog': use_syslog} | ||
3752 | 505 | 564 | ||
3753 | 506 | if not os.path.isdir('/etc/ceph'): | 565 | if not os.path.isdir('/etc/ceph'): |
3754 | 507 | os.mkdir('/etc/ceph') | 566 | os.mkdir('/etc/ceph') |
3755 | 508 | 567 | ||
3757 | 509 | if not context_complete(ctxt): | 568 | if not self.context_complete(ctxt): |
3758 | 510 | return {} | 569 | return {} |
3759 | 511 | 570 | ||
3760 | 512 | ensure_packages(['ceph-common']) | 571 | ensure_packages(['ceph-common']) |
3761 | @@ -579,15 +638,28 @@ | |||
3762 | 579 | if config('haproxy-client-timeout'): | 638 | if config('haproxy-client-timeout'): |
3763 | 580 | ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') | 639 | ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') |
3764 | 581 | 640 | ||
3765 | 641 | if config('haproxy-queue-timeout'): | ||
3766 | 642 | ctxt['haproxy_queue_timeout'] = config('haproxy-queue-timeout') | ||
3767 | 643 | |||
3768 | 644 | if config('haproxy-connect-timeout'): | ||
3769 | 645 | ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout') | ||
3770 | 646 | |||
3771 | 582 | if config('prefer-ipv6'): | 647 | if config('prefer-ipv6'): |
3772 | 583 | ctxt['ipv6'] = True | 648 | ctxt['ipv6'] = True |
3773 | 584 | ctxt['local_host'] = 'ip6-localhost' | 649 | ctxt['local_host'] = 'ip6-localhost' |
3774 | 585 | ctxt['haproxy_host'] = '::' | 650 | ctxt['haproxy_host'] = '::' |
3775 | 586 | ctxt['stat_port'] = ':::8888' | ||
3776 | 587 | else: | 651 | else: |
3777 | 588 | ctxt['local_host'] = '127.0.0.1' | 652 | ctxt['local_host'] = '127.0.0.1' |
3778 | 589 | ctxt['haproxy_host'] = '0.0.0.0' | 653 | ctxt['haproxy_host'] = '0.0.0.0' |
3780 | 590 | ctxt['stat_port'] = ':8888' | 654 | |
3781 | 655 | ctxt['stat_port'] = '8888' | ||
3782 | 656 | |||
3783 | 657 | db = kv() | ||
3784 | 658 | ctxt['stat_password'] = db.get('stat-password') | ||
3785 | 659 | if not ctxt['stat_password']: | ||
3786 | 660 | ctxt['stat_password'] = db.set('stat-password', | ||
3787 | 661 | pwgen(32)) | ||
3788 | 662 | db.flush() | ||
3789 | 591 | 663 | ||
3790 | 592 | for frontend in cluster_hosts: | 664 | for frontend in cluster_hosts: |
3791 | 593 | if (len(cluster_hosts[frontend]['backends']) > 1 or | 665 | if (len(cluster_hosts[frontend]['backends']) > 1 or |
3792 | @@ -878,19 +950,6 @@ | |||
3793 | 878 | 950 | ||
3794 | 879 | return calico_ctxt | 951 | return calico_ctxt |
3795 | 880 | 952 | ||
3796 | 881 | def pg_ctxt(self): | ||
3797 | 882 | driver = neutron_plugin_attribute(self.plugin, 'driver', | ||
3798 | 883 | self.network_manager) | ||
3799 | 884 | config = neutron_plugin_attribute(self.plugin, 'config', | ||
3800 | 885 | self.network_manager) | ||
3801 | 886 | pg_ctxt = {'core_plugin': driver, | ||
3802 | 887 | 'neutron_plugin': 'plumgrid', | ||
3803 | 888 | 'neutron_security_groups': self.neutron_security_groups, | ||
3804 | 889 | 'local_ip': unit_private_ip(), | ||
3805 | 890 | 'config': config} | ||
3806 | 891 | |||
3807 | 892 | return pg_ctxt | ||
3808 | 893 | |||
3809 | 894 | def neutron_ctxt(self): | 953 | def neutron_ctxt(self): |
3810 | 895 | if https(): | 954 | if https(): |
3811 | 896 | proto = 'https' | 955 | proto = 'https' |
3812 | @@ -906,6 +965,31 @@ | |||
3813 | 906 | 'neutron_url': '%s://%s:%s' % (proto, host, '9696')} | 965 | 'neutron_url': '%s://%s:%s' % (proto, host, '9696')} |
3814 | 907 | return ctxt | 966 | return ctxt |
3815 | 908 | 967 | ||
3816 | 968 | def pg_ctxt(self): | ||
3817 | 969 | driver = neutron_plugin_attribute(self.plugin, 'driver', | ||
3818 | 970 | self.network_manager) | ||
3819 | 971 | config = neutron_plugin_attribute(self.plugin, 'config', | ||
3820 | 972 | self.network_manager) | ||
3821 | 973 | ovs_ctxt = {'core_plugin': driver, | ||
3822 | 974 | 'neutron_plugin': 'plumgrid', | ||
3823 | 975 | 'neutron_security_groups': self.neutron_security_groups, | ||
3824 | 976 | 'local_ip': unit_private_ip(), | ||
3825 | 977 | 'config': config} | ||
3826 | 978 | return ovs_ctxt | ||
3827 | 979 | |||
3828 | 980 | def midonet_ctxt(self): | ||
3829 | 981 | driver = neutron_plugin_attribute(self.plugin, 'driver', | ||
3830 | 982 | self.network_manager) | ||
3831 | 983 | midonet_config = neutron_plugin_attribute(self.plugin, 'config', | ||
3832 | 984 | self.network_manager) | ||
3833 | 985 | mido_ctxt = {'core_plugin': driver, | ||
3834 | 986 | 'neutron_plugin': 'midonet', | ||
3835 | 987 | 'neutron_security_groups': self.neutron_security_groups, | ||
3836 | 988 | 'local_ip': unit_private_ip(), | ||
3837 | 989 | 'config': midonet_config} | ||
3838 | 990 | |||
3839 | 991 | return mido_ctxt | ||
3840 | 992 | |||
3841 | 909 | def __call__(self): | 993 | def __call__(self): |
3842 | 910 | if self.network_manager not in ['quantum', 'neutron']: | 994 | if self.network_manager not in ['quantum', 'neutron']: |
3843 | 911 | return {} | 995 | return {} |
3844 | @@ -927,6 +1011,8 @@ | |||
3845 | 927 | ctxt.update(self.nuage_ctxt()) | 1011 | ctxt.update(self.nuage_ctxt()) |
3846 | 928 | elif self.plugin == 'plumgrid': | 1012 | elif self.plugin == 'plumgrid': |
3847 | 929 | ctxt.update(self.pg_ctxt()) | 1013 | ctxt.update(self.pg_ctxt()) |
3848 | 1014 | elif self.plugin == 'midonet': | ||
3849 | 1015 | ctxt.update(self.midonet_ctxt()) | ||
3850 | 930 | 1016 | ||
3851 | 931 | alchemy_flags = config('neutron-alchemy-flags') | 1017 | alchemy_flags = config('neutron-alchemy-flags') |
3852 | 932 | if alchemy_flags: | 1018 | if alchemy_flags: |
3853 | @@ -938,7 +1024,6 @@ | |||
3854 | 938 | 1024 | ||
3855 | 939 | 1025 | ||
3856 | 940 | class NeutronPortContext(OSContextGenerator): | 1026 | class NeutronPortContext(OSContextGenerator): |
3857 | 941 | NIC_PREFIXES = ['eth', 'bond'] | ||
3858 | 942 | 1027 | ||
3859 | 943 | def resolve_ports(self, ports): | 1028 | def resolve_ports(self, ports): |
3860 | 944 | """Resolve NICs not yet bound to bridge(s) | 1029 | """Resolve NICs not yet bound to bridge(s) |
3861 | @@ -950,7 +1035,18 @@ | |||
3862 | 950 | 1035 | ||
3863 | 951 | hwaddr_to_nic = {} | 1036 | hwaddr_to_nic = {} |
3864 | 952 | hwaddr_to_ip = {} | 1037 | hwaddr_to_ip = {} |
3866 | 953 | for nic in list_nics(self.NIC_PREFIXES): | 1038 | for nic in list_nics(): |
3867 | 1039 | # Ignore virtual interfaces (bond masters will be identified from | ||
3868 | 1040 | # their slaves) | ||
3869 | 1041 | if not is_phy_iface(nic): | ||
3870 | 1042 | continue | ||
3871 | 1043 | |||
3872 | 1044 | _nic = get_bond_master(nic) | ||
3873 | 1045 | if _nic: | ||
3874 | 1046 | log("Replacing iface '%s' with bond master '%s'" % (nic, _nic), | ||
3875 | 1047 | level=DEBUG) | ||
3876 | 1048 | nic = _nic | ||
3877 | 1049 | |||
3878 | 954 | hwaddr = get_nic_hwaddr(nic) | 1050 | hwaddr = get_nic_hwaddr(nic) |
3879 | 955 | hwaddr_to_nic[hwaddr] = nic | 1051 | hwaddr_to_nic[hwaddr] = nic |
3880 | 956 | addresses = get_ipv4_addr(nic, fatal=False) | 1052 | addresses = get_ipv4_addr(nic, fatal=False) |
3881 | @@ -976,7 +1072,8 @@ | |||
3882 | 976 | # trust it to be the real external network). | 1072 | # trust it to be the real external network). |
3883 | 977 | resolved.append(entry) | 1073 | resolved.append(entry) |
3884 | 978 | 1074 | ||
3886 | 979 | return resolved | 1075 | # Ensure no duplicates |
3887 | 1076 | return list(set(resolved)) | ||
3888 | 980 | 1077 | ||
3889 | 981 | 1078 | ||
3890 | 982 | class OSConfigFlagContext(OSContextGenerator): | 1079 | class OSConfigFlagContext(OSContextGenerator): |
3891 | @@ -1016,6 +1113,20 @@ | |||
3892 | 1016 | config_flags_parser(config_flags)} | 1113 | config_flags_parser(config_flags)} |
3893 | 1017 | 1114 | ||
3894 | 1018 | 1115 | ||
3895 | 1116 | class LibvirtConfigFlagsContext(OSContextGenerator): | ||
3896 | 1117 | """ | ||
3897 | 1118 | This context provides support for extending | ||
3898 | 1119 | the libvirt section through user-defined flags. | ||
3899 | 1120 | """ | ||
3900 | 1121 | def __call__(self): | ||
3901 | 1122 | ctxt = {} | ||
3902 | 1123 | libvirt_flags = config('libvirt-flags') | ||
3903 | 1124 | if libvirt_flags: | ||
3904 | 1125 | ctxt['libvirt_flags'] = config_flags_parser( | ||
3905 | 1126 | libvirt_flags) | ||
3906 | 1127 | return ctxt | ||
3907 | 1128 | |||
3908 | 1129 | |||
3909 | 1019 | class SubordinateConfigContext(OSContextGenerator): | 1130 | class SubordinateConfigContext(OSContextGenerator): |
3910 | 1020 | 1131 | ||
3911 | 1021 | """ | 1132 | """ |
3912 | @@ -1048,7 +1159,7 @@ | |||
3913 | 1048 | 1159 | ||
3914 | 1049 | ctxt = { | 1160 | ctxt = { |
3915 | 1050 | ... other context ... | 1161 | ... other context ... |
3917 | 1051 | 'subordinate_config': { | 1162 | 'subordinate_configuration': { |
3918 | 1052 | 'DEFAULT': { | 1163 | 'DEFAULT': { |
3919 | 1053 | 'key1': 'value1', | 1164 | 'key1': 'value1', |
3920 | 1054 | }, | 1165 | }, |
3921 | @@ -1066,13 +1177,22 @@ | |||
3922 | 1066 | :param config_file : Service's config file to query sections | 1177 | :param config_file : Service's config file to query sections |
3923 | 1067 | :param interface : Subordinate interface to inspect | 1178 | :param interface : Subordinate interface to inspect |
3924 | 1068 | """ | 1179 | """ |
3925 | 1069 | self.service = service | ||
3926 | 1070 | self.config_file = config_file | 1180 | self.config_file = config_file |
3928 | 1071 | self.interface = interface | 1181 | if isinstance(service, list): |
3929 | 1182 | self.services = service | ||
3930 | 1183 | else: | ||
3931 | 1184 | self.services = [service] | ||
3932 | 1185 | if isinstance(interface, list): | ||
3933 | 1186 | self.interfaces = interface | ||
3934 | 1187 | else: | ||
3935 | 1188 | self.interfaces = [interface] | ||
3936 | 1072 | 1189 | ||
3937 | 1073 | def __call__(self): | 1190 | def __call__(self): |
3938 | 1074 | ctxt = {'sections': {}} | 1191 | ctxt = {'sections': {}} |
3940 | 1075 | for rid in relation_ids(self.interface): | 1192 | rids = [] |
3941 | 1193 | for interface in self.interfaces: | ||
3942 | 1194 | rids.extend(relation_ids(interface)) | ||
3943 | 1195 | for rid in rids: | ||
3944 | 1076 | for unit in related_units(rid): | 1196 | for unit in related_units(rid): |
3945 | 1077 | sub_config = relation_get('subordinate_configuration', | 1197 | sub_config = relation_get('subordinate_configuration', |
3946 | 1078 | rid=rid, unit=unit) | 1198 | rid=rid, unit=unit) |
3947 | @@ -1080,33 +1200,37 @@ | |||
3948 | 1080 | try: | 1200 | try: |
3949 | 1081 | sub_config = json.loads(sub_config) | 1201 | sub_config = json.loads(sub_config) |
3950 | 1082 | except: | 1202 | except: |
3978 | 1083 | log('Could not parse JSON from subordinate_config ' | 1203 | log('Could not parse JSON from ' |
3979 | 1084 | 'setting from %s' % rid, level=ERROR) | 1204 | 'subordinate_configuration setting from %s' |
3980 | 1085 | continue | 1205 | % rid, level=ERROR) |
3981 | 1086 | 1206 | continue | |
3982 | 1087 | if self.service not in sub_config: | 1207 | |
3983 | 1088 | log('Found subordinate_config on %s but it contained' | 1208 | for service in self.services: |
3984 | 1089 | 'nothing for %s service' % (rid, self.service), | 1209 | if service not in sub_config: |
3985 | 1090 | level=INFO) | 1210 | log('Found subordinate_configuration on %s but it ' |
3986 | 1091 | continue | 1211 | 'contained nothing for %s service' |
3987 | 1092 | 1212 | % (rid, service), level=INFO) | |
3988 | 1093 | sub_config = sub_config[self.service] | 1213 | continue |
3989 | 1094 | if self.config_file not in sub_config: | 1214 | |
3990 | 1095 | log('Found subordinate_config on %s but it contained' | 1215 | sub_config = sub_config[service] |
3991 | 1096 | 'nothing for %s' % (rid, self.config_file), | 1216 | if self.config_file not in sub_config: |
3992 | 1097 | level=INFO) | 1217 | log('Found subordinate_configuration on %s but it ' |
3993 | 1098 | continue | 1218 | 'contained nothing for %s' |
3994 | 1099 | 1219 | % (rid, self.config_file), level=INFO) | |
3995 | 1100 | sub_config = sub_config[self.config_file] | 1220 | continue |
3996 | 1101 | for k, v in six.iteritems(sub_config): | 1221 | |
3997 | 1102 | if k == 'sections': | 1222 | sub_config = sub_config[self.config_file] |
3998 | 1103 | for section, config_dict in six.iteritems(v): | 1223 | for k, v in six.iteritems(sub_config): |
3999 | 1104 | log("adding section '%s'" % (section), | 1224 | if k == 'sections': |
4000 | 1105 | level=DEBUG) | 1225 | for section, config_list in six.iteritems(v): |
4001 | 1106 | ctxt[k][section] = config_dict | 1226 | log("adding section '%s'" % (section), |
4002 | 1107 | else: | 1227 | level=DEBUG) |
4003 | 1108 | ctxt[k] = v | 1228 | if ctxt[k].get(section): |
4004 | 1109 | 1229 | ctxt[k][section].extend(config_list) | |
4005 | 1230 | else: | ||
4006 | 1231 | ctxt[k][section] = config_list | ||
4007 | 1232 | else: | ||
4008 | 1233 | ctxt[k] = v | ||
4009 | 1110 | log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) | 1234 | log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) |
4010 | 1111 | return ctxt | 1235 | return ctxt |
4011 | 1112 | 1236 | ||
4012 | @@ -1143,13 +1267,11 @@ | |||
4013 | 1143 | 1267 | ||
4014 | 1144 | @property | 1268 | @property |
4015 | 1145 | def num_cpus(self): | 1269 | def num_cpus(self): |
4023 | 1146 | try: | 1270 | # NOTE: use cpu_count if present (16.04 support) |
4024 | 1147 | from psutil import NUM_CPUS | 1271 | if hasattr(psutil, 'cpu_count'): |
4025 | 1148 | except ImportError: | 1272 | return psutil.cpu_count() |
4026 | 1149 | apt_install('python-psutil', fatal=True) | 1273 | else: |
4027 | 1150 | from psutil import NUM_CPUS | 1274 | return psutil.NUM_CPUS |
4021 | 1151 | |||
4022 | 1152 | return NUM_CPUS | ||
4028 | 1153 | 1275 | ||
4029 | 1154 | def __call__(self): | 1276 | def __call__(self): |
4030 | 1155 | multiplier = config('worker-multiplier') or 0 | 1277 | multiplier = config('worker-multiplier') or 0 |
4031 | @@ -1283,15 +1405,19 @@ | |||
4032 | 1283 | def __call__(self): | 1405 | def __call__(self): |
4033 | 1284 | ports = config('data-port') | 1406 | ports = config('data-port') |
4034 | 1285 | if ports: | 1407 | if ports: |
4035 | 1408 | # Map of {port/mac:bridge} | ||
4036 | 1286 | portmap = parse_data_port_mappings(ports) | 1409 | portmap = parse_data_port_mappings(ports) |
4038 | 1287 | ports = portmap.values() | 1410 | ports = portmap.keys() |
4039 | 1411 | # Resolve provided ports or mac addresses and filter out those | ||
4040 | 1412 | # already attached to a bridge. | ||
4041 | 1288 | resolved = self.resolve_ports(ports) | 1413 | resolved = self.resolve_ports(ports) |
4042 | 1414 | # FIXME: is this necessary? | ||
4043 | 1289 | normalized = {get_nic_hwaddr(port): port for port in resolved | 1415 | normalized = {get_nic_hwaddr(port): port for port in resolved |
4044 | 1290 | if port not in ports} | 1416 | if port not in ports} |
4045 | 1291 | normalized.update({port: port for port in resolved | 1417 | normalized.update({port: port for port in resolved |
4046 | 1292 | if port in ports}) | 1418 | if port in ports}) |
4047 | 1293 | if resolved: | 1419 | if resolved: |
4049 | 1294 | return {bridge: normalized[port] for bridge, port in | 1420 | return {normalized[port]: bridge for port, bridge in |
4050 | 1295 | six.iteritems(portmap) if port in normalized.keys()} | 1421 | six.iteritems(portmap) if port in normalized.keys()} |
4051 | 1296 | 1422 | ||
4052 | 1297 | return None | 1423 | return None |
4053 | @@ -1302,12 +1428,22 @@ | |||
4054 | 1302 | def __call__(self): | 1428 | def __call__(self): |
4055 | 1303 | ctxt = {} | 1429 | ctxt = {} |
4056 | 1304 | mappings = super(PhyNICMTUContext, self).__call__() | 1430 | mappings = super(PhyNICMTUContext, self).__call__() |
4059 | 1305 | if mappings and mappings.values(): | 1431 | if mappings and mappings.keys(): |
4060 | 1306 | ports = mappings.values() | 1432 | ports = sorted(mappings.keys()) |
4061 | 1307 | napi_settings = NeutronAPIContext()() | 1433 | napi_settings = NeutronAPIContext()() |
4062 | 1308 | mtu = napi_settings.get('network_device_mtu') | 1434 | mtu = napi_settings.get('network_device_mtu') |
4063 | 1435 | all_ports = set() | ||
4064 | 1436 | # If any of ports is a vlan device, its underlying device must have | ||
4065 | 1437 | # mtu applied first. | ||
4066 | 1438 | for port in ports: | ||
4067 | 1439 | for lport in glob.glob("/sys/class/net/%s/lower_*" % port): | ||
4068 | 1440 | lport = os.path.basename(lport) | ||
4069 | 1441 | all_ports.add(lport.split('_')[1]) | ||
4070 | 1442 | |||
4071 | 1443 | all_ports = list(all_ports) | ||
4072 | 1444 | all_ports.extend(ports) | ||
4073 | 1309 | if mtu: | 1445 | if mtu: |
4075 | 1310 | ctxt["devs"] = '\\n'.join(ports) | 1446 | ctxt["devs"] = '\\n'.join(all_ports) |
4076 | 1311 | ctxt['mtu'] = mtu | 1447 | ctxt['mtu'] = mtu |
4077 | 1312 | 1448 | ||
4078 | 1313 | return ctxt | 1449 | return ctxt |
4079 | @@ -1338,7 +1474,110 @@ | |||
4080 | 1338 | rdata.get('service_protocol') or 'http', | 1474 | rdata.get('service_protocol') or 'http', |
4081 | 1339 | 'auth_protocol': | 1475 | 'auth_protocol': |
4082 | 1340 | rdata.get('auth_protocol') or 'http', | 1476 | rdata.get('auth_protocol') or 'http', |
4083 | 1477 | 'api_version': | ||
4084 | 1478 | rdata.get('api_version') or '2.0', | ||
4085 | 1341 | } | 1479 | } |
4087 | 1342 | if context_complete(ctxt): | 1480 | if self.context_complete(ctxt): |
4088 | 1343 | return ctxt | 1481 | return ctxt |
4089 | 1344 | return {} | 1482 | return {} |
4090 | 1483 | |||
4091 | 1484 | |||
4092 | 1485 | class InternalEndpointContext(OSContextGenerator): | ||
4093 | 1486 | """Internal endpoint context. | ||
4094 | 1487 | |||
4095 | 1488 | This context provides the endpoint type used for communication between | ||
4096 | 1489 | services e.g. between Nova and Cinder internally. Openstack uses Public | ||
4097 | 1490 | endpoints by default so this allows admins to optionally use internal | ||
4098 | 1491 | endpoints. | ||
4099 | 1492 | """ | ||
4100 | 1493 | def __call__(self): | ||
4101 | 1494 | return {'use_internal_endpoints': config('use-internal-endpoints')} | ||
4102 | 1495 | |||
4103 | 1496 | |||
4104 | 1497 | class AppArmorContext(OSContextGenerator): | ||
4105 | 1498 | """Base class for apparmor contexts.""" | ||
4106 | 1499 | |||
4107 | 1500 | def __init__(self): | ||
4108 | 1501 | self._ctxt = None | ||
4109 | 1502 | self.aa_profile = None | ||
4110 | 1503 | self.aa_utils_packages = ['apparmor-utils'] | ||
4111 | 1504 | |||
4112 | 1505 | @property | ||
4113 | 1506 | def ctxt(self): | ||
4114 | 1507 | if self._ctxt is not None: | ||
4115 | 1508 | return self._ctxt | ||
4116 | 1509 | self._ctxt = self._determine_ctxt() | ||
4117 | 1510 | return self._ctxt | ||
4118 | 1511 | |||
4119 | 1512 | def _determine_ctxt(self): | ||
4120 | 1513 | """ | ||
4121 | 1514 | Validate aa-profile-mode settings is disable, enforce, or complain. | ||
4122 | 1515 | |||
4123 | 1516 | :return ctxt: Dictionary of the apparmor profile or None | ||
4124 | 1517 | """ | ||
4125 | 1518 | if config('aa-profile-mode') in ['disable', 'enforce', 'complain']: | ||
4126 | 1519 | ctxt = {'aa-profile-mode': config('aa-profile-mode')} | ||
4127 | 1520 | else: | ||
4128 | 1521 | ctxt = None | ||
4129 | 1522 | return ctxt | ||
4130 | 1523 | |||
4131 | 1524 | def __call__(self): | ||
4132 | 1525 | return self.ctxt | ||
4133 | 1526 | |||
4134 | 1527 | def install_aa_utils(self): | ||
4135 | 1528 | """ | ||
4136 | 1529 | Install packages required for apparmor configuration. | ||
4137 | 1530 | """ | ||
4138 | 1531 | log("Installing apparmor utils.") | ||
4139 | 1532 | ensure_packages(self.aa_utils_packages) | ||
4140 | 1533 | |||
4141 | 1534 | def manually_disable_aa_profile(self): | ||
4142 | 1535 | """ | ||
4143 | 1536 | Manually disable an apparmor profile. | ||
4144 | 1537 | |||
4145 | 1538 | If aa-profile-mode is set to disabled (default) this is required as the | ||
4146 | 1539 | template has been written but apparmor is yet unaware of the profile | ||
4147 | 1540 | and aa-disable aa-profile fails. Without this the profile would kick | ||
4148 | 1541 | into enforce mode on the next service restart. | ||
4149 | 1542 | |||
4150 | 1543 | """ | ||
4151 | 1544 | profile_path = '/etc/apparmor.d' | ||
4152 | 1545 | disable_path = '/etc/apparmor.d/disable' | ||
4153 | 1546 | if not os.path.lexists(os.path.join(disable_path, self.aa_profile)): | ||
4154 | 1547 | os.symlink(os.path.join(profile_path, self.aa_profile), | ||
4155 | 1548 | os.path.join(disable_path, self.aa_profile)) | ||
4156 | 1549 | |||
4157 | 1550 | def setup_aa_profile(self): | ||
4158 | 1551 | """ | ||
4159 | 1552 | Setup an apparmor profile. | ||
4160 | 1553 | The ctxt dictionary will contain the apparmor profile mode and | ||
4161 | 1554 | the apparmor profile name. | ||
4162 | 1555 | Makes calls out to aa-disable, aa-complain, or aa-enforce to setup | ||
4163 | 1556 | the apparmor profile. | ||
4164 | 1557 | """ | ||
4165 | 1558 | self() | ||
4166 | 1559 | if not self.ctxt: | ||
4167 | 1560 | log("Not enabling apparmor Profile") | ||
4168 | 1561 | return | ||
4169 | 1562 | self.install_aa_utils() | ||
4170 | 1563 | cmd = ['aa-{}'.format(self.ctxt['aa-profile-mode'])] | ||
4171 | 1564 | cmd.append(self.ctxt['aa-profile']) | ||
4172 | 1565 | log("Setting up the apparmor profile for {} in {} mode." | ||
4173 | 1566 | "".format(self.ctxt['aa-profile'], self.ctxt['aa-profile-mode'])) | ||
4174 | 1567 | try: | ||
4175 | 1568 | check_call(cmd) | ||
4176 | 1569 | except CalledProcessError as e: | ||
4177 | 1570 | # If aa-profile-mode is set to disabled (default) manual | ||
4178 | 1571 | # disabling is required as the template has been written but | ||
4179 | 1572 | # apparmor is yet unaware of the profile and aa-disable aa-profile | ||
4180 | 1573 | # fails. If aa-disable learns to read profile files first this can | ||
4181 | 1574 | # be removed. | ||
4182 | 1575 | if self.ctxt['aa-profile-mode'] == 'disable': | ||
4183 | 1576 | log("Manually disabling the apparmor profile for {}." | ||
4184 | 1577 | "".format(self.ctxt['aa-profile'])) | ||
4185 | 1578 | self.manually_disable_aa_profile() | ||
4186 | 1579 | return | ||
4187 | 1580 | status_set('blocked', "Apparmor profile {} failed to be set to {}." | ||
4188 | 1581 | "".format(self.ctxt['aa-profile'], | ||
4189 | 1582 | self.ctxt['aa-profile-mode'])) | ||
4190 | 1583 | raise e | ||
4191 | 1345 | 1584 | ||
4192 | === modified file 'hooks/charmhelpers/contrib/openstack/ip.py' | |||
4193 | --- hooks/charmhelpers/contrib/openstack/ip.py 2015-07-29 18:07:31 +0000 | |||
4194 | +++ hooks/charmhelpers/contrib/openstack/ip.py 2016-05-19 03:33:34 +0000 | |||
4195 | @@ -14,16 +14,19 @@ | |||
4196 | 14 | # You should have received a copy of the GNU Lesser General Public License | 14 | # You should have received a copy of the GNU Lesser General Public License |
4197 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
4198 | 16 | 16 | ||
4199 | 17 | |||
4200 | 17 | from charmhelpers.core.hookenv import ( | 18 | from charmhelpers.core.hookenv import ( |
4201 | 18 | config, | 19 | config, |
4202 | 19 | unit_get, | 20 | unit_get, |
4203 | 20 | service_name, | 21 | service_name, |
4204 | 22 | network_get_primary_address, | ||
4205 | 21 | ) | 23 | ) |
4206 | 22 | from charmhelpers.contrib.network.ip import ( | 24 | from charmhelpers.contrib.network.ip import ( |
4207 | 23 | get_address_in_network, | 25 | get_address_in_network, |
4208 | 24 | is_address_in_network, | 26 | is_address_in_network, |
4209 | 25 | is_ipv6, | 27 | is_ipv6, |
4210 | 26 | get_ipv6_addr, | 28 | get_ipv6_addr, |
4211 | 29 | resolve_network_cidr, | ||
4212 | 27 | ) | 30 | ) |
4213 | 28 | from charmhelpers.contrib.hahelpers.cluster import is_clustered | 31 | from charmhelpers.contrib.hahelpers.cluster import is_clustered |
4214 | 29 | 32 | ||
4215 | @@ -33,16 +36,19 @@ | |||
4216 | 33 | 36 | ||
4217 | 34 | ADDRESS_MAP = { | 37 | ADDRESS_MAP = { |
4218 | 35 | PUBLIC: { | 38 | PUBLIC: { |
4219 | 39 | 'binding': 'public', | ||
4220 | 36 | 'config': 'os-public-network', | 40 | 'config': 'os-public-network', |
4221 | 37 | 'fallback': 'public-address', | 41 | 'fallback': 'public-address', |
4222 | 38 | 'override': 'os-public-hostname', | 42 | 'override': 'os-public-hostname', |
4223 | 39 | }, | 43 | }, |
4224 | 40 | INTERNAL: { | 44 | INTERNAL: { |
4225 | 45 | 'binding': 'internal', | ||
4226 | 41 | 'config': 'os-internal-network', | 46 | 'config': 'os-internal-network', |
4227 | 42 | 'fallback': 'private-address', | 47 | 'fallback': 'private-address', |
4228 | 43 | 'override': 'os-internal-hostname', | 48 | 'override': 'os-internal-hostname', |
4229 | 44 | }, | 49 | }, |
4230 | 45 | ADMIN: { | 50 | ADMIN: { |
4231 | 51 | 'binding': 'admin', | ||
4232 | 46 | 'config': 'os-admin-network', | 52 | 'config': 'os-admin-network', |
4233 | 47 | 'fallback': 'private-address', | 53 | 'fallback': 'private-address', |
4234 | 48 | 'override': 'os-admin-hostname', | 54 | 'override': 'os-admin-hostname', |
4235 | @@ -110,7 +116,7 @@ | |||
4236 | 110 | correct network. If clustered with no nets defined, return primary vip. | 116 | correct network. If clustered with no nets defined, return primary vip. |
4237 | 111 | 117 | ||
4238 | 112 | If not clustered, return unit address ensuring address is on configured net | 118 | If not clustered, return unit address ensuring address is on configured net |
4240 | 113 | split if one is configured. | 119 | split if one is configured, or a Juju 2.0 extra-binding has been used. |
4241 | 114 | 120 | ||
4242 | 115 | :param endpoint_type: Network endpoing type | 121 | :param endpoint_type: Network endpoing type |
4243 | 116 | """ | 122 | """ |
4244 | @@ -125,23 +131,45 @@ | |||
4245 | 125 | net_type = ADDRESS_MAP[endpoint_type]['config'] | 131 | net_type = ADDRESS_MAP[endpoint_type]['config'] |
4246 | 126 | net_addr = config(net_type) | 132 | net_addr = config(net_type) |
4247 | 127 | net_fallback = ADDRESS_MAP[endpoint_type]['fallback'] | 133 | net_fallback = ADDRESS_MAP[endpoint_type]['fallback'] |
4248 | 134 | binding = ADDRESS_MAP[endpoint_type]['binding'] | ||
4249 | 128 | clustered = is_clustered() | 135 | clustered = is_clustered() |
4255 | 129 | if clustered: | 136 | |
4256 | 130 | if not net_addr: | 137 | if clustered and vips: |
4257 | 131 | # If no net-splits defined, we expect a single vip | 138 | if net_addr: |
4253 | 132 | resolved_address = vips[0] | ||
4254 | 133 | else: | ||
4258 | 134 | for vip in vips: | 139 | for vip in vips: |
4259 | 135 | if is_address_in_network(net_addr, vip): | 140 | if is_address_in_network(net_addr, vip): |
4260 | 136 | resolved_address = vip | 141 | resolved_address = vip |
4261 | 137 | break | 142 | break |
4262 | 143 | else: | ||
4263 | 144 | # NOTE: endeavour to check vips against network space | ||
4264 | 145 | # bindings | ||
4265 | 146 | try: | ||
4266 | 147 | bound_cidr = resolve_network_cidr( | ||
4267 | 148 | network_get_primary_address(binding) | ||
4268 | 149 | ) | ||
4269 | 150 | for vip in vips: | ||
4270 | 151 | if is_address_in_network(bound_cidr, vip): | ||
4271 | 152 | resolved_address = vip | ||
4272 | 153 | break | ||
4273 | 154 | except NotImplementedError: | ||
4274 | 155 | # If no net-splits configured and no support for extra | ||
4275 | 156 | # bindings/network spaces so we expect a single vip | ||
4276 | 157 | resolved_address = vips[0] | ||
4277 | 138 | else: | 158 | else: |
4278 | 139 | if config('prefer-ipv6'): | 159 | if config('prefer-ipv6'): |
4279 | 140 | fallback_addr = get_ipv6_addr(exc_list=vips)[0] | 160 | fallback_addr = get_ipv6_addr(exc_list=vips)[0] |
4280 | 141 | else: | 161 | else: |
4281 | 142 | fallback_addr = unit_get(net_fallback) | 162 | fallback_addr = unit_get(net_fallback) |
4282 | 143 | 163 | ||
4284 | 144 | resolved_address = get_address_in_network(net_addr, fallback_addr) | 164 | if net_addr: |
4285 | 165 | resolved_address = get_address_in_network(net_addr, fallback_addr) | ||
4286 | 166 | else: | ||
4287 | 167 | # NOTE: only try to use extra bindings if legacy network | ||
4288 | 168 | # configuration is not in use | ||
4289 | 169 | try: | ||
4290 | 170 | resolved_address = network_get_primary_address(binding) | ||
4291 | 171 | except NotImplementedError: | ||
4292 | 172 | resolved_address = fallback_addr | ||
4293 | 145 | 173 | ||
4294 | 146 | if resolved_address is None: | 174 | if resolved_address is None: |
4295 | 147 | raise ValueError("Unable to resolve a suitable IP address based on " | 175 | raise ValueError("Unable to resolve a suitable IP address based on " |
4296 | 148 | 176 | ||
4297 | === modified file 'hooks/charmhelpers/contrib/openstack/neutron.py' | |||
4298 | --- hooks/charmhelpers/contrib/openstack/neutron.py 2015-10-02 15:06:23 +0000 | |||
4299 | +++ hooks/charmhelpers/contrib/openstack/neutron.py 2016-05-19 03:33:34 +0000 | |||
4300 | @@ -50,7 +50,7 @@ | |||
4301 | 50 | if kernel_version() >= (3, 13): | 50 | if kernel_version() >= (3, 13): |
4302 | 51 | return [] | 51 | return [] |
4303 | 52 | else: | 52 | else: |
4305 | 53 | return ['openvswitch-datapath-dkms'] | 53 | return [headers_package(), 'openvswitch-datapath-dkms'] |
4306 | 54 | 54 | ||
4307 | 55 | 55 | ||
4308 | 56 | # legacy | 56 | # legacy |
4309 | @@ -70,7 +70,7 @@ | |||
4310 | 70 | relation_prefix='neutron', | 70 | relation_prefix='neutron', |
4311 | 71 | ssl_dir=QUANTUM_CONF_DIR)], | 71 | ssl_dir=QUANTUM_CONF_DIR)], |
4312 | 72 | 'services': ['quantum-plugin-openvswitch-agent'], | 72 | 'services': ['quantum-plugin-openvswitch-agent'], |
4314 | 73 | 'packages': [[headers_package()] + determine_dkms_package(), | 73 | 'packages': [determine_dkms_package(), |
4315 | 74 | ['quantum-plugin-openvswitch-agent']], | 74 | ['quantum-plugin-openvswitch-agent']], |
4316 | 75 | 'server_packages': ['quantum-server', | 75 | 'server_packages': ['quantum-server', |
4317 | 76 | 'quantum-plugin-openvswitch'], | 76 | 'quantum-plugin-openvswitch'], |
4318 | @@ -111,7 +111,7 @@ | |||
4319 | 111 | relation_prefix='neutron', | 111 | relation_prefix='neutron', |
4320 | 112 | ssl_dir=NEUTRON_CONF_DIR)], | 112 | ssl_dir=NEUTRON_CONF_DIR)], |
4321 | 113 | 'services': ['neutron-plugin-openvswitch-agent'], | 113 | 'services': ['neutron-plugin-openvswitch-agent'], |
4323 | 114 | 'packages': [[headers_package()] + determine_dkms_package(), | 114 | 'packages': [determine_dkms_package(), |
4324 | 115 | ['neutron-plugin-openvswitch-agent']], | 115 | ['neutron-plugin-openvswitch-agent']], |
4325 | 116 | 'server_packages': ['neutron-server', | 116 | 'server_packages': ['neutron-server', |
4326 | 117 | 'neutron-plugin-openvswitch'], | 117 | 'neutron-plugin-openvswitch'], |
4327 | @@ -155,7 +155,7 @@ | |||
4328 | 155 | relation_prefix='neutron', | 155 | relation_prefix='neutron', |
4329 | 156 | ssl_dir=NEUTRON_CONF_DIR)], | 156 | ssl_dir=NEUTRON_CONF_DIR)], |
4330 | 157 | 'services': [], | 157 | 'services': [], |
4332 | 158 | 'packages': [[headers_package()] + determine_dkms_package(), | 158 | 'packages': [determine_dkms_package(), |
4333 | 159 | ['neutron-plugin-cisco']], | 159 | ['neutron-plugin-cisco']], |
4334 | 160 | 'server_packages': ['neutron-server', | 160 | 'server_packages': ['neutron-server', |
4335 | 161 | 'neutron-plugin-cisco'], | 161 | 'neutron-plugin-cisco'], |
4336 | @@ -174,7 +174,7 @@ | |||
4337 | 174 | 'neutron-dhcp-agent', | 174 | 'neutron-dhcp-agent', |
4338 | 175 | 'nova-api-metadata', | 175 | 'nova-api-metadata', |
4339 | 176 | 'etcd'], | 176 | 'etcd'], |
4341 | 177 | 'packages': [[headers_package()] + determine_dkms_package(), | 177 | 'packages': [determine_dkms_package(), |
4342 | 178 | ['calico-compute', | 178 | ['calico-compute', |
4343 | 179 | 'bird', | 179 | 'bird', |
4344 | 180 | 'neutron-dhcp-agent', | 180 | 'neutron-dhcp-agent', |
4345 | @@ -209,6 +209,20 @@ | |||
4346 | 209 | 'server_packages': ['neutron-server', | 209 | 'server_packages': ['neutron-server', |
4347 | 210 | 'neutron-plugin-plumgrid'], | 210 | 'neutron-plugin-plumgrid'], |
4348 | 211 | 'server_services': ['neutron-server'] | 211 | 'server_services': ['neutron-server'] |
4349 | 212 | }, | ||
4350 | 213 | 'midonet': { | ||
4351 | 214 | 'config': '/etc/neutron/plugins/midonet/midonet.ini', | ||
4352 | 215 | 'driver': 'midonet.neutron.plugin.MidonetPluginV2', | ||
4353 | 216 | 'contexts': [ | ||
4354 | 217 | context.SharedDBContext(user=config('neutron-database-user'), | ||
4355 | 218 | database=config('neutron-database'), | ||
4356 | 219 | relation_prefix='neutron', | ||
4357 | 220 | ssl_dir=NEUTRON_CONF_DIR)], | ||
4358 | 221 | 'services': [], | ||
4359 | 222 | 'packages': [determine_dkms_package()], | ||
4360 | 223 | 'server_packages': ['neutron-server', | ||
4361 | 224 | 'python-neutron-plugin-midonet'], | ||
4362 | 225 | 'server_services': ['neutron-server'] | ||
4363 | 212 | } | 226 | } |
4364 | 213 | } | 227 | } |
4365 | 214 | if release >= 'icehouse': | 228 | if release >= 'icehouse': |
4366 | @@ -219,6 +233,20 @@ | |||
4367 | 219 | 'neutron-plugin-ml2'] | 233 | 'neutron-plugin-ml2'] |
4368 | 220 | # NOTE: patch in vmware renames nvp->nsx for icehouse onwards | 234 | # NOTE: patch in vmware renames nvp->nsx for icehouse onwards |
4369 | 221 | plugins['nvp'] = plugins['nsx'] | 235 | plugins['nvp'] = plugins['nsx'] |
4370 | 236 | if release >= 'kilo': | ||
4371 | 237 | plugins['midonet']['driver'] = ( | ||
4372 | 238 | 'neutron.plugins.midonet.plugin.MidonetPluginV2') | ||
4373 | 239 | if release >= 'liberty': | ||
4374 | 240 | plugins['midonet']['driver'] = ( | ||
4375 | 241 | 'midonet.neutron.plugin_v1.MidonetPluginV2') | ||
4376 | 242 | plugins['midonet']['server_packages'].remove( | ||
4377 | 243 | 'python-neutron-plugin-midonet') | ||
4378 | 244 | plugins['midonet']['server_packages'].append( | ||
4379 | 245 | 'python-networking-midonet') | ||
4380 | 246 | plugins['plumgrid']['driver'] = ( | ||
4381 | 247 | 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2') | ||
4382 | 248 | plugins['plumgrid']['server_packages'].remove( | ||
4383 | 249 | 'neutron-plugin-plumgrid') | ||
4384 | 222 | return plugins | 250 | return plugins |
4385 | 223 | 251 | ||
4386 | 224 | 252 | ||
4387 | @@ -269,17 +297,30 @@ | |||
4388 | 269 | return 'neutron' | 297 | return 'neutron' |
4389 | 270 | 298 | ||
4390 | 271 | 299 | ||
4392 | 272 | def parse_mappings(mappings): | 300 | def parse_mappings(mappings, key_rvalue=False): |
4393 | 301 | """By default mappings are lvalue keyed. | ||
4394 | 302 | |||
4395 | 303 | If key_rvalue is True, the mapping will be reversed to allow multiple | ||
4396 | 304 | configs for the same lvalue. | ||
4397 | 305 | """ | ||
4398 | 273 | parsed = {} | 306 | parsed = {} |
4399 | 274 | if mappings: | 307 | if mappings: |
4400 | 275 | mappings = mappings.split() | 308 | mappings = mappings.split() |
4401 | 276 | for m in mappings: | 309 | for m in mappings: |
4402 | 277 | p = m.partition(':') | 310 | p = m.partition(':') |
4406 | 278 | key = p[0].strip() | 311 | |
4407 | 279 | if p[1]: | 312 | if key_rvalue: |
4408 | 280 | parsed[key] = p[2].strip() | 313 | key_index = 2 |
4409 | 314 | val_index = 0 | ||
4410 | 315 | # if there is no rvalue skip to next | ||
4411 | 316 | if not p[1]: | ||
4412 | 317 | continue | ||
4413 | 281 | else: | 318 | else: |
4415 | 282 | parsed[key] = '' | 319 | key_index = 0 |
4416 | 320 | val_index = 2 | ||
4417 | 321 | |||
4418 | 322 | key = p[key_index].strip() | ||
4419 | 323 | parsed[key] = p[val_index].strip() | ||
4420 | 283 | 324 | ||
4421 | 284 | return parsed | 325 | return parsed |
4422 | 285 | 326 | ||
4423 | @@ -297,25 +338,25 @@ | |||
4424 | 297 | def parse_data_port_mappings(mappings, default_bridge='br-data'): | 338 | def parse_data_port_mappings(mappings, default_bridge='br-data'): |
4425 | 298 | """Parse data port mappings. | 339 | """Parse data port mappings. |
4426 | 299 | 340 | ||
4428 | 300 | Mappings must be a space-delimited list of bridge:port mappings. | 341 | Mappings must be a space-delimited list of bridge:port. |
4429 | 301 | 342 | ||
4431 | 302 | Returns dict of the form {bridge:port}. | 343 | Returns dict of the form {port:bridge} where ports may be mac addresses or |
4432 | 344 | interface names. | ||
4433 | 303 | """ | 345 | """ |
4435 | 304 | _mappings = parse_mappings(mappings) | 346 | |
4436 | 347 | # NOTE(dosaboy): we use rvalue for key to allow multiple values to be | ||
4437 | 348 | # proposed for <port> since it may be a mac address which will differ | ||
4438 | 349 | # across units thus allowing first-known-good to be chosen. | ||
4439 | 350 | _mappings = parse_mappings(mappings, key_rvalue=True) | ||
4440 | 305 | if not _mappings or list(_mappings.values()) == ['']: | 351 | if not _mappings or list(_mappings.values()) == ['']: |
4441 | 306 | if not mappings: | 352 | if not mappings: |
4442 | 307 | return {} | 353 | return {} |
4443 | 308 | 354 | ||
4444 | 309 | # For backwards-compatibility we need to support port-only provided in | 355 | # For backwards-compatibility we need to support port-only provided in |
4445 | 310 | # config. | 356 | # config. |
4454 | 311 | _mappings = {default_bridge: mappings.split()[0]} | 357 | _mappings = {mappings.split()[0]: default_bridge} |
4455 | 312 | 358 | ||
4456 | 313 | bridges = _mappings.keys() | 359 | ports = _mappings.keys() |
4449 | 314 | ports = _mappings.values() | ||
4450 | 315 | if len(set(bridges)) != len(bridges): | ||
4451 | 316 | raise Exception("It is not allowed to have more than one port " | ||
4452 | 317 | "configured on the same bridge") | ||
4453 | 318 | |||
4457 | 319 | if len(set(ports)) != len(ports): | 360 | if len(set(ports)) != len(ports): |
4458 | 320 | raise Exception("It is not allowed to have the same port configured " | 361 | raise Exception("It is not allowed to have the same port configured " |
4459 | 321 | "on more than one bridge") | 362 | "on more than one bridge") |
4460 | 322 | 363 | ||
4461 | === modified file 'hooks/charmhelpers/contrib/openstack/templating.py' | |||
4462 | --- hooks/charmhelpers/contrib/openstack/templating.py 2015-07-29 18:07:31 +0000 | |||
4463 | +++ hooks/charmhelpers/contrib/openstack/templating.py 2016-05-19 03:33:34 +0000 | |||
4464 | @@ -18,7 +18,7 @@ | |||
4465 | 18 | 18 | ||
4466 | 19 | import six | 19 | import six |
4467 | 20 | 20 | ||
4469 | 21 | from charmhelpers.fetch import apt_install | 21 | from charmhelpers.fetch import apt_install, apt_update |
4470 | 22 | from charmhelpers.core.hookenv import ( | 22 | from charmhelpers.core.hookenv import ( |
4471 | 23 | log, | 23 | log, |
4472 | 24 | ERROR, | 24 | ERROR, |
4473 | @@ -29,6 +29,7 @@ | |||
4474 | 29 | try: | 29 | try: |
4475 | 30 | from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions | 30 | from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions |
4476 | 31 | except ImportError: | 31 | except ImportError: |
4477 | 32 | apt_update(fatal=True) | ||
4478 | 32 | apt_install('python-jinja2', fatal=True) | 33 | apt_install('python-jinja2', fatal=True) |
4479 | 33 | from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions | 34 | from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions |
4480 | 34 | 35 | ||
4481 | @@ -112,7 +113,7 @@ | |||
4482 | 112 | 113 | ||
4483 | 113 | def complete_contexts(self): | 114 | def complete_contexts(self): |
4484 | 114 | ''' | 115 | ''' |
4486 | 115 | Return a list of interfaces that have atisfied contexts. | 116 | Return a list of interfaces that have satisfied contexts. |
4487 | 116 | ''' | 117 | ''' |
4488 | 117 | if self._complete_contexts: | 118 | if self._complete_contexts: |
4489 | 118 | return self._complete_contexts | 119 | return self._complete_contexts |
4490 | @@ -293,3 +294,30 @@ | |||
4491 | 293 | [interfaces.extend(i.complete_contexts()) | 294 | [interfaces.extend(i.complete_contexts()) |
4492 | 294 | for i in six.itervalues(self.templates)] | 295 | for i in six.itervalues(self.templates)] |
4493 | 295 | return interfaces | 296 | return interfaces |
4494 | 297 | |||
4495 | 298 | def get_incomplete_context_data(self, interfaces): | ||
4496 | 299 | ''' | ||
4497 | 300 | Return dictionary of relation status of interfaces and any missing | ||
4498 | 301 | required context data. Example: | ||
4499 | 302 | {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True}, | ||
4500 | 303 | 'zeromq-configuration': {'related': False}} | ||
4501 | 304 | ''' | ||
4502 | 305 | incomplete_context_data = {} | ||
4503 | 306 | |||
4504 | 307 | for i in six.itervalues(self.templates): | ||
4505 | 308 | for context in i.contexts: | ||
4506 | 309 | for interface in interfaces: | ||
4507 | 310 | related = False | ||
4508 | 311 | if interface in context.interfaces: | ||
4509 | 312 | related = context.get_related() | ||
4510 | 313 | missing_data = context.missing_data | ||
4511 | 314 | if missing_data: | ||
4512 | 315 | incomplete_context_data[interface] = {'missing_data': missing_data} | ||
4513 | 316 | if related: | ||
4514 | 317 | if incomplete_context_data.get(interface): | ||
4515 | 318 | incomplete_context_data[interface].update({'related': True}) | ||
4516 | 319 | else: | ||
4517 | 320 | incomplete_context_data[interface] = {'related': True} | ||
4518 | 321 | else: | ||
4519 | 322 | incomplete_context_data[interface] = {'related': False} | ||
4520 | 323 | return incomplete_context_data | ||
4521 | 296 | 324 | ||
4522 | === modified file 'hooks/charmhelpers/contrib/openstack/utils.py' | |||
4523 | --- hooks/charmhelpers/contrib/openstack/utils.py 2015-07-29 18:07:31 +0000 | |||
4524 | +++ hooks/charmhelpers/contrib/openstack/utils.py 2016-05-19 03:33:34 +0000 | |||
4525 | @@ -1,5 +1,3 @@ | |||
4526 | 1 | #!/usr/bin/python | ||
4527 | 2 | |||
4528 | 3 | # Copyright 2014-2015 Canonical Limited. | 1 | # Copyright 2014-2015 Canonical Limited. |
4529 | 4 | # | 2 | # |
4530 | 5 | # This file is part of charm-helpers. | 3 | # This file is part of charm-helpers. |
4531 | @@ -24,8 +22,14 @@ | |||
4532 | 24 | import json | 22 | import json |
4533 | 25 | import os | 23 | import os |
4534 | 26 | import sys | 24 | import sys |
4535 | 25 | import re | ||
4536 | 26 | import itertools | ||
4537 | 27 | import functools | ||
4538 | 27 | 28 | ||
4539 | 28 | import six | 29 | import six |
4540 | 30 | import tempfile | ||
4541 | 31 | import traceback | ||
4542 | 32 | import uuid | ||
4543 | 29 | import yaml | 33 | import yaml |
4544 | 30 | 34 | ||
4545 | 31 | from charmhelpers.contrib.network import ip | 35 | from charmhelpers.contrib.network import ip |
4546 | @@ -35,12 +39,18 @@ | |||
4547 | 35 | ) | 39 | ) |
4548 | 36 | 40 | ||
4549 | 37 | from charmhelpers.core.hookenv import ( | 41 | from charmhelpers.core.hookenv import ( |
4550 | 42 | action_fail, | ||
4551 | 43 | action_set, | ||
4552 | 38 | config, | 44 | config, |
4553 | 39 | log as juju_log, | 45 | log as juju_log, |
4554 | 40 | charm_dir, | 46 | charm_dir, |
4555 | 47 | DEBUG, | ||
4556 | 41 | INFO, | 48 | INFO, |
4557 | 49 | related_units, | ||
4558 | 42 | relation_ids, | 50 | relation_ids, |
4560 | 43 | relation_set | 51 | relation_set, |
4561 | 52 | status_set, | ||
4562 | 53 | hook_name | ||
4563 | 44 | ) | 54 | ) |
4564 | 45 | 55 | ||
4565 | 46 | from charmhelpers.contrib.storage.linux.lvm import ( | 56 | from charmhelpers.contrib.storage.linux.lvm import ( |
4566 | @@ -50,7 +60,9 @@ | |||
4567 | 50 | ) | 60 | ) |
4568 | 51 | 61 | ||
4569 | 52 | from charmhelpers.contrib.network.ip import ( | 62 | from charmhelpers.contrib.network.ip import ( |
4571 | 53 | get_ipv6_addr | 63 | get_ipv6_addr, |
4572 | 64 | is_ipv6, | ||
4573 | 65 | port_has_listener, | ||
4574 | 54 | ) | 66 | ) |
4575 | 55 | 67 | ||
4576 | 56 | from charmhelpers.contrib.python.packages import ( | 68 | from charmhelpers.contrib.python.packages import ( |
4577 | @@ -58,7 +70,15 @@ | |||
4578 | 58 | pip_install, | 70 | pip_install, |
4579 | 59 | ) | 71 | ) |
4580 | 60 | 72 | ||
4582 | 61 | from charmhelpers.core.host import lsb_release, mounts, umount | 73 | from charmhelpers.core.host import ( |
4583 | 74 | lsb_release, | ||
4584 | 75 | mounts, | ||
4585 | 76 | umount, | ||
4586 | 77 | service_running, | ||
4587 | 78 | service_pause, | ||
4588 | 79 | service_resume, | ||
4589 | 80 | restart_on_change_helper, | ||
4590 | 81 | ) | ||
4591 | 62 | from charmhelpers.fetch import apt_install, apt_cache, install_remote | 82 | from charmhelpers.fetch import apt_install, apt_cache, install_remote |
4592 | 63 | from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk | 83 | from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk |
4593 | 64 | from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device | 84 | from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device |
4594 | @@ -69,7 +89,6 @@ | |||
4595 | 69 | DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' | 89 | DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' |
4596 | 70 | 'restricted main multiverse universe') | 90 | 'restricted main multiverse universe') |
4597 | 71 | 91 | ||
4598 | 72 | |||
4599 | 73 | UBUNTU_OPENSTACK_RELEASE = OrderedDict([ | 92 | UBUNTU_OPENSTACK_RELEASE = OrderedDict([ |
4600 | 74 | ('oneiric', 'diablo'), | 93 | ('oneiric', 'diablo'), |
4601 | 75 | ('precise', 'essex'), | 94 | ('precise', 'essex'), |
4602 | @@ -80,6 +99,7 @@ | |||
4603 | 80 | ('utopic', 'juno'), | 99 | ('utopic', 'juno'), |
4604 | 81 | ('vivid', 'kilo'), | 100 | ('vivid', 'kilo'), |
4605 | 82 | ('wily', 'liberty'), | 101 | ('wily', 'liberty'), |
4606 | 102 | ('xenial', 'mitaka'), | ||
4607 | 83 | ]) | 103 | ]) |
4608 | 84 | 104 | ||
4609 | 85 | 105 | ||
4610 | @@ -93,31 +113,74 @@ | |||
4611 | 93 | ('2014.2', 'juno'), | 113 | ('2014.2', 'juno'), |
4612 | 94 | ('2015.1', 'kilo'), | 114 | ('2015.1', 'kilo'), |
4613 | 95 | ('2015.2', 'liberty'), | 115 | ('2015.2', 'liberty'), |
4614 | 116 | ('2016.1', 'mitaka'), | ||
4615 | 96 | ]) | 117 | ]) |
4616 | 97 | 118 | ||
4618 | 98 | # The ugly duckling | 119 | # The ugly duckling - must list releases oldest to newest |
4619 | 99 | SWIFT_CODENAMES = OrderedDict([ | 120 | SWIFT_CODENAMES = OrderedDict([ |
4639 | 100 | ('1.4.3', 'diablo'), | 121 | ('diablo', |
4640 | 101 | ('1.4.8', 'essex'), | 122 | ['1.4.3']), |
4641 | 102 | ('1.7.4', 'folsom'), | 123 | ('essex', |
4642 | 103 | ('1.8.0', 'grizzly'), | 124 | ['1.4.8']), |
4643 | 104 | ('1.7.7', 'grizzly'), | 125 | ('folsom', |
4644 | 105 | ('1.7.6', 'grizzly'), | 126 | ['1.7.4']), |
4645 | 106 | ('1.10.0', 'havana'), | 127 | ('grizzly', |
4646 | 107 | ('1.9.1', 'havana'), | 128 | ['1.7.6', '1.7.7', '1.8.0']), |
4647 | 108 | ('1.9.0', 'havana'), | 129 | ('havana', |
4648 | 109 | ('1.13.1', 'icehouse'), | 130 | ['1.9.0', '1.9.1', '1.10.0']), |
4649 | 110 | ('1.13.0', 'icehouse'), | 131 | ('icehouse', |
4650 | 111 | ('1.12.0', 'icehouse'), | 132 | ['1.11.0', '1.12.0', '1.13.0', '1.13.1']), |
4651 | 112 | ('1.11.0', 'icehouse'), | 133 | ('juno', |
4652 | 113 | ('2.0.0', 'juno'), | 134 | ['2.0.0', '2.1.0', '2.2.0']), |
4653 | 114 | ('2.1.0', 'juno'), | 135 | ('kilo', |
4654 | 115 | ('2.2.0', 'juno'), | 136 | ['2.2.1', '2.2.2']), |
4655 | 116 | ('2.2.1', 'kilo'), | 137 | ('liberty', |
4656 | 117 | ('2.2.2', 'kilo'), | 138 | ['2.3.0', '2.4.0', '2.5.0']), |
4657 | 118 | ('2.3.0', 'liberty'), | 139 | ('mitaka', |
4658 | 140 | ['2.5.0', '2.6.0', '2.7.0']), | ||
4659 | 119 | ]) | 141 | ]) |
4660 | 120 | 142 | ||
4661 | 143 | # >= Liberty version->codename mapping | ||
4662 | 144 | PACKAGE_CODENAMES = { | ||
4663 | 145 | 'nova-common': OrderedDict([ | ||
4664 | 146 | ('12.0', 'liberty'), | ||
4665 | 147 | ('13.0', 'mitaka'), | ||
4666 | 148 | ]), | ||
4667 | 149 | 'neutron-common': OrderedDict([ | ||
4668 | 150 | ('7.0', 'liberty'), | ||
4669 | 151 | ('8.0', 'mitaka'), | ||
4670 | 152 | ]), | ||
4671 | 153 | 'cinder-common': OrderedDict([ | ||
4672 | 154 | ('7.0', 'liberty'), | ||
4673 | 155 | ('8.0', 'mitaka'), | ||
4674 | 156 | ]), | ||
4675 | 157 | 'keystone': OrderedDict([ | ||
4676 | 158 | ('8.0', 'liberty'), | ||
4677 | 159 | ('8.1', 'liberty'), | ||
4678 | 160 | ('9.0', 'mitaka'), | ||
4679 | 161 | ]), | ||
4680 | 162 | 'horizon-common': OrderedDict([ | ||
4681 | 163 | ('8.0', 'liberty'), | ||
4682 | 164 | ('9.0', 'mitaka'), | ||
4683 | 165 | ]), | ||
4684 | 166 | 'ceilometer-common': OrderedDict([ | ||
4685 | 167 | ('5.0', 'liberty'), | ||
4686 | 168 | ('6.0', 'mitaka'), | ||
4687 | 169 | ]), | ||
4688 | 170 | 'heat-common': OrderedDict([ | ||
4689 | 171 | ('5.0', 'liberty'), | ||
4690 | 172 | ('6.0', 'mitaka'), | ||
4691 | 173 | ]), | ||
4692 | 174 | 'glance-common': OrderedDict([ | ||
4693 | 175 | ('11.0', 'liberty'), | ||
4694 | 176 | ('12.0', 'mitaka'), | ||
4695 | 177 | ]), | ||
4696 | 178 | 'openstack-dashboard': OrderedDict([ | ||
4697 | 179 | ('8.0', 'liberty'), | ||
4698 | 180 | ('9.0', 'mitaka'), | ||
4699 | 181 | ]), | ||
4700 | 182 | } | ||
4701 | 183 | |||
4702 | 121 | DEFAULT_LOOPBACK_SIZE = '5G' | 184 | DEFAULT_LOOPBACK_SIZE = '5G' |
4703 | 122 | 185 | ||
4704 | 123 | 186 | ||
4705 | @@ -167,9 +230,9 @@ | |||
4706 | 167 | error_out(e) | 230 | error_out(e) |
4707 | 168 | 231 | ||
4708 | 169 | 232 | ||
4710 | 170 | def get_os_version_codename(codename): | 233 | def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): |
4711 | 171 | '''Determine OpenStack version number from codename.''' | 234 | '''Determine OpenStack version number from codename.''' |
4713 | 172 | for k, v in six.iteritems(OPENSTACK_CODENAMES): | 235 | for k, v in six.iteritems(version_map): |
4714 | 173 | if v == codename: | 236 | if v == codename: |
4715 | 174 | return k | 237 | return k |
4716 | 175 | e = 'Could not derive OpenStack version for '\ | 238 | e = 'Could not derive OpenStack version for '\ |
4717 | @@ -177,6 +240,33 @@ | |||
4718 | 177 | error_out(e) | 240 | error_out(e) |
4719 | 178 | 241 | ||
4720 | 179 | 242 | ||
4721 | 243 | def get_os_version_codename_swift(codename): | ||
4722 | 244 | '''Determine OpenStack version number of swift from codename.''' | ||
4723 | 245 | for k, v in six.iteritems(SWIFT_CODENAMES): | ||
4724 | 246 | if k == codename: | ||
4725 | 247 | return v[-1] | ||
4726 | 248 | e = 'Could not derive swift version for '\ | ||
4727 | 249 | 'codename: %s' % codename | ||
4728 | 250 | error_out(e) | ||
4729 | 251 | |||
4730 | 252 | |||
4731 | 253 | def get_swift_codename(version): | ||
4732 | 254 | '''Determine OpenStack codename that corresponds to swift version.''' | ||
4733 | 255 | codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v] | ||
4734 | 256 | if len(codenames) > 1: | ||
4735 | 257 | # If more than one release codename contains this version we determine | ||
4736 | 258 | # the actual codename based on the highest available install source. | ||
4737 | 259 | for codename in reversed(codenames): | ||
4738 | 260 | releases = UBUNTU_OPENSTACK_RELEASE | ||
4739 | 261 | release = [k for k, v in six.iteritems(releases) if codename in v] | ||
4740 | 262 | ret = subprocess.check_output(['apt-cache', 'policy', 'swift']) | ||
4741 | 263 | if codename in ret or release[0] in ret: | ||
4742 | 264 | return codename | ||
4743 | 265 | elif len(codenames) == 1: | ||
4744 | 266 | return codenames[0] | ||
4745 | 267 | return None | ||
4746 | 268 | |||
4747 | 269 | |||
4748 | 180 | def get_os_codename_package(package, fatal=True): | 270 | def get_os_codename_package(package, fatal=True): |
4749 | 181 | '''Derive OpenStack release codename from an installed package.''' | 271 | '''Derive OpenStack release codename from an installed package.''' |
4750 | 182 | import apt_pkg as apt | 272 | import apt_pkg as apt |
4751 | @@ -201,20 +291,33 @@ | |||
4752 | 201 | error_out(e) | 291 | error_out(e) |
4753 | 202 | 292 | ||
4754 | 203 | vers = apt.upstream_version(pkg.current_ver.ver_str) | 293 | vers = apt.upstream_version(pkg.current_ver.ver_str) |
4769 | 204 | 294 | if 'swift' in pkg.name: | |
4770 | 205 | try: | 295 | # Fully x.y.z match for swift versions |
4771 | 206 | if 'swift' in pkg.name: | 296 | match = re.match('^(\d+)\.(\d+)\.(\d+)', vers) |
4772 | 207 | swift_vers = vers[:5] | 297 | else: |
4773 | 208 | if swift_vers not in SWIFT_CODENAMES: | 298 | # x.y match only for 20XX.X |
4774 | 209 | # Deal with 1.10.0 upward | 299 | # and ignore patch level for other packages |
4775 | 210 | swift_vers = vers[:6] | 300 | match = re.match('^(\d+)\.(\d+)', vers) |
4776 | 211 | return SWIFT_CODENAMES[swift_vers] | 301 | |
4777 | 212 | else: | 302 | if match: |
4778 | 213 | vers = vers[:6] | 303 | vers = match.group(0) |
4779 | 214 | return OPENSTACK_CODENAMES[vers] | 304 | |
4780 | 215 | except KeyError: | 305 | # >= Liberty independent project versions |
4781 | 216 | e = 'Could not determine OpenStack codename for version %s' % vers | 306 | if (package in PACKAGE_CODENAMES and |
4782 | 217 | error_out(e) | 307 | vers in PACKAGE_CODENAMES[package]): |
4783 | 308 | return PACKAGE_CODENAMES[package][vers] | ||
4784 | 309 | else: | ||
4785 | 310 | # < Liberty co-ordinated project versions | ||
4786 | 311 | try: | ||
4787 | 312 | if 'swift' in pkg.name: | ||
4788 | 313 | return get_swift_codename(vers) | ||
4789 | 314 | else: | ||
4790 | 315 | return OPENSTACK_CODENAMES[vers] | ||
4791 | 316 | except KeyError: | ||
4792 | 317 | if not fatal: | ||
4793 | 318 | return None | ||
4794 | 319 | e = 'Could not determine OpenStack codename for version %s' % vers | ||
4795 | 320 | error_out(e) | ||
4796 | 218 | 321 | ||
4797 | 219 | 322 | ||
4798 | 220 | def get_os_version_package(pkg, fatal=True): | 323 | def get_os_version_package(pkg, fatal=True): |
4799 | @@ -226,12 +329,14 @@ | |||
4800 | 226 | 329 | ||
4801 | 227 | if 'swift' in pkg: | 330 | if 'swift' in pkg: |
4802 | 228 | vers_map = SWIFT_CODENAMES | 331 | vers_map = SWIFT_CODENAMES |
4803 | 332 | for cname, version in six.iteritems(vers_map): | ||
4804 | 333 | if cname == codename: | ||
4805 | 334 | return version[-1] | ||
4806 | 229 | else: | 335 | else: |
4807 | 230 | vers_map = OPENSTACK_CODENAMES | 336 | vers_map = OPENSTACK_CODENAMES |
4812 | 231 | 337 | for version, cname in six.iteritems(vers_map): | |
4813 | 232 | for version, cname in six.iteritems(vers_map): | 338 | if cname == codename: |
4814 | 233 | if cname == codename: | 339 | return version |
4811 | 234 | return version | ||
4815 | 235 | # e = "Could not determine OpenStack version for package: %s" % pkg | 340 | # e = "Could not determine OpenStack version for package: %s" % pkg |
4816 | 236 | # error_out(e) | 341 | # error_out(e) |
4817 | 237 | 342 | ||
4818 | @@ -256,12 +361,42 @@ | |||
4819 | 256 | 361 | ||
4820 | 257 | 362 | ||
4821 | 258 | def import_key(keyid): | 363 | def import_key(keyid): |
4828 | 259 | cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \ | 364 | key = keyid.strip() |
4829 | 260 | "--recv-keys %s" % keyid | 365 | if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and |
4830 | 261 | try: | 366 | key.endswith('-----END PGP PUBLIC KEY BLOCK-----')): |
4831 | 262 | subprocess.check_call(cmd.split(' ')) | 367 | juju_log("PGP key found (looks like ASCII Armor format)", level=DEBUG) |
4832 | 263 | except subprocess.CalledProcessError: | 368 | juju_log("Importing ASCII Armor PGP key", level=DEBUG) |
4833 | 264 | error_out("Error importing repo key %s" % keyid) | 369 | with tempfile.NamedTemporaryFile() as keyfile: |
4834 | 370 | with open(keyfile.name, 'w') as fd: | ||
4835 | 371 | fd.write(key) | ||
4836 | 372 | fd.write("\n") | ||
4837 | 373 | |||
4838 | 374 | cmd = ['apt-key', 'add', keyfile.name] | ||
4839 | 375 | try: | ||
4840 | 376 | subprocess.check_call(cmd) | ||
4841 | 377 | except subprocess.CalledProcessError: | ||
4842 | 378 | error_out("Error importing PGP key '%s'" % key) | ||
4843 | 379 | else: | ||
4844 | 380 | juju_log("PGP key found (looks like Radix64 format)", level=DEBUG) | ||
4845 | 381 | juju_log("Importing PGP key from keyserver", level=DEBUG) | ||
4846 | 382 | cmd = ['apt-key', 'adv', '--keyserver', | ||
4847 | 383 | 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key] | ||
4848 | 384 | try: | ||
4849 | 385 | subprocess.check_call(cmd) | ||
4850 | 386 | except subprocess.CalledProcessError: | ||
4851 | 387 | error_out("Error importing PGP key '%s'" % key) | ||
4852 | 388 | |||
4853 | 389 | |||
4854 | 390 | def get_source_and_pgp_key(input): | ||
4855 | 391 | """Look for a pgp key ID or ascii-armor key in the given input.""" | ||
4856 | 392 | index = input.strip() | ||
4857 | 393 | index = input.rfind('|') | ||
4858 | 394 | if index < 0: | ||
4859 | 395 | return input, None | ||
4860 | 396 | |||
4861 | 397 | key = input[index + 1:].strip('|') | ||
4862 | 398 | source = input[:index] | ||
4863 | 399 | return source, key | ||
4864 | 265 | 400 | ||
4865 | 266 | 401 | ||
4866 | 267 | def configure_installation_source(rel): | 402 | def configure_installation_source(rel): |
4867 | @@ -273,16 +408,16 @@ | |||
4868 | 273 | with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: | 408 | with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: |
4869 | 274 | f.write(DISTRO_PROPOSED % ubuntu_rel) | 409 | f.write(DISTRO_PROPOSED % ubuntu_rel) |
4870 | 275 | elif rel[:4] == "ppa:": | 410 | elif rel[:4] == "ppa:": |
4872 | 276 | src = rel | 411 | src, key = get_source_and_pgp_key(rel) |
4873 | 412 | if key: | ||
4874 | 413 | import_key(key) | ||
4875 | 414 | |||
4876 | 277 | subprocess.check_call(["add-apt-repository", "-y", src]) | 415 | subprocess.check_call(["add-apt-repository", "-y", src]) |
4877 | 278 | elif rel[:3] == "deb": | 416 | elif rel[:3] == "deb": |
4882 | 279 | l = len(rel.split('|')) | 417 | src, key = get_source_and_pgp_key(rel) |
4883 | 280 | if l == 2: | 418 | if key: |
4880 | 281 | src, key = rel.split('|') | ||
4881 | 282 | juju_log("Importing PPA key from keyserver for %s" % src) | ||
4884 | 283 | import_key(key) | 419 | import_key(key) |
4887 | 284 | elif l == 1: | 420 | |
4886 | 285 | src = rel | ||
4888 | 286 | with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: | 421 | with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: |
4889 | 287 | f.write(src) | 422 | f.write(src) |
4890 | 288 | elif rel[:6] == 'cloud:': | 423 | elif rel[:6] == 'cloud:': |
4891 | @@ -327,6 +462,9 @@ | |||
4892 | 327 | 'liberty': 'trusty-updates/liberty', | 462 | 'liberty': 'trusty-updates/liberty', |
4893 | 328 | 'liberty/updates': 'trusty-updates/liberty', | 463 | 'liberty/updates': 'trusty-updates/liberty', |
4894 | 329 | 'liberty/proposed': 'trusty-proposed/liberty', | 464 | 'liberty/proposed': 'trusty-proposed/liberty', |
4895 | 465 | 'mitaka': 'trusty-updates/mitaka', | ||
4896 | 466 | 'mitaka/updates': 'trusty-updates/mitaka', | ||
4897 | 467 | 'mitaka/proposed': 'trusty-proposed/mitaka', | ||
4898 | 330 | } | 468 | } |
4899 | 331 | 469 | ||
4900 | 332 | try: | 470 | try: |
4901 | @@ -392,9 +530,18 @@ | |||
4902 | 392 | import apt_pkg as apt | 530 | import apt_pkg as apt |
4903 | 393 | src = config('openstack-origin') | 531 | src = config('openstack-origin') |
4904 | 394 | cur_vers = get_os_version_package(package) | 532 | cur_vers = get_os_version_package(package) |
4906 | 395 | available_vers = get_os_version_install_source(src) | 533 | if "swift" in package: |
4907 | 534 | codename = get_os_codename_install_source(src) | ||
4908 | 535 | avail_vers = get_os_version_codename_swift(codename) | ||
4909 | 536 | else: | ||
4910 | 537 | avail_vers = get_os_version_install_source(src) | ||
4911 | 396 | apt.init() | 538 | apt.init() |
4913 | 397 | return apt.version_compare(available_vers, cur_vers) == 1 | 539 | if "swift" in package: |
4914 | 540 | major_cur_vers = cur_vers.split('.', 1)[0] | ||
4915 | 541 | major_avail_vers = avail_vers.split('.', 1)[0] | ||
4916 | 542 | major_diff = apt.version_compare(major_avail_vers, major_cur_vers) | ||
4917 | 543 | return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0) | ||
4918 | 544 | return apt.version_compare(avail_vers, cur_vers) == 1 | ||
4919 | 398 | 545 | ||
4920 | 399 | 546 | ||
4921 | 400 | def ensure_block_device(block_device): | 547 | def ensure_block_device(block_device): |
4922 | @@ -469,6 +616,12 @@ | |||
4923 | 469 | relation_prefix=None): | 616 | relation_prefix=None): |
4924 | 470 | hosts = get_ipv6_addr(dynamic_only=False) | 617 | hosts = get_ipv6_addr(dynamic_only=False) |
4925 | 471 | 618 | ||
4926 | 619 | if config('vip'): | ||
4927 | 620 | vips = config('vip').split() | ||
4928 | 621 | for vip in vips: | ||
4929 | 622 | if vip and is_ipv6(vip): | ||
4930 | 623 | hosts.append(vip) | ||
4931 | 624 | |||
4932 | 472 | kwargs = {'database': database, | 625 | kwargs = {'database': database, |
4933 | 473 | 'username': database_user, | 626 | 'username': database_user, |
4934 | 474 | 'hostname': json.dumps(hosts)} | 627 | 'hostname': json.dumps(hosts)} |
4935 | @@ -517,7 +670,7 @@ | |||
4936 | 517 | return yaml.load(projects_yaml) | 670 | return yaml.load(projects_yaml) |
4937 | 518 | 671 | ||
4938 | 519 | 672 | ||
4940 | 520 | def git_clone_and_install(projects_yaml, core_project, depth=1): | 673 | def git_clone_and_install(projects_yaml, core_project): |
4941 | 521 | """ | 674 | """ |
4942 | 522 | Clone/install all specified OpenStack repositories. | 675 | Clone/install all specified OpenStack repositories. |
4943 | 523 | 676 | ||
4944 | @@ -567,6 +720,9 @@ | |||
4945 | 567 | for p in projects['repositories']: | 720 | for p in projects['repositories']: |
4946 | 568 | repo = p['repository'] | 721 | repo = p['repository'] |
4947 | 569 | branch = p['branch'] | 722 | branch = p['branch'] |
4948 | 723 | depth = '1' | ||
4949 | 724 | if 'depth' in p.keys(): | ||
4950 | 725 | depth = p['depth'] | ||
4951 | 570 | if p['name'] == 'requirements': | 726 | if p['name'] == 'requirements': |
4952 | 571 | repo_dir = _git_clone_and_install_single(repo, branch, depth, | 727 | repo_dir = _git_clone_and_install_single(repo, branch, depth, |
4953 | 572 | parent_dir, http_proxy, | 728 | parent_dir, http_proxy, |
4954 | @@ -611,19 +767,14 @@ | |||
4955 | 611 | """ | 767 | """ |
4956 | 612 | Clone and install a single git repository. | 768 | Clone and install a single git repository. |
4957 | 613 | """ | 769 | """ |
4958 | 614 | dest_dir = os.path.join(parent_dir, os.path.basename(repo)) | ||
4959 | 615 | |||
4960 | 616 | if not os.path.exists(parent_dir): | 770 | if not os.path.exists(parent_dir): |
4961 | 617 | juju_log('Directory already exists at {}. ' | 771 | juju_log('Directory already exists at {}. ' |
4962 | 618 | 'No need to create directory.'.format(parent_dir)) | 772 | 'No need to create directory.'.format(parent_dir)) |
4963 | 619 | os.mkdir(parent_dir) | 773 | os.mkdir(parent_dir) |
4964 | 620 | 774 | ||
4971 | 621 | if not os.path.exists(dest_dir): | 775 | juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) |
4972 | 622 | juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) | 776 | repo_dir = install_remote( |
4973 | 623 | repo_dir = install_remote(repo, dest=parent_dir, branch=branch, | 777 | repo, dest=parent_dir, branch=branch, depth=depth) |
4968 | 624 | depth=depth) | ||
4969 | 625 | else: | ||
4970 | 626 | repo_dir = dest_dir | ||
4974 | 627 | 778 | ||
4975 | 628 | venv = os.path.join(parent_dir, 'venv') | 779 | venv = os.path.join(parent_dir, 'venv') |
4976 | 629 | 780 | ||
4977 | @@ -704,3 +855,721 @@ | |||
4978 | 704 | return projects[key] | 855 | return projects[key] |
4979 | 705 | 856 | ||
4980 | 706 | return None | 857 | return None |
4981 | 858 | |||
4982 | 859 | |||
4983 | 860 | def os_workload_status(configs, required_interfaces, charm_func=None): | ||
4984 | 861 | """ | ||
4985 | 862 | Decorator to set workload status based on complete contexts | ||
4986 | 863 | """ | ||
4987 | 864 | def wrap(f): | ||
4988 | 865 | @wraps(f) | ||
4989 | 866 | def wrapped_f(*args, **kwargs): | ||
4990 | 867 | # Run the original function first | ||
4991 | 868 | f(*args, **kwargs) | ||
4992 | 869 | # Set workload status now that contexts have been | ||
4993 | 870 | # acted on | ||
4994 | 871 | set_os_workload_status(configs, required_interfaces, charm_func) | ||
4995 | 872 | return wrapped_f | ||
4996 | 873 | return wrap | ||
4997 | 874 | |||
4998 | 875 | |||
4999 | 876 | def set_os_workload_status(configs, required_interfaces, charm_func=None, | ||
5000 | 877 | services=None, ports=None): |
The diff has been truncated for viewing.