Merge lp:~le-charmers/charms/trusty/keystone/leadership-election into lp:~openstack-charmers-archive/charms/trusty/keystone/next
- Trusty Tahr (14.04)
- leadership-election
- Merge into next
Proposed by
Edward Hope-Morley
Status: | Merged |
---|---|
Merged at revision: | 151 |
Proposed branch: | lp:~le-charmers/charms/trusty/keystone/leadership-election |
Merge into: | lp:~openstack-charmers-archive/charms/trusty/keystone/next |
Diff against target: |
1683 lines (+597/-235) 17 files modified
charm-helpers-tests.yaml (+1/-1) hooks/charmhelpers/contrib/hahelpers/cluster.py (+37/-2) hooks/charmhelpers/contrib/openstack/neutron.py (+10/-5) hooks/charmhelpers/contrib/openstack/templates/section-zeromq (+0/-14) hooks/charmhelpers/contrib/openstack/utils.py (+65/-18) hooks/charmhelpers/contrib/peerstorage/__init__.py (+123/-3) hooks/charmhelpers/contrib/python/packages.py (+28/-5) hooks/charmhelpers/contrib/unison/__init__.py (+5/-4) hooks/charmhelpers/core/hookenv.py (+147/-10) hooks/charmhelpers/core/host.py (+1/-1) hooks/charmhelpers/core/services/base.py (+32/-11) hooks/charmhelpers/fetch/__init__.py (+1/-1) hooks/charmhelpers/fetch/giturl.py (+7/-5) hooks/keystone_hooks.py (+16/-12) hooks/keystone_ssl.py (+3/-27) hooks/keystone_utils.py (+88/-65) unit_tests/test_keystone_hooks.py (+33/-51) |
To merge this branch: | bzr merge lp:~le-charmers/charms/trusty/keystone/leadership-election |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
OpenStack Charmers | Pending | ||
Review via email: mp+255016@code.launchpad.net |
Commit message
Description of the change
To post a comment you must log in.
- 158. By Edward Hope-Morley
-
synced /next
- 159. By Edward Hope-Morley
-
make sync
- 160. By Liam Young
-
Merged trunk in + LE charmhelper sync
- 161. By Liam Young
-
Resync le charm helpers
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'charm-helpers-tests.yaml' |
2 | --- charm-helpers-tests.yaml 2015-05-13 09:42:17 +0000 |
3 | +++ charm-helpers-tests.yaml 2015-06-04 08:44:44 +0000 |
4 | @@ -1,4 +1,4 @@ |
5 | -branch: lp:charm-helpers |
6 | +branch: lp:charm-helpers |
7 | destination: tests/charmhelpers |
8 | include: |
9 | - contrib.amulet |
10 | |
11 | === modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py' |
12 | --- hooks/charmhelpers/contrib/hahelpers/cluster.py 2015-03-18 18:59:03 +0000 |
13 | +++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2015-06-04 08:44:44 +0000 |
14 | @@ -44,6 +44,7 @@ |
15 | ERROR, |
16 | WARNING, |
17 | unit_get, |
18 | + is_leader as juju_is_leader |
19 | ) |
20 | from charmhelpers.core.decorators import ( |
21 | retry_on_exception, |
22 | @@ -52,6 +53,8 @@ |
23 | bool_from_string, |
24 | ) |
25 | |
26 | +DC_RESOURCE_NAME = 'DC' |
27 | + |
28 | |
29 | class HAIncompleteConfig(Exception): |
30 | pass |
31 | @@ -66,12 +69,21 @@ |
32 | Returns True if the charm executing this is the elected cluster leader. |
33 | |
34 | It relies on two mechanisms to determine leadership: |
35 | - 1. If the charm is part of a corosync cluster, call corosync to |
36 | + 1. If juju is sufficiently new and leadership election is supported, |
37 | + the is_leader command will be used. |
38 | + 2. If the charm is part of a corosync cluster, call corosync to |
39 | determine leadership. |
40 | - 2. If the charm is not part of a corosync cluster, the leader is |
41 | + 3. If the charm is not part of a corosync cluster, the leader is |
42 | determined as being "the alive unit with the lowest unit number". In |
43 | other words, the oldest surviving unit. |
44 | """ |
45 | + try: |
46 | + return juju_is_leader() |
47 | + except NotImplementedError: |
48 | + log('Juju leadership election feature not enabled' |
49 | + ', using fallback support', |
50 | + level=WARNING) |
51 | + |
52 | if is_clustered(): |
53 | if not is_crm_leader(resource): |
54 | log('Deferring action to CRM leader.', level=INFO) |
55 | @@ -95,6 +107,27 @@ |
56 | return False |
57 | |
58 | |
59 | +def is_crm_dc(): |
60 | + """ |
61 | + Determine leadership by querying the pacemaker Designated Controller |
62 | + """ |
63 | + cmd = ['crm', 'status'] |
64 | + try: |
65 | + status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) |
66 | + if not isinstance(status, six.text_type): |
67 | + status = six.text_type(status, "utf-8") |
68 | + except subprocess.CalledProcessError: |
69 | + return False |
70 | + current_dc = '' |
71 | + for line in status.split('\n'): |
72 | + if line.startswith('Current DC'): |
73 | + # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum |
74 | + current_dc = line.split(':')[1].split()[0] |
75 | + if current_dc == get_unit_hostname(): |
76 | + return True |
77 | + return False |
78 | + |
79 | + |
80 | @retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound) |
81 | def is_crm_leader(resource, retry=False): |
82 | """ |
83 | @@ -104,6 +137,8 @@ |
84 | We allow this operation to be retried to avoid the possibility of getting a |
85 | false negative. See LP #1396246 for more info. |
86 | """ |
87 | + if resource == DC_RESOURCE_NAME: |
88 | + return is_crm_dc() |
89 | cmd = ['crm', 'resource', 'show', resource] |
90 | try: |
91 | status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) |
92 | |
93 | === modified file 'hooks/charmhelpers/contrib/openstack/neutron.py' |
94 | --- hooks/charmhelpers/contrib/openstack/neutron.py 2015-04-16 19:55:16 +0000 |
95 | +++ hooks/charmhelpers/contrib/openstack/neutron.py 2015-06-04 08:44:44 +0000 |
96 | @@ -256,11 +256,14 @@ |
97 | def parse_mappings(mappings): |
98 | parsed = {} |
99 | if mappings: |
100 | - mappings = mappings.split(' ') |
101 | + mappings = mappings.split() |
102 | for m in mappings: |
103 | p = m.partition(':') |
104 | - if p[1] == ':': |
105 | - parsed[p[0].strip()] = p[2].strip() |
106 | + key = p[0].strip() |
107 | + if p[1]: |
108 | + parsed[key] = p[2].strip() |
109 | + else: |
110 | + parsed[key] = '' |
111 | |
112 | return parsed |
113 | |
114 | @@ -283,13 +286,13 @@ |
115 | Returns dict of the form {bridge:port}. |
116 | """ |
117 | _mappings = parse_mappings(mappings) |
118 | - if not _mappings: |
119 | + if not _mappings or list(_mappings.values()) == ['']: |
120 | if not mappings: |
121 | return {} |
122 | |
123 | # For backwards-compatibility we need to support port-only provided in |
124 | # config. |
125 | - _mappings = {default_bridge: mappings.split(' ')[0]} |
126 | + _mappings = {default_bridge: mappings.split()[0]} |
127 | |
128 | bridges = _mappings.keys() |
129 | ports = _mappings.values() |
130 | @@ -309,6 +312,8 @@ |
131 | |
132 | Mappings must be a space-delimited list of provider:start:end mappings. |
133 | |
134 | + The start:end range is optional and may be omitted. |
135 | + |
136 | Returns dict of the form {provider: (start, end)}. |
137 | """ |
138 | _mappings = parse_mappings(mappings) |
139 | |
140 | === added file 'hooks/charmhelpers/contrib/openstack/templates/section-zeromq' |
141 | --- hooks/charmhelpers/contrib/openstack/templates/section-zeromq 1970-01-01 00:00:00 +0000 |
142 | +++ hooks/charmhelpers/contrib/openstack/templates/section-zeromq 2015-06-04 08:44:44 +0000 |
143 | @@ -0,0 +1,14 @@ |
144 | +{% if zmq_host -%} |
145 | +# ZeroMQ configuration (restart-nonce: {{ zmq_nonce }}) |
146 | +rpc_backend = zmq |
147 | +rpc_zmq_host = {{ zmq_host }} |
148 | +{% if zmq_redis_address -%} |
149 | +rpc_zmq_matchmaker = redis |
150 | +matchmaker_heartbeat_freq = 15 |
151 | +matchmaker_heartbeat_ttl = 30 |
152 | +[matchmaker_redis] |
153 | +host = {{ zmq_redis_address }} |
154 | +{% else -%} |
155 | +rpc_zmq_matchmaker = ring |
156 | +{% endif -%} |
157 | +{% endif -%} |
158 | |
159 | === removed file 'hooks/charmhelpers/contrib/openstack/templates/section-zeromq' |
160 | --- hooks/charmhelpers/contrib/openstack/templates/section-zeromq 2015-04-09 02:17:36 +0000 |
161 | +++ hooks/charmhelpers/contrib/openstack/templates/section-zeromq 1970-01-01 00:00:00 +0000 |
162 | @@ -1,14 +0,0 @@ |
163 | -{% if zmq_host -%} |
164 | -# ZeroMQ configuration (restart-nonce: {{ zmq_nonce }}) |
165 | -rpc_backend = zmq |
166 | -rpc_zmq_host = {{ zmq_host }} |
167 | -{% if zmq_redis_address -%} |
168 | -rpc_zmq_matchmaker = redis |
169 | -matchmaker_heartbeat_freq = 15 |
170 | -matchmaker_heartbeat_ttl = 30 |
171 | -[matchmaker_redis] |
172 | -host = {{ zmq_redis_address }} |
173 | -{% else -%} |
174 | -rpc_zmq_matchmaker = ring |
175 | -{% endif -%} |
176 | -{% endif -%} |
177 | |
178 | === modified file 'hooks/charmhelpers/contrib/openstack/utils.py' |
179 | --- hooks/charmhelpers/contrib/openstack/utils.py 2015-04-16 14:09:47 +0000 |
180 | +++ hooks/charmhelpers/contrib/openstack/utils.py 2015-06-04 08:44:44 +0000 |
181 | @@ -53,9 +53,13 @@ |
182 | get_ipv6_addr |
183 | ) |
184 | |
185 | +from charmhelpers.contrib.python.packages import ( |
186 | + pip_create_virtualenv, |
187 | + pip_install, |
188 | +) |
189 | + |
190 | from charmhelpers.core.host import lsb_release, mounts, umount |
191 | from charmhelpers.fetch import apt_install, apt_cache, install_remote |
192 | -from charmhelpers.contrib.python.packages import pip_install |
193 | from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk |
194 | from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device |
195 | |
196 | @@ -497,7 +501,17 @@ |
197 | requirements_dir = None |
198 | |
199 | |
200 | -def git_clone_and_install(projects_yaml, core_project): |
201 | +def _git_yaml_load(projects_yaml): |
202 | + """ |
203 | + Load the specified yaml into a dictionary. |
204 | + """ |
205 | + if not projects_yaml: |
206 | + return None |
207 | + |
208 | + return yaml.load(projects_yaml) |
209 | + |
210 | + |
211 | +def git_clone_and_install(projects_yaml, core_project, depth=1): |
212 | """ |
213 | Clone/install all specified OpenStack repositories. |
214 | |
215 | @@ -510,23 +524,22 @@ |
216 | repository: 'git://git.openstack.org/openstack/requirements.git', |
217 | branch: 'stable/icehouse'} |
218 | directory: /mnt/openstack-git |
219 | - http_proxy: http://squid.internal:3128 |
220 | - https_proxy: https://squid.internal:3128 |
221 | + http_proxy: squid-proxy-url |
222 | + https_proxy: squid-proxy-url |
223 | |
224 | The directory, http_proxy, and https_proxy keys are optional. |
225 | """ |
226 | global requirements_dir |
227 | parent_dir = '/mnt/openstack-git' |
228 | - |
229 | - if not projects_yaml: |
230 | - return |
231 | - |
232 | - projects = yaml.load(projects_yaml) |
233 | + http_proxy = None |
234 | + |
235 | + projects = _git_yaml_load(projects_yaml) |
236 | _git_validate_projects_yaml(projects, core_project) |
237 | |
238 | old_environ = dict(os.environ) |
239 | |
240 | if 'http_proxy' in projects.keys(): |
241 | + http_proxy = projects['http_proxy'] |
242 | os.environ['http_proxy'] = projects['http_proxy'] |
243 | if 'https_proxy' in projects.keys(): |
244 | os.environ['https_proxy'] = projects['https_proxy'] |
245 | @@ -534,15 +547,19 @@ |
246 | if 'directory' in projects.keys(): |
247 | parent_dir = projects['directory'] |
248 | |
249 | + pip_create_virtualenv(os.path.join(parent_dir, 'venv')) |
250 | + |
251 | for p in projects['repositories']: |
252 | repo = p['repository'] |
253 | branch = p['branch'] |
254 | if p['name'] == 'requirements': |
255 | - repo_dir = _git_clone_and_install_single(repo, branch, parent_dir, |
256 | + repo_dir = _git_clone_and_install_single(repo, branch, depth, |
257 | + parent_dir, http_proxy, |
258 | update_requirements=False) |
259 | requirements_dir = repo_dir |
260 | else: |
261 | - repo_dir = _git_clone_and_install_single(repo, branch, parent_dir, |
262 | + repo_dir = _git_clone_and_install_single(repo, branch, depth, |
263 | + parent_dir, http_proxy, |
264 | update_requirements=True) |
265 | |
266 | os.environ = old_environ |
267 | @@ -574,7 +591,8 @@ |
268 | error_out('openstack-origin-git key \'{}\' is missing'.format(key)) |
269 | |
270 | |
271 | -def _git_clone_and_install_single(repo, branch, parent_dir, update_requirements): |
272 | +def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, |
273 | + update_requirements): |
274 | """ |
275 | Clone and install a single git repository. |
276 | """ |
277 | @@ -587,7 +605,8 @@ |
278 | |
279 | if not os.path.exists(dest_dir): |
280 | juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) |
281 | - repo_dir = install_remote(repo, dest=parent_dir, branch=branch) |
282 | + repo_dir = install_remote(repo, dest=parent_dir, branch=branch, |
283 | + depth=depth) |
284 | else: |
285 | repo_dir = dest_dir |
286 | |
287 | @@ -598,7 +617,12 @@ |
288 | _git_update_requirements(repo_dir, requirements_dir) |
289 | |
290 | juju_log('Installing git repo from dir: {}'.format(repo_dir)) |
291 | - pip_install(repo_dir) |
292 | + if http_proxy: |
293 | + pip_install(repo_dir, proxy=http_proxy, |
294 | + venv=os.path.join(parent_dir, 'venv')) |
295 | + else: |
296 | + pip_install(repo_dir, |
297 | + venv=os.path.join(parent_dir, 'venv')) |
298 | |
299 | return repo_dir |
300 | |
301 | @@ -621,16 +645,27 @@ |
302 | os.chdir(orig_dir) |
303 | |
304 | |
305 | +def git_pip_venv_dir(projects_yaml): |
306 | + """ |
307 | + Return the pip virtualenv path. |
308 | + """ |
309 | + parent_dir = '/mnt/openstack-git' |
310 | + |
311 | + projects = _git_yaml_load(projects_yaml) |
312 | + |
313 | + if 'directory' in projects.keys(): |
314 | + parent_dir = projects['directory'] |
315 | + |
316 | + return os.path.join(parent_dir, 'venv') |
317 | + |
318 | + |
319 | def git_src_dir(projects_yaml, project): |
320 | """ |
321 | Return the directory where the specified project's source is located. |
322 | """ |
323 | parent_dir = '/mnt/openstack-git' |
324 | |
325 | - if not projects_yaml: |
326 | - return |
327 | - |
328 | - projects = yaml.load(projects_yaml) |
329 | + projects = _git_yaml_load(projects_yaml) |
330 | |
331 | if 'directory' in projects.keys(): |
332 | parent_dir = projects['directory'] |
333 | @@ -640,3 +675,15 @@ |
334 | return os.path.join(parent_dir, os.path.basename(p['repository'])) |
335 | |
336 | return None |
337 | + |
338 | + |
339 | +def git_yaml_value(projects_yaml, key): |
340 | + """ |
341 | + Return the value in projects_yaml for the specified key. |
342 | + """ |
343 | + projects = _git_yaml_load(projects_yaml) |
344 | + |
345 | + if key in projects.keys(): |
346 | + return projects[key] |
347 | + |
348 | + return None |
349 | |
350 | === modified file 'hooks/charmhelpers/contrib/peerstorage/__init__.py' |
351 | --- hooks/charmhelpers/contrib/peerstorage/__init__.py 2015-03-11 11:45:09 +0000 |
352 | +++ hooks/charmhelpers/contrib/peerstorage/__init__.py 2015-06-04 08:44:44 +0000 |
353 | @@ -14,14 +14,19 @@ |
354 | # You should have received a copy of the GNU Lesser General Public License |
355 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
356 | |
357 | +import json |
358 | import six |
359 | + |
360 | from charmhelpers.core.hookenv import relation_id as current_relation_id |
361 | from charmhelpers.core.hookenv import ( |
362 | is_relation_made, |
363 | relation_ids, |
364 | - relation_get, |
365 | + relation_get as _relation_get, |
366 | local_unit, |
367 | - relation_set, |
368 | + relation_set as _relation_set, |
369 | + leader_get as _leader_get, |
370 | + leader_set, |
371 | + is_leader, |
372 | ) |
373 | |
374 | |
375 | @@ -54,6 +59,105 @@ |
376 | """ |
377 | |
378 | |
379 | +def leader_get(attribute=None): |
380 | + """Wrapper to ensure that settings are migrated from the peer relation. |
381 | + |
382 | + This is to support upgrading an environment that does not support |
383 | + Juju leadership election to one that does. |
384 | + |
385 | + If a setting is not extant in the leader-get but is on the relation-get |
386 | + peer rel, it is migrated and marked as such so that it is not re-migrated. |
387 | + """ |
388 | + migration_key = '__leader_get_migrated_settings__' |
389 | + if not is_leader(): |
390 | + return _leader_get(attribute=attribute) |
391 | + |
392 | + settings_migrated = False |
393 | + leader_settings = _leader_get(attribute=attribute) |
394 | + previously_migrated = _leader_get(attribute=migration_key) |
395 | + |
396 | + if previously_migrated: |
397 | + migrated = set(json.loads(previously_migrated)) |
398 | + else: |
399 | + migrated = set([]) |
400 | + |
401 | + try: |
402 | + if migration_key in leader_settings: |
403 | + del leader_settings[migration_key] |
404 | + except TypeError: |
405 | + pass |
406 | + |
407 | + if attribute: |
408 | + if attribute in migrated: |
409 | + return leader_settings |
410 | + |
411 | + # If attribute not present in leader db, check if this unit has set |
412 | + # the attribute in the peer relation |
413 | + if not leader_settings: |
414 | + peer_setting = relation_get(attribute=attribute, unit=local_unit()) |
415 | + if peer_setting: |
416 | + leader_set(settings={attribute: peer_setting}) |
417 | + leader_settings = peer_setting |
418 | + |
419 | + if leader_settings: |
420 | + settings_migrated = True |
421 | + migrated.add(attribute) |
422 | + else: |
423 | + r_settings = relation_get(unit=local_unit()) |
424 | + if r_settings: |
425 | + for key in set(r_settings.keys()).difference(migrated): |
426 | + # Leader setting wins |
427 | + if not leader_settings.get(key): |
428 | + leader_settings[key] = r_settings[key] |
429 | + |
430 | + settings_migrated = True |
431 | + migrated.add(key) |
432 | + |
433 | + if settings_migrated: |
434 | + leader_set(**leader_settings) |
435 | + |
436 | + if migrated and settings_migrated: |
437 | + migrated = json.dumps(list(migrated)) |
438 | + leader_set(settings={migration_key: migrated}) |
439 | + |
440 | + return leader_settings |
441 | + |
442 | + |
443 | +def relation_set(relation_id=None, relation_settings=None, **kwargs): |
444 | + """Attempt to use leader-set if supported in the current version of Juju, |
445 | + otherwise falls back on relation-set. |
446 | + |
447 | + Note that we only attempt to use leader-set if the provided relation_id is |
448 | + a peer relation id or no relation id is provided (in which case we assume |
449 | + we are within the peer relation context). |
450 | + """ |
451 | + try: |
452 | + if relation_id in relation_ids('cluster'): |
453 | + return leader_set(settings=relation_settings, **kwargs) |
454 | + else: |
455 | + raise NotImplementedError |
456 | + except NotImplementedError: |
457 | + return _relation_set(relation_id=relation_id, |
458 | + relation_settings=relation_settings, **kwargs) |
459 | + |
460 | + |
461 | +def relation_get(attribute=None, unit=None, rid=None): |
462 | + """Attempt to use leader-get if supported in the current version of Juju, |
463 | + otherwise falls back on relation-get. |
464 | + |
465 | + Note that we only attempt to use leader-get if the provided rid is a peer |
466 | + relation id or no relation id is provided (in which case we assume we are |
467 | + within the peer relation context). |
468 | + """ |
469 | + try: |
470 | + if rid in relation_ids('cluster'): |
471 | + return leader_get(attribute) |
472 | + else: |
473 | + raise NotImplementedError |
474 | + except NotImplementedError: |
475 | + return _relation_get(attribute=attribute, rid=rid, unit=unit) |
476 | + |
477 | + |
478 | def peer_retrieve(key, relation_name='cluster'): |
479 | """Retrieve a named key from peer relation `relation_name`.""" |
480 | cluster_rels = relation_ids(relation_name) |
481 | @@ -73,6 +177,8 @@ |
482 | exc_list = exc_list if exc_list else [] |
483 | peerdb_settings = peer_retrieve('-', relation_name=relation_name) |
484 | matched = {} |
485 | + if peerdb_settings is None: |
486 | + return matched |
487 | for k, v in peerdb_settings.items(): |
488 | full_prefix = prefix + delimiter |
489 | if k.startswith(full_prefix): |
490 | @@ -96,12 +202,26 @@ |
491 | 'peer relation {}'.format(relation_name)) |
492 | |
493 | |
494 | -def peer_echo(includes=None): |
495 | +def peer_echo(includes=None, force=False): |
496 | """Echo filtered attributes back onto the same relation for storage. |
497 | |
498 | This is a requirement to use the peerstorage module - it needs to be called |
499 | from the peer relation's changed hook. |
500 | + |
501 | + If Juju leader support exists this will be a noop unless force is True. |
502 | """ |
503 | + try: |
504 | + is_leader() |
505 | + except NotImplementedError: |
506 | + pass |
507 | + else: |
508 | + if not force: |
509 | + return # NOOP if leader-election is supported |
510 | + |
511 | + # Use original non-leader calls |
512 | + relation_get = _relation_get |
513 | + relation_set = _relation_set |
514 | + |
515 | rdata = relation_get() |
516 | echo_data = {} |
517 | if includes is None: |
518 | |
519 | === modified file 'hooks/charmhelpers/contrib/python/packages.py' |
520 | --- hooks/charmhelpers/contrib/python/packages.py 2015-03-11 11:45:09 +0000 |
521 | +++ hooks/charmhelpers/contrib/python/packages.py 2015-06-04 08:44:44 +0000 |
522 | @@ -17,8 +17,11 @@ |
523 | # You should have received a copy of the GNU Lesser General Public License |
524 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
525 | |
526 | +import os |
527 | +import subprocess |
528 | + |
529 | from charmhelpers.fetch import apt_install, apt_update |
530 | -from charmhelpers.core.hookenv import log |
531 | +from charmhelpers.core.hookenv import charm_dir, log |
532 | |
533 | try: |
534 | from pip import main as pip_execute |
535 | @@ -51,11 +54,15 @@ |
536 | pip_execute(command) |
537 | |
538 | |
539 | -def pip_install(package, fatal=False, upgrade=False, **options): |
540 | +def pip_install(package, fatal=False, upgrade=False, venv=None, **options): |
541 | """Install a python package""" |
542 | - command = ["install"] |
543 | + if venv: |
544 | + venv_python = os.path.join(venv, 'bin/pip') |
545 | + command = [venv_python, "install"] |
546 | + else: |
547 | + command = ["install"] |
548 | |
549 | - available_options = ('proxy', 'src', 'log', "index-url", ) |
550 | + available_options = ('proxy', 'src', 'log', 'index-url', ) |
551 | for option in parse_options(options, available_options): |
552 | command.append(option) |
553 | |
554 | @@ -69,7 +76,10 @@ |
555 | |
556 | log("Installing {} package with options: {}".format(package, |
557 | command)) |
558 | - pip_execute(command) |
559 | + if venv: |
560 | + subprocess.check_call(command) |
561 | + else: |
562 | + pip_execute(command) |
563 | |
564 | |
565 | def pip_uninstall(package, **options): |
566 | @@ -94,3 +104,16 @@ |
567 | """Returns the list of current python installed packages |
568 | """ |
569 | return pip_execute(["list"]) |
570 | + |
571 | + |
572 | +def pip_create_virtualenv(path=None): |
573 | + """Create an isolated Python environment.""" |
574 | + apt_install('python-virtualenv') |
575 | + |
576 | + if path: |
577 | + venv_path = path |
578 | + else: |
579 | + venv_path = os.path.join(charm_dir(), 'venv') |
580 | + |
581 | + if not os.path.exists(venv_path): |
582 | + subprocess.check_call(['virtualenv', venv_path]) |
583 | |
584 | === modified file 'hooks/charmhelpers/contrib/unison/__init__.py' |
585 | --- hooks/charmhelpers/contrib/unison/__init__.py 2015-03-18 18:59:03 +0000 |
586 | +++ hooks/charmhelpers/contrib/unison/__init__.py 2015-06-04 08:44:44 +0000 |
587 | @@ -63,6 +63,7 @@ |
588 | from charmhelpers.core.host import ( |
589 | adduser, |
590 | add_user_to_group, |
591 | + pwgen, |
592 | ) |
593 | |
594 | from charmhelpers.core.hookenv import ( |
595 | @@ -140,7 +141,7 @@ |
596 | ssh_dir = os.path.join(home_dir, '.ssh') |
597 | auth_keys = os.path.join(ssh_dir, 'authorized_keys') |
598 | log('Syncing authorized_keys @ %s.' % auth_keys) |
599 | - with open(auth_keys, 'wb') as out: |
600 | + with open(auth_keys, 'w') as out: |
601 | for k in keys: |
602 | out.write('%s\n' % k) |
603 | |
604 | @@ -152,16 +153,16 @@ |
605 | khosts = [] |
606 | for host in hosts: |
607 | cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host] |
608 | - remote_key = check_output(cmd).strip() |
609 | + remote_key = check_output(cmd, universal_newlines=True).strip() |
610 | khosts.append(remote_key) |
611 | log('Syncing known_hosts @ %s.' % known_hosts) |
612 | - with open(known_hosts, 'wb') as out: |
613 | + with open(known_hosts, 'w') as out: |
614 | for host in khosts: |
615 | out.write('%s\n' % host) |
616 | |
617 | |
618 | def ensure_user(user, group=None): |
619 | - adduser(user) |
620 | + adduser(user, pwgen()) |
621 | if group: |
622 | add_user_to_group(user, group) |
623 | |
624 | |
625 | === modified file 'hooks/charmhelpers/core/hookenv.py' |
626 | --- hooks/charmhelpers/core/hookenv.py 2015-04-15 15:21:50 +0000 |
627 | +++ hooks/charmhelpers/core/hookenv.py 2015-06-04 08:44:44 +0000 |
628 | @@ -21,12 +21,14 @@ |
629 | # Charm Helpers Developers <juju@lists.ubuntu.com> |
630 | |
631 | from __future__ import print_function |
632 | +from functools import wraps |
633 | import os |
634 | import json |
635 | import yaml |
636 | import subprocess |
637 | import sys |
638 | import errno |
639 | +import tempfile |
640 | from subprocess import CalledProcessError |
641 | |
642 | import six |
643 | @@ -58,15 +60,17 @@ |
644 | |
645 | will cache the result of unit_get + 'test' for future calls. |
646 | """ |
647 | + @wraps(func) |
648 | def wrapper(*args, **kwargs): |
649 | global cache |
650 | key = str((func, args, kwargs)) |
651 | try: |
652 | return cache[key] |
653 | except KeyError: |
654 | - res = func(*args, **kwargs) |
655 | - cache[key] = res |
656 | - return res |
657 | + pass # Drop out of the exception handler scope. |
658 | + res = func(*args, **kwargs) |
659 | + cache[key] = res |
660 | + return res |
661 | return wrapper |
662 | |
663 | |
664 | @@ -178,7 +182,7 @@ |
665 | |
666 | def remote_unit(): |
667 | """The remote unit for the current relation hook""" |
668 | - return os.environ['JUJU_REMOTE_UNIT'] |
669 | + return os.environ.get('JUJU_REMOTE_UNIT', None) |
670 | |
671 | |
672 | def service_name(): |
673 | @@ -250,6 +254,12 @@ |
674 | except KeyError: |
675 | return (self._prev_dict or {})[key] |
676 | |
677 | + def get(self, key, default=None): |
678 | + try: |
679 | + return self[key] |
680 | + except KeyError: |
681 | + return default |
682 | + |
683 | def keys(self): |
684 | prev_keys = [] |
685 | if self._prev_dict is not None: |
686 | @@ -353,18 +363,49 @@ |
687 | """Set relation information for the current unit""" |
688 | relation_settings = relation_settings if relation_settings else {} |
689 | relation_cmd_line = ['relation-set'] |
690 | + accepts_file = "--file" in subprocess.check_output( |
691 | + relation_cmd_line + ["--help"], universal_newlines=True) |
692 | if relation_id is not None: |
693 | relation_cmd_line.extend(('-r', relation_id)) |
694 | - for k, v in (list(relation_settings.items()) + list(kwargs.items())): |
695 | - if v is None: |
696 | - relation_cmd_line.append('{}='.format(k)) |
697 | - else: |
698 | - relation_cmd_line.append('{}={}'.format(k, v)) |
699 | - subprocess.check_call(relation_cmd_line) |
700 | + settings = relation_settings.copy() |
701 | + settings.update(kwargs) |
702 | + for key, value in settings.items(): |
703 | + # Force value to be a string: it always should, but some call |
704 | + # sites pass in things like dicts or numbers. |
705 | + if value is not None: |
706 | + settings[key] = "{}".format(value) |
707 | + if accepts_file: |
708 | + # --file was introduced in Juju 1.23.2. Use it by default if |
709 | + # available, since otherwise we'll break if the relation data is |
710 | + # too big. Ideally we should tell relation-set to read the data from |
711 | + # stdin, but that feature is broken in 1.23.2: Bug #1454678. |
712 | + with tempfile.NamedTemporaryFile(delete=False) as settings_file: |
713 | + settings_file.write(yaml.safe_dump(settings).encode("utf-8")) |
714 | + subprocess.check_call( |
715 | + relation_cmd_line + ["--file", settings_file.name]) |
716 | + os.remove(settings_file.name) |
717 | + else: |
718 | + for key, value in settings.items(): |
719 | + if value is None: |
720 | + relation_cmd_line.append('{}='.format(key)) |
721 | + else: |
722 | + relation_cmd_line.append('{}={}'.format(key, value)) |
723 | + subprocess.check_call(relation_cmd_line) |
724 | # Flush cache of any relation-gets for local unit |
725 | flush(local_unit()) |
726 | |
727 | |
728 | +def relation_clear(r_id=None): |
729 | + ''' Clears any relation data already set on relation r_id ''' |
730 | + settings = relation_get(rid=r_id, |
731 | + unit=local_unit()) |
732 | + for setting in settings: |
733 | + if setting not in ['public-address', 'private-address']: |
734 | + settings[setting] = None |
735 | + relation_set(relation_id=r_id, |
736 | + **settings) |
737 | + |
738 | + |
739 | @cached |
740 | def relation_ids(reltype=None): |
741 | """A list of relation_ids""" |
742 | @@ -509,6 +550,11 @@ |
743 | return None |
744 | |
745 | |
746 | +def unit_public_ip(): |
747 | + """Get this unit's public IP address""" |
748 | + return unit_get('public-address') |
749 | + |
750 | + |
751 | def unit_private_ip(): |
752 | """Get this unit's private IP address""" |
753 | return unit_get('private-address') |
754 | @@ -605,3 +651,94 @@ |
755 | |
756 | The results set by action_set are preserved.""" |
757 | subprocess.check_call(['action-fail', message]) |
758 | + |
759 | + |
760 | +def status_set(workload_state, message): |
761 | + """Set the workload state with a message |
762 | + |
763 | + Use status-set to set the workload state with a message which is visible |
764 | + to the user via juju status. If the status-set command is not found then |
765 | + assume this is juju < 1.23 and juju-log the message instead. |
766 | + |
767 | + workload_state -- valid juju workload state. |
768 | + message -- status update message |
769 | + """ |
770 | + valid_states = ['maintenance', 'blocked', 'waiting', 'active'] |
771 | + if workload_state not in valid_states: |
772 | + raise ValueError( |
773 | + '{!r} is not a valid workload state'.format(workload_state) |
774 | + ) |
775 | + cmd = ['status-set', workload_state, message] |
776 | + try: |
777 | + ret = subprocess.call(cmd) |
778 | + if ret == 0: |
779 | + return |
780 | + except OSError as e: |
781 | + if e.errno != errno.ENOENT: |
782 | + raise |
783 | + log_message = 'status-set failed: {} {}'.format(workload_state, |
784 | + message) |
785 | + log(log_message, level='INFO') |
786 | + |
787 | + |
788 | +def status_get(): |
789 | + """Retrieve the previously set juju workload state |
790 | + |
791 | + If the status-set command is not found then assume this is juju < 1.23 and |
792 | + return 'unknown' |
793 | + """ |
794 | + cmd = ['status-get'] |
795 | + try: |
796 | + raw_status = subprocess.check_output(cmd, universal_newlines=True) |
797 | + status = raw_status.rstrip() |
798 | + return status |
799 | + except OSError as e: |
800 | + if e.errno == errno.ENOENT: |
801 | + return 'unknown' |
802 | + else: |
803 | + raise |
804 | + |
805 | + |
806 | +def translate_exc(from_exc, to_exc): |
807 | + def inner_translate_exc1(f): |
808 | + def inner_translate_exc2(*args, **kwargs): |
809 | + try: |
810 | + return f(*args, **kwargs) |
811 | + except from_exc: |
812 | + raise to_exc |
813 | + |
814 | + return inner_translate_exc2 |
815 | + |
816 | + return inner_translate_exc1 |
817 | + |
818 | + |
819 | +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) |
820 | +def is_leader(): |
821 | + """Does the current unit hold the juju leadership |
822 | + |
823 | + Uses juju to determine whether the current unit is the leader of its peers |
824 | + """ |
825 | + cmd = ['is-leader', '--format=json'] |
826 | + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) |
827 | + |
828 | + |
829 | +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) |
830 | +def leader_get(attribute=None): |
831 | + """Juju leader get value(s)""" |
832 | + cmd = ['leader-get', '--format=json'] + [attribute or '-'] |
833 | + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) |
834 | + |
835 | + |
836 | +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) |
837 | +def leader_set(settings=None, **kwargs): |
838 | + """Juju leader set value(s)""" |
839 | + log("Juju leader-set '%s'" % (settings), level=DEBUG) |
840 | + cmd = ['leader-set'] |
841 | + settings = settings or {} |
842 | + settings.update(kwargs) |
843 | + for k, v in settings.iteritems(): |
844 | + if v is None: |
845 | + cmd.append('{}='.format(k)) |
846 | + else: |
847 | + cmd.append('{}={}'.format(k, v)) |
848 | + subprocess.check_call(cmd) |
849 | |
850 | === modified file 'hooks/charmhelpers/core/host.py' |
851 | --- hooks/charmhelpers/core/host.py 2015-03-30 11:43:06 +0000 |
852 | +++ hooks/charmhelpers/core/host.py 2015-06-04 08:44:44 +0000 |
853 | @@ -90,7 +90,7 @@ |
854 | ['service', service_name, 'status'], |
855 | stderr=subprocess.STDOUT).decode('UTF-8') |
856 | except subprocess.CalledProcessError as e: |
857 | - return 'unrecognized service' not in e.output |
858 | + return b'unrecognized service' not in e.output |
859 | else: |
860 | return True |
861 | |
862 | |
863 | === modified file 'hooks/charmhelpers/core/services/base.py' |
864 | --- hooks/charmhelpers/core/services/base.py 2015-03-11 11:45:09 +0000 |
865 | +++ hooks/charmhelpers/core/services/base.py 2015-06-04 08:44:44 +0000 |
866 | @@ -15,9 +15,9 @@ |
867 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
868 | |
869 | import os |
870 | -import re |
871 | import json |
872 | -from collections import Iterable |
873 | +from inspect import getargspec |
874 | +from collections import Iterable, OrderedDict |
875 | |
876 | from charmhelpers.core import host |
877 | from charmhelpers.core import hookenv |
878 | @@ -119,7 +119,7 @@ |
879 | """ |
880 | self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') |
881 | self._ready = None |
882 | - self.services = {} |
883 | + self.services = OrderedDict() |
884 | for service in services or []: |
885 | service_name = service['service'] |
886 | self.services[service_name] = service |
887 | @@ -132,8 +132,8 @@ |
888 | if hook_name == 'stop': |
889 | self.stop_services() |
890 | else: |
891 | + self.reconfigure_services() |
892 | self.provide_data() |
893 | - self.reconfigure_services() |
894 | cfg = hookenv.config() |
895 | if cfg.implicit_save: |
896 | cfg.save() |
897 | @@ -145,15 +145,36 @@ |
898 | A provider must have a `name` attribute, which indicates which relation |
899 | to set data on, and a `provide_data()` method, which returns a dict of |
900 | data to set. |
901 | + |
902 | + The `provide_data()` method can optionally accept two parameters: |
903 | + |
904 | + * ``remote_service`` The name of the remote service that the data will |
905 | + be provided to. The `provide_data()` method will be called once |
906 | + for each connected service (not unit). This allows the method to |
907 | + tailor its data to the given service. |
908 | + * ``service_ready`` Whether or not the service definition had all of |
909 | + its requirements met, and thus the ``data_ready`` callbacks run. |
910 | + |
911 | + Note that the ``provided_data`` methods are now called **after** the |
912 | + ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks |
913 | + a chance to generate any data necessary for the providing to the remote |
914 | + services. |
915 | """ |
916 | - hook_name = hookenv.hook_name() |
917 | - for service in self.services.values(): |
918 | + for service_name, service in self.services.items(): |
919 | + service_ready = self.is_ready(service_name) |
920 | for provider in service.get('provided_data', []): |
921 | - if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name): |
922 | - data = provider.provide_data() |
923 | - _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data |
924 | - if _ready: |
925 | - hookenv.relation_set(None, data) |
926 | + for relid in hookenv.relation_ids(provider.name): |
927 | + units = hookenv.related_units(relid) |
928 | + if not units: |
929 | + continue |
930 | + remote_service = units[0].split('/')[0] |
931 | + argspec = getargspec(provider.provide_data) |
932 | + if len(argspec.args) > 1: |
933 | + data = provider.provide_data(remote_service, service_ready) |
934 | + else: |
935 | + data = provider.provide_data() |
936 | + if data: |
937 | + hookenv.relation_set(relid, data) |
938 | |
939 | def reconfigure_services(self, *service_names): |
940 | """ |
941 | |
942 | === modified file 'hooks/charmhelpers/fetch/__init__.py' |
943 | --- hooks/charmhelpers/fetch/__init__.py 2015-03-11 11:45:09 +0000 |
944 | +++ hooks/charmhelpers/fetch/__init__.py 2015-06-04 08:44:44 +0000 |
945 | @@ -158,7 +158,7 @@ |
946 | |
947 | def apt_cache(in_memory=True): |
948 | """Build and return an apt cache""" |
949 | - import apt_pkg |
950 | + from apt import apt_pkg |
951 | apt_pkg.init() |
952 | if in_memory: |
953 | apt_pkg.config.set("Dir::Cache::pkgcache", "") |
954 | |
955 | === modified file 'hooks/charmhelpers/fetch/giturl.py' |
956 | --- hooks/charmhelpers/fetch/giturl.py 2015-03-11 11:45:09 +0000 |
957 | +++ hooks/charmhelpers/fetch/giturl.py 2015-06-04 08:44:44 +0000 |
958 | @@ -45,14 +45,16 @@ |
959 | else: |
960 | return True |
961 | |
962 | - def clone(self, source, dest, branch): |
963 | + def clone(self, source, dest, branch, depth=None): |
964 | if not self.can_handle(source): |
965 | raise UnhandledSource("Cannot handle {}".format(source)) |
966 | |
967 | - repo = Repo.clone_from(source, dest) |
968 | - repo.git.checkout(branch) |
969 | + if depth: |
970 | + Repo.clone_from(source, dest, branch=branch, depth=depth) |
971 | + else: |
972 | + Repo.clone_from(source, dest, branch=branch) |
973 | |
974 | - def install(self, source, branch="master", dest=None): |
975 | + def install(self, source, branch="master", dest=None, depth=None): |
976 | url_parts = self.parse_url(source) |
977 | branch_name = url_parts.path.strip("/").split("/")[-1] |
978 | if dest: |
979 | @@ -63,7 +65,7 @@ |
980 | if not os.path.exists(dest_dir): |
981 | mkdir(dest_dir, perms=0o755) |
982 | try: |
983 | - self.clone(source, dest_dir, branch) |
984 | + self.clone(source, dest_dir, branch, depth) |
985 | except GitCommandError as e: |
986 | raise UnhandledSource(e.message) |
987 | except OSError as e: |
988 | |
989 | === modified file 'hooks/keystone_hooks.py' |
990 | --- hooks/keystone_hooks.py 2015-04-01 14:39:21 +0000 |
991 | +++ hooks/keystone_hooks.py 2015-06-04 08:44:44 +0000 |
992 | @@ -2,7 +2,6 @@ |
993 | import hashlib |
994 | import json |
995 | import os |
996 | -import stat |
997 | import sys |
998 | |
999 | from subprocess import check_call |
1000 | @@ -68,18 +67,18 @@ |
1001 | setup_ipv6, |
1002 | send_notifications, |
1003 | check_peer_actions, |
1004 | - CA_CERT_PATH, |
1005 | - ensure_permissions, |
1006 | get_ssl_sync_request_units, |
1007 | is_ssl_cert_master, |
1008 | is_db_ready, |
1009 | clear_ssl_synced_units, |
1010 | is_db_initialised, |
1011 | + update_certs_if_available, |
1012 | is_pki_enabled, |
1013 | ensure_ssl_dir, |
1014 | ensure_pki_dir_permissions, |
1015 | force_ssl_sync, |
1016 | filter_null, |
1017 | + ensure_ssl_dirs, |
1018 | ) |
1019 | |
1020 | from charmhelpers.contrib.hahelpers.cluster import ( |
1021 | @@ -149,13 +148,7 @@ |
1022 | |
1023 | check_call(['chmod', '-R', 'g+wrx', '/var/lib/keystone/']) |
1024 | |
1025 | - # Ensure unison can write to certs dir. |
1026 | - # FIXME: need to a better way around this e.g. move cert to it's own dir |
1027 | - # and give that unison permissions. |
1028 | - path = os.path.dirname(CA_CERT_PATH) |
1029 | - perms = int(oct(stat.S_IMODE(os.stat(path).st_mode) | |
1030 | - (stat.S_IWGRP | stat.S_IXGRP)), base=8) |
1031 | - ensure_permissions(path, group='keystone', perms=perms) |
1032 | + ensure_ssl_dirs() |
1033 | |
1034 | save_script_rc() |
1035 | configure_https() |
1036 | @@ -423,6 +416,7 @@ |
1037 | @hooks.hook('cluster-relation-changed', |
1038 | 'cluster-relation-departed') |
1039 | @restart_on_change(restart_map(), stopstart=True) |
1040 | +@update_certs_if_available |
1041 | def cluster_changed(): |
1042 | unison.ssh_authorized_peers(user=SSH_USER, |
1043 | group='juju_keystone', |
1044 | @@ -430,9 +424,9 @@ |
1045 | ensure_local_user=True) |
1046 | # NOTE(jamespage) re-echo passwords for peer storage |
1047 | echo_whitelist = ['_passwd', 'identity-service:', 'ssl-cert-master', |
1048 | - 'db-initialised'] |
1049 | + 'db-initialised', 'ssl-cert-available-updates'] |
1050 | log("Peer echo whitelist: %s" % (echo_whitelist), level=DEBUG) |
1051 | - peer_echo(includes=echo_whitelist) |
1052 | + peer_echo(includes=echo_whitelist, force=True) |
1053 | |
1054 | check_peer_actions() |
1055 | |
1056 | @@ -466,6 +460,14 @@ |
1057 | CONFIGS.write_all() |
1058 | |
1059 | |
1060 | +@hooks.hook('leader-settings-changed') |
1061 | +def leader_settings_changed(): |
1062 | + log('Firing identity_changed hook for all related services.') |
1063 | + for rid in relation_ids('identity-service'): |
1064 | + for unit in related_units(rid): |
1065 | + identity_changed(relation_id=rid, remote_unit=unit) |
1066 | + |
1067 | + |
1068 | @hooks.hook('ha-relation-joined') |
1069 | def ha_joined(relation_id=None): |
1070 | cluster_config = get_hacluster_config() |
1071 | @@ -575,6 +577,8 @@ |
1072 | peer_interface='cluster', |
1073 | ensure_local_user=True) |
1074 | |
1075 | + ensure_ssl_dirs() |
1076 | + |
1077 | CONFIGS.write_all() |
1078 | update_nrpe_config() |
1079 | |
1080 | |
1081 | === modified file 'hooks/keystone_ssl.py' |
1082 | --- hooks/keystone_ssl.py 2015-02-18 17:20:23 +0000 |
1083 | +++ hooks/keystone_ssl.py 2015-06-04 08:44:44 +0000 |
1084 | @@ -5,12 +5,10 @@ |
1085 | import subprocess |
1086 | import tarfile |
1087 | import tempfile |
1088 | -import time |
1089 | |
1090 | from charmhelpers.core.hookenv import ( |
1091 | log, |
1092 | DEBUG, |
1093 | - WARNING, |
1094 | ) |
1095 | |
1096 | CA_EXPIRY = '365' |
1097 | @@ -312,31 +310,9 @@ |
1098 | if os.path.isfile(crtpath): |
1099 | log('Found existing certificate for %s.' % common_name, |
1100 | level=DEBUG) |
1101 | - max_retries = 3 |
1102 | - while True: |
1103 | - mtime = os.path.getmtime(crtpath) |
1104 | - |
1105 | - crt = open(crtpath, 'r').read() |
1106 | - try: |
1107 | - key = open(keypath, 'r').read() |
1108 | - except: |
1109 | - msg = ('Could not load ssl private key for %s from %s' % |
1110 | - (common_name, keypath)) |
1111 | - raise Exception(msg) |
1112 | - |
1113 | - # Ensure we are not reading a file that is being written to |
1114 | - if mtime != os.path.getmtime(crtpath): |
1115 | - max_retries -= 1 |
1116 | - if max_retries == 0: |
1117 | - msg = ("crt contents changed during read - retry " |
1118 | - "failed") |
1119 | - raise Exception(msg) |
1120 | - |
1121 | - log("crt contents changed during read - re-reading", |
1122 | - level=WARNING) |
1123 | - time.sleep(1) |
1124 | - else: |
1125 | - return crt, key |
1126 | + crt = open(crtpath, 'r').read() |
1127 | + key = open(keypath, 'r').read() |
1128 | + return crt, key |
1129 | |
1130 | crt, key = self._create_certificate(common_name, common_name) |
1131 | return open(crt, 'r').read(), open(key, 'r').read() |
1132 | |
1133 | === modified file 'hooks/keystone_utils.py' |
1134 | --- hooks/keystone_utils.py 2015-05-08 11:43:00 +0000 |
1135 | +++ hooks/keystone_utils.py 2015-06-04 08:44:44 +0000 |
1136 | @@ -8,6 +8,7 @@ |
1137 | import re |
1138 | import shutil |
1139 | import subprocess |
1140 | +import tarfile |
1141 | import threading |
1142 | import time |
1143 | import urlparse |
1144 | @@ -71,7 +72,6 @@ |
1145 | DEBUG, |
1146 | INFO, |
1147 | WARNING, |
1148 | - ERROR, |
1149 | ) |
1150 | |
1151 | from charmhelpers.fetch import ( |
1152 | @@ -160,6 +160,8 @@ |
1153 | |
1154 | APACHE_SSL_DIR = '/etc/apache2/ssl/keystone' |
1155 | SYNC_FLAGS_DIR = '/var/lib/keystone/juju_sync_flags/' |
1156 | +SYNC_DIR = '/var/lib/keystone/juju_sync/' |
1157 | +SSL_SYNC_ARCHIVE = os.path.join(SYNC_DIR, 'juju-ssl-sync.tar') |
1158 | SSL_DIR = '/var/lib/keystone/juju_ssl/' |
1159 | PKI_CERTS_DIR = os.path.join(SSL_DIR, 'pki') |
1160 | SSL_CA_NAME = 'Ubuntu Cloud' |
1161 | @@ -382,26 +384,20 @@ |
1162 | return |
1163 | |
1164 | |
1165 | -def set_db_initialised(): |
1166 | - for rid in relation_ids('cluster'): |
1167 | - relation_set(relation_settings={'db-initialised': 'True'}, |
1168 | - relation_id=rid) |
1169 | - |
1170 | - |
1171 | def is_db_initialised(): |
1172 | - for rid in relation_ids('cluster'): |
1173 | - units = related_units(rid) + [local_unit()] |
1174 | - for unit in units: |
1175 | - db_initialised = relation_get(attribute='db-initialised', |
1176 | - unit=unit, rid=rid) |
1177 | - if db_initialised: |
1178 | - log("Database is initialised", level=DEBUG) |
1179 | - return True |
1180 | + if relation_ids('cluster'): |
1181 | + inited = peer_retrieve('db-initialised') |
1182 | + if inited and bool_from_string(inited): |
1183 | + log("Database is initialised", level=DEBUG) |
1184 | + return True |
1185 | |
1186 | log("Database is NOT initialised", level=DEBUG) |
1187 | return False |
1188 | |
1189 | |
1190 | +# NOTE(jamespage): Retry deals with sync issues during one-shot HA deploys. |
1191 | +# mysql might be restarting or suchlike. |
1192 | +@retry_on_exception(5, base_delay=3, exc_type=subprocess.CalledProcessError) |
1193 | def migrate_database(): |
1194 | """Runs keystone-manage to initialize a new database or migrate existing""" |
1195 | log('Migrating the keystone database.', level=INFO) |
1196 | @@ -413,7 +409,7 @@ |
1197 | subprocess.check_output(cmd) |
1198 | service_start('keystone') |
1199 | time.sleep(10) |
1200 | - set_db_initialised() |
1201 | + peer_store('db-initialised', 'True') |
1202 | |
1203 | # OLD |
1204 | |
1205 | @@ -768,6 +764,16 @@ |
1206 | return passwd |
1207 | |
1208 | |
1209 | +def ensure_ssl_dirs(): |
1210 | + """Ensure unison has access to these dirs.""" |
1211 | + for path in [SYNC_FLAGS_DIR, SYNC_DIR]: |
1212 | + if not os.path.isdir(path): |
1213 | + mkdir(path, SSH_USER, 'juju_keystone', 0o775) |
1214 | + else: |
1215 | + ensure_permissions(path, user=SSH_USER, group='keystone', |
1216 | + perms=0o755) |
1217 | + |
1218 | + |
1219 | def ensure_permissions(path, user=None, group=None, perms=None, recurse=False, |
1220 | maxdepth=50): |
1221 | """Set chownand chmod for path |
1222 | @@ -864,7 +870,7 @@ |
1223 | service.strip(), action)) |
1224 | log("Creating action %s" % (flagfile), level=DEBUG) |
1225 | write_file(flagfile, content='', owner=SSH_USER, group='keystone', |
1226 | - perms=0o644) |
1227 | + perms=0o744) |
1228 | |
1229 | |
1230 | def create_peer_actions(actions): |
1231 | @@ -873,7 +879,7 @@ |
1232 | flagfile = os.path.join(SYNC_FLAGS_DIR, action) |
1233 | log("Creating action %s" % (flagfile), level=DEBUG) |
1234 | write_file(flagfile, content='', owner=SSH_USER, group='keystone', |
1235 | - perms=0o644) |
1236 | + perms=0o744) |
1237 | |
1238 | |
1239 | @retry_on_exception(3, base_delay=2, exc_type=subprocess.CalledProcessError) |
1240 | @@ -1011,6 +1017,22 @@ |
1241 | return True |
1242 | |
1243 | |
1244 | +def stage_paths_for_sync(paths): |
1245 | + shutil.rmtree(SYNC_DIR) |
1246 | + ensure_ssl_dirs() |
1247 | + with tarfile.open(SSL_SYNC_ARCHIVE, 'w') as fd: |
1248 | + for path in paths: |
1249 | + if os.path.exists(path): |
1250 | + log("Adding path '%s' sync tarball" % (path), level=DEBUG) |
1251 | + fd.add(path) |
1252 | + else: |
1253 | + log("Path '%s' does not exist - not adding to sync " |
1254 | + "tarball" % (path), level=INFO) |
1255 | + |
1256 | + ensure_permissions(SYNC_DIR, user=SSH_USER, group='keystone', |
1257 | + perms=0o755, recurse=True) |
1258 | + |
1259 | + |
1260 | def is_pki_enabled(): |
1261 | enable_pki = config('enable-pki') |
1262 | if enable_pki and bool_from_string(enable_pki): |
1263 | @@ -1025,6 +1047,33 @@ |
1264 | perms=0o755, recurse=True) |
1265 | |
1266 | |
1267 | +def update_certs_if_available(f): |
1268 | + def _inner_update_certs_if_available(*args, **kwargs): |
1269 | + path = None |
1270 | + for rid in relation_ids('cluster'): |
1271 | + path = relation_get(attribute='ssl-cert-available-updates', |
1272 | + rid=rid, unit=local_unit()) |
1273 | + |
1274 | + if path and os.path.exists(path): |
1275 | + log("Updating certs from '%s'" % (path), level=DEBUG) |
1276 | + with tarfile.open(path) as fd: |
1277 | + files = ["/%s" % m.name for m in fd.getmembers()] |
1278 | + fd.extractall(path='/') |
1279 | + |
1280 | + for syncfile in files: |
1281 | + ensure_permissions(syncfile, user='keystone', group='keystone', |
1282 | + perms=0o744, recurse=True) |
1283 | + |
1284 | + # Mark as complete |
1285 | + os.rename(path, "%s.complete" % (path)) |
1286 | + else: |
1287 | + log("No cert updates available", level=DEBUG) |
1288 | + |
1289 | + return f(*args, **kwargs) |
1290 | + |
1291 | + return _inner_update_certs_if_available |
1292 | + |
1293 | + |
1294 | def synchronize_ca(fatal=False): |
1295 | """Broadcast service credentials to peers. |
1296 | |
1297 | @@ -1038,7 +1087,7 @@ |
1298 | |
1299 | Returns a dictionary of settings to be set on the cluster relation. |
1300 | """ |
1301 | - paths_to_sync = [SYNC_FLAGS_DIR] |
1302 | + paths_to_sync = [] |
1303 | peer_service_actions = {'restart': []} |
1304 | peer_actions = [] |
1305 | |
1306 | @@ -1068,9 +1117,6 @@ |
1307 | paths_to_sync.append(PKI_CERTS_DIR) |
1308 | peer_actions.append('ensure-pki-permissions') |
1309 | |
1310 | - # Ensure unique |
1311 | - paths_to_sync = list(set(paths_to_sync)) |
1312 | - |
1313 | if not paths_to_sync: |
1314 | log("Nothing to sync - skipping", level=DEBUG) |
1315 | return {} |
1316 | @@ -1083,50 +1129,27 @@ |
1317 | |
1318 | create_peer_actions(peer_actions) |
1319 | |
1320 | - cluster_rel_settings = {} |
1321 | - |
1322 | - retries = 3 |
1323 | - while True: |
1324 | - hash1 = hashlib.sha256() |
1325 | - for path in paths_to_sync: |
1326 | - update_hash_from_path(hash1, path) |
1327 | - |
1328 | - try: |
1329 | - synced_units = unison_sync(paths_to_sync) |
1330 | - if synced_units: |
1331 | - # Format here needs to match that used when peers request sync |
1332 | - synced_units = [u.replace('/', '-') for u in synced_units] |
1333 | - cluster_rel_settings['ssl-synced-units'] = \ |
1334 | - json.dumps(synced_units) |
1335 | - except Exception as exc: |
1336 | - if fatal: |
1337 | - raise |
1338 | - else: |
1339 | - log("Sync failed but fatal=False - %s" % (exc), level=INFO) |
1340 | - return {} |
1341 | - |
1342 | - hash2 = hashlib.sha256() |
1343 | - for path in paths_to_sync: |
1344 | - update_hash_from_path(hash2, path) |
1345 | - |
1346 | - # Detect whether someone else has synced to this unit while we did our |
1347 | - # transfer. |
1348 | - if hash1.hexdigest() != hash2.hexdigest(): |
1349 | - retries -= 1 |
1350 | - if retries > 0: |
1351 | - log("SSL dir contents changed during sync - retrying unison " |
1352 | - "sync %s more times" % (retries), level=WARNING) |
1353 | - else: |
1354 | - log("SSL dir contents changed during sync - retries failed", |
1355 | - level=ERROR) |
1356 | - return {} |
1357 | - else: |
1358 | - break |
1359 | - |
1360 | - hash = hash1.hexdigest() |
1361 | - log("Sending restart-services-trigger=%s to all peers" % (hash), |
1362 | + paths_to_sync = list(set(paths_to_sync)) |
1363 | + stage_paths_for_sync(paths_to_sync) |
1364 | + |
1365 | + hash1 = hashlib.sha256() |
1366 | + for path in paths_to_sync: |
1367 | + update_hash_from_path(hash1, path) |
1368 | + |
1369 | + cluster_rel_settings = {'ssl-cert-available-updates': SSL_SYNC_ARCHIVE, |
1370 | + 'sync-hash': hash1.hexdigest()} |
1371 | + |
1372 | + synced_units = unison_sync([SSL_SYNC_ARCHIVE, SYNC_FLAGS_DIR]) |
1373 | + if synced_units: |
1374 | + # Format here needs to match that used when peers request sync |
1375 | + synced_units = [u.replace('/', '-') for u in synced_units] |
1376 | + cluster_rel_settings['ssl-synced-units'] = \ |
1377 | + json.dumps(synced_units) |
1378 | + |
1379 | + trigger = str(uuid.uuid4()) |
1380 | + log("Sending restart-services-trigger=%s to all peers" % (trigger), |
1381 | level=DEBUG) |
1382 | - cluster_rel_settings['restart-services-trigger'] = hash |
1383 | + cluster_rel_settings['restart-services-trigger'] = trigger |
1384 | |
1385 | log("Sync complete", level=DEBUG) |
1386 | return cluster_rel_settings |
1387 | |
1388 | === added symlink 'hooks/leader-settings-changed' |
1389 | === target is u'keystone_hooks.py' |
1390 | === modified file 'unit_tests/test_keystone_hooks.py' |
1391 | --- unit_tests/test_keystone_hooks.py 2015-04-17 12:10:54 +0000 |
1392 | +++ unit_tests/test_keystone_hooks.py 2015-06-04 08:44:44 +0000 |
1393 | @@ -59,6 +59,9 @@ |
1394 | 'add_service_to_keystone', |
1395 | 'synchronize_ca_if_changed', |
1396 | 'update_nrpe_config', |
1397 | + 'ensure_ssl_dirs', |
1398 | + 'is_db_initialised', |
1399 | + 'is_db_ready', |
1400 | # other |
1401 | 'check_call', |
1402 | 'execd_preinstall', |
1403 | @@ -237,18 +240,15 @@ |
1404 | configs.write = MagicMock() |
1405 | hooks.pgsql_db_changed() |
1406 | |
1407 | - @patch.object(hooks, 'is_db_initialised') |
1408 | - @patch.object(hooks, 'is_db_ready') |
1409 | @patch('keystone_utils.log') |
1410 | @patch('keystone_utils.ensure_ssl_cert_master') |
1411 | @patch.object(hooks, 'CONFIGS') |
1412 | @patch.object(hooks, 'identity_changed') |
1413 | def test_db_changed_allowed(self, identity_changed, configs, |
1414 | mock_ensure_ssl_cert_master, |
1415 | - mock_log, mock_is_db_ready, |
1416 | - mock_is_db_initialised): |
1417 | - mock_is_db_initialised.return_value = True |
1418 | - mock_is_db_ready.return_value = True |
1419 | + mock_log): |
1420 | + self.is_db_initialised.return_value = True |
1421 | + self.is_db_ready.return_value = True |
1422 | mock_ensure_ssl_cert_master.return_value = False |
1423 | self.relation_ids.return_value = ['identity-service:0'] |
1424 | self.related_units.return_value = ['unit/0'] |
1425 | @@ -262,15 +262,13 @@ |
1426 | relation_id='identity-service:0', |
1427 | remote_unit='unit/0') |
1428 | |
1429 | - @patch.object(hooks, 'is_db_ready') |
1430 | @patch('keystone_utils.log') |
1431 | @patch('keystone_utils.ensure_ssl_cert_master') |
1432 | @patch.object(hooks, 'CONFIGS') |
1433 | @patch.object(hooks, 'identity_changed') |
1434 | def test_db_changed_not_allowed(self, identity_changed, configs, |
1435 | - mock_ensure_ssl_cert_master, mock_log, |
1436 | - mock_is_db_ready): |
1437 | - mock_is_db_ready.return_value = False |
1438 | + mock_ensure_ssl_cert_master, mock_log): |
1439 | + self.is_db_ready.return_value = False |
1440 | mock_ensure_ssl_cert_master.return_value = False |
1441 | self.relation_ids.return_value = ['identity-service:0'] |
1442 | self.related_units.return_value = ['unit/0'] |
1443 | @@ -284,15 +282,12 @@ |
1444 | |
1445 | @patch('keystone_utils.log') |
1446 | @patch('keystone_utils.ensure_ssl_cert_master') |
1447 | - @patch.object(hooks, 'is_db_initialised') |
1448 | - @patch.object(hooks, 'is_db_ready') |
1449 | @patch.object(hooks, 'CONFIGS') |
1450 | @patch.object(hooks, 'identity_changed') |
1451 | def test_postgresql_db_changed(self, identity_changed, configs, |
1452 | - mock_is_db_ready, mock_is_db_initialised, |
1453 | mock_ensure_ssl_cert_master, mock_log): |
1454 | - mock_is_db_initialised.return_value = True |
1455 | - mock_is_db_ready.return_value = True |
1456 | + self.is_db_initialised.return_value = True |
1457 | + self.is_db_ready.return_value = True |
1458 | mock_ensure_ssl_cert_master.return_value = False |
1459 | self.relation_ids.return_value = ['identity-service:0'] |
1460 | self.related_units.return_value = ['unit/0'] |
1461 | @@ -309,15 +304,13 @@ |
1462 | @patch.object(hooks, 'git_install_requested') |
1463 | @patch('keystone_utils.log') |
1464 | @patch('keystone_utils.ensure_ssl_cert_master') |
1465 | + @patch('keystone_utils.ensure_ssl_dirs') |
1466 | @patch.object(hooks, 'ensure_pki_dir_permissions') |
1467 | @patch.object(hooks, 'ensure_ssl_dir') |
1468 | @patch.object(hooks, 'is_pki_enabled') |
1469 | @patch.object(hooks, 'is_ssl_cert_master') |
1470 | @patch.object(hooks, 'send_ssl_sync_request') |
1471 | - @patch.object(hooks, 'is_db_initialised') |
1472 | - @patch.object(hooks, 'is_db_ready') |
1473 | @patch.object(hooks, 'peer_units') |
1474 | - @patch.object(hooks, 'ensure_permissions') |
1475 | @patch.object(hooks, 'admin_relation_changed') |
1476 | @patch.object(hooks, 'cluster_joined') |
1477 | @patch.object(unison, 'ensure_user') |
1478 | @@ -331,22 +324,20 @@ |
1479 | ensure_user, |
1480 | cluster_joined, |
1481 | admin_relation_changed, |
1482 | - ensure_permissions, |
1483 | mock_peer_units, |
1484 | - mock_is_db_ready, |
1485 | - mock_is_db_initialised, |
1486 | mock_send_ssl_sync_request, |
1487 | mock_is_ssl_cert_master, |
1488 | mock_is_pki_enabled, |
1489 | mock_ensure_ssl_dir, |
1490 | mock_ensure_pki_dir_permissions, |
1491 | + mock_ensure_ssl_dirs, |
1492 | mock_ensure_ssl_cert_master, |
1493 | mock_log, git_requested): |
1494 | git_requested.return_value = False |
1495 | mock_is_pki_enabled.return_value = True |
1496 | mock_is_ssl_cert_master.return_value = True |
1497 | - mock_is_db_initialised.return_value = True |
1498 | - mock_is_db_ready.return_value = True |
1499 | + self.is_db_initialised.return_value = True |
1500 | + self.is_db_ready.return_value = True |
1501 | self.openstack_upgrade_available.return_value = False |
1502 | self.is_elected_leader.return_value = True |
1503 | # avoid having to mock syncer |
1504 | @@ -374,13 +365,13 @@ |
1505 | @patch.object(hooks, 'git_install_requested') |
1506 | @patch('keystone_utils.log') |
1507 | @patch('keystone_utils.ensure_ssl_cert_master') |
1508 | + @patch('keystone_utils.ensure_ssl_dirs') |
1509 | @patch.object(hooks, 'update_all_identity_relation_units') |
1510 | @patch.object(hooks, 'ensure_pki_dir_permissions') |
1511 | @patch.object(hooks, 'ensure_ssl_dir') |
1512 | @patch.object(hooks, 'is_pki_enabled') |
1513 | @patch.object(hooks, 'peer_units') |
1514 | @patch.object(hooks, 'is_ssl_cert_master') |
1515 | - @patch.object(hooks, 'ensure_permissions') |
1516 | @patch.object(hooks, 'cluster_joined') |
1517 | @patch.object(unison, 'ensure_user') |
1518 | @patch.object(unison, 'get_homedir') |
1519 | @@ -391,13 +382,13 @@ |
1520 | identity_changed, |
1521 | configs, get_homedir, |
1522 | ensure_user, cluster_joined, |
1523 | - ensure_permissions, |
1524 | mock_is_ssl_cert_master, |
1525 | mock_peer_units, |
1526 | mock_is_pki_enabled, |
1527 | mock_ensure_ssl_dir, |
1528 | mock_ensure_pki_permissions, |
1529 | mock_update_all_id_rel_units, |
1530 | + ensure_ssl_dirs, |
1531 | mock_ensure_ssl_cert_master, |
1532 | mock_log, git_requested): |
1533 | git_requested.return_value = False |
1534 | @@ -423,15 +414,13 @@ |
1535 | @patch.object(hooks, 'git_install_requested') |
1536 | @patch('keystone_utils.log') |
1537 | @patch('keystone_utils.ensure_ssl_cert_master') |
1538 | + @patch('keystone_utils.ensure_ssl_dirs') |
1539 | @patch.object(hooks, 'ensure_pki_dir_permissions') |
1540 | @patch.object(hooks, 'ensure_ssl_dir') |
1541 | @patch.object(hooks, 'is_pki_enabled') |
1542 | @patch.object(hooks, 'is_ssl_cert_master') |
1543 | @patch.object(hooks, 'send_ssl_sync_request') |
1544 | - @patch.object(hooks, 'is_db_initialised') |
1545 | - @patch.object(hooks, 'is_db_ready') |
1546 | @patch.object(hooks, 'peer_units') |
1547 | - @patch.object(hooks, 'ensure_permissions') |
1548 | @patch.object(hooks, 'admin_relation_changed') |
1549 | @patch.object(hooks, 'cluster_joined') |
1550 | @patch.object(unison, 'ensure_user') |
1551 | @@ -444,22 +433,20 @@ |
1552 | configs, get_homedir, |
1553 | ensure_user, cluster_joined, |
1554 | admin_relation_changed, |
1555 | - ensure_permissions, |
1556 | mock_peer_units, |
1557 | - mock_is_db_ready, |
1558 | - mock_is_db_initialised, |
1559 | mock_send_ssl_sync_request, |
1560 | mock_is_ssl_cert_master, |
1561 | mock_is_pki_enabled, |
1562 | mock_ensure_ssl_dir, |
1563 | mock_ensure_pki_permissions, |
1564 | + mock_ensure_ssl_dirs, |
1565 | mock_ensure_ssl_cert_master, |
1566 | mock_log, git_requested): |
1567 | git_requested.return_value = False |
1568 | mock_is_pki_enabled.return_value = True |
1569 | mock_is_ssl_cert_master.return_value = True |
1570 | - mock_is_db_ready.return_value = True |
1571 | - mock_is_db_initialised.return_value = True |
1572 | + self.is_db_ready.return_value = True |
1573 | + self.is_db_initialised.return_value = True |
1574 | self.openstack_upgrade_available.return_value = True |
1575 | self.is_elected_leader.return_value = True |
1576 | # avoid having to mock syncer |
1577 | @@ -496,7 +483,6 @@ |
1578 | @patch.object(hooks, 'is_db_initialised') |
1579 | @patch.object(hooks, 'is_db_ready') |
1580 | @patch.object(hooks, 'peer_units') |
1581 | - @patch.object(hooks, 'ensure_permissions') |
1582 | @patch.object(hooks, 'admin_relation_changed') |
1583 | @patch.object(hooks, 'cluster_joined') |
1584 | @patch.object(unison, 'ensure_user') |
1585 | @@ -508,7 +494,7 @@ |
1586 | identity_changed, |
1587 | configs, get_homedir, ensure_user, |
1588 | cluster_joined, admin_relation_changed, |
1589 | - ensure_permissions, mock_peer_units, |
1590 | + mock_peer_units, |
1591 | mock_is_db_ready, |
1592 | mock_is_db_initialised, |
1593 | mock_send_ssl_sync_request, |
1594 | @@ -546,18 +532,15 @@ |
1595 | self.assertFalse(self.openstack_upgrade_available.called) |
1596 | self.assertFalse(self.do_openstack_upgrade.called) |
1597 | |
1598 | - @patch.object(hooks, 'is_db_initialised') |
1599 | - @patch.object(hooks, 'is_db_ready') |
1600 | @patch('keystone_utils.log') |
1601 | @patch('keystone_utils.ensure_ssl_cert_master') |
1602 | @patch.object(hooks, 'hashlib') |
1603 | @patch.object(hooks, 'send_notifications') |
1604 | def test_identity_changed_leader(self, mock_send_notifications, |
1605 | mock_hashlib, mock_ensure_ssl_cert_master, |
1606 | - mock_log, mock_is_db_ready, |
1607 | - mock_is_db_initialised): |
1608 | - mock_is_db_initialised.return_value = True |
1609 | - mock_is_db_ready.return_value = True |
1610 | + mock_log): |
1611 | + self.is_db_initialised.return_value = True |
1612 | + self.is_db_ready.return_value = True |
1613 | mock_ensure_ssl_cert_master.return_value = False |
1614 | hooks.identity_changed( |
1615 | relation_id='identity-service:0', |
1616 | @@ -597,6 +580,7 @@ |
1617 | @patch.object(hooks, 'get_ssl_sync_request_units') |
1618 | @patch.object(hooks, 'is_ssl_cert_master') |
1619 | @patch.object(hooks, 'peer_units') |
1620 | + @patch('keystone_utils.relation_ids') |
1621 | @patch('keystone_utils.config') |
1622 | @patch('keystone_utils.log') |
1623 | @patch('keystone_utils.ensure_ssl_cert_master') |
1624 | @@ -607,7 +591,8 @@ |
1625 | def test_cluster_changed(self, configs, ssh_authorized_peers, |
1626 | check_peer_actions, mock_synchronize_ca, |
1627 | mock_ensure_ssl_cert_master, |
1628 | - mock_log, mock_config, mock_peer_units, |
1629 | + mock_log, mock_config, mock_relation_ids, |
1630 | + mock_peer_units, |
1631 | mock_is_ssl_cert_master, |
1632 | mock_get_ssl_sync_request_units, |
1633 | mock_update_all_identity_relation_units): |
1634 | @@ -618,6 +603,7 @@ |
1635 | mock_is_ssl_cert_master.return_value = False |
1636 | mock_peer_units.return_value = ['unit/0'] |
1637 | mock_ensure_ssl_cert_master.return_value = False |
1638 | + mock_relation_ids.return_value = [] |
1639 | self.is_elected_leader.return_value = False |
1640 | |
1641 | def fake_rel_get(attribute=None, *args, **kwargs): |
1642 | @@ -632,8 +618,8 @@ |
1643 | |
1644 | hooks.cluster_changed() |
1645 | whitelist = ['_passwd', 'identity-service:', 'ssl-cert-master', |
1646 | - 'db-initialised'] |
1647 | - self.peer_echo.assert_called_with(includes=whitelist) |
1648 | + 'db-initialised', 'ssl-cert-available-updates'] |
1649 | + self.peer_echo.assert_called_with(force=True, includes=whitelist) |
1650 | ssh_authorized_peers.assert_called_with( |
1651 | user=self.ssh_user, group='juju_keystone', |
1652 | peer_interface='cluster', ensure_local_user=True) |
1653 | @@ -733,18 +719,14 @@ |
1654 | |
1655 | @patch('keystone_utils.log') |
1656 | @patch('keystone_utils.ensure_ssl_cert_master') |
1657 | - @patch.object(hooks, 'is_db_ready') |
1658 | - @patch.object(hooks, 'is_db_initialised') |
1659 | @patch.object(hooks, 'identity_changed') |
1660 | @patch.object(hooks, 'CONFIGS') |
1661 | def test_ha_relation_changed_clustered_leader(self, configs, |
1662 | identity_changed, |
1663 | - mock_is_db_initialised, |
1664 | - mock_is_db_ready, |
1665 | mock_ensure_ssl_cert_master, |
1666 | mock_log): |
1667 | - mock_is_db_initialised.return_value = True |
1668 | - mock_is_db_ready.return_value = True |
1669 | + self.is_db_initialised.return_value = True |
1670 | + self.is_db_ready.return_value = True |
1671 | mock_ensure_ssl_cert_master.return_value = False |
1672 | self.relation_get.return_value = True |
1673 | self.is_elected_leader.return_value = True |
1674 | @@ -807,8 +789,8 @@ |
1675 | mock_is_elected_leader, |
1676 | mock_relation_ids, |
1677 | mock_log, |
1678 | + mock_is_db_initialised, |
1679 | mock_is_db_ready, |
1680 | - mock_is_db_initialised, |
1681 | git_requested): |
1682 | mock_is_db_initialised.return_value = True |
1683 | mock_is_db_ready.return_value = True |