Merge lp:~le-charmers/charms/trusty/hacluster/leadership-election into lp:~openstack-charmers/charms/trusty/hacluster/next
- Trusty Tahr (14.04)
- leadership-election
- Merge into next
Proposed by
Edward Hope-Morley
Status: | Work in progress |
---|---|
Proposed branch: | lp:~le-charmers/charms/trusty/hacluster/leadership-election |
Merge into: | lp:~openstack-charmers/charms/trusty/hacluster/next |
Diff against target: |
1869 lines (+1432/-69) (has conflicts) 13 files modified
hooks/charmhelpers/contrib/hahelpers/cluster.py (+56/-6) hooks/charmhelpers/contrib/openstack/utils.py (+167/-2) hooks/charmhelpers/contrib/python/packages.py (+31/-5) hooks/charmhelpers/core/hookenv.py (+212/-38) hooks/charmhelpers/core/host.py (+1/-1) hooks/charmhelpers/core/services/base.py (+32/-11) hooks/charmhelpers/core/strutils.py (+42/-0) hooks/charmhelpers/core/unitdata.py (+477/-0) hooks/charmhelpers/fetch/__init__.py (+1/-1) hooks/charmhelpers/fetch/giturl.py (+7/-5) hooks/cmap.py (+85/-0) hooks/hooks.py (+317/-0) hooks/pcmk.py (+4/-0) Text conflict in hooks/charmhelpers/contrib/hahelpers/cluster.py Text conflict in hooks/charmhelpers/contrib/openstack/utils.py Text conflict in hooks/charmhelpers/contrib/python/packages.py Text conflict in hooks/charmhelpers/core/hookenv.py Conflict adding file hooks/charmhelpers/core/strutils.py. Moved existing file to hooks/charmhelpers/core/strutils.py.moved. Conflict adding file hooks/charmhelpers/core/unitdata.py. Moved existing file to hooks/charmhelpers/core/unitdata.py.moved. Text conflict in hooks/hooks.py |
To merge this branch: | bzr merge lp:~le-charmers/charms/trusty/hacluster/leadership-election |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
OpenStack Charmers | Pending | ||
Review via email: mp+255011@code.launchpad.net |
Commit message
Description of the change
To post a comment you must log in.
- 63. By James Page
-
Use trunk helpers
- 64. By James Page
-
Resync helpers
Unmerged revisions
- 64. By James Page
-
Resync helpers
- 63. By James Page
-
Use trunk helpers
- 62. By Edward Hope-Morley
-
synced l/e ch
- 61. By Edward Hope-Morley
-
synced /next and le ch
- 60. By Edward Hope-Morley
-
synced charm-helpers
- 59. By Billy Olsen
-
Add wait between stop/start because a restart wedges pacemaker
- 58. By Billy Olsen
-
Update quorum expected vote count for multicast.
- 57. By Billy Olsen
-
Fix check for two_node check
- 56. By Billy Olsen
-
Remove two_node and transport change checks for now.
- 55. By Billy Olsen
-
ch sync and lint cleanup
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py' |
2 | --- hooks/charmhelpers/contrib/hahelpers/cluster.py 2015-04-30 19:35:31 +0000 |
3 | +++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2015-06-09 10:12:24 +0000 |
4 | @@ -44,13 +44,22 @@ |
5 | ERROR, |
6 | WARNING, |
7 | unit_get, |
8 | + is_leader as juju_is_leader |
9 | ) |
10 | from charmhelpers.core.decorators import ( |
11 | retry_on_exception, |
12 | ) |
13 | -from charmhelpers.core.strutils import ( |
14 | - bool_from_string, |
15 | -) |
16 | +<<<<<<< TREE |
17 | +from charmhelpers.core.strutils import ( |
18 | + bool_from_string, |
19 | +) |
20 | +======= |
21 | +from charmhelpers.core.strutils import ( |
22 | + bool_from_string, |
23 | +) |
24 | + |
25 | +DC_RESOURCE_NAME = 'DC' |
26 | +>>>>>>> MERGE-SOURCE |
27 | |
28 | |
29 | class HAIncompleteConfig(Exception): |
30 | @@ -61,17 +70,30 @@ |
31 | pass |
32 | |
33 | |
34 | +class CRMDCNotFound(Exception): |
35 | + pass |
36 | + |
37 | + |
38 | def is_elected_leader(resource): |
39 | """ |
40 | Returns True if the charm executing this is the elected cluster leader. |
41 | |
42 | It relies on two mechanisms to determine leadership: |
43 | - 1. If the charm is part of a corosync cluster, call corosync to |
44 | + 1. If juju is sufficiently new and leadership election is supported, |
45 | + the is_leader command will be used. |
46 | + 2. If the charm is part of a corosync cluster, call corosync to |
47 | determine leadership. |
48 | - 2. If the charm is not part of a corosync cluster, the leader is |
49 | + 3. If the charm is not part of a corosync cluster, the leader is |
50 | determined as being "the alive unit with the lowest unit number". In |
51 | other words, the oldest surviving unit. |
52 | """ |
53 | + try: |
54 | + return juju_is_leader() |
55 | + except NotImplementedError: |
56 | + log('Juju leadership election feature not enabled' |
57 | + ', using fallback support', |
58 | + level=WARNING) |
59 | + |
60 | if is_clustered(): |
61 | if not is_crm_leader(resource): |
62 | log('Deferring action to CRM leader.', level=INFO) |
63 | @@ -95,7 +117,33 @@ |
64 | return False |
65 | |
66 | |
67 | -@retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound) |
68 | +def is_crm_dc(): |
69 | + """ |
70 | + Determine leadership by querying the pacemaker Designated Controller |
71 | + """ |
72 | + cmd = ['crm', 'status'] |
73 | + try: |
74 | + status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) |
75 | + if not isinstance(status, six.text_type): |
76 | + status = six.text_type(status, "utf-8") |
77 | + except subprocess.CalledProcessError as ex: |
78 | + raise CRMDCNotFound(str(ex)) |
79 | + |
80 | + current_dc = '' |
81 | + for line in status.split('\n'): |
82 | + if line.startswith('Current DC'): |
83 | + # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum |
84 | + current_dc = line.split(':')[1].split()[0] |
85 | + if current_dc == get_unit_hostname(): |
86 | + return True |
87 | + elif current_dc == 'NONE': |
88 | + raise CRMDCNotFound('Current DC: NONE') |
89 | + |
90 | + return False |
91 | + |
92 | + |
93 | +@retry_on_exception(5, base_delay=2, |
94 | + exc_type=(CRMResourceNotFound, CRMDCNotFound)) |
95 | def is_crm_leader(resource, retry=False): |
96 | """ |
97 | Returns True if the charm calling this is the elected corosync leader, |
98 | @@ -104,6 +152,8 @@ |
99 | We allow this operation to be retried to avoid the possibility of getting a |
100 | false negative. See LP #1396246 for more info. |
101 | """ |
102 | + if resource == DC_RESOURCE_NAME: |
103 | + return is_crm_dc() |
104 | cmd = ['crm', 'resource', 'show', resource] |
105 | try: |
106 | status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) |
107 | |
108 | === modified file 'hooks/charmhelpers/contrib/openstack/utils.py' |
109 | --- hooks/charmhelpers/contrib/openstack/utils.py 2015-04-30 19:35:31 +0000 |
110 | +++ hooks/charmhelpers/contrib/openstack/utils.py 2015-06-09 10:12:24 +0000 |
111 | @@ -53,9 +53,13 @@ |
112 | get_ipv6_addr |
113 | ) |
114 | |
115 | +from charmhelpers.contrib.python.packages import ( |
116 | + pip_create_virtualenv, |
117 | + pip_install, |
118 | +) |
119 | + |
120 | from charmhelpers.core.host import lsb_release, mounts, umount |
121 | from charmhelpers.fetch import apt_install, apt_cache, install_remote |
122 | -from charmhelpers.contrib.python.packages import pip_install |
123 | from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk |
124 | from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device |
125 | |
126 | @@ -497,6 +501,7 @@ |
127 | requirements_dir = None |
128 | |
129 | |
130 | +<<<<<<< TREE |
131 | def git_clone_and_install(projects_yaml, core_project): |
132 | """ |
133 | Clone/install all specified OpenStack repositories. |
134 | @@ -584,10 +589,118 @@ |
135 | juju_log('Directory already exists at {}. ' |
136 | 'No need to create directory.'.format(parent_dir)) |
137 | os.mkdir(parent_dir) |
138 | +======= |
139 | +def _git_yaml_load(projects_yaml): |
140 | + """ |
141 | + Load the specified yaml into a dictionary. |
142 | + """ |
143 | + if not projects_yaml: |
144 | + return None |
145 | + |
146 | + return yaml.load(projects_yaml) |
147 | + |
148 | + |
149 | +def git_clone_and_install(projects_yaml, core_project, depth=1): |
150 | + """ |
151 | + Clone/install all specified OpenStack repositories. |
152 | + |
153 | + The expected format of projects_yaml is: |
154 | + repositories: |
155 | + - {name: keystone, |
156 | + repository: 'git://git.openstack.org/openstack/keystone.git', |
157 | + branch: 'stable/icehouse'} |
158 | + - {name: requirements, |
159 | + repository: 'git://git.openstack.org/openstack/requirements.git', |
160 | + branch: 'stable/icehouse'} |
161 | + directory: /mnt/openstack-git |
162 | + http_proxy: squid-proxy-url |
163 | + https_proxy: squid-proxy-url |
164 | + |
165 | + The directory, http_proxy, and https_proxy keys are optional. |
166 | + """ |
167 | + global requirements_dir |
168 | + parent_dir = '/mnt/openstack-git' |
169 | + http_proxy = None |
170 | + |
171 | + projects = _git_yaml_load(projects_yaml) |
172 | + _git_validate_projects_yaml(projects, core_project) |
173 | + |
174 | + old_environ = dict(os.environ) |
175 | + |
176 | + if 'http_proxy' in projects.keys(): |
177 | + http_proxy = projects['http_proxy'] |
178 | + os.environ['http_proxy'] = projects['http_proxy'] |
179 | + if 'https_proxy' in projects.keys(): |
180 | + os.environ['https_proxy'] = projects['https_proxy'] |
181 | + |
182 | + if 'directory' in projects.keys(): |
183 | + parent_dir = projects['directory'] |
184 | + |
185 | + pip_create_virtualenv(os.path.join(parent_dir, 'venv')) |
186 | + |
187 | + for p in projects['repositories']: |
188 | + repo = p['repository'] |
189 | + branch = p['branch'] |
190 | + if p['name'] == 'requirements': |
191 | + repo_dir = _git_clone_and_install_single(repo, branch, depth, |
192 | + parent_dir, http_proxy, |
193 | + update_requirements=False) |
194 | + requirements_dir = repo_dir |
195 | + else: |
196 | + repo_dir = _git_clone_and_install_single(repo, branch, depth, |
197 | + parent_dir, http_proxy, |
198 | + update_requirements=True) |
199 | + |
200 | + os.environ = old_environ |
201 | + |
202 | + |
203 | +def _git_validate_projects_yaml(projects, core_project): |
204 | + """ |
205 | + Validate the projects yaml. |
206 | + """ |
207 | + _git_ensure_key_exists('repositories', projects) |
208 | + |
209 | + for project in projects['repositories']: |
210 | + _git_ensure_key_exists('name', project.keys()) |
211 | + _git_ensure_key_exists('repository', project.keys()) |
212 | + _git_ensure_key_exists('branch', project.keys()) |
213 | + |
214 | + if projects['repositories'][0]['name'] != 'requirements': |
215 | + error_out('{} git repo must be specified first'.format('requirements')) |
216 | + |
217 | + if projects['repositories'][-1]['name'] != core_project: |
218 | + error_out('{} git repo must be specified last'.format(core_project)) |
219 | + |
220 | + |
221 | +def _git_ensure_key_exists(key, keys): |
222 | + """ |
223 | + Ensure that key exists in keys. |
224 | + """ |
225 | + if key not in keys: |
226 | + error_out('openstack-origin-git key \'{}\' is missing'.format(key)) |
227 | + |
228 | + |
229 | +def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, |
230 | + update_requirements): |
231 | + """ |
232 | + Clone and install a single git repository. |
233 | + """ |
234 | + dest_dir = os.path.join(parent_dir, os.path.basename(repo)) |
235 | + |
236 | + if not os.path.exists(parent_dir): |
237 | + juju_log('Directory already exists at {}. ' |
238 | + 'No need to create directory.'.format(parent_dir)) |
239 | + os.mkdir(parent_dir) |
240 | +>>>>>>> MERGE-SOURCE |
241 | |
242 | if not os.path.exists(dest_dir): |
243 | juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) |
244 | +<<<<<<< TREE |
245 | repo_dir = install_remote(repo, dest=parent_dir, branch=branch) |
246 | +======= |
247 | + repo_dir = install_remote(repo, dest=parent_dir, branch=branch, |
248 | + depth=depth) |
249 | +>>>>>>> MERGE-SOURCE |
250 | else: |
251 | repo_dir = dest_dir |
252 | |
253 | @@ -598,7 +711,12 @@ |
254 | _git_update_requirements(repo_dir, requirements_dir) |
255 | |
256 | juju_log('Installing git repo from dir: {}'.format(repo_dir)) |
257 | - pip_install(repo_dir) |
258 | + if http_proxy: |
259 | + pip_install(repo_dir, proxy=http_proxy, |
260 | + venv=os.path.join(parent_dir, 'venv')) |
261 | + else: |
262 | + pip_install(repo_dir, |
263 | + venv=os.path.join(parent_dir, 'venv')) |
264 | |
265 | return repo_dir |
266 | |
267 | @@ -619,6 +737,7 @@ |
268 | package = os.path.basename(package_dir) |
269 | error_out("Error updating {} from global-requirements.txt".format(package)) |
270 | os.chdir(orig_dir) |
271 | +<<<<<<< TREE |
272 | |
273 | |
274 | def git_src_dir(projects_yaml, project): |
275 | @@ -640,3 +759,49 @@ |
276 | return os.path.join(parent_dir, os.path.basename(p['repository'])) |
277 | |
278 | return None |
279 | +======= |
280 | + |
281 | + |
282 | +def git_pip_venv_dir(projects_yaml): |
283 | + """ |
284 | + Return the pip virtualenv path. |
285 | + """ |
286 | + parent_dir = '/mnt/openstack-git' |
287 | + |
288 | + projects = _git_yaml_load(projects_yaml) |
289 | + |
290 | + if 'directory' in projects.keys(): |
291 | + parent_dir = projects['directory'] |
292 | + |
293 | + return os.path.join(parent_dir, 'venv') |
294 | + |
295 | + |
296 | +def git_src_dir(projects_yaml, project): |
297 | + """ |
298 | + Return the directory where the specified project's source is located. |
299 | + """ |
300 | + parent_dir = '/mnt/openstack-git' |
301 | + |
302 | + projects = _git_yaml_load(projects_yaml) |
303 | + |
304 | + if 'directory' in projects.keys(): |
305 | + parent_dir = projects['directory'] |
306 | + |
307 | + for p in projects['repositories']: |
308 | + if p['name'] == project: |
309 | + return os.path.join(parent_dir, os.path.basename(p['repository'])) |
310 | + |
311 | + return None |
312 | + |
313 | + |
314 | +def git_yaml_value(projects_yaml, key): |
315 | + """ |
316 | + Return the value in projects_yaml for the specified key. |
317 | + """ |
318 | + projects = _git_yaml_load(projects_yaml) |
319 | + |
320 | + if key in projects.keys(): |
321 | + return projects[key] |
322 | + |
323 | + return None |
324 | +>>>>>>> MERGE-SOURCE |
325 | |
326 | === modified file 'hooks/charmhelpers/contrib/python/packages.py' |
327 | --- hooks/charmhelpers/contrib/python/packages.py 2015-04-30 19:35:31 +0000 |
328 | +++ hooks/charmhelpers/contrib/python/packages.py 2015-06-09 10:12:24 +0000 |
329 | @@ -17,8 +17,14 @@ |
330 | # You should have received a copy of the GNU Lesser General Public License |
331 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
332 | |
333 | +<<<<<<< TREE |
334 | +======= |
335 | +import os |
336 | +import subprocess |
337 | + |
338 | +>>>>>>> MERGE-SOURCE |
339 | from charmhelpers.fetch import apt_install, apt_update |
340 | -from charmhelpers.core.hookenv import log |
341 | +from charmhelpers.core.hookenv import charm_dir, log |
342 | |
343 | try: |
344 | from pip import main as pip_execute |
345 | @@ -51,11 +57,15 @@ |
346 | pip_execute(command) |
347 | |
348 | |
349 | -def pip_install(package, fatal=False, upgrade=False, **options): |
350 | +def pip_install(package, fatal=False, upgrade=False, venv=None, **options): |
351 | """Install a python package""" |
352 | - command = ["install"] |
353 | + if venv: |
354 | + venv_python = os.path.join(venv, 'bin/pip') |
355 | + command = [venv_python, "install"] |
356 | + else: |
357 | + command = ["install"] |
358 | |
359 | - available_options = ('proxy', 'src', 'log', "index-url", ) |
360 | + available_options = ('proxy', 'src', 'log', 'index-url', ) |
361 | for option in parse_options(options, available_options): |
362 | command.append(option) |
363 | |
364 | @@ -69,7 +79,10 @@ |
365 | |
366 | log("Installing {} package with options: {}".format(package, |
367 | command)) |
368 | - pip_execute(command) |
369 | + if venv: |
370 | + subprocess.check_call(command) |
371 | + else: |
372 | + pip_execute(command) |
373 | |
374 | |
375 | def pip_uninstall(package, **options): |
376 | @@ -94,3 +107,16 @@ |
377 | """Returns the list of current python installed packages |
378 | """ |
379 | return pip_execute(["list"]) |
380 | + |
381 | + |
382 | +def pip_create_virtualenv(path=None): |
383 | + """Create an isolated Python environment.""" |
384 | + apt_install('python-virtualenv') |
385 | + |
386 | + if path: |
387 | + venv_path = path |
388 | + else: |
389 | + venv_path = os.path.join(charm_dir(), 'venv') |
390 | + |
391 | + if not os.path.exists(venv_path): |
392 | + subprocess.check_call(['virtualenv', venv_path]) |
393 | |
394 | === modified file 'hooks/charmhelpers/core/hookenv.py' |
395 | --- hooks/charmhelpers/core/hookenv.py 2015-04-30 19:35:31 +0000 |
396 | +++ hooks/charmhelpers/core/hookenv.py 2015-06-09 10:12:24 +0000 |
397 | @@ -20,13 +20,23 @@ |
398 | # Authors: |
399 | # Charm Helpers Developers <juju@lists.ubuntu.com> |
400 | |
401 | -from __future__ import print_function |
402 | +<<<<<<< TREE |
403 | +from __future__ import print_function |
404 | +======= |
405 | +from __future__ import print_function |
406 | +from functools import wraps |
407 | +>>>>>>> MERGE-SOURCE |
408 | import os |
409 | import json |
410 | import yaml |
411 | import subprocess |
412 | import sys |
413 | -import errno |
414 | +<<<<<<< TREE |
415 | +import errno |
416 | +======= |
417 | +import errno |
418 | +import tempfile |
419 | +>>>>>>> MERGE-SOURCE |
420 | from subprocess import CalledProcessError |
421 | |
422 | import six |
423 | @@ -58,15 +68,17 @@ |
424 | |
425 | will cache the result of unit_get + 'test' for future calls. |
426 | """ |
427 | + @wraps(func) |
428 | def wrapper(*args, **kwargs): |
429 | global cache |
430 | key = str((func, args, kwargs)) |
431 | try: |
432 | return cache[key] |
433 | except KeyError: |
434 | - res = func(*args, **kwargs) |
435 | - cache[key] = res |
436 | - return res |
437 | + pass # Drop out of the exception handler scope. |
438 | + res = func(*args, **kwargs) |
439 | + cache[key] = res |
440 | + return res |
441 | return wrapper |
442 | |
443 | |
444 | @@ -178,7 +190,7 @@ |
445 | |
446 | def remote_unit(): |
447 | """The remote unit for the current relation hook""" |
448 | - return os.environ['JUJU_REMOTE_UNIT'] |
449 | + return os.environ.get('JUJU_REMOTE_UNIT', None) |
450 | |
451 | |
452 | def service_name(): |
453 | @@ -250,6 +262,12 @@ |
454 | except KeyError: |
455 | return (self._prev_dict or {})[key] |
456 | |
457 | + def get(self, key, default=None): |
458 | + try: |
459 | + return self[key] |
460 | + except KeyError: |
461 | + return default |
462 | + |
463 | def keys(self): |
464 | prev_keys = [] |
465 | if self._prev_dict is not None: |
466 | @@ -353,18 +371,49 @@ |
467 | """Set relation information for the current unit""" |
468 | relation_settings = relation_settings if relation_settings else {} |
469 | relation_cmd_line = ['relation-set'] |
470 | + accepts_file = "--file" in subprocess.check_output( |
471 | + relation_cmd_line + ["--help"], universal_newlines=True) |
472 | if relation_id is not None: |
473 | relation_cmd_line.extend(('-r', relation_id)) |
474 | - for k, v in (list(relation_settings.items()) + list(kwargs.items())): |
475 | - if v is None: |
476 | - relation_cmd_line.append('{}='.format(k)) |
477 | - else: |
478 | - relation_cmd_line.append('{}={}'.format(k, v)) |
479 | - subprocess.check_call(relation_cmd_line) |
480 | + settings = relation_settings.copy() |
481 | + settings.update(kwargs) |
482 | + for key, value in settings.items(): |
483 | + # Force value to be a string: it always should, but some call |
484 | + # sites pass in things like dicts or numbers. |
485 | + if value is not None: |
486 | + settings[key] = "{}".format(value) |
487 | + if accepts_file: |
488 | + # --file was introduced in Juju 1.23.2. Use it by default if |
489 | + # available, since otherwise we'll break if the relation data is |
490 | + # too big. Ideally we should tell relation-set to read the data from |
491 | + # stdin, but that feature is broken in 1.23.2: Bug #1454678. |
492 | + with tempfile.NamedTemporaryFile(delete=False) as settings_file: |
493 | + settings_file.write(yaml.safe_dump(settings).encode("utf-8")) |
494 | + subprocess.check_call( |
495 | + relation_cmd_line + ["--file", settings_file.name]) |
496 | + os.remove(settings_file.name) |
497 | + else: |
498 | + for key, value in settings.items(): |
499 | + if value is None: |
500 | + relation_cmd_line.append('{}='.format(key)) |
501 | + else: |
502 | + relation_cmd_line.append('{}={}'.format(key, value)) |
503 | + subprocess.check_call(relation_cmd_line) |
504 | # Flush cache of any relation-gets for local unit |
505 | flush(local_unit()) |
506 | |
507 | |
508 | +def relation_clear(r_id=None): |
509 | + ''' Clears any relation data already set on relation r_id ''' |
510 | + settings = relation_get(rid=r_id, |
511 | + unit=local_unit()) |
512 | + for setting in settings: |
513 | + if setting not in ['public-address', 'private-address']: |
514 | + settings[setting] = None |
515 | + relation_set(relation_id=r_id, |
516 | + **settings) |
517 | + |
518 | + |
519 | @cached |
520 | def relation_ids(reltype=None): |
521 | """A list of relation_ids""" |
522 | @@ -509,6 +558,11 @@ |
523 | return None |
524 | |
525 | |
526 | +def unit_public_ip(): |
527 | + """Get this unit's public IP address""" |
528 | + return unit_get('public-address') |
529 | + |
530 | + |
531 | def unit_private_ip(): |
532 | """Get this unit's private IP address""" |
533 | return unit_get('private-address') |
534 | @@ -579,29 +633,149 @@ |
535 | def charm_dir(): |
536 | """Return the root directory of the current charm""" |
537 | return os.environ.get('CHARM_DIR') |
538 | - |
539 | - |
540 | -@cached |
541 | -def action_get(key=None): |
542 | - """Gets the value of an action parameter, or all key/value param pairs""" |
543 | - cmd = ['action-get'] |
544 | - if key is not None: |
545 | - cmd.append(key) |
546 | - cmd.append('--format=json') |
547 | - action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8')) |
548 | - return action_data |
549 | - |
550 | - |
551 | -def action_set(values): |
552 | - """Sets the values to be returned after the action finishes""" |
553 | - cmd = ['action-set'] |
554 | - for k, v in list(values.items()): |
555 | - cmd.append('{}={}'.format(k, v)) |
556 | - subprocess.check_call(cmd) |
557 | - |
558 | - |
559 | -def action_fail(message): |
560 | - """Sets the action status to failed and sets the error message. |
561 | - |
562 | - The results set by action_set are preserved.""" |
563 | - subprocess.check_call(['action-fail', message]) |
564 | +<<<<<<< TREE |
565 | + |
566 | + |
567 | +@cached |
568 | +def action_get(key=None): |
569 | + """Gets the value of an action parameter, or all key/value param pairs""" |
570 | + cmd = ['action-get'] |
571 | + if key is not None: |
572 | + cmd.append(key) |
573 | + cmd.append('--format=json') |
574 | + action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8')) |
575 | + return action_data |
576 | + |
577 | + |
578 | +def action_set(values): |
579 | + """Sets the values to be returned after the action finishes""" |
580 | + cmd = ['action-set'] |
581 | + for k, v in list(values.items()): |
582 | + cmd.append('{}={}'.format(k, v)) |
583 | + subprocess.check_call(cmd) |
584 | + |
585 | + |
586 | +def action_fail(message): |
587 | + """Sets the action status to failed and sets the error message. |
588 | + |
589 | + The results set by action_set are preserved.""" |
590 | + subprocess.check_call(['action-fail', message]) |
591 | +======= |
592 | + |
593 | + |
594 | +@cached |
595 | +def action_get(key=None): |
596 | + """Gets the value of an action parameter, or all key/value param pairs""" |
597 | + cmd = ['action-get'] |
598 | + if key is not None: |
599 | + cmd.append(key) |
600 | + cmd.append('--format=json') |
601 | + action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8')) |
602 | + return action_data |
603 | + |
604 | + |
605 | +def action_set(values): |
606 | + """Sets the values to be returned after the action finishes""" |
607 | + cmd = ['action-set'] |
608 | + for k, v in list(values.items()): |
609 | + cmd.append('{}={}'.format(k, v)) |
610 | + subprocess.check_call(cmd) |
611 | + |
612 | + |
613 | +def action_fail(message): |
614 | + """Sets the action status to failed and sets the error message. |
615 | + |
616 | + The results set by action_set are preserved.""" |
617 | + subprocess.check_call(['action-fail', message]) |
618 | + |
619 | + |
620 | +def status_set(workload_state, message): |
621 | + """Set the workload state with a message |
622 | + |
623 | + Use status-set to set the workload state with a message which is visible |
624 | + to the user via juju status. If the status-set command is not found then |
625 | + assume this is juju < 1.23 and juju-log the message instead. |
626 | + |
627 | + workload_state -- valid juju workload state. |
628 | + message -- status update message |
629 | + """ |
630 | + valid_states = ['maintenance', 'blocked', 'waiting', 'active'] |
631 | + if workload_state not in valid_states: |
632 | + raise ValueError( |
633 | + '{!r} is not a valid workload state'.format(workload_state) |
634 | + ) |
635 | + cmd = ['status-set', workload_state, message] |
636 | + try: |
637 | + ret = subprocess.call(cmd) |
638 | + if ret == 0: |
639 | + return |
640 | + except OSError as e: |
641 | + if e.errno != errno.ENOENT: |
642 | + raise |
643 | + log_message = 'status-set failed: {} {}'.format(workload_state, |
644 | + message) |
645 | + log(log_message, level='INFO') |
646 | + |
647 | + |
648 | +def status_get(): |
649 | + """Retrieve the previously set juju workload state |
650 | + |
651 | + If the status-set command is not found then assume this is juju < 1.23 and |
652 | + return 'unknown' |
653 | + """ |
654 | + cmd = ['status-get'] |
655 | + try: |
656 | + raw_status = subprocess.check_output(cmd, universal_newlines=True) |
657 | + status = raw_status.rstrip() |
658 | + return status |
659 | + except OSError as e: |
660 | + if e.errno == errno.ENOENT: |
661 | + return 'unknown' |
662 | + else: |
663 | + raise |
664 | + |
665 | + |
666 | +def translate_exc(from_exc, to_exc): |
667 | + def inner_translate_exc1(f): |
668 | + def inner_translate_exc2(*args, **kwargs): |
669 | + try: |
670 | + return f(*args, **kwargs) |
671 | + except from_exc: |
672 | + raise to_exc |
673 | + |
674 | + return inner_translate_exc2 |
675 | + |
676 | + return inner_translate_exc1 |
677 | + |
678 | + |
679 | +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) |
680 | +def is_leader(): |
681 | + """Does the current unit hold the juju leadership |
682 | + |
683 | + Uses juju to determine whether the current unit is the leader of its peers |
684 | + """ |
685 | + cmd = ['is-leader', '--format=json'] |
686 | + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) |
687 | + |
688 | + |
689 | +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) |
690 | +def leader_get(attribute=None): |
691 | + """Juju leader get value(s)""" |
692 | + cmd = ['leader-get', '--format=json'] + [attribute or '-'] |
693 | + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) |
694 | + |
695 | + |
696 | +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) |
697 | +def leader_set(settings=None, **kwargs): |
698 | + """Juju leader set value(s)""" |
699 | + log("Juju leader-set '%s'" % (settings), level=DEBUG) |
700 | + cmd = ['leader-set'] |
701 | + settings = settings or {} |
702 | + settings.update(kwargs) |
703 | + for k, v in settings.iteritems(): |
704 | + if v is None: |
705 | + cmd.append('{}='.format(k)) |
706 | + else: |
707 | + cmd.append('{}={}'.format(k, v)) |
708 | + subprocess.check_call(cmd) |
709 | +>>>>>>> MERGE-SOURCE |
710 | |
711 | === modified file 'hooks/charmhelpers/core/host.py' |
712 | --- hooks/charmhelpers/core/host.py 2015-04-30 19:35:31 +0000 |
713 | +++ hooks/charmhelpers/core/host.py 2015-06-09 10:12:24 +0000 |
714 | @@ -90,7 +90,7 @@ |
715 | ['service', service_name, 'status'], |
716 | stderr=subprocess.STDOUT).decode('UTF-8') |
717 | except subprocess.CalledProcessError as e: |
718 | - return 'unrecognized service' not in e.output |
719 | + return b'unrecognized service' not in e.output |
720 | else: |
721 | return True |
722 | |
723 | |
724 | === modified file 'hooks/charmhelpers/core/services/base.py' |
725 | --- hooks/charmhelpers/core/services/base.py 2015-01-26 10:44:46 +0000 |
726 | +++ hooks/charmhelpers/core/services/base.py 2015-06-09 10:12:24 +0000 |
727 | @@ -15,9 +15,9 @@ |
728 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
729 | |
730 | import os |
731 | -import re |
732 | import json |
733 | -from collections import Iterable |
734 | +from inspect import getargspec |
735 | +from collections import Iterable, OrderedDict |
736 | |
737 | from charmhelpers.core import host |
738 | from charmhelpers.core import hookenv |
739 | @@ -119,7 +119,7 @@ |
740 | """ |
741 | self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') |
742 | self._ready = None |
743 | - self.services = {} |
744 | + self.services = OrderedDict() |
745 | for service in services or []: |
746 | service_name = service['service'] |
747 | self.services[service_name] = service |
748 | @@ -132,8 +132,8 @@ |
749 | if hook_name == 'stop': |
750 | self.stop_services() |
751 | else: |
752 | + self.reconfigure_services() |
753 | self.provide_data() |
754 | - self.reconfigure_services() |
755 | cfg = hookenv.config() |
756 | if cfg.implicit_save: |
757 | cfg.save() |
758 | @@ -145,15 +145,36 @@ |
759 | A provider must have a `name` attribute, which indicates which relation |
760 | to set data on, and a `provide_data()` method, which returns a dict of |
761 | data to set. |
762 | + |
763 | + The `provide_data()` method can optionally accept two parameters: |
764 | + |
765 | + * ``remote_service`` The name of the remote service that the data will |
766 | + be provided to. The `provide_data()` method will be called once |
767 | + for each connected service (not unit). This allows the method to |
768 | + tailor its data to the given service. |
769 | + * ``service_ready`` Whether or not the service definition had all of |
770 | + its requirements met, and thus the ``data_ready`` callbacks run. |
771 | + |
772 | + Note that the ``provided_data`` methods are now called **after** the |
773 | + ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks |
774 | + a chance to generate any data necessary for the providing to the remote |
775 | + services. |
776 | """ |
777 | - hook_name = hookenv.hook_name() |
778 | - for service in self.services.values(): |
779 | + for service_name, service in self.services.items(): |
780 | + service_ready = self.is_ready(service_name) |
781 | for provider in service.get('provided_data', []): |
782 | - if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name): |
783 | - data = provider.provide_data() |
784 | - _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data |
785 | - if _ready: |
786 | - hookenv.relation_set(None, data) |
787 | + for relid in hookenv.relation_ids(provider.name): |
788 | + units = hookenv.related_units(relid) |
789 | + if not units: |
790 | + continue |
791 | + remote_service = units[0].split('/')[0] |
792 | + argspec = getargspec(provider.provide_data) |
793 | + if len(argspec.args) > 1: |
794 | + data = provider.provide_data(remote_service, service_ready) |
795 | + else: |
796 | + data = provider.provide_data() |
797 | + if data: |
798 | + hookenv.relation_set(relid, data) |
799 | |
800 | def reconfigure_services(self, *service_names): |
801 | """ |
802 | |
803 | === added file 'hooks/charmhelpers/core/strutils.py' |
804 | --- hooks/charmhelpers/core/strutils.py 1970-01-01 00:00:00 +0000 |
805 | +++ hooks/charmhelpers/core/strutils.py 2015-06-09 10:12:24 +0000 |
806 | @@ -0,0 +1,42 @@ |
807 | +#!/usr/bin/env python |
808 | +# -*- coding: utf-8 -*- |
809 | + |
810 | +# Copyright 2014-2015 Canonical Limited. |
811 | +# |
812 | +# This file is part of charm-helpers. |
813 | +# |
814 | +# charm-helpers is free software: you can redistribute it and/or modify |
815 | +# it under the terms of the GNU Lesser General Public License version 3 as |
816 | +# published by the Free Software Foundation. |
817 | +# |
818 | +# charm-helpers is distributed in the hope that it will be useful, |
819 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
820 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
821 | +# GNU Lesser General Public License for more details. |
822 | +# |
823 | +# You should have received a copy of the GNU Lesser General Public License |
824 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
825 | + |
826 | +import six |
827 | + |
828 | + |
def bool_from_string(value):
    """Interpret string value as boolean.

    Returns True if value translates to True otherwise False.

    :raises ValueError: when value is not a string, or is a string that
        does not map onto a known truthy/falsy token.
    """
    if not isinstance(value, six.string_types):
        msg = "Unable to interpret non-string value '%s' as boolean" % (value)
        raise ValueError(msg)

    # Normalise before comparing: unicode, no surrounding space, lowercase.
    normalised = six.text_type(value).strip().lower()

    truthy = ('y', 'yes', 'true', 't', 'on')
    falsy = ('n', 'no', 'false', 'f', 'off')
    if normalised in truthy:
        return True
    if normalised in falsy:
        return False

    msg = "Unable to interpret string value '%s' as boolean" % (normalised)
    raise ValueError(msg)
849 | |
850 | === renamed file 'hooks/charmhelpers/core/strutils.py' => 'hooks/charmhelpers/core/strutils.py.moved' |
851 | === added file 'hooks/charmhelpers/core/unitdata.py' |
852 | --- hooks/charmhelpers/core/unitdata.py 1970-01-01 00:00:00 +0000 |
853 | +++ hooks/charmhelpers/core/unitdata.py 2015-06-09 10:12:24 +0000 |
854 | @@ -0,0 +1,477 @@ |
855 | +#!/usr/bin/env python |
856 | +# -*- coding: utf-8 -*- |
857 | +# |
858 | +# Copyright 2014-2015 Canonical Limited. |
859 | +# |
860 | +# This file is part of charm-helpers. |
861 | +# |
862 | +# charm-helpers is free software: you can redistribute it and/or modify |
863 | +# it under the terms of the GNU Lesser General Public License version 3 as |
864 | +# published by the Free Software Foundation. |
865 | +# |
866 | +# charm-helpers is distributed in the hope that it will be useful, |
867 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
868 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
869 | +# GNU Lesser General Public License for more details. |
870 | +# |
871 | +# You should have received a copy of the GNU Lesser General Public License |
872 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
873 | +# |
874 | +# |
875 | +# Authors: |
876 | +# Kapil Thangavelu <kapil.foss@gmail.com> |
877 | +# |
878 | +""" |
879 | +Intro |
880 | +----- |
881 | + |
882 | +A simple way to store state in units. This provides a key value |
883 | +storage with support for versioned, transactional operation, |
884 | +and can calculate deltas from previous values to simplify unit logic |
885 | +when processing changes. |
886 | + |
887 | + |
888 | +Hook Integration |
889 | +---------------- |
890 | + |
891 | +There are several extant frameworks for hook execution, including |
892 | + |
893 | + - charmhelpers.core.hookenv.Hooks |
894 | + - charmhelpers.core.services.ServiceManager |
895 | + |
896 | +The storage classes are framework agnostic, one simple integration is |
897 | +via the HookData contextmanager. It will record the current hook |
898 | +execution environment (including relation data, config data, etc.), |
899 | +setup a transaction and allow easy access to the changes from |
900 | +previously seen values. One consequence of the integration is the |
901 | +reservation of particular keys ('rels', 'unit', 'env', 'config', |
902 | +'charm_revisions') for their respective values. |
903 | + |
904 | +Here's a fully worked integration example using hookenv.Hooks:: |
905 | + |
 906 | + from charmhelpers.core import hookenv, unitdata |
907 | + |
908 | + hook_data = unitdata.HookData() |
909 | + db = unitdata.kv() |
910 | + hooks = hookenv.Hooks() |
911 | + |
912 | + @hooks.hook |
913 | + def config_changed(): |
914 | + # Print all changes to configuration from previously seen |
915 | + # values. |
916 | + for changed, (prev, cur) in hook_data.conf.items(): |
917 | + print('config changed', changed, |
918 | + 'previous value', prev, |
919 | + 'current value', cur) |
920 | + |
 921 | + # Get some unit specific bookkeeping |
922 | + if not db.get('pkg_key'): |
923 | + key = urllib.urlopen('https://example.com/pkg_key').read() |
924 | + db.set('pkg_key', key) |
925 | + |
926 | + # Directly access all charm config as a mapping. |
927 | + conf = db.getrange('config', True) |
928 | + |
929 | + # Directly access all relation data as a mapping |
930 | + rels = db.getrange('rels', True) |
931 | + |
932 | + if __name__ == '__main__': |
933 | + with hook_data(): |
 934 | + hooks.execute() |
935 | + |
936 | + |
937 | +A more basic integration is via the hook_scope context manager which simply |
938 | +manages transaction scope (and records hook name, and timestamp):: |
939 | + |
940 | + >>> from unitdata import kv |
941 | + >>> db = kv() |
942 | + >>> with db.hook_scope('install'): |
943 | + ... # do work, in transactional scope. |
944 | + ... db.set('x', 1) |
945 | + >>> db.get('x') |
946 | + 1 |
947 | + |
948 | + |
949 | +Usage |
950 | +----- |
951 | + |
952 | +Values are automatically json de/serialized to preserve basic typing |
953 | +and complex data struct capabilities (dicts, lists, ints, booleans, etc). |
954 | + |
955 | +Individual values can be manipulated via get/set:: |
956 | + |
957 | + >>> kv.set('y', True) |
958 | + >>> kv.get('y') |
959 | + True |
960 | + |
961 | + # We can set complex values (dicts, lists) as a single key. |
 962 | + >>> kv.set('config', {'a': 1, 'b': True}) |
963 | + |
964 | + # Also supports returning dictionaries as a record which |
965 | + # provides attribute access. |
966 | + >>> config = kv.get('config', record=True) |
967 | + >>> config.b |
968 | + True |
969 | + |
970 | + |
971 | +Groups of keys can be manipulated with update/getrange:: |
972 | + |
973 | + >>> kv.update({'z': 1, 'y': 2}, prefix="gui.") |
974 | + >>> kv.getrange('gui.', strip=True) |
975 | + {'z': 1, 'y': 2} |
976 | + |
977 | +When updating values, its very helpful to understand which values |
978 | +have actually changed and how have they changed. The storage |
979 | +provides a delta method to provide for this:: |
980 | + |
981 | + >>> data = {'debug': True, 'option': 2} |
982 | + >>> delta = kv.delta(data, 'config.') |
983 | + >>> delta.debug.previous |
984 | + None |
985 | + >>> delta.debug.current |
986 | + True |
987 | + >>> delta |
988 | + {'debug': (None, True), 'option': (None, 2)} |
989 | + |
990 | +Note the delta method does not persist the actual change, it needs to |
991 | +be explicitly saved via 'update' method:: |
992 | + |
993 | + >>> kv.update(data, 'config.') |
994 | + |
995 | +Values modified in the context of a hook scope retain historical values |
996 | +associated to the hookname. |
997 | + |
998 | + >>> with db.hook_scope('config-changed'): |
999 | + ... db.set('x', 42) |
1000 | + >>> db.gethistory('x') |
1001 | + [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'), |
1002 | + (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')] |
1003 | + |
1004 | +""" |
1005 | + |
1006 | +import collections |
1007 | +import contextlib |
1008 | +import datetime |
1009 | +import json |
1010 | +import os |
1011 | +import pprint |
1012 | +import sqlite3 |
1013 | +import sys |
1014 | + |
1015 | +__author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>' |
1016 | + |
1017 | + |
class Storage(object):
    """Simple key value database for local unit state within charms.

    Modifications are automatically committed at hook exit. That's
    currently regardless of exit code.

    To support dicts, lists, integer, floats, and booleans values
    are automatically json encoded/decoded.
    """
    def __init__(self, path=None):
        """Open (creating if needed) the sqlite db backing this store.

        :param path: database file location; defaults to
            $CHARM_DIR/.unit-state.db.
        """
        self.db_path = path
        if path is None:
            self.db_path = os.path.join(
                os.environ.get('CHARM_DIR', ''), '.unit-state.db')
        self.conn = sqlite3.connect('%s' % self.db_path)
        self.cursor = self.conn.cursor()
        # Id of the in-progress hook scope (see hook_scope()); None when
        # outside a scope, in which case no history rows are recorded.
        self.revision = None
        self._closed = False
        self._init()

    def close(self):
        """Roll back uncommitted changes and close the connection."""
        if self._closed:
            return
        self.flush(False)
        self.cursor.close()
        self.conn.close()
        self._closed = True

    def _scoped_query(self, stmt, params=None):
        # Extension point for scoping queries; currently a pass-through.
        if params is None:
            params = []
        return stmt, params

    def get(self, key, default=None, record=False):
        """Return the json-decoded value stored under ``key``.

        :param default: returned when the key is absent.
        :param record: when True, wrap a dict result in a Record so its
            keys are readable as attributes.
        """
        self.cursor.execute(
            *self._scoped_query(
                'select data from kv where key=?', [key]))
        result = self.cursor.fetchone()
        if not result:
            return default
        if record:
            return Record(json.loads(result[0]))
        return json.loads(result[0])

    def getrange(self, key_prefix, strip=False):
        """Return a dict of all keys beginning with ``key_prefix``.

        :param strip: when True, remove the prefix from returned keys.
        :returns: dict of key -> decoded value, or None when nothing matches.
        """
        # BUG FIX: previously the prefix was spliced into the SQL text with
        # %-interpolation, so a prefix containing a quote broke the
        # statement. Bind it as a parameter instead. NOTE: '%' and '_' in
        # the prefix still act as LIKE wildcards, as before.
        stmt = "select key, data from kv where key like ?"
        self.cursor.execute(*self._scoped_query(stmt, ['%s%%' % key_prefix]))
        result = self.cursor.fetchall()

        if not result:
            return None
        if not strip:
            key_prefix = ''
        return dict([
            (k[len(key_prefix):], json.loads(v)) for k, v in result])

    def update(self, mapping, prefix=""):
        """Set every key/value in ``mapping``, each key prefixed by ``prefix``."""
        for k, v in mapping.items():
            self.set("%s%s" % (prefix, k), v)

    def unset(self, key):
        """Delete ``key``, recording a 'DELETED' marker when in a hook scope."""
        self.cursor.execute('delete from kv where key=?', [key])
        if self.revision and self.cursor.rowcount:
            self.cursor.execute(
                'insert into kv_revisions values (?, ?, ?)',
                [key, self.revision, json.dumps('DELETED')])

    def set(self, key, value):
        """Store ``value`` (json-encoded) under ``key`` and return it.

        Writes a history row for the current hook scope, if any. Setting
        a key to its current value is a no-op.
        """
        serialized = json.dumps(value)

        self.cursor.execute(
            'select data from kv where key=?', [key])
        exists = self.cursor.fetchone()

        # Skip mutations to the same value
        if exists:
            if exists[0] == serialized:
                return value

        if not exists:
            self.cursor.execute(
                'insert into kv (key, data) values (?, ?)',
                (key, serialized))
        else:
            self.cursor.execute('''
            update kv
            set data = ?
            where key = ?''', [serialized, key])

        # Outside a hook scope there is no history to record.
        if not self.revision:
            return value

        self.cursor.execute(
            'select 1 from kv_revisions where key=? and revision=?',
            [key, self.revision])
        exists = self.cursor.fetchone()

        if not exists:
            self.cursor.execute(
                '''insert into kv_revisions (
                revision, key, data) values (?, ?, ?)''',
                (self.revision, key, serialized))
        else:
            self.cursor.execute(
                '''
                update kv_revisions
                set data = ?
                where key = ?
                and revision = ?''',
                [serialized, key, self.revision])

        return value

    def delta(self, mapping, prefix):
        """
        return a delta containing values that have changed.

        Compares ``mapping`` against the currently stored keys under
        ``prefix``; does not persist anything (callers use update() for that).
        """
        previous = self.getrange(prefix, strip=True)
        if not previous:
            pk = set()
        else:
            pk = set(previous.keys())
        ck = set(mapping.keys())
        delta = DeltaSet()

        # added
        for k in ck.difference(pk):
            delta[k] = Delta(None, mapping[k])

        # removed
        for k in pk.difference(ck):
            delta[k] = Delta(previous[k], None)

        # changed
        for k in pk.intersection(ck):
            c = mapping[k]
            p = previous[k]
            if c != p:
                delta[k] = Delta(p, c)

        return delta

    @contextlib.contextmanager
    def hook_scope(self, name=""):
        """Scope all future interactions to the current hook execution
        revision."""
        assert not self.revision
        self.cursor.execute(
            'insert into hooks (hook, date) values (?, ?)',
            (name or sys.argv[0],
             datetime.datetime.utcnow().isoformat()))
        self.revision = self.cursor.lastrowid
        try:
            yield self.revision
            self.revision = None
        except BaseException:
            # Catch BaseException explicitly (not a bare except) so that
            # even SystemExit/KeyboardInterrupt roll back partial state
            # before propagating.
            self.flush(False)
            self.revision = None
            raise
        else:
            self.flush()

    def flush(self, save=True):
        """Commit (save=True) or roll back (save=False) pending changes."""
        if save:
            self.conn.commit()
        elif self._closed:
            return
        else:
            self.conn.rollback()

    def _init(self):
        # Create the schema on first use; idempotent.
        self.cursor.execute('''
            create table if not exists kv (
               key text,
               data text,
               primary key (key)
               )''')
        self.cursor.execute('''
            create table if not exists kv_revisions (
               key text,
               revision integer,
               data text,
               primary key (key, revision)
               )''')
        self.cursor.execute('''
            create table if not exists hooks (
               version integer primary key autoincrement,
               hook text,
               date text
               )''')
        self.conn.commit()

    def gethistory(self, key, deserialize=False):
        """Return recorded (revision, key, data, hook, date) rows for ``key``.

        :param deserialize: when True, decode each row via _parse_history.
        """
        self.cursor.execute(
            '''
            select kv.revision, kv.key, kv.data, h.hook, h.date
            from kv_revisions kv,
                 hooks h
            where kv.key=?
             and kv.revision = h.version
            ''', [key])
        if deserialize is False:
            return self.cursor.fetchall()
        # Materialize: py3's map() is lazy, but callers expect a sequence
        # as on py2.
        return list(map(_parse_history, self.cursor.fetchall()))

    def debug(self, fh=sys.stderr):
        """Dump the kv and kv_revisions tables to ``fh`` for debugging."""
        self.cursor.execute('select * from kv')
        pprint.pprint(self.cursor.fetchall(), stream=fh)
        self.cursor.execute('select * from kv_revisions')
        pprint.pprint(self.cursor.fetchall(), stream=fh)
1229 | + |
1230 | + |
1231 | +def _parse_history(d): |
1232 | + return (d[0], d[1], json.loads(d[2]), d[3], |
1233 | + datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f")) |
1234 | + |
1235 | + |
class HookData(object):
    """Simple integration for existing hook exec frameworks.

    Records all unit information, and stores deltas for processing
    by the hook.

    Sample::

        from charmhelpers.core import hookenv, unitdata

        changes = unitdata.HookData()
        db = unitdata.kv()
        hooks = hookenv.Hooks()

        @hooks.hook
        def config_changed():
            # View all changes to configuration
            for changed, (prev, cur) in changes.conf.items():
                print('config changed', changed,
                      'previous value', prev,
                      'current value', cur)

            # Get some unit specific bookkeeping
            if not db.get('pkg_key'):
                key = urllib.urlopen('https://example.com/pkg_key').read()
                db.set('pkg_key', key)

        if __name__ == '__main__':
            with changes():
                hooks.execute()

    """
    def __init__(self):
        self.kv = kv()
        # Deltas for config/relation data; populated on entering the
        # context manager (see _record_hook).
        self.conf = None
        self.rels = None

    @contextlib.contextmanager
    def __call__(self):
        """Open a hook-scoped transaction and record the hook environment.

        Yields (kv, config_delta, relation_delta).
        """
        from charmhelpers.core import hookenv
        hook_name = hookenv.hook_name()

        with self.kv.hook_scope(hook_name):
            self._record_charm_version(hookenv.charm_dir())
            delta_config, delta_relation = self._record_hook(hookenv)
            yield self.kv, delta_config, delta_relation

    def _record_charm_version(self, charm_dir):
        """Append the charm revision to the stored 'charm_revisions' list."""
        # Record revisions.. charm revisions are meaningless
        # to charm authors as they don't control the revision.
        # so logic dependent on revision is not particularly
        # useful, however it is useful for debugging analysis.
        # Use a context manager so the file handle is closed promptly
        # instead of leaking until garbage collection.
        with open(os.path.join(charm_dir, 'revision')) as rev_file:
            charm_rev = rev_file.read().strip()
        charm_rev = charm_rev or '0'
        revs = self.kv.get('charm_revisions', [])
        if charm_rev not in revs:
            revs.append(charm_rev.strip() or '0')
            self.kv.set('charm_revisions', revs)

    def _record_hook(self, hookenv):
        """Store the hook execution environment; return (conf, rels) deltas."""
        data = hookenv.execution_environment()
        self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
        self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
        self.kv.set('env', dict(data['env']))
        self.kv.set('unit', data['unit'])
        self.kv.set('relid', data.get('relid'))
        return conf_delta, rels_delta
1304 | + |
1305 | + |
class Record(dict):
    """A dict whose keys can also be read as attributes."""

    __slots__ = ()

    def __getattr__(self, k):
        try:
            return self[k]
        except KeyError:
            raise AttributeError(k)
1314 | + |
1315 | + |
class DeltaSet(Record):
    # Mapping of key -> Delta as produced by Storage.delta(); inherits
    # attribute-style key access from Record.

    __slots__ = ()


# (previous, current) value pair describing how a single key changed.
Delta = collections.namedtuple('Delta', ['previous', 'current'])


# Module-level singleton shared by all callers of kv().
_KV = None


def kv():
    """Return the shared Storage instance, creating it on first use."""
    global _KV
    if _KV is None:
        _KV = Storage()
    return _KV
1332 | |
1333 | === renamed file 'hooks/charmhelpers/core/unitdata.py' => 'hooks/charmhelpers/core/unitdata.py.moved' |
1334 | === modified file 'hooks/charmhelpers/fetch/__init__.py' |
1335 | --- hooks/charmhelpers/fetch/__init__.py 2015-01-26 10:44:46 +0000 |
1336 | +++ hooks/charmhelpers/fetch/__init__.py 2015-06-09 10:12:24 +0000 |
1337 | @@ -158,7 +158,7 @@ |
1338 | |
1339 | def apt_cache(in_memory=True): |
1340 | """Build and return an apt cache""" |
1341 | - import apt_pkg |
1342 | + from apt import apt_pkg |
1343 | apt_pkg.init() |
1344 | if in_memory: |
1345 | apt_pkg.config.set("Dir::Cache::pkgcache", "") |
1346 | |
1347 | === modified file 'hooks/charmhelpers/fetch/giturl.py' |
1348 | --- hooks/charmhelpers/fetch/giturl.py 2015-04-30 19:35:31 +0000 |
1349 | +++ hooks/charmhelpers/fetch/giturl.py 2015-06-09 10:12:24 +0000 |
1350 | @@ -45,14 +45,16 @@ |
1351 | else: |
1352 | return True |
1353 | |
1354 | - def clone(self, source, dest, branch): |
1355 | + def clone(self, source, dest, branch, depth=None): |
1356 | if not self.can_handle(source): |
1357 | raise UnhandledSource("Cannot handle {}".format(source)) |
1358 | |
1359 | - repo = Repo.clone_from(source, dest) |
1360 | - repo.git.checkout(branch) |
1361 | + if depth: |
1362 | + Repo.clone_from(source, dest, branch=branch, depth=depth) |
1363 | + else: |
1364 | + Repo.clone_from(source, dest, branch=branch) |
1365 | |
1366 | - def install(self, source, branch="master", dest=None): |
1367 | + def install(self, source, branch="master", dest=None, depth=None): |
1368 | url_parts = self.parse_url(source) |
1369 | branch_name = url_parts.path.strip("/").split("/")[-1] |
1370 | if dest: |
1371 | @@ -63,7 +65,7 @@ |
1372 | if not os.path.exists(dest_dir): |
1373 | mkdir(dest_dir, perms=0o755) |
1374 | try: |
1375 | - self.clone(source, dest_dir, branch) |
1376 | + self.clone(source, dest_dir, branch, depth) |
1377 | except GitCommandError as e: |
1378 | raise UnhandledSource(e.message) |
1379 | except OSError as e: |
1380 | |
1381 | === added file 'hooks/cmap.py' |
1382 | --- hooks/cmap.py 1970-01-01 00:00:00 +0000 |
1383 | +++ hooks/cmap.py 2015-06-09 10:12:24 +0000 |
1384 | @@ -0,0 +1,85 @@ |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015 Canonical Limited.
#

"""Thin wrapper around the corosync-cmapctl tool for reading and
mutating the runtime corosync configuration map (cmap)."""

import subprocess

# NOTE: a previous revision had ``from __builtin__ import classmethod``
# here; that import is redundant (classmethod is always a builtin) and
# fails outright on Python 3 (the module is named ``builtins``), so it
# has been removed.


class CMap(object):
    """Interface to the corosync cmap, backed by corosync-cmapctl."""

    # cmap type codes accepted by ``corosync-cmapctl -s``.
    UINT_32 = 'u32'

    STRING = 'str'

    class Property(object):
        """Represents a property within the corosync cmap"""

        def __init__(self, key, k_type, value):
            self.key = key
            # corosync-cmapctl prints the type wrapped in parentheses,
            # e.g. "(u32)"; strip them so k_type is the bare type code.
            if k_type.startswith('('):
                self.k_type = k_type[1:-1]
            else:
                self.k_type = k_type
            self.value = value

        def __eq__(self, o):
            # Properties compare equal by key only; type and value are
            # deliberately ignored.
            return str(self) == str(o)

        def __str__(self):
            return self.key

    def __init__(self):
        super(CMap, self).__init__()

    def _parse_property(self, line):
        """Creates a property from an output line from the corosync-cmapctl
        command output.

        Expected line format: "<key> (<type>) = <value>".
        NOTE(review): values containing spaces would be truncated by this
        split -- confirm acceptable for the keys this charm reads.
        """
        k, t, _, v = line.split(' ')
        return CMap.Property(k, t, v)

    @property
    def nodelist(self):
        """Returns the nodelist properties in the current cmap for corosync."""
        return self.get_properties('nodelist')

    def get_property(self, key):
        """Returns the current values of a property in the current cmap."""
        props = self.get_properties(key)
        if props:
            return props[0]
        else:
            return None

    def get_properties(self, key):
        """Returns a list of Property objects containing the values."""
        output = subprocess.check_output(['corosync-cmapctl', key])
        # check_output returns bytes on Python 3; decode so the string
        # operations below work on both Python 2 and 3.
        if isinstance(output, bytes):
            output = output.decode('utf-8')
        output = output.strip()
        return [self._parse_property(line) for line in output.split('\n')]

    @classmethod
    def add(cls, key, k_type, value):
        """Add an entry to the cmap."""
        subprocess.check_call(['corosync-cmapctl', '-s', key,
                               k_type, str(value)])

    # Alias set to add
    set = add

    @classmethod
    def add_property(cls, prop):
        """Adds a CMap.Property to the corosync cmap.

        :param prop: a CMap.Property which should be added.
        """
        cls.add(prop.key, prop.k_type, prop.value)

    @classmethod
    def delete(cls, key, delete_all=False):
        """Delete a key (or, with delete_all, a whole prefix) from the cmap.

        :param key: key string, or an object with a ``key`` attribute
            (e.g. a CMap.Property).
        :param delete_all: when True, use -D to delete by prefix.
        """
        delete_me = key
        if hasattr(key, 'key'):
            delete_me = key.key
        flag = '-D' if delete_all else '-d'
        subprocess.check_call(['corosync-cmapctl', flag, delete_me])
1470 | |
1471 | === added symlink 'hooks/hanode-relation-departed' |
1472 | === target is u'hooks.py' |
1473 | === modified file 'hooks/hooks.py' |
1474 | --- hooks/hooks.py 2015-06-03 12:18:49 +0000 |
1475 | +++ hooks/hooks.py 2015-06-09 10:12:24 +0000 |
1476 | @@ -9,11 +9,20 @@ |
1477 | |
1478 | import pcmk |
1479 | import socket |
1480 | +import six |
1481 | +import time |
1482 | + |
1483 | +from cmap import CMap |
1484 | |
1485 | from charmhelpers.core.hookenv import ( |
1486 | log, |
1487 | +<<<<<<< TREE |
1488 | DEBUG, |
1489 | INFO, |
1490 | +======= |
1491 | + WARNING, |
1492 | + relation_get, |
1493 | +>>>>>>> MERGE-SOURCE |
1494 | related_units, |
1495 | relation_ids, |
1496 | relation_get, |
1497 | @@ -22,6 +31,14 @@ |
1498 | Hooks, |
1499 | UnregisteredHookError, |
1500 | local_unit, |
1501 | +<<<<<<< TREE |
1502 | +======= |
1503 | + unit_private_ip, |
1504 | + is_leader, |
1505 | + leader_set, leader_get, |
1506 | + INFO, |
1507 | + ERROR, |
1508 | +>>>>>>> MERGE-SOURCE |
1509 | ) |
1510 | |
1511 | from charmhelpers.core.host import ( |
1512 | @@ -117,6 +134,8 @@ |
1513 | |
1514 | @hooks.hook() |
1515 | def config_changed(): |
1516 | + store_cluster_size() |
1517 | + |
1518 | if config('prefer-ipv6'): |
1519 | assert_charm_supports_ipv6() |
1520 | |
1521 | @@ -143,6 +162,7 @@ |
1522 | def upgrade_charm(): |
1523 | install() |
1524 | |
1525 | +<<<<<<< TREE |
1526 | update_nrpe_config() |
1527 | |
1528 | |
1529 | @@ -153,11 +173,303 @@ |
1530 | ensure_ipv6_requirements(None) |
1531 | |
1532 | ha_relation_changed() |
1533 | +======= |
1534 | + |
def restart_corosync():
    """Cycle the pacemaker and corosync services in the correct order.

    A plain restart can leave pacemaker unable to talk to corosync,
    timing out and making pacemaker unavailable for long periods; so
    both services are stopped, a short pause taken, then both started.
    """
    if service_running("pacemaker"):
        service_stop("pacemaker")
    service_stop('corosync')
    time.sleep(5)
    for svc in ("corosync", "pacemaker"):
        service_start(svc)
1545 | + |
1546 | + |
def restart_corosync_on_change():
    '''Simple decorator to restart corosync if any of its config changes.

    Hashes every file in COROSYNC_CONF_FILES before invoking the wrapped
    function. If the function reports it (re)generated configuration
    (truthy return) and a hash changed, corosync is restarted -- unless
    only COROSYNC_CONF changed and the running cluster could be updated
    dynamically instead.
    '''
    def wrap(f):
        def wrapped_f(*args, **kwargs):
            checksums = {}
            for path in COROSYNC_CONF_FILES:
                checksums[path] = file_hash(path)
            # Pass **kwargs through as well; previously only positional
            # arguments survived the wrapper, silently dropping keywords.
            return_data = f(*args, **kwargs)
            # NOTE: this assumes that this call is always done around
            # configure_corosync, which returns true if configuration
            # files where actually generated
            if return_data:
                for path in COROSYNC_CONF_FILES:
                    if checksums[path] != file_hash(path):
                        if not path == COROSYNC_CONF:
                            # A non-dynamic file changed: restart needed.
                            restart_corosync()
                            break
                        if not update_corosync_dynamically():
                            # Dynamic update refused; fall back to restart.
                            restart_corosync()
                            break
            return return_data
        return wrapped_f
    return wrap
1570 | + |
1571 | + |
def update_corosync_dynamically():
    """
    Attempts to update corosync dynamically rather than triggering a restart.

    If corosync is using unicast (udpu) transport new nodes are added in by
    updating the nodelist in the corosync cmap. The quorum and vote count is
    determined based upon the nodelist (unicast) or the expected vote count
    (multicast).

    Switching in and out of two_node vote quorum mode requires a restart of
    the corosync and pacemaker services.

    :returns: True if the corosync configuration was able to be updated on
              the fly, False if the corosync configuration was not able to
              be updated on the fly and needs a restart.
    """
    # If the corosync services aren't running, then just need to restart.
    if not service_running('corosync') or not service_running('pacemaker'):
        return False

    if quorum_two_node_changed():
        log('Changed the value for the two_node votequorum. '
            'Restart required.', INFO)
        return False

    if did_corosync_transport_change():
        # BUG FIX: this message was previously passed as two separate
        # positional arguments, making the second string the log *level*
        # and INFO a spurious third argument (TypeError with
        # hookenv.log(message, level)). Concatenate into one message.
        log('Corosync transport changed between unicast and multicast. '
            'Restart required.', INFO)
        return False

    if get_transport() == 'udpu':
        # Only need to add in the nodes for unicast because membership
        # is dynamic for multicast by the nature of multicast.
        try:
            add_udp_nodes_into_corosync()
        except Exception as e:
            log('Error occurred adding nodes into corosync.', ERROR)
            log(e, ERROR)
            return False
    else:
        # Need to update the vote count to the expected value.
        update_multicast_votecount()

    return True
1616 | + |
1617 | + |
def quorum_two_node_changed():
    """
    Determines if the current value for the two_node votequorum behavior
    would be specified differently based on the current in-memory config
    versus the value being written to disk.

    :returns: True if the two_node value would differ, False otherwise.
    """
    corosync_cmap = CMap()
    prop = corosync_cmap.get_property('runtime.votequorum.two_node')
    # BUG FIX: check for a missing property *before* dereferencing it.
    # Previously prop.value was logged first, so an absent property raised
    # AttributeError and the assert could never fire.
    assert prop is not None
    log('Current runtime.votequorum.two_node value is: %s' % prop.value)
    # two_node quorum mode only applies to exactly-two-node clusters.
    expected = '1' if len(get_ha_nodes()) == 2 else '0'
    return str(prop.value) != str(expected)
1638 | + |
1639 | + |
def did_corosync_transport_change():
    """
    Determines if the current transport mode for corosync changed.

    There's no straightforward way of asking the running corosync which
    particular transport mode it is using. However, a cluster running the
    unicast (udpu) transport relies on a configured nodelist, which is
    queryable from the cmap. So a written mode of 'udp' combined with an
    existing nodelist -- or 'udpu' with no nodelist -- means the transport
    has changed.

    :returns: True if the corosync transport changed between unicast and
              multicast. False if they are the same.
    """
    has_nodelist = bool(CMap().nodelist)
    transport = get_transport()

    if transport == 'udpu':
        # Unicast requires a nodelist; its absence signals a change.
        return not has_nodelist
    if transport == 'udp':
        # Multicast should have no nodelist; its presence signals a change.
        return has_nodelist
    return False
1665 | + |
1666 | + |
def add_udp_nodes_into_corosync():
    """
    Adds a new node dynamically to the cluster for udp nodes.

    Attempts to add a udp node by directly adding the node to the corosync
    cmap information.
    """
    cmap = CMap()
    nodelist_props = sorted(cmap.nodelist, key=lambda p: p.key)

    # Node ids already present in the running cluster's nodelist.
    existing_ids = [int(prop.value) for prop in nodelist_props
                    if prop.key.endswith('nodeid')]

    pending = get_ha_nodes()
    # Remove nodes the running cluster already knows about.
    for known_id in existing_ids:
        pending.pop(known_id, None)
    # TODO(wolsen) - a return value of None above will indicate that
    # there exists an extra node within the configuration and it should
    # (maybe?) be removed. Revisit this later.

    # Tell the current corosync node about each remaining new member.
    for node_id, node_ip in six.iteritems(pending):
        log('Adding node %s with ip %s...' % (node_id, node_ip), INFO)
        CMap.add('nodelist.node.%s.nodeid' % node_id, CMap.UINT_32, node_id)
        CMap.add('nodelist.node.%s.ring0_addr' % node_id, CMap.STRING, node_ip)
1693 | + |
1694 | + |
def update_multicast_votecount():
    """
    Update the corosync expected vote count dynamically on a running cluster.
    """
    corosync_cmap = CMap()
    # BUG FIX: CMap has no get() method -- get_property() is the accessor;
    # the previous code raised AttributeError at runtime.
    current_vote_prop = corosync_cmap.get_property('quorum.expected_votes')
    ha_nodes = get_ha_nodes()
    node_count = len(ha_nodes)
    expected_value = target_cluster_size()
    try:
        # Feature sniff to see if the leadership election capabilities
        # are present in order to determine what is correct for the
        # expected vote count.
        is_leader()
    except NotImplementedError:
        # No leadership election: fall back to the larger of the visible
        # node count and the configured cluster size.
        expected_value = max(node_count, int(config('cluster-count')))

    if not int(current_vote_prop.value) == expected_value:
        log('Updating quorum.expected_votes from %s to %s' %
            (current_vote_prop.value, expected_value), INFO)
        CMap.set('quorum.expected_votes', CMap.UINT_32, expected_value)
1716 | + |
1717 | + |
@restart_corosync_on_change()
def configure_corosync():
    """Write out the corosync configuration files.

    Returns True only when both the base config and corosync.conf were
    emitted; the decorator restarts corosync if the files changed.
    """
    log('Configuring and (maybe) restarting corosync')
    base_written = emit_base_conf()
    # Short-circuit: skip corosync.conf when the base config failed.
    return base_written and emit_corosync_conf()
1722 | + |
1723 | + |
def configure_monitor_host():
    '''Configure extra monitor host for better network failure detection.

    Creates (or updates) a cloned ocf:pacemaker:ping resource pinging
    monitor_host every monitor_interval; removes it when monitor_host
    is unset.
    '''
    log('Checking monitor host configuration')
    monitor_host = config('monitor_host')
    if monitor_host:
        if not pcmk.crm_opt_exists('ping'):
            log('Implementing monitor host'
                ' configuration (host: %s)' % monitor_host)
            monitor_interval = config('monitor_interval')
            cmd = 'crm -w -F configure primitive ping' \
                  ' ocf:pacemaker:ping params host_list="%s"' \
                  ' multiplier="100" op monitor interval="%s"' %\
                  (monitor_host, monitor_interval)
            pcmk.commit(cmd)
            # Clone so the ping resource runs on every node.
            cmd = 'crm -w -F configure clone cl_ping ping' \
                  ' meta interleave="true"'
            pcmk.commit(cmd)
        else:
            log('Reconfiguring monitor host'
                ' configuration (host: %s)' % monitor_host)
            cmd = 'crm -w -F resource param ping set host_list="%s"' %\
                  monitor_host
            # BUG FIX: the reconfigure command was built but never
            # committed, so host_list changes were silently dropped.
            pcmk.commit(cmd)
    else:
        if pcmk.crm_opt_exists('ping'):
            log('Disabling monitor host configuration')
            pcmk.commit('crm -w -F resource stop ping')
            pcmk.commit('crm -w -F configure delete ping')
1751 | + |
1752 | + |
def configure_cluster_global():
    '''Configure global cluster options.

    Picks the no-quorum-policy from the configured cluster size and sets
    default resource stickiness.
    '''
    log('Applying global cluster configuration')
    if int(config('cluster_count')) >= 3:
        # NOTE(jamespage) if 3 or more nodes, then quorum can be
        # managed effectively, so stop if quorum lost
        log('Configuring no-quorum-policy to stop')
        policy = 'stop'
    else:
        # NOTE(jamespage) if less that 3 nodes, quorum not possible
        # so ignore
        log('Configuring no-quorum-policy to ignore')
        policy = 'ignore'
    pcmk.commit('crm configure property no-quorum-policy=%s' % policy)

    pcmk.commit('crm configure rsc_defaults $id="rsc-options"'
                ' resource-stickiness="100"')
1771 | + |
1772 | + |
def parse_data(relid, unit, key):
    '''Simple helper to ast parse relation data.

    Returns the literal-eval'd value of ``key`` on the given unit and
    relation, or an empty dict when the key is unset/empty.
    '''
    raw = relation_get(key, unit, relid)
    return ast.literal_eval(raw) if raw else {}
1780 | +>>>>>>> MERGE-SOURCE |
1781 | + |
1782 | + |
@hooks.hook('hanode-relation-joined')
def hanode_joined():
    """Handle a new hanode peer: (re)configure resources and record size."""
    configure_principle_cluster_resources()
    store_cluster_size()
1787 | + |
1788 | + |
@hooks.hook('hanode-relation-departed')
def hanode_departed():
    """Handle a departing hanode peer by refreshing the stored size."""
    store_cluster_size()
1792 | + |
1793 | + |
def store_cluster_size():
    """Record the target cluster size in leader storage (leader only).

    Stores the larger of the configured cluster_count and the observed
    peer count (+1 for this unit) under the 'cluster-size' leader key.
    Logs a warning when leadership storage is unavailable.
    """
    try:
        if is_leader():
            configured = int(config('cluster_count'))
            observed = len(peer_units('hanode')) + 1
            # Never store less than the operator-configured count.
            leader_set(settings={'cluster-size': max(configured, observed)})
    except NotImplementedError:
        log("Unable to use leader storage to configure target cluster size"
            ", expanding clusters will be unreliable without changing "
            "cluster_count first", level=WARNING)
1807 | + |
1808 | + |
def target_cluster_size():
    """Return the desired number of nodes for this cluster.

    Prefers the 'cluster-size' value from leader storage; falls back to
    the cluster_count config option when leader storage is unset or
    unavailable (logging a warning in the latter case).
    """
    try:
        stored = leader_get('cluster-size')
        if stored:
            return int(stored)
    except NotImplementedError:
        log("Unable to use leader storage to configure target cluster size"
            ", expanding clusters will be unreliable without changing "
            "cluster_count first", level=WARNING)
    return int(config('cluster_count'))
1819 | |
1820 | |
1821 | @hooks.hook('ha-relation-joined', |
1822 | +<<<<<<< TREE |
1823 | 'ha-relation-changed') |
1824 | def ha_relation_changed(): |
1825 | +======= |
1826 | + 'ha-relation-changed', |
1827 | + 'hanode-relation-changed') |
1828 | +def configure_principle_cluster_resources(): |
1829 | +>>>>>>> MERGE-SOURCE |
1830 | # Check that we are related to a principle and that |
1831 | # it has already provided the required corosync configuration |
1832 | if not get_corosync_conf(): |
1833 | @@ -175,9 +487,14 @@ |
1834 | |
1835 | # Check that there's enough nodes in order to perform the |
1836 | # configuration of the HA cluster |
1837 | +<<<<<<< TREE |
1838 | if len(get_cluster_nodes()) < int(config('cluster_count')): |
1839 | log('Not enough nodes in cluster, deferring configuration', |
1840 | level=INFO) |
1841 | +======= |
1842 | + if (len(get_cluster_nodes()) < target_cluster_size()): |
1843 | + log('Not enough nodes in cluster, deferring configuration') |
1844 | +>>>>>>> MERGE-SOURCE |
1845 | return |
1846 | |
1847 | relids = relation_ids('ha') |
1848 | |
1849 | === modified file 'hooks/pcmk.py' |
1850 | --- hooks/pcmk.py 2015-04-30 19:35:31 +0000 |
1851 | +++ hooks/pcmk.py 2015-06-09 10:12:24 +0000 |
1852 | @@ -1,6 +1,7 @@ |
1853 | import commands |
1854 | import subprocess |
1855 | import socket |
1856 | +import time |
1857 | |
1858 | from charmhelpers.core.hookenv import ( |
1859 | log, |
1860 | @@ -14,6 +15,9 @@ |
1861 | while not crm_up: |
1862 | output = commands.getstatusoutput("crm node list")[1] |
1863 | crm_up = hostname in output |
1864 | + # Sleep between attempts to give corosync and pacemaker |
1865 | + # a chance to wake up. |
1866 | + time.sleep(5) |
1867 | |
1868 | |
1869 | def commit(cmd): |