Merge lp:~le-charmers/charms/trusty/nova-cloud-controller/leadership-election into lp:~openstack-charmers-archive/charms/trusty/nova-cloud-controller/next

Proposed by Edward Hope-Morley
Status: Merged
Merged at revision: 164
Proposed branch: lp:~le-charmers/charms/trusty/nova-cloud-controller/leadership-election
Merge into: lp:~openstack-charmers-archive/charms/trusty/nova-cloud-controller/next
Diff against target: 1074 lines (+476/-75)
14 files modified
hooks/charmhelpers/contrib/hahelpers/cluster.py (+37/-2)
hooks/charmhelpers/contrib/openstack/neutron.py (+10/-5)
hooks/charmhelpers/contrib/openstack/utils.py (+65/-18)
hooks/charmhelpers/contrib/peerstorage/__init__.py (+123/-3)
hooks/charmhelpers/contrib/python/packages.py (+28/-5)
hooks/charmhelpers/core/hookenv.py (+147/-10)
hooks/charmhelpers/core/host.py (+1/-1)
hooks/charmhelpers/core/services/base.py (+32/-11)
hooks/charmhelpers/fetch/__init__.py (+1/-1)
hooks/charmhelpers/fetch/giturl.py (+7/-5)
hooks/nova_cc_hooks.py (+5/-4)
hooks/nova_cc_utils.py (+12/-2)
unit_tests/test_nova_cc_hooks.py (+3/-3)
unit_tests/test_nova_cc_utils.py (+5/-5)
To merge this branch: bzr merge lp:~le-charmers/charms/trusty/nova-cloud-controller/leadership-election
Reviewer: OpenStack Charmers (status: Pending)
Review via email: mp+255015@code.launchpad.net
159. By Liam Young

Merged trunk in + LE charmhelper sync

160. By Liam Young

Fix unit_tests

161. By Liam Young

Resync le charm helpers

Preview Diff

1=== modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py'
2--- hooks/charmhelpers/contrib/hahelpers/cluster.py 2015-03-16 14:17:04 +0000
3+++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2015-06-04 08:45:15 +0000
4@@ -44,6 +44,7 @@
5 ERROR,
6 WARNING,
7 unit_get,
8+ is_leader as juju_is_leader
9 )
10 from charmhelpers.core.decorators import (
11 retry_on_exception,
12@@ -52,6 +53,8 @@
13 bool_from_string,
14 )
15
16+DC_RESOURCE_NAME = 'DC'
17+
18
19 class HAIncompleteConfig(Exception):
20 pass
21@@ -66,12 +69,21 @@
22 Returns True if the charm executing this is the elected cluster leader.
23
24 It relies on two mechanisms to determine leadership:
25- 1. If the charm is part of a corosync cluster, call corosync to
26+ 1. If juju is sufficiently new and leadership election is supported,
27+ the is_leader command will be used.
28+ 2. If the charm is part of a corosync cluster, call corosync to
29 determine leadership.
30- 2. If the charm is not part of a corosync cluster, the leader is
31+ 3. If the charm is not part of a corosync cluster, the leader is
32 determined as being "the alive unit with the lowest unit number". In
33 other words, the oldest surviving unit.
34 """
35+ try:
36+ return juju_is_leader()
37+ except NotImplementedError:
38+ log('Juju leadership election feature not enabled'
39+ ', using fallback support',
40+ level=WARNING)
41+
42 if is_clustered():
43 if not is_crm_leader(resource):
44 log('Deferring action to CRM leader.', level=INFO)
45@@ -95,6 +107,27 @@
46 return False
47
48
49+def is_crm_dc():
50+ """
51+ Determine leadership by querying the pacemaker Designated Controller
52+ """
53+ cmd = ['crm', 'status']
54+ try:
55+ status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
56+ if not isinstance(status, six.text_type):
57+ status = six.text_type(status, "utf-8")
58+ except subprocess.CalledProcessError:
59+ return False
60+ current_dc = ''
61+ for line in status.split('\n'):
62+ if line.startswith('Current DC'):
63+ # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum
64+ current_dc = line.split(':')[1].split()[0]
65+ if current_dc == get_unit_hostname():
66+ return True
67+ return False
68+
69+
70 @retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound)
71 def is_crm_leader(resource, retry=False):
72 """
73@@ -104,6 +137,8 @@
74 We allow this operation to be retried to avoid the possibility of getting a
75 false negative. See LP #1396246 for more info.
76 """
77+ if resource == DC_RESOURCE_NAME:
78+ return is_crm_dc()
79 cmd = ['crm', 'resource', 'show', resource]
80 try:
81 status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
82
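
The change above makes native Juju leadership the first check in is_elected_leader(), demoting the corosync and oldest-unit logic to fallbacks. A minimal, self-contained sketch of that control flow, with stand-ins for the hook tool and the legacy checks (both stubs are illustrative, not part of the charm):

def juju_is_leader():
    # Stand-in for hookenv.is_leader(): on Juju < 1.23 the is-leader
    # hook tool is missing, so the real wrapper raises NotImplementedError.
    raise NotImplementedError

def legacy_leader_check():
    # Stand-in for the corosync / lowest-unit-number fallbacks.
    return True

def is_elected_leader_sketch(resource):
    try:
        return juju_is_leader()        # 1. native leadership election
    except NotImplementedError:
        pass                           # the real helper logs a WARNING here
    return legacy_leader_check()       # 2./3. pre-1.23 behaviour

print(is_elected_leader_sketch('DC'))  # -> True via the fallback path
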
83=== modified file 'hooks/charmhelpers/contrib/openstack/neutron.py'
84--- hooks/charmhelpers/contrib/openstack/neutron.py 2015-04-13 08:51:41 +0000
85+++ hooks/charmhelpers/contrib/openstack/neutron.py 2015-06-04 08:45:15 +0000
86@@ -256,11 +256,14 @@
87 def parse_mappings(mappings):
88 parsed = {}
89 if mappings:
90- mappings = mappings.split(' ')
91+ mappings = mappings.split()
92 for m in mappings:
93 p = m.partition(':')
94- if p[1] == ':':
95- parsed[p[0].strip()] = p[2].strip()
96+ key = p[0].strip()
97+ if p[1]:
98+ parsed[key] = p[2].strip()
99+ else:
100+ parsed[key] = ''
101
102 return parsed
103
104@@ -283,13 +286,13 @@
105 Returns dict of the form {bridge:port}.
106 """
107 _mappings = parse_mappings(mappings)
108- if not _mappings:
109+ if not _mappings or list(_mappings.values()) == ['']:
110 if not mappings:
111 return {}
112
113 # For backwards-compatibility we need to support port-only provided in
114 # config.
115- _mappings = {default_bridge: mappings.split(' ')[0]}
116+ _mappings = {default_bridge: mappings.split()[0]}
117
118 bridges = _mappings.keys()
119 ports = _mappings.values()
120@@ -309,6 +312,8 @@
121
122 Mappings must be a space-delimited list of provider:start:end mappings.
123
124+ The start:end range is optional and may be omitted.
125+
126 Returns dict of the form {provider: (start, end)}.
127 """
128 _mappings = parse_mappings(mappings)
129
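
For reference, the reworked parse_mappings() behaviour is easy to see standalone: split() now tolerates runs of whitespace, and a bare key with no colon is kept with an empty value so parse_bridge_mappings() can detect the legacy port-only format. A sketch mirroring the hunk above:

def parse_mappings(mappings):
    parsed = {}
    if mappings:
        # split() handles any whitespace; the old split(' ') produced
        # empty tokens on double spaces.
        for m in mappings.split():
            key, sep, value = m.partition(':')
            # A bare key (no ':') is now recorded with an empty value.
            parsed[key.strip()] = value.strip() if sep else ''
    return parsed

print(parse_mappings('physnet1:br-ex  physnet2'))
# -> {'physnet1': 'br-ex', 'physnet2': ''}
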
130=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py'
131--- hooks/charmhelpers/contrib/openstack/utils.py 2015-04-20 08:39:44 +0000
132+++ hooks/charmhelpers/contrib/openstack/utils.py 2015-06-04 08:45:15 +0000
133@@ -53,9 +53,13 @@
134 get_ipv6_addr
135 )
136
137+from charmhelpers.contrib.python.packages import (
138+ pip_create_virtualenv,
139+ pip_install,
140+)
141+
142 from charmhelpers.core.host import lsb_release, mounts, umount
143 from charmhelpers.fetch import apt_install, apt_cache, install_remote
144-from charmhelpers.contrib.python.packages import pip_install
145 from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
146 from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
147
148@@ -497,7 +501,17 @@
149 requirements_dir = None
150
151
152-def git_clone_and_install(projects_yaml, core_project):
153+def _git_yaml_load(projects_yaml):
154+ """
155+ Load the specified yaml into a dictionary.
156+ """
157+ if not projects_yaml:
158+ return None
159+
160+ return yaml.load(projects_yaml)
161+
162+
163+def git_clone_and_install(projects_yaml, core_project, depth=1):
164 """
165 Clone/install all specified OpenStack repositories.
166
167@@ -510,23 +524,22 @@
168 repository: 'git://git.openstack.org/openstack/requirements.git',
169 branch: 'stable/icehouse'}
170 directory: /mnt/openstack-git
171- http_proxy: http://squid.internal:3128
172- https_proxy: https://squid.internal:3128
173+ http_proxy: squid-proxy-url
174+ https_proxy: squid-proxy-url
175
176 The directory, http_proxy, and https_proxy keys are optional.
177 """
178 global requirements_dir
179 parent_dir = '/mnt/openstack-git'
180-
181- if not projects_yaml:
182- return
183-
184- projects = yaml.load(projects_yaml)
185+ http_proxy = None
186+
187+ projects = _git_yaml_load(projects_yaml)
188 _git_validate_projects_yaml(projects, core_project)
189
190 old_environ = dict(os.environ)
191
192 if 'http_proxy' in projects.keys():
193+ http_proxy = projects['http_proxy']
194 os.environ['http_proxy'] = projects['http_proxy']
195 if 'https_proxy' in projects.keys():
196 os.environ['https_proxy'] = projects['https_proxy']
197@@ -534,15 +547,19 @@
198 if 'directory' in projects.keys():
199 parent_dir = projects['directory']
200
201+ pip_create_virtualenv(os.path.join(parent_dir, 'venv'))
202+
203 for p in projects['repositories']:
204 repo = p['repository']
205 branch = p['branch']
206 if p['name'] == 'requirements':
207- repo_dir = _git_clone_and_install_single(repo, branch, parent_dir,
208+ repo_dir = _git_clone_and_install_single(repo, branch, depth,
209+ parent_dir, http_proxy,
210 update_requirements=False)
211 requirements_dir = repo_dir
212 else:
213- repo_dir = _git_clone_and_install_single(repo, branch, parent_dir,
214+ repo_dir = _git_clone_and_install_single(repo, branch, depth,
215+ parent_dir, http_proxy,
216 update_requirements=True)
217
218 os.environ = old_environ
219@@ -574,7 +591,8 @@
220 error_out('openstack-origin-git key \'{}\' is missing'.format(key))
221
222
223-def _git_clone_and_install_single(repo, branch, parent_dir, update_requirements):
224+def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
225+ update_requirements):
226 """
227 Clone and install a single git repository.
228 """
229@@ -587,7 +605,8 @@
230
231 if not os.path.exists(dest_dir):
232 juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
233- repo_dir = install_remote(repo, dest=parent_dir, branch=branch)
234+ repo_dir = install_remote(repo, dest=parent_dir, branch=branch,
235+ depth=depth)
236 else:
237 repo_dir = dest_dir
238
239@@ -598,7 +617,12 @@
240 _git_update_requirements(repo_dir, requirements_dir)
241
242 juju_log('Installing git repo from dir: {}'.format(repo_dir))
243- pip_install(repo_dir)
244+ if http_proxy:
245+ pip_install(repo_dir, proxy=http_proxy,
246+ venv=os.path.join(parent_dir, 'venv'))
247+ else:
248+ pip_install(repo_dir,
249+ venv=os.path.join(parent_dir, 'venv'))
250
251 return repo_dir
252
253@@ -621,16 +645,27 @@
254 os.chdir(orig_dir)
255
256
257+def git_pip_venv_dir(projects_yaml):
258+ """
259+ Return the pip virtualenv path.
260+ """
261+ parent_dir = '/mnt/openstack-git'
262+
263+ projects = _git_yaml_load(projects_yaml)
264+
265+ if 'directory' in projects.keys():
266+ parent_dir = projects['directory']
267+
268+ return os.path.join(parent_dir, 'venv')
269+
270+
271 def git_src_dir(projects_yaml, project):
272 """
273 Return the directory where the specified project's source is located.
274 """
275 parent_dir = '/mnt/openstack-git'
276
277- if not projects_yaml:
278- return
279-
280- projects = yaml.load(projects_yaml)
281+ projects = _git_yaml_load(projects_yaml)
282
283 if 'directory' in projects.keys():
284 parent_dir = projects['directory']
285@@ -640,3 +675,15 @@
286 return os.path.join(parent_dir, os.path.basename(p['repository']))
287
288 return None
289+
290+
291+def git_yaml_value(projects_yaml, key):
292+ """
293+ Return the value in projects_yaml for the specified key.
294+ """
295+ projects = _git_yaml_load(projects_yaml)
296+
297+ if key in projects.keys():
298+ return projects[key]
299+
300+ return None
301
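
The refactor funnels all projects_yaml parsing through _git_yaml_load(), which the new git_pip_venv_dir() and git_yaml_value() helpers reuse. A sketch of the accessor pattern, mirroring the hunk's use of yaml.load; the YAML shown is illustrative, not this charm's actual config:

import yaml

def _git_yaml_load(projects_yaml):
    if not projects_yaml:
        return None
    return yaml.load(projects_yaml)

def git_yaml_value(projects_yaml, key):
    projects = _git_yaml_load(projects_yaml)
    if key in projects.keys():
        return projects[key]
    return None

example = """
directory: /mnt/openstack-git
http_proxy: http://squid.internal:3128
"""
print(git_yaml_value(example, 'http_proxy'))  # -> http://squid.internal:3128
print(git_yaml_value(example, 'missing'))     # -> None
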
302=== modified file 'hooks/charmhelpers/contrib/peerstorage/__init__.py'
303--- hooks/charmhelpers/contrib/peerstorage/__init__.py 2015-03-31 11:39:19 +0000
304+++ hooks/charmhelpers/contrib/peerstorage/__init__.py 2015-06-04 08:45:15 +0000
305@@ -14,14 +14,19 @@
306 # You should have received a copy of the GNU Lesser General Public License
307 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
308
309+import json
310 import six
311+
312 from charmhelpers.core.hookenv import relation_id as current_relation_id
313 from charmhelpers.core.hookenv import (
314 is_relation_made,
315 relation_ids,
316- relation_get,
317+ relation_get as _relation_get,
318 local_unit,
319- relation_set,
320+ relation_set as _relation_set,
321+ leader_get as _leader_get,
322+ leader_set,
323+ is_leader,
324 )
325
326
327@@ -54,6 +59,105 @@
328 """
329
330
331+def leader_get(attribute=None):
332+ """Wrapper to ensure that settings are migrated from the peer relation.
333+
334+ This is to support upgrading an environment that does not support
335+ Juju leadership election to one that does.
336+
337+ If a setting is not present in the leader datastore but is set on the
338+ peer relation, it is migrated and marked as such so that it is not re-migrated.
339+ """
340+ migration_key = '__leader_get_migrated_settings__'
341+ if not is_leader():
342+ return _leader_get(attribute=attribute)
343+
344+ settings_migrated = False
345+ leader_settings = _leader_get(attribute=attribute)
346+ previously_migrated = _leader_get(attribute=migration_key)
347+
348+ if previously_migrated:
349+ migrated = set(json.loads(previously_migrated))
350+ else:
351+ migrated = set([])
352+
353+ try:
354+ if migration_key in leader_settings:
355+ del leader_settings[migration_key]
356+ except TypeError:
357+ pass
358+
359+ if attribute:
360+ if attribute in migrated:
361+ return leader_settings
362+
363+ # If attribute not present in leader db, check if this unit has set
364+ # the attribute in the peer relation
365+ if not leader_settings:
366+ peer_setting = relation_get(attribute=attribute, unit=local_unit())
367+ if peer_setting:
368+ leader_set(settings={attribute: peer_setting})
369+ leader_settings = peer_setting
370+
371+ if leader_settings:
372+ settings_migrated = True
373+ migrated.add(attribute)
374+ else:
375+ r_settings = relation_get(unit=local_unit())
376+ if r_settings:
377+ for key in set(r_settings.keys()).difference(migrated):
378+ # Leader setting wins
379+ if not leader_settings.get(key):
380+ leader_settings[key] = r_settings[key]
381+
382+ settings_migrated = True
383+ migrated.add(key)
384+
385+ if settings_migrated:
386+ leader_set(**leader_settings)
387+
388+ if migrated and settings_migrated:
389+ migrated = json.dumps(list(migrated))
390+ leader_set(settings={migration_key: migrated})
391+
392+ return leader_settings
393+
394+
395+def relation_set(relation_id=None, relation_settings=None, **kwargs):
396+ """Attempt to use leader-set if supported in the current version of Juju,
397+ otherwise falls back on relation-set.
398+
399+ Note that we only attempt to use leader-set if the provided relation_id is
400+ a peer relation id or no relation id is provided (in which case we assume
401+ we are within the peer relation context).
402+ """
403+ try:
404+ if relation_id in relation_ids('cluster'):
405+ return leader_set(settings=relation_settings, **kwargs)
406+ else:
407+ raise NotImplementedError
408+ except NotImplementedError:
409+ return _relation_set(relation_id=relation_id,
410+ relation_settings=relation_settings, **kwargs)
411+
412+
413+def relation_get(attribute=None, unit=None, rid=None):
414+ """Attempt to use leader-get if supported in the current version of Juju,
415+ otherwise falls back on relation-get.
416+
417+ Note that we only attempt to use leader-get if the provided rid is a peer
418+ relation id or no relation id is provided (in which case we assume we are
419+ within the peer relation context).
420+ """
421+ try:
422+ if rid in relation_ids('cluster'):
423+ return leader_get(attribute)
424+ else:
425+ raise NotImplementedError
426+ except NotImplementedError:
427+ return _relation_get(attribute=attribute, rid=rid, unit=unit)
428+
429+
430 def peer_retrieve(key, relation_name='cluster'):
431 """Retrieve a named key from peer relation `relation_name`."""
432 cluster_rels = relation_ids(relation_name)
433@@ -73,6 +177,8 @@
434 exc_list = exc_list if exc_list else []
435 peerdb_settings = peer_retrieve('-', relation_name=relation_name)
436 matched = {}
437+ if peerdb_settings is None:
438+ return matched
439 for k, v in peerdb_settings.items():
440 full_prefix = prefix + delimiter
441 if k.startswith(full_prefix):
442@@ -96,12 +202,26 @@
443 'peer relation {}'.format(relation_name))
444
445
446-def peer_echo(includes=None):
447+def peer_echo(includes=None, force=False):
448 """Echo filtered attributes back onto the same relation for storage.
449
450 This is a requirement to use the peerstorage module - it needs to be called
451 from the peer relation's changed hook.
452+
453+ If Juju leader support exists this will be a noop unless force is True.
454 """
455+ try:
456+ is_leader()
457+ except NotImplementedError:
458+ pass
459+ else:
460+ if not force:
461+ return # NOOP if leader-election is supported
462+
463+ # Use original non-leader calls
464+ relation_get = _relation_get
465+ relation_set = _relation_set
466+
467 rdata = relation_get()
468 echo_data = {}
469 if includes is None:
470
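
The wrapped relation_set()/relation_get() pair above implements one convention: peer-relation traffic is routed through the leader datastore when Juju supports it, and anything else (or an old Juju) falls through to the original relation commands. A condensed sketch with stubbed hook tools; the stubs simulate Juju < 1.23:

def relation_ids(reltype):
    return ['cluster:0']                       # stub: one peer relation

def leader_set(settings=None, **kwargs):
    raise NotImplementedError                  # stub: no leader-set tool

def _relation_set(relation_id=None, relation_settings=None, **kwargs):
    print('relation-set fallback on', relation_id, relation_settings)

def relation_set(relation_id=None, relation_settings=None, **kwargs):
    try:
        if relation_id in relation_ids('cluster'):
            return leader_set(settings=relation_settings, **kwargs)
        raise NotImplementedError              # not a peer relation
    except NotImplementedError:
        return _relation_set(relation_id=relation_id,
                             relation_settings=relation_settings, **kwargs)

relation_set(relation_id='cluster:0', relation_settings={'shared-db': 'ready'})
# -> relation-set fallback on cluster:0 {'shared-db': 'ready'}
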
471=== modified file 'hooks/charmhelpers/contrib/python/packages.py'
472--- hooks/charmhelpers/contrib/python/packages.py 2015-03-13 13:01:00 +0000
473+++ hooks/charmhelpers/contrib/python/packages.py 2015-06-04 08:45:15 +0000
474@@ -17,8 +17,11 @@
475 # You should have received a copy of the GNU Lesser General Public License
476 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
477
478+import os
479+import subprocess
480+
481 from charmhelpers.fetch import apt_install, apt_update
482-from charmhelpers.core.hookenv import log
483+from charmhelpers.core.hookenv import charm_dir, log
484
485 try:
486 from pip import main as pip_execute
487@@ -51,11 +54,15 @@
488 pip_execute(command)
489
490
491-def pip_install(package, fatal=False, upgrade=False, **options):
492+def pip_install(package, fatal=False, upgrade=False, venv=None, **options):
493 """Install a python package"""
494- command = ["install"]
495+ if venv:
496+ venv_python = os.path.join(venv, 'bin/pip')
497+ command = [venv_python, "install"]
498+ else:
499+ command = ["install"]
500
501- available_options = ('proxy', 'src', 'log', "index-url", )
502+ available_options = ('proxy', 'src', 'log', 'index-url', )
503 for option in parse_options(options, available_options):
504 command.append(option)
505
506@@ -69,7 +76,10 @@
507
508 log("Installing {} package with options: {}".format(package,
509 command))
510- pip_execute(command)
511+ if venv:
512+ subprocess.check_call(command)
513+ else:
514+ pip_execute(command)
515
516
517 def pip_uninstall(package, **options):
518@@ -94,3 +104,16 @@
519 """Returns the list of current python installed packages
520 """
521 return pip_execute(["list"])
522+
523+
524+def pip_create_virtualenv(path=None):
525+ """Create an isolated Python environment."""
526+ apt_install('python-virtualenv')
527+
528+ if path:
529+ venv_path = path
530+ else:
531+ venv_path = os.path.join(charm_dir(), 'venv')
532+
533+ if not os.path.exists(venv_path):
534+ subprocess.check_call(['virtualenv', venv_path])
535
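
pip_create_virtualenv() plus the new venv parameter let the git-install path drive the virtualenv's own pip binary via subprocess instead of pip's in-process main(). A sketch of the resulting sequence, assuming a host where the python-virtualenv package is already installed; the paths are the defaults from the hunks above and the repo_dir value is illustrative:

import os
import subprocess

venv = '/mnt/openstack-git/venv'

# pip_create_virtualenv(): idempotent venv creation.
if not os.path.exists(venv):
    subprocess.check_call(['virtualenv', venv])

# pip_install(repo_dir, venv=venv): invoke the venv's pip so the project
# and its requirements land inside the isolated environment.
repo_dir = '/mnt/openstack-git/nova'    # illustrative clone location
subprocess.check_call([os.path.join(venv, 'bin/pip'), 'install', repo_dir])
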
536=== modified file 'hooks/charmhelpers/core/hookenv.py'
537--- hooks/charmhelpers/core/hookenv.py 2015-04-13 08:51:41 +0000
538+++ hooks/charmhelpers/core/hookenv.py 2015-06-04 08:45:15 +0000
539@@ -21,12 +21,14 @@
540 # Charm Helpers Developers <juju@lists.ubuntu.com>
541
542 from __future__ import print_function
543+from functools import wraps
544 import os
545 import json
546 import yaml
547 import subprocess
548 import sys
549 import errno
550+import tempfile
551 from subprocess import CalledProcessError
552
553 import six
554@@ -58,15 +60,17 @@
555
556 will cache the result of unit_get + 'test' for future calls.
557 """
558+ @wraps(func)
559 def wrapper(*args, **kwargs):
560 global cache
561 key = str((func, args, kwargs))
562 try:
563 return cache[key]
564 except KeyError:
565- res = func(*args, **kwargs)
566- cache[key] = res
567- return res
568+ pass # Drop out of the exception handler scope.
569+ res = func(*args, **kwargs)
570+ cache[key] = res
571+ return res
572 return wrapper
573
574
575@@ -178,7 +182,7 @@
576
577 def remote_unit():
578 """The remote unit for the current relation hook"""
579- return os.environ['JUJU_REMOTE_UNIT']
580+ return os.environ.get('JUJU_REMOTE_UNIT', None)
581
582
583 def service_name():
584@@ -250,6 +254,12 @@
585 except KeyError:
586 return (self._prev_dict or {})[key]
587
588+ def get(self, key, default=None):
589+ try:
590+ return self[key]
591+ except KeyError:
592+ return default
593+
594 def keys(self):
595 prev_keys = []
596 if self._prev_dict is not None:
597@@ -353,18 +363,49 @@
598 """Set relation information for the current unit"""
599 relation_settings = relation_settings if relation_settings else {}
600 relation_cmd_line = ['relation-set']
601+ accepts_file = "--file" in subprocess.check_output(
602+ relation_cmd_line + ["--help"], universal_newlines=True)
603 if relation_id is not None:
604 relation_cmd_line.extend(('-r', relation_id))
605- for k, v in (list(relation_settings.items()) + list(kwargs.items())):
606- if v is None:
607- relation_cmd_line.append('{}='.format(k))
608- else:
609- relation_cmd_line.append('{}={}'.format(k, v))
610- subprocess.check_call(relation_cmd_line)
611+ settings = relation_settings.copy()
612+ settings.update(kwargs)
613+ for key, value in settings.items():
614+ # Force value to be a string: it always should, but some call
615+ # sites pass in things like dicts or numbers.
616+ if value is not None:
617+ settings[key] = "{}".format(value)
618+ if accepts_file:
619+ # --file was introduced in Juju 1.23.2. Use it by default if
620+ # available, since otherwise we'll break if the relation data is
621+ # too big. Ideally we should tell relation-set to read the data from
622+ # stdin, but that feature is broken in 1.23.2: Bug #1454678.
623+ with tempfile.NamedTemporaryFile(delete=False) as settings_file:
624+ settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
625+ subprocess.check_call(
626+ relation_cmd_line + ["--file", settings_file.name])
627+ os.remove(settings_file.name)
628+ else:
629+ for key, value in settings.items():
630+ if value is None:
631+ relation_cmd_line.append('{}='.format(key))
632+ else:
633+ relation_cmd_line.append('{}={}'.format(key, value))
634+ subprocess.check_call(relation_cmd_line)
635 # Flush cache of any relation-gets for local unit
636 flush(local_unit())
637
638
639+def relation_clear(r_id=None):
640+ ''' Clears any relation data already set on relation r_id '''
641+ settings = relation_get(rid=r_id,
642+ unit=local_unit())
643+ for setting in settings:
644+ if setting not in ['public-address', 'private-address']:
645+ settings[setting] = None
646+ relation_set(relation_id=r_id,
647+ **settings)
648+
649+
650 @cached
651 def relation_ids(reltype=None):
652 """A list of relation_ids"""
653@@ -509,6 +550,11 @@
654 return None
655
656
657+def unit_public_ip():
658+ """Get this unit's public IP address"""
659+ return unit_get('public-address')
660+
661+
662 def unit_private_ip():
663 """Get this unit's private IP address"""
664 return unit_get('private-address')
665@@ -605,3 +651,94 @@
666
667 The results set by action_set are preserved."""
668 subprocess.check_call(['action-fail', message])
669+
670+
671+def status_set(workload_state, message):
672+ """Set the workload state with a message
673+
674+ Use status-set to set the workload state with a message which is visible
675+ to the user via juju status. If the status-set command is not found then
676+ assume this is juju < 1.23 and juju-log the message instead.
677+
678+ workload_state -- valid juju workload state.
679+ message -- status update message
680+ """
681+ valid_states = ['maintenance', 'blocked', 'waiting', 'active']
682+ if workload_state not in valid_states:
683+ raise ValueError(
684+ '{!r} is not a valid workload state'.format(workload_state)
685+ )
686+ cmd = ['status-set', workload_state, message]
687+ try:
688+ ret = subprocess.call(cmd)
689+ if ret == 0:
690+ return
691+ except OSError as e:
692+ if e.errno != errno.ENOENT:
693+ raise
694+ log_message = 'status-set failed: {} {}'.format(workload_state,
695+ message)
696+ log(log_message, level='INFO')
697+
698+
699+def status_get():
700+ """Retrieve the previously set juju workload state
701+
702+ If the status-set command is not found then assume this is juju < 1.23 and
703+ return 'unknown'
704+ """
705+ cmd = ['status-get']
706+ try:
707+ raw_status = subprocess.check_output(cmd, universal_newlines=True)
708+ status = raw_status.rstrip()
709+ return status
710+ except OSError as e:
711+ if e.errno == errno.ENOENT:
712+ return 'unknown'
713+ else:
714+ raise
715+
716+
717+def translate_exc(from_exc, to_exc):
718+ def inner_translate_exc1(f):
719+ def inner_translate_exc2(*args, **kwargs):
720+ try:
721+ return f(*args, **kwargs)
722+ except from_exc:
723+ raise to_exc
724+
725+ return inner_translate_exc2
726+
727+ return inner_translate_exc1
728+
729+
730+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
731+def is_leader():
732+ """Does the current unit hold the juju leadership
733+
734+ Uses juju to determine whether the current unit is the leader of its peers
735+ """
736+ cmd = ['is-leader', '--format=json']
737+ return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
738+
739+
740+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
741+def leader_get(attribute=None):
742+ """Juju leader get value(s)"""
743+ cmd = ['leader-get', '--format=json'] + [attribute or '-']
744+ return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
745+
746+
747+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
748+def leader_set(settings=None, **kwargs):
749+ """Juju leader set value(s)"""
750+ log("Juju leader-set '%s'" % (settings), level=DEBUG)
751+ cmd = ['leader-set']
752+ settings = settings or {}
753+ settings.update(kwargs)
754+ for k, v in settings.items():
755+ if v is None:
756+ cmd.append('{}='.format(k))
757+ else:
758+ cmd.append('{}={}'.format(k, v))
759+ subprocess.check_call(cmd)
760
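
translate_exc() is the linchpin of the compatibility story: on Juju versions without leadership election the is-leader/leader-get/leader-set binaries do not exist, subprocess raises OSError, and callers see NotImplementedError instead, which every wrapper in this branch is written to catch. A self-contained sketch (running it on a host without the is-leader tool takes the fallback branch):

import json
import subprocess

def translate_exc(from_exc, to_exc):
    def inner_translate_exc1(f):
        def inner_translate_exc2(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except from_exc:
                raise to_exc
        return inner_translate_exc2
    return inner_translate_exc1

@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def is_leader():
    # Missing hook tool -> OSError(ENOENT) -> NotImplementedError.
    cmd = ['is-leader', '--format=json']
    return json.loads(subprocess.check_output(cmd).decode('UTF-8'))

try:
    print('leader?', is_leader())
except NotImplementedError:
    print('no leadership election; falling back to legacy checks')
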
761=== modified file 'hooks/charmhelpers/core/host.py'
762--- hooks/charmhelpers/core/host.py 2015-03-31 14:56:11 +0000
763+++ hooks/charmhelpers/core/host.py 2015-06-04 08:45:15 +0000
764@@ -90,7 +90,7 @@
765 ['service', service_name, 'status'],
766 stderr=subprocess.STDOUT).decode('UTF-8')
767 except subprocess.CalledProcessError as e:
768- return 'unrecognized service' not in e.output
769+ return b'unrecognized service' not in e.output
770 else:
771 return True
772
773
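
The single-character host.py change is a Python 3 correctness fix: there, CalledProcessError.output is bytes, and testing an str needle against it raises TypeError. A two-line illustration:

output = b'unrecognized service\n'             # e.output under Python 3
print(b'unrecognized service' not in output)   # -> False, on py2 and py3
# 'unrecognized service' not in output         # TypeError on Python 3
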
774=== modified file 'hooks/charmhelpers/core/services/base.py'
775--- hooks/charmhelpers/core/services/base.py 2015-03-31 14:56:11 +0000
776+++ hooks/charmhelpers/core/services/base.py 2015-06-04 08:45:15 +0000
777@@ -15,9 +15,9 @@
778 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
779
780 import os
781-import re
782 import json
783-from collections import Iterable
784+from inspect import getargspec
785+from collections import Iterable, OrderedDict
786
787 from charmhelpers.core import host
788 from charmhelpers.core import hookenv
789@@ -119,7 +119,7 @@
790 """
791 self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
792 self._ready = None
793- self.services = {}
794+ self.services = OrderedDict()
795 for service in services or []:
796 service_name = service['service']
797 self.services[service_name] = service
798@@ -132,8 +132,8 @@
799 if hook_name == 'stop':
800 self.stop_services()
801 else:
802+ self.reconfigure_services()
803 self.provide_data()
804- self.reconfigure_services()
805 cfg = hookenv.config()
806 if cfg.implicit_save:
807 cfg.save()
808@@ -145,15 +145,36 @@
809 A provider must have a `name` attribute, which indicates which relation
810 to set data on, and a `provide_data()` method, which returns a dict of
811 data to set.
812+
813+ The `provide_data()` method can optionally accept two parameters:
814+
815+ * ``remote_service`` The name of the remote service that the data will
816+ be provided to. The `provide_data()` method will be called once
817+ for each connected service (not unit). This allows the method to
818+ tailor its data to the given service.
819+ * ``service_ready`` Whether or not the service definition had all of
820+ its requirements met, and thus the ``data_ready`` callbacks run.
821+
822+ Note that the ``provided_data`` methods are now called **after** the
823+ ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks
824+ a chance to generate any data necessary for the providing to the remote
825+ services.
826 """
827- hook_name = hookenv.hook_name()
828- for service in self.services.values():
829+ for service_name, service in self.services.items():
830+ service_ready = self.is_ready(service_name)
831 for provider in service.get('provided_data', []):
832- if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name):
833- data = provider.provide_data()
834- _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data
835- if _ready:
836- hookenv.relation_set(None, data)
837+ for relid in hookenv.relation_ids(provider.name):
838+ units = hookenv.related_units(relid)
839+ if not units:
840+ continue
841+ remote_service = units[0].split('/')[0]
842+ argspec = getargspec(provider.provide_data)
843+ if len(argspec.args) > 1:
844+ data = provider.provide_data(remote_service, service_ready)
845+ else:
846+ data = provider.provide_data()
847+ if data:
848+ hookenv.relation_set(relid, data)
849
850 def reconfigure_services(self, *service_names):
851 """
852
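
provide_data() now runs after the data_ready callbacks and inspects each provider's signature, so zero-argument legacy providers keep working while new ones can tailor data per remote service. A minimal sketch of the getargspec dispatch used in the hunk (getargspec is the py2-era inspect API the hunk itself uses; the provider classes are illustrative):

from inspect import getargspec

class LegacyProvider(object):
    name = 'identity-service'
    def provide_data(self):
        return {'key': 'static'}

class ServiceAwareProvider(object):
    name = 'identity-service'
    def provide_data(self, remote_service, service_ready):
        return {'key': 'for-{}'.format(remote_service), 'ready': service_ready}

for provider in (LegacyProvider(), ServiceAwareProvider()):
    argspec = getargspec(provider.provide_data)
    if len(argspec.args) > 1:          # argspec.args includes 'self'
        data = provider.provide_data('nova-compute', True)
    else:
        data = provider.provide_data()
    print(data)
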
853=== modified file 'hooks/charmhelpers/fetch/__init__.py'
854--- hooks/charmhelpers/fetch/__init__.py 2015-03-31 14:56:11 +0000
855+++ hooks/charmhelpers/fetch/__init__.py 2015-06-04 08:45:15 +0000
856@@ -158,7 +158,7 @@
857
858 def apt_cache(in_memory=True):
859 """Build and return an apt cache"""
860- import apt_pkg
861+ from apt import apt_pkg
862 apt_pkg.init()
863 if in_memory:
864 apt_pkg.config.set("Dir::Cache::pkgcache", "")
865
866=== modified file 'hooks/charmhelpers/fetch/giturl.py'
867--- hooks/charmhelpers/fetch/giturl.py 2015-03-13 13:01:00 +0000
868+++ hooks/charmhelpers/fetch/giturl.py 2015-06-04 08:45:15 +0000
869@@ -45,14 +45,16 @@
870 else:
871 return True
872
873- def clone(self, source, dest, branch):
874+ def clone(self, source, dest, branch, depth=None):
875 if not self.can_handle(source):
876 raise UnhandledSource("Cannot handle {}".format(source))
877
878- repo = Repo.clone_from(source, dest)
879- repo.git.checkout(branch)
880+ if depth:
881+ Repo.clone_from(source, dest, branch=branch, depth=depth)
882+ else:
883+ Repo.clone_from(source, dest, branch=branch)
884
885- def install(self, source, branch="master", dest=None):
886+ def install(self, source, branch="master", dest=None, depth=None):
887 url_parts = self.parse_url(source)
888 branch_name = url_parts.path.strip("/").split("/")[-1]
889 if dest:
890@@ -63,7 +65,7 @@
891 if not os.path.exists(dest_dir):
892 mkdir(dest_dir, perms=0o755)
893 try:
894- self.clone(source, dest_dir, branch)
895+ self.clone(source, dest_dir, branch, depth)
896 except GitCommandError as e:
897 raise UnhandledSource(e.message)
898 except OSError as e:
899
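
The depth plumbing ends here: git_clone_and_install() defaults to depth=1, install_remote() passes it through, and clone() hands it to GitPython, which forwards keyword arguments as git-clone flags. A sketch of the call, assuming GitPython is installed and the URL is reachable:

from git import Repo   # GitPython, as used by giturl.py

def clone(source, dest, branch, depth=None):
    # branch= replaces the old clone-then-checkout sequence; depth=1
    # fetches only the branch tip, which is all a charm install needs.
    if depth:
        Repo.clone_from(source, dest, branch=branch, depth=depth)
    else:
        Repo.clone_from(source, dest, branch=branch)

clone('git://git.openstack.org/openstack/requirements.git',
      '/mnt/openstack-git/requirements', 'stable/icehouse', depth=1)
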
900=== added symlink 'hooks/leader-settings-changed'
901=== target is u'nova_cc_hooks.py'
902=== modified file 'hooks/nova_cc_hooks.py'
903--- hooks/nova_cc_hooks.py 2015-05-22 18:19:15 +0000
904+++ hooks/nova_cc_hooks.py 2015-06-04 08:45:15 +0000
905@@ -101,7 +101,7 @@
906 )
907
908 from charmhelpers.contrib.hahelpers.cluster import (
909- eligible_leader,
910+ is_elected_leader,
911 get_hacluster_config,
912 )
913
914@@ -305,7 +305,7 @@
915 return
916 CONFIGS.write_all()
917
918- if eligible_leader(CLUSTER_RES):
919+ if is_elected_leader(CLUSTER_RES):
920 # Bugs 1353135 & 1187508. Dbs can appear to be ready before the units
921 # acl entry has been added. So, if the db supports passing a list of
922 # permitted units then check if we're in the list.
923@@ -334,7 +334,7 @@
924 return
925 CONFIGS.write_all()
926
927- if eligible_leader(CLUSTER_RES):
928+ if is_elected_leader(CLUSTER_RES):
929 migrate_nova_database()
930 log('Triggering remote cloud-compute restarts.')
931 [compute_joined(rid=rid, remote_restart=True)
932@@ -635,7 +635,8 @@
933
934
935 @hooks.hook('cluster-relation-changed',
936- 'cluster-relation-departed')
937+ 'cluster-relation-departed',
938+ 'leader-settings-changed')
939 @service_guard(guard_map(), CONFIGS,
940 active=config('service-guard'))
941 @restart_on_change(restart_map(), stopstart=True)
942
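
The added 'hooks/leader-settings-changed' symlink and the extra name on the @hooks.hook decorator are two halves of one wiring job: when the leader pushes new settings, Juju runs the symlink, and the existing cluster-changed handler rewrites the configs. A stripped-down sketch of that multi-hook registration, assuming charmhelpers is importable:

import sys
from charmhelpers.core.hookenv import Hooks

hooks = Hooks()

@hooks.hook('cluster-relation-changed',
            'cluster-relation-departed',
            'leader-settings-changed')
def cluster_changed():
    # In the charm this re-renders configs and restarts services.
    print('peer or leader data changed')

if __name__ == '__main__':
    # Juju invokes the hook via the symlink name, e.g.
    # hooks/leader-settings-changed -> nova_cc_hooks.py
    hooks.execute(sys.argv)
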
943=== modified file 'hooks/nova_cc_utils.py'
944--- hooks/nova_cc_utils.py 2015-04-22 14:36:20 +0000
945+++ hooks/nova_cc_utils.py 2015-06-04 08:45:15 +0000
946@@ -10,7 +10,7 @@
947 from charmhelpers.contrib.openstack.neutron import (
948 network_manager, neutron_plugin_attribute)
949
950-from charmhelpers.contrib.hahelpers.cluster import eligible_leader
951+from charmhelpers.contrib.hahelpers.cluster import is_elected_leader
952
953 from charmhelpers.contrib.peerstorage import peer_store
954
955@@ -53,6 +53,10 @@
956 is_ipv6
957 )
958
959+from charmhelpers.core.decorators import (
960+ retry_on_exception,
961+)
962+
963 import nova_cc_context
964
965 TEMPLATES = 'templates/'
966@@ -544,7 +548,7 @@
967 # NOTE(jamespage) default plugin switch to ml2@icehouse
968 ml2_migration()
969
970- if eligible_leader(CLUSTER_RES):
971+ if is_elected_leader(CLUSTER_RES):
972 migrate_nova_database()
973 [service_start(s) for s in services()]
974
975@@ -575,6 +579,9 @@
976 return 'cinder'
977
978
979+# NOTE(jamespage): Retry deals with sync issues during one-shot HA deploys.
980+# mysql might be restarting or suchlike.
981+@retry_on_exception(5, base_delay=3, exc_type=subprocess.CalledProcessError)
982 def migrate_nova_database():
983 '''Runs nova-manage to initialize a new database or migrate existing'''
984 log('Migrating the nova database.', level=INFO)
985@@ -588,6 +595,9 @@
986 cmd_all_services('start')
987
988
989+# NOTE(jamespage): Retry deals with sync issues during one-shot HA deploys.
990+# mysql might be restarting or suchlike.
991+@retry_on_exception(5, base_delay=3, exc_type=subprocess.CalledProcessError)
992 def migrate_neutron_database():
993 '''Runs neutron-db-manage to init a new database or migrate existing'''
994 log('Migrating the neutron database.', level=INFO)
995
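
retry_on_exception() guards both database migrations against transient failures, such as mysql being mid-restart during a one-shot HA deploy. A self-contained sketch of the decorator's effect; the stub below only mirrors the shape of charmhelpers.core.decorators.retry_on_exception, and flaky_migrate stands in for migrate_nova_database():

import time

def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
    # Shape-compatible stub of charmhelpers' retry_on_exception.
    def wrap(f):
        def wrapped(*args, **kwargs):
            retries = num_retries
            while True:
                try:
                    return f(*args, **kwargs)
                except exc_type:
                    if not retries:
                        raise
                    retries -= 1
                    time.sleep(base_delay)
        return wrapped
    return wrap

calls = []

@retry_on_exception(5, base_delay=0, exc_type=RuntimeError)
def flaky_migrate():
    calls.append(1)
    if len(calls) < 3:                 # fail twice, then succeed
        raise RuntimeError('db not ready')
    return 'migrated'

print(flaky_migrate(), 'after', len(calls), 'attempts')  # -> migrated after 3
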
996=== modified file 'unit_tests/test_nova_cc_hooks.py'
997--- unit_tests/test_nova_cc_hooks.py 2015-05-22 14:32:59 +0000
998+++ unit_tests/test_nova_cc_hooks.py 2015-06-04 08:45:15 +0000
999@@ -60,7 +60,7 @@
1000 'volume_service',
1001 'unit_get',
1002 'uuid',
1003- 'eligible_leader',
1004+ 'is_elected_leader',
1005 'keystone_ca_cert_b64',
1006 'neutron_plugin',
1007 'migrate_nova_database',
1008@@ -204,7 +204,7 @@
1009 _util_config.return_value = None
1010 self.is_relation_made.return_value = False
1011 self.network_manager.return_value = 'neutron'
1012- self.eligible_leader = True
1013+ self.is_elected_leader = True
1014 self.keystone_ca_cert_b64.return_value = 'foocert64'
1015 self.volume_service.return_value = 'cinder'
1016 self.unit_get.return_value = 'nova-cc-host1'
1017@@ -242,7 +242,7 @@
1018 napi.return_value = mock_NeutronAPIContext
1019 self.is_relation_made.return_value = True
1020 self.network_manager.return_value = 'neutron'
1021- self.eligible_leader = True
1022+ self.is_elected_leader = True
1023 self.keystone_ca_cert_b64.return_value = 'foocert64'
1024 self.volume_service.return_value = 'cinder'
1025 self.unit_get.return_value = 'nova-cc-host1'
1026
1027=== modified file 'unit_tests/test_nova_cc_utils.py'
1028--- unit_tests/test_nova_cc_utils.py 2015-04-22 19:22:30 +0000
1029+++ unit_tests/test_nova_cc_utils.py 2015-06-04 08:45:15 +0000
1030@@ -20,7 +20,7 @@
1031 'config',
1032 'configure_installation_source',
1033 'disable_policy_rcd',
1034- 'eligible_leader',
1035+ 'is_elected_leader',
1036 'enable_policy_rcd',
1037 'enable_services',
1038 'get_os_codename_install_source',
1039@@ -623,7 +623,7 @@
1040 self.get_os_codename_install_source.side_effect = [
1041 'havana',
1042 'icehouse']
1043- self.eligible_leader.return_value = True
1044+ self.is_elected_leader.return_value = True
1045 self.relation_ids.return_value = []
1046 utils.do_openstack_upgrade()
1047 expected = [call(['stamp', 'grizzly']), call(['upgrade', 'head']),
1048@@ -649,7 +649,7 @@
1049 get_step_upgrade_source.return_value = None
1050 self.os_release.return_value = 'havana'
1051 self.get_os_codename_install_source.return_value = 'icehouse'
1052- self.eligible_leader.return_value = True
1053+ self.is_elected_leader.return_value = True
1054 self.relation_ids.return_value = []
1055 utils.do_openstack_upgrade()
1056 self.neutron_db_manage.assert_called_with(['upgrade', 'head'])
1057@@ -672,7 +672,7 @@
1058 get_step_upgrade_source.return_value = None
1059 self.os_release.return_value = 'icehouse'
1060 self.get_os_codename_install_source.return_value = 'juno'
1061- self.eligible_leader.return_value = True
1062+ self.is_elected_leader.return_value = True
1063 self.relation_ids.return_value = []
1064 utils.do_openstack_upgrade()
1065 neutron_db_calls = [call(['stamp', 'icehouse']),
1066@@ -698,7 +698,7 @@
1067 get_step_upgrade_source.return_value = None
1068 self.os_release.return_value = 'juno'
1069 self.get_os_codename_install_source.return_value = 'kilo'
1070- self.eligible_leader.return_value = True
1071+ self.is_elected_leader.return_value = True
1072 self.relation_ids.return_value = []
1073 utils.do_openstack_upgrade()
1074 self.assertEquals(self.neutron_db_manage.call_count, 0)
