Merge lp:~gnuoy/charms/trusty/cinder-ceph/next-charm-sync into lp:~openstack-charmers-archive/charms/trusty/cinder-ceph/next

Proposed by Liam Young
Status: Merged
Merged at revision: 17
Proposed branch: lp:~gnuoy/charms/trusty/cinder-ceph/next-charm-sync
Merge into: lp:~openstack-charmers-archive/charms/trusty/cinder-ceph/next
Diff against target: 1100 lines (+735/-52)
14 files modified
hooks/charmhelpers/contrib/hahelpers/cluster.py (+55/-13)
hooks/charmhelpers/contrib/network/ip.py (+19/-1)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+13/-7)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+86/-20)
hooks/charmhelpers/contrib/openstack/context.py (+31/-4)
hooks/charmhelpers/contrib/openstack/ip.py (+7/-3)
hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+3/-3)
hooks/charmhelpers/contrib/storage/linux/utils.py (+3/-0)
hooks/charmhelpers/core/host.py (+34/-1)
hooks/charmhelpers/core/services/__init__.py (+2/-0)
hooks/charmhelpers/core/services/base.py (+305/-0)
hooks/charmhelpers/core/services/helpers.py (+125/-0)
hooks/charmhelpers/core/templating.py (+51/-0)
hooks/charmhelpers/fetch/__init__.py (+1/-0)
To merge this branch: bzr merge lp:~gnuoy/charms/trusty/cinder-ceph/next-charm-sync
Reviewer: Liam Young (community)
Review status: Approve
Review via email: mp+230637@code.launchpad.net
Revision history for this message
Liam Young (gnuoy) wrote:

Approved by jamespage

review: Approve

Preview Diff

1=== modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py'
2--- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-07-24 10:26:05 +0000
3+++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-08-13 14:00:05 +0000
4@@ -6,6 +6,11 @@
5 # Adam Gandelman <adamg@ubuntu.com>
6 #
7
8+"""
9+Helpers for determining "cluster leadership" and other
10+clustering-related operations.
11+"""
12+
13 import subprocess
14 import os
15
16@@ -19,6 +24,7 @@
17 config as config_get,
18 INFO,
19 ERROR,
20+ WARNING,
21 unit_get,
22 )
23
24@@ -27,6 +33,29 @@
25 pass
26
27
28+def is_elected_leader(resource):
29+ """
30+ Returns True if the charm executing this is the elected cluster leader.
31+
32+ It relies on two mechanisms to determine leadership:
33+ 1. If the charm is part of a corosync cluster, call corosync to
34+ determine leadership.
35+ 2. If the charm is not part of a corosync cluster, the leader is
36+ determined as being "the alive unit with the lowest unit number". In
37+ other words, the oldest surviving unit.
38+ """
39+ if is_clustered():
40+ if not is_crm_leader(resource):
41+ log('Deferring action to CRM leader.', level=INFO)
42+ return False
43+ else:
44+ peers = peer_units()
45+ if peers and not oldest_peer(peers):
46+ log('Deferring action to oldest service unit.', level=INFO)
47+ return False
48+ return True
49+
50+
51 def is_clustered():
52 for r_id in (relation_ids('ha') or []):
53 for unit in (relation_list(r_id) or []):
54@@ -38,7 +67,11 @@
55 return False
56
57
58-def is_leader(resource):
59+def is_crm_leader(resource):
60+ """
61+ Returns True if the charm calling this is the elected corosync leader,
62+ as returned by calling the external "crm" command.
63+ """
64 cmd = [
65 "crm", "resource",
66 "show", resource
67@@ -54,15 +87,31 @@
68 return False
69
70
71-def peer_units():
72+def is_leader(resource):
73+ log("is_leader is deprecated. Please consider using is_crm_leader "
74+ "instead.", level=WARNING)
75+ return is_crm_leader(resource)
76+
77+
78+def peer_units(peer_relation="cluster"):
79 peers = []
80- for r_id in (relation_ids('cluster') or []):
81+ for r_id in (relation_ids(peer_relation) or []):
82 for unit in (relation_list(r_id) or []):
83 peers.append(unit)
84 return peers
85
86
87+def peer_ips(peer_relation='cluster', addr_key='private-address'):
88+ '''Return a dict of peers and their private-address'''
89+ peers = {}
90+ for r_id in relation_ids(peer_relation):
91+ for unit in relation_list(r_id):
92+ peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
93+ return peers
94+
95+
96 def oldest_peer(peers):
97+ """Determines who the oldest peer is by comparing unit numbers."""
98 local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
99 for peer in peers:
100 remote_unit_no = int(peer.split('/')[1])
101@@ -72,16 +121,9 @@
102
103
104 def eligible_leader(resource):
105- if is_clustered():
106- if not is_leader(resource):
107- log('Deferring action to CRM leader.', level=INFO)
108- return False
109- else:
110- peers = peer_units()
111- if peers and not oldest_peer(peers):
112- log('Deferring action to oldest service unit.', level=INFO)
113- return False
114- return True
115+ log("eligible_leader is deprecated. Please consider using "
116+ "is_elected_leader instead.", level=WARNING)
117+ return is_elected_leader(resource)
118
119
120 def https():
121
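A typical consumer of the new is_elected_leader helper is hook code that must run one-off tasks on exactly one unit. A minimal sketch (the resource name and the leader-only step are hypothetical placeholders; only the helper itself comes from the diff above):

    from charmhelpers.contrib.hahelpers.cluster import is_elected_leader

    # Hypothetical corosync resource name configured via the ha relation.
    RES_NAME = 'res_cinder_vip'

    def migrate_database():
        pass  # hypothetical leader-only task

    def db_changed():
        # Non-leaders return early: either corosync reports another unit
        # holds the resource, or a lower-numbered peer is still alive.
        if not is_elected_leader(RES_NAME):
            return
        migrate_database()
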
122=== modified file 'hooks/charmhelpers/contrib/network/ip.py'
123--- hooks/charmhelpers/contrib/network/ip.py 2014-07-24 10:26:05 +0000
124+++ hooks/charmhelpers/contrib/network/ip.py 2014-08-13 14:00:05 +0000
125@@ -4,7 +4,7 @@
126
127 from charmhelpers.fetch import apt_install
128 from charmhelpers.core.hookenv import (
129- ERROR, log,
130+ ERROR, log, config,
131 )
132
133 try:
134@@ -154,3 +154,21 @@
135 get_iface_for_address = partial(_get_for_address, key='iface')
136
137 get_netmask_for_address = partial(_get_for_address, key='netmask')
138+
139+
140+def get_ipv6_addr(iface="eth0"):
141+ try:
142+ iface_addrs = netifaces.ifaddresses(iface)
143+ if netifaces.AF_INET6 not in iface_addrs:
144+ raise Exception("Interface '%s' doesn't have an ipv6 address." % iface)
145+
146+ addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6]
147+ ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80')
148+ and config('vip') != a['addr']]
149+ if not ipv6_addr:
150+ raise Exception("Interface '%s' doesn't have global ipv6 address." % iface)
151+
152+ return ipv6_addr[0]
153+
154+ except ValueError:
155+ raise ValueError("Invalid interface '%s'" % iface)
156
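The new get_ipv6_addr helper returns the first global IPv6 address on an interface, skipping link-local (fe80) addresses and the configured vip, and raises if no global address is found or the interface is invalid. A usage sketch, assuming a unit with IPv6 connectivity on eth0:

    from charmhelpers.contrib.network.ip import get_ipv6_addr

    try:
        addr = get_ipv6_addr(iface='eth0')
    except Exception:
        # No global IPv6 address on eth0, or the interface is invalid.
        addr = None
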
157=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
158--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-07-02 08:14:53 +0000
159+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-08-13 14:00:05 +0000
160@@ -4,8 +4,11 @@
161
162
163 class OpenStackAmuletDeployment(AmuletDeployment):
164- """This class inherits from AmuletDeployment and has additional support
165- that is specifically for use by OpenStack charms."""
166+ """OpenStack amulet deployment.
167+
168+ This class inherits from AmuletDeployment and has additional support
169+ that is specifically for use by OpenStack charms.
170+ """
171
172 def __init__(self, series=None, openstack=None, source=None):
173 """Initialize the deployment environment."""
174@@ -40,11 +43,14 @@
175 self.d.configure(service, config)
176
177 def _get_openstack_release(self):
178- """Return an integer representing the enum value of the openstack
179- release."""
180- self.precise_essex, self.precise_folsom, self.precise_grizzly, \
181- self.precise_havana, self.precise_icehouse, \
182- self.trusty_icehouse = range(6)
183+ """Get openstack release.
184+
185+ Return an integer representing the enum value of the openstack
186+ release.
187+ """
188+ (self.precise_essex, self.precise_folsom, self.precise_grizzly,
189+ self.precise_havana, self.precise_icehouse,
190+ self.trusty_icehouse) = range(6)
191 releases = {
192 ('precise', None): self.precise_essex,
193 ('precise', 'cloud:precise-folsom'): self.precise_folsom,
194
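Amulet tests typically branch on the release enum that _get_openstack_release() sets up; since the attributes are plain ints (0..5), releases compare with ordinary operators. A sketch inside a hypothetical test class:

    from charmhelpers.contrib.openstack.amulet.deployment import (
        OpenStackAmuletDeployment,
    )

    class MyCharmDeployment(OpenStackAmuletDeployment):
        def run_checks(self):
            if self._get_openstack_release() >= self.precise_icehouse:
                pass  # exercise behaviour that exists from icehouse onwards
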
195=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
196--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-07-02 08:14:53 +0000
197+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-08-13 14:00:05 +0000
198@@ -16,8 +16,11 @@
199
200
201 class OpenStackAmuletUtils(AmuletUtils):
202- """This class inherits from AmuletUtils and has additional support
203- that is specifically for use by OpenStack charms."""
204+ """OpenStack amulet utilities.
205+
206+ This class inherits from AmuletUtils and has additional support
207+ that is specifically for use by OpenStack charms.
208+ """
209
210 def __init__(self, log_level=ERROR):
211 """Initialize the deployment environment."""
212@@ -25,13 +28,17 @@
213
214 def validate_endpoint_data(self, endpoints, admin_port, internal_port,
215 public_port, expected):
216- """Validate actual endpoint data vs expected endpoint data. The ports
217- are used to find the matching endpoint."""
218+ """Validate endpoint data.
219+
220+ Validate actual endpoint data vs expected endpoint data. The ports
221+ are used to find the matching endpoint.
222+ """
223 found = False
224 for ep in endpoints:
225 self.log.debug('endpoint: {}'.format(repr(ep)))
226- if admin_port in ep.adminurl and internal_port in ep.internalurl \
227- and public_port in ep.publicurl:
228+ if (admin_port in ep.adminurl and
229+ internal_port in ep.internalurl and
230+ public_port in ep.publicurl):
231 found = True
232 actual = {'id': ep.id,
233 'region': ep.region,
234@@ -47,8 +54,11 @@
235 return 'endpoint not found'
236
237 def validate_svc_catalog_endpoint_data(self, expected, actual):
238- """Validate a list of actual service catalog endpoints vs a list of
239- expected service catalog endpoints."""
240+ """Validate service catalog endpoint data.
241+
242+ Validate a list of actual service catalog endpoints vs a list of
243+ expected service catalog endpoints.
244+ """
245 self.log.debug('actual: {}'.format(repr(actual)))
246 for k, v in expected.iteritems():
247 if k in actual:
248@@ -60,8 +70,11 @@
249 return ret
250
251 def validate_tenant_data(self, expected, actual):
252- """Validate a list of actual tenant data vs list of expected tenant
253- data."""
254+ """Validate tenant data.
255+
256+ Validate a list of actual tenant data vs list of expected tenant
257+ data.
258+ """
259 self.log.debug('actual: {}'.format(repr(actual)))
260 for e in expected:
261 found = False
262@@ -78,8 +91,11 @@
263 return ret
264
265 def validate_role_data(self, expected, actual):
266- """Validate a list of actual role data vs a list of expected role
267- data."""
268+ """Validate role data.
269+
270+ Validate a list of actual role data vs a list of expected role
271+ data.
272+ """
273 self.log.debug('actual: {}'.format(repr(actual)))
274 for e in expected:
275 found = False
276@@ -95,8 +111,11 @@
277 return ret
278
279 def validate_user_data(self, expected, actual):
280- """Validate a list of actual user data vs a list of expected user
281- data."""
282+ """Validate user data.
283+
284+ Validate a list of actual user data vs a list of expected user
285+ data.
286+ """
287 self.log.debug('actual: {}'.format(repr(actual)))
288 for e in expected:
289 found = False
290@@ -114,21 +133,24 @@
291 return ret
292
293 def validate_flavor_data(self, expected, actual):
294- """Validate a list of actual flavors vs a list of expected flavors."""
295+ """Validate flavor data.
296+
297+ Validate a list of actual flavors vs a list of expected flavors.
298+ """
299 self.log.debug('actual: {}'.format(repr(actual)))
300 act = [a.name for a in actual]
301 return self._validate_list_data(expected, act)
302
303 def tenant_exists(self, keystone, tenant):
304- """Return True if tenant exists"""
305+ """Return True if tenant exists."""
306 return tenant in [t.name for t in keystone.tenants.list()]
307
308 def authenticate_keystone_admin(self, keystone_sentry, user, password,
309 tenant):
310 """Authenticates admin user with the keystone admin endpoint."""
311- service_ip = \
312- keystone_sentry.relation('shared-db',
313- 'mysql:shared-db')['private-address']
314+ unit = keystone_sentry
315+ service_ip = unit.relation('shared-db',
316+ 'mysql:shared-db')['private-address']
317 ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
318 return keystone_client.Client(username=user, password=password,
319 tenant_name=tenant, auth_url=ep)
320@@ -177,12 +199,40 @@
321 image = glance.images.create(name=image_name, is_public=True,
322 disk_format='qcow2',
323 container_format='bare', data=f)
324+ count = 1
325+ status = image.status
326+ while status != 'active' and count < 10:
327+ time.sleep(3)
328+ image = glance.images.get(image.id)
329+ status = image.status
330+ self.log.debug('image status: {}'.format(status))
331+ count += 1
332+
333+ if status != 'active':
334+ self.log.error('image creation timed out')
335+ return None
336+
337 return image
338
339 def delete_image(self, glance, image):
340 """Delete the specified image."""
341+ num_before = len(list(glance.images.list()))
342 glance.images.delete(image)
343
344+ count = 1
345+ num_after = len(list(glance.images.list()))
346+ while num_after != (num_before - 1) and count < 10:
347+ time.sleep(3)
348+ num_after = len(list(glance.images.list()))
349+ self.log.debug('number of images: {}'.format(num_after))
350+ count += 1
351+
352+ if num_after != (num_before - 1):
353+ self.log.error('image deletion timed out')
354+ return False
355+
356+ return True
357+
358 def create_instance(self, nova, image_name, instance_name, flavor):
359 """Create the specified instance."""
360 image = nova.images.find(name=image_name)
361@@ -199,11 +249,27 @@
362 self.log.debug('instance status: {}'.format(status))
363 count += 1
364
365- if status == 'BUILD':
366+ if status != 'ACTIVE':
367+ self.log.error('instance creation timed out')
368 return None
369
370 return instance
371
372 def delete_instance(self, nova, instance):
373 """Delete the specified instance."""
374+ num_before = len(list(nova.servers.list()))
375 nova.servers.delete(instance)
376+
377+ count = 1
378+ num_after = len(list(nova.servers.list()))
379+ while num_after != (num_before - 1) and count < 10:
380+ time.sleep(3)
381+ num_after = len(list(nova.servers.list()))
382+ self.log.debug('number of instances: {}'.format(num_after))
383+ count += 1
384+
385+ if num_after != (num_before - 1):
386+ self.log.error('instance deletion timed out')
387+ return False
388+
389+ return True
390
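All of the retry loops added above share one poll-with-timeout shape: check, sleep three seconds, retry up to ten times, then report failure. A generic version of that pattern, as a hypothetical helper that is not part of this diff:

    import time

    def wait_for(check, attempts=10, interval=3):
        """Poll check() until it returns truthy or attempts run out."""
        for _ in range(attempts):
            if check():
                return True
            time.sleep(interval)
        return False

    # e.g. wait_for(lambda: glance.images.get(image.id).status == 'active')
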
391=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
392--- hooks/charmhelpers/contrib/openstack/context.py 2014-07-15 15:41:42 +0000
393+++ hooks/charmhelpers/contrib/openstack/context.py 2014-08-13 14:00:05 +0000
394@@ -44,7 +44,10 @@
395 neutron_plugin_attribute,
396 )
397
398-from charmhelpers.contrib.network.ip import get_address_in_network
399+from charmhelpers.contrib.network.ip import (
400+ get_address_in_network,
401+ get_ipv6_addr,
402+)
403
404 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
405
406@@ -401,9 +404,12 @@
407
408 cluster_hosts = {}
409 l_unit = local_unit().replace('/', '-')
410- cluster_hosts[l_unit] = \
411- get_address_in_network(config('os-internal-network'),
412- unit_get('private-address'))
413+ if config('prefer-ipv6'):
414+ addr = get_ipv6_addr()
415+ else:
416+ addr = unit_get('private-address')
417+ cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'),
418+ addr)
419
420 for rid in relation_ids('cluster'):
421 for unit in related_units(rid):
422@@ -414,6 +420,16 @@
423 ctxt = {
424 'units': cluster_hosts,
425 }
426+
427+ if config('prefer-ipv6'):
428+ ctxt['local_host'] = 'ip6-localhost'
429+ ctxt['haproxy_host'] = '::'
430+ ctxt['stat_port'] = ':::8888'
431+ else:
432+ ctxt['local_host'] = '127.0.0.1'
433+ ctxt['haproxy_host'] = '0.0.0.0'
434+ ctxt['stat_port'] = ':8888'
435+
436 if len(cluster_hosts.keys()) > 1:
437 # Enable haproxy when we have enough peers.
438 log('Ensuring haproxy enabled in /etc/default/haproxy.')
439@@ -753,6 +769,17 @@
440 return ctxt
441
442
443+class LogLevelContext(OSContextGenerator):
444+
445+ def __call__(self):
446+ ctxt = {}
447+ ctxt['debug'] = \
448+ False if config('debug') is None else config('debug')
449+ ctxt['verbose'] = \
450+ False if config('verbose') is None else config('verbose')
451+ return ctxt
452+
453+
454 class SyslogContext(OSContextGenerator):
455
456 def __call__(self):
457
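The new prefer-ipv6 branch in the cluster context swaps three haproxy-facing values; the resulting context entries, taken directly from the code above, are:

    # prefer-ipv6 unset or false:
    ipv4_ctxt = {'local_host': '127.0.0.1',
                 'haproxy_host': '0.0.0.0',
                 'stat_port': ':8888'}
    # prefer-ipv6 true:
    ipv6_ctxt = {'local_host': 'ip6-localhost',
                 'haproxy_host': '::',
                 'stat_port': ':::8888'}
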
458=== modified file 'hooks/charmhelpers/contrib/openstack/ip.py'
459--- hooks/charmhelpers/contrib/openstack/ip.py 2014-07-04 11:30:54 +0000
460+++ hooks/charmhelpers/contrib/openstack/ip.py 2014-08-13 14:00:05 +0000
461@@ -7,6 +7,7 @@
462 get_address_in_network,
463 is_address_in_network,
464 is_ipv6,
465+ get_ipv6_addr,
466 )
467
468 from charmhelpers.contrib.hahelpers.cluster import is_clustered
469@@ -64,10 +65,13 @@
470 vip):
471 resolved_address = vip
472 else:
473+ if config('prefer-ipv6'):
474+ fallback_addr = get_ipv6_addr()
475+ else:
476+ fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
477 resolved_address = get_address_in_network(
478- config(_address_map[endpoint_type]['config']),
479- unit_get(_address_map[endpoint_type]['fallback'])
480- )
481+ config(_address_map[endpoint_type]['config']), fallback_addr)
482+
483 if resolved_address is None:
484 raise ValueError('Unable to resolve a suitable IP address'
485 ' based on charm state and configuration')
486
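With this change, the fallback address fed into get_address_in_network becomes the unit's global IPv6 address whenever prefer-ipv6 is set. The surrounding function is charm-helpers' resolve_address, so callers look roughly like the sketch below (PUBLIC is the endpoint-type constant defined in the same module):

    from charmhelpers.contrib.openstack.ip import resolve_address, PUBLIC

    # vip when clustered, otherwise an address in os-public-network,
    # falling back to the unit's public address or, under prefer-ipv6,
    # its get_ipv6_addr() result.
    public_addr = resolve_address(endpoint_type=PUBLIC)
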
487=== modified file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg'
488--- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-07-04 11:30:54 +0000
489+++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-08-13 14:00:05 +0000
490@@ -1,6 +1,6 @@
491 global
492- log 127.0.0.1 local0
493- log 127.0.0.1 local1 notice
494+ log {{ local_host }} local0
495+ log {{ local_host }} local1 notice
496 maxconn 20000
497 user haproxy
498 group haproxy
499@@ -17,7 +17,7 @@
500 timeout client 30000
501 timeout server 30000
502
503-listen stats :8888
504+listen stats {{ stat_port }}
505 mode http
506 stats enable
507 stats hide-version
508
509=== modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py'
510--- hooks/charmhelpers/contrib/storage/linux/utils.py 2014-06-27 13:50:04 +0000
511+++ hooks/charmhelpers/contrib/storage/linux/utils.py 2014-08-13 14:00:05 +0000
512@@ -46,5 +46,8 @@
513 :returns: boolean: True if the path represents a mounted device, False if
514 it doesn't.
515 '''
516+ is_partition = bool(re.search(r".*[0-9]+\b", device))
517 out = check_output(['mount'])
518+ if is_partition:
519+ return bool(re.search(device + r"\b", out))
520 return bool(re.search(device + r"[0-9]+\b", out))
521
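The added is_partition test changes how a device string is matched against mount output: names that already end in a digit are matched verbatim, while whole-disk names still match any of their partitions. Illustrative calls (results naturally depend on what is actually mounted):

    from charmhelpers.contrib.storage.linux.utils import is_device_mounted

    is_device_mounted('/dev/sdb')   # True if any /dev/sdb<N> is mounted
    is_device_mounted('/dev/sdb1')  # True only if /dev/sdb1 itself is mounted
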
522=== modified file 'hooks/charmhelpers/core/host.py'
523--- hooks/charmhelpers/core/host.py 2014-07-24 10:26:05 +0000
524+++ hooks/charmhelpers/core/host.py 2014-08-13 14:00:05 +0000
525@@ -12,6 +12,8 @@
526 import string
527 import subprocess
528 import hashlib
529+import shutil
530+from contextlib import contextmanager
531
532 from collections import OrderedDict
533
534@@ -52,7 +54,7 @@
535 def service_running(service):
536 """Determine whether a system service is running"""
537 try:
538- output = subprocess.check_output(['service', service, 'status'])
539+ output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT)
540 except subprocess.CalledProcessError:
541 return False
542 else:
543@@ -62,6 +64,16 @@
544 return False
545
546
547+def service_available(service_name):
548+ """Determine whether a system service is available"""
549+ try:
550+ subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
551+ except subprocess.CalledProcessError:
552+ return False
553+ else:
554+ return True
555+
556+
557 def adduser(username, password=None, shell='/bin/bash', system_user=False):
558 """Add a user to the system"""
559 try:
560@@ -329,3 +341,24 @@
561 pkgcache = apt_pkg.Cache()
562 pkg = pkgcache[package]
563 return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
564+
565+
566+@contextmanager
567+def chdir(d):
568+ cur = os.getcwd()
569+ try:
570+ yield os.chdir(d)
571+ finally:
572+ os.chdir(cur)
573+
574+
575+def chownr(path, owner, group):
576+ uid = pwd.getpwnam(owner).pw_uid
577+ gid = grp.getgrnam(group).gr_gid
578+
579+ for root, dirs, files in os.walk(path):
580+ for name in dirs + files:
581+ full = os.path.join(root, name)
582+ broken_symlink = os.path.lexists(full) and not os.path.exists(full)
583+ if not broken_symlink:
584+ os.chown(full, uid, gid)
585
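The new chdir context manager restores the working directory even if the block raises, and chownr recursively chowns a tree while skipping broken symlinks. A short sketch (the path and ownership are hypothetical):

    import os
    from charmhelpers.core.host import chdir, chownr

    with chdir('/var/lib/mycharm'):
        print os.getcwd()         # /var/lib/mycharm
    # previous working directory restored here, even on exceptions

    chownr('/var/lib/mycharm', owner='ubuntu', group='ubuntu')
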
586=== added directory 'hooks/charmhelpers/core/services'
587=== added file 'hooks/charmhelpers/core/services/__init__.py'
588--- hooks/charmhelpers/core/services/__init__.py 1970-01-01 00:00:00 +0000
589+++ hooks/charmhelpers/core/services/__init__.py 2014-08-13 14:00:05 +0000
590@@ -0,0 +1,2 @@
591+from .base import *
592+from .helpers import *
593
594=== added file 'hooks/charmhelpers/core/services/base.py'
595--- hooks/charmhelpers/core/services/base.py 1970-01-01 00:00:00 +0000
596+++ hooks/charmhelpers/core/services/base.py 2014-08-13 14:00:05 +0000
597@@ -0,0 +1,305 @@
598+import os
599+import re
600+import json
601+from collections import Iterable
602+
603+from charmhelpers.core import host
604+from charmhelpers.core import hookenv
605+
606+
607+__all__ = ['ServiceManager', 'ManagerCallback',
608+ 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
609+ 'service_restart', 'service_stop']
610+
611+
612+class ServiceManager(object):
613+ def __init__(self, services=None):
614+ """
615+ Register a list of services, given their definitions.
616+
617+ Traditional charm authoring is focused on implementing hooks. That is,
618+ the charm author is thinking in terms of "What hook am I handling; what
619+ does this hook need to do?" However, in most cases, the real question
620+ should be "Do I have the information I need to configure and start this
621+ piece of software and, if so, what are the steps for doing so?" The
622+ ServiceManager framework tries to bring the focus to the data and the
623+ setup tasks, in the most declarative way possible.
624+
625+ Service definitions are dicts in the following formats (all keys except
626+ 'service' are optional)::
627+
628+ {
629+ "service": <service name>,
630+ "required_data": <list of required data contexts>,
631+ "data_ready": <one or more callbacks>,
632+ "data_lost": <one or more callbacks>,
633+ "start": <one or more callbacks>,
634+ "stop": <one or more callbacks>,
635+ "ports": <list of ports to manage>,
636+ }
637+
638+ The 'required_data' list should contain dicts of required data (or
639+ dependency managers that act like dicts and know how to collect the data).
640+ Only when all items in the 'required_data' list are populated are the list
641+ of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
642+ information.
643+
644+ The 'data_ready' value should be either a single callback, or a list of
645+ callbacks, to be called when all items in 'required_data' pass `is_ready()`.
646+ Each callback will be called with the service name as the only parameter.
647+ After all of the 'data_ready' callbacks are called, the 'start' callbacks
648+ are fired.
649+
650+ The 'data_lost' value should be either a single callback, or a list of
651+ callbacks, to be called when a 'required_data' item no longer passes
652+ `is_ready()`. Each callback will be called with the service name as the
653+ only parameter. After all of the 'data_lost' callbacks are called,
654+ the 'stop' callbacks are fired.
655+
656+ The 'start' value should be either a single callback, or a list of
657+ callbacks, to be called when starting the service, after the 'data_ready'
658+ callbacks are complete. Each callback will be called with the service
659+ name as the only parameter. This defaults to
660+ `[host.service_start, services.open_ports]`.
661+
662+ The 'stop' value should be either a single callback, or a list of
663+ callbacks, to be called when stopping the service. If the service is
664+ being stopped because it no longer has all of its 'required_data', this
665+ will be called after all of the 'data_lost' callbacks are complete.
666+ Each callback will be called with the service name as the only parameter.
667+ This defaults to `[services.close_ports, host.service_stop]`.
668+
669+ The 'ports' value should be a list of ports to manage. The default
670+ 'start' handler will open the ports after the service is started,
671+ and the default 'stop' handler will close the ports prior to stopping
672+ the service.
673+
674+
675+ Examples:
676+
677+ The following registers an Upstart service called bingod that depends on
678+ a mongodb relation and which runs a custom `db_migrate` function prior to
679+ restarting the service, and a Runit service called spadesd::
680+
681+ manager = services.ServiceManager([
682+ {
683+ 'service': 'bingod',
684+ 'ports': [80, 443],
685+ 'required_data': [MongoRelation(), config(), {'my': 'data'}],
686+ 'data_ready': [
687+ services.template(source='bingod.conf'),
688+ services.template(source='bingod.ini',
689+ target='/etc/bingod.ini',
690+ owner='bingo', perms=0400),
691+ ],
692+ },
693+ {
694+ 'service': 'spadesd',
695+ 'data_ready': services.template(source='spadesd_run.j2',
696+ target='/etc/sv/spadesd/run',
697+ perms=0555),
698+ 'start': runit_start,
699+ 'stop': runit_stop,
700+ },
701+ ])
702+ manager.manage()
703+ """
704+ self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
705+ self._ready = None
706+ self.services = {}
707+ for service in services or []:
708+ service_name = service['service']
709+ self.services[service_name] = service
710+
711+ def manage(self):
712+ """
713+ Handle the current hook by doing The Right Thing with the registered services.
714+ """
715+ hook_name = hookenv.hook_name()
716+ if hook_name == 'stop':
717+ self.stop_services()
718+ else:
719+ self.provide_data()
720+ self.reconfigure_services()
721+
722+ def provide_data(self):
723+ hook_name = hookenv.hook_name()
724+ for service in self.services.values():
725+ for provider in service.get('provided_data', []):
726+ if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name):
727+ data = provider.provide_data()
728+ if provider._is_ready(data):
729+ hookenv.relation_set(None, data)
730+
731+ def reconfigure_services(self, *service_names):
732+ """
733+ Update all files for one or more registered services, and,
734+ if ready, optionally restart them.
735+
736+ If no service names are given, reconfigures all registered services.
737+ """
738+ for service_name in service_names or self.services.keys():
739+ if self.is_ready(service_name):
740+ self.fire_event('data_ready', service_name)
741+ self.fire_event('start', service_name, default=[
742+ service_restart,
743+ manage_ports])
744+ self.save_ready(service_name)
745+ else:
746+ if self.was_ready(service_name):
747+ self.fire_event('data_lost', service_name)
748+ self.fire_event('stop', service_name, default=[
749+ manage_ports,
750+ service_stop])
751+ self.save_lost(service_name)
752+
753+ def stop_services(self, *service_names):
754+ """
755+ Stop one or more registered services, by name.
756+
757+ If no service names are given, stops all registered services.
758+ """
759+ for service_name in service_names or self.services.keys():
760+ self.fire_event('stop', service_name, default=[
761+ manage_ports,
762+ service_stop])
763+
764+ def get_service(self, service_name):
765+ """
766+ Given the name of a registered service, return its service definition.
767+ """
768+ service = self.services.get(service_name)
769+ if not service:
770+ raise KeyError('Service not registered: %s' % service_name)
771+ return service
772+
773+ def fire_event(self, event_name, service_name, default=None):
774+ """
775+ Fire a data_ready, data_lost, start, or stop event on a given service.
776+ """
777+ service = self.get_service(service_name)
778+ callbacks = service.get(event_name, default)
779+ if not callbacks:
780+ return
781+ if not isinstance(callbacks, Iterable):
782+ callbacks = [callbacks]
783+ for callback in callbacks:
784+ if isinstance(callback, ManagerCallback):
785+ callback(self, service_name, event_name)
786+ else:
787+ callback(service_name)
788+
789+ def is_ready(self, service_name):
790+ """
791+ Determine if a registered service is ready, by checking its 'required_data'.
792+
793+ A 'required_data' item can be any mapping type, and is considered ready
794+ if `bool(item)` evaluates as True.
795+ """
796+ service = self.get_service(service_name)
797+ reqs = service.get('required_data', [])
798+ return all(bool(req) for req in reqs)
799+
800+ def _load_ready_file(self):
801+ if self._ready is not None:
802+ return
803+ if os.path.exists(self._ready_file):
804+ with open(self._ready_file) as fp:
805+ self._ready = set(json.load(fp))
806+ else:
807+ self._ready = set()
808+
809+ def _save_ready_file(self):
810+ if self._ready is None:
811+ return
812+ with open(self._ready_file, 'w') as fp:
813+ json.dump(list(self._ready), fp)
814+
815+ def save_ready(self, service_name):
816+ """
817+ Save an indicator that the given service is now data_ready.
818+ """
819+ self._load_ready_file()
820+ self._ready.add(service_name)
821+ self._save_ready_file()
822+
823+ def save_lost(self, service_name):
824+ """
825+ Save an indicator that the given service is no longer data_ready.
826+ """
827+ self._load_ready_file()
828+ self._ready.discard(service_name)
829+ self._save_ready_file()
830+
831+ def was_ready(self, service_name):
832+ """
833+ Determine if the given service was previously data_ready.
834+ """
835+ self._load_ready_file()
836+ return service_name in self._ready
837+
838+
839+class ManagerCallback(object):
840+ """
841+ Special case of a callback that takes the `ServiceManager` instance
842+ in addition to the service name.
843+
844+ Subclasses should implement `__call__` which should accept three parameters:
845+
846+ * `manager` The `ServiceManager` instance
847+ * `service_name` The name of the service it's being triggered for
848+ * `event_name` The name of the event that this callback is handling
849+ """
850+ def __call__(self, manager, service_name, event_name):
851+ raise NotImplementedError()
852+
853+
854+class PortManagerCallback(ManagerCallback):
855+ """
856+ Callback class that will open or close ports, for use as either
857+ a start or stop action.
858+ """
859+ def __call__(self, manager, service_name, event_name):
860+ service = manager.get_service(service_name)
861+ new_ports = service.get('ports', [])
862+ port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
863+ if os.path.exists(port_file):
864+ with open(port_file) as fp:
865+ old_ports = fp.read().split(',')
866+ for old_port in old_ports:
867+ if bool(old_port):
868+ old_port = int(old_port)
869+ if old_port not in new_ports:
870+ hookenv.close_port(old_port)
871+ with open(port_file, 'w') as fp:
872+ fp.write(','.join(str(port) for port in new_ports))
873+ for port in new_ports:
874+ if event_name == 'start':
875+ hookenv.open_port(port)
876+ elif event_name == 'stop':
877+ hookenv.close_port(port)
878+
879+
880+def service_stop(service_name):
881+ """
882+ Wrapper around host.service_stop to prevent spurious "unknown service"
883+ messages in the logs.
884+ """
885+ if host.service_running(service_name):
886+ host.service_stop(service_name)
887+
888+
889+def service_restart(service_name):
890+ """
891+ Wrapper around host.service_restart to prevent spurious "unknown service"
892+ messages in the logs.
893+ """
894+ if host.service_available(service_name):
895+ if host.service_running(service_name):
896+ host.service_restart(service_name)
897+ else:
898+ host.service_start(service_name)
899+
900+
901+# Convenience aliases
902+open_ports = close_ports = manage_ports = PortManagerCallback()
903
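In practice every hook in a charm using this framework is a symlink to one script that builds a ServiceManager and calls manage(); the framework then dispatches on hookenv.hook_name(). A minimal sketch (service name, port, and template are hypothetical):

    #!/usr/bin/env python
    from charmhelpers.core import services

    manager = services.ServiceManager([
        {
            'service': 'myservice',
            'ports': [8080],
            'required_data': [{'my': 'data'}],
            'data_ready': services.template(source='myservice.conf',
                                            target='/etc/myservice.conf'),
        },
    ])

    if __name__ == '__main__':
        manager.manage()
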
904=== added file 'hooks/charmhelpers/core/services/helpers.py'
905--- hooks/charmhelpers/core/services/helpers.py 1970-01-01 00:00:00 +0000
906+++ hooks/charmhelpers/core/services/helpers.py 2014-08-13 14:00:05 +0000
907@@ -0,0 +1,125 @@
908+from charmhelpers.core import hookenv
909+from charmhelpers.core import templating
910+
911+from charmhelpers.core.services.base import ManagerCallback
912+
913+
914+__all__ = ['RelationContext', 'TemplateCallback',
915+ 'render_template', 'template']
916+
917+
918+class RelationContext(dict):
919+ """
920+ Base class for a context generator that gets relation data from juju.
921+
922+ Subclasses must provide the attributes `name`, which is the name of the
923+ interface of interest, `interface`, which is the type of the interface of
924+ interest, and `required_keys`, which is the set of keys required for the
925+ relation to be considered complete. The data for all interfaces matching
926+ the `name` attribute that are complete will be used to populate the dictionary
927+ values (see `get_data`, below).
928+
929+ The generated context will be namespaced under the interface type, to prevent
930+ potential naming conflicts.
931+ """
932+ name = None
933+ interface = None
934+ required_keys = []
935+
936+ def __init__(self, *args, **kwargs):
937+ super(RelationContext, self).__init__(*args, **kwargs)
938+ self.get_data()
939+
940+ def __bool__(self):
941+ """
942+ Returns True if all of the required_keys are available.
943+ """
944+ return self.is_ready()
945+
946+ __nonzero__ = __bool__
947+
948+ def __repr__(self):
949+ return super(RelationContext, self).__repr__()
950+
951+ def is_ready(self):
952+ """
953+ Returns True if all of the `required_keys` are available from any units.
954+ """
955+ ready = len(self.get(self.name, [])) > 0
956+ if not ready:
957+ hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
958+ return ready
959+
960+ def _is_ready(self, unit_data):
961+ """
962+ Helper method that tests a set of relation data and returns True if
963+ all of the `required_keys` are present.
964+ """
965+ return set(unit_data.keys()).issuperset(set(self.required_keys))
966+
967+ def get_data(self):
968+ """
969+ Retrieve the relation data for each unit involved in a relation and,
970+ if complete, store it in a list under `self[self.name]`. This
971+ is automatically called when the RelationContext is instantiated.
972+
973+ The units are sorted lexicographically first by the service ID, then by
974+ the unit ID. Thus, if an interface has two other services, 'db:1'
975+ and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
976+ and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
977+ set of data, the relation data for the units will be stored in the
978+ order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
979+
980+ If you only care about a single unit on the relation, you can just
981+ access it as `{{ interface[0]['key'] }}`. However, if you can at all
982+ support multiple units on a relation, you should iterate over the list,
983+ like::
984+
985+ {% for unit in interface -%}
986+ {{ unit['key'] }}{% if not loop.last %},{% endif %}
987+ {%- endfor %}
988+
989+ Note that since all sets of relation data from all related services and
990+ units are in a single list, if you need to know which service or unit a
991+ set of data came from, you'll need to extend this class to preserve
992+ that information.
993+ """
994+ if not hookenv.relation_ids(self.name):
995+ return
996+
997+ ns = self.setdefault(self.name, [])
998+ for rid in sorted(hookenv.relation_ids(self.name)):
999+ for unit in sorted(hookenv.related_units(rid)):
1000+ reldata = hookenv.relation_get(rid=rid, unit=unit)
1001+ if self._is_ready(reldata):
1002+ ns.append(reldata)
1003+
1004+ def provide_data(self):
1005+ """
1006+ Return data to be relation_set for this interface.
1007+ """
1008+ return {}
1009+
1010+
1011+class TemplateCallback(ManagerCallback):
1012+ """
1013+ Callback class that will render a template, for use as a ready action.
1014+ """
1015+ def __init__(self, source, target, owner='root', group='root', perms=0444):
1016+ self.source = source
1017+ self.target = target
1018+ self.owner = owner
1019+ self.group = group
1020+ self.perms = perms
1021+
1022+ def __call__(self, manager, service_name, event_name):
1023+ service = manager.get_service(service_name)
1024+ context = {}
1025+ for ctx in service.get('required_data', []):
1026+ context.update(ctx)
1027+ templating.render(self.source, self.target, context,
1028+ self.owner, self.group, self.perms)
1029+
1030+
1031+# Convenience aliases for templates
1032+render_template = template = TemplateCallback
1033
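A concrete RelationContext is just a subclass that pins down the three class attributes; the MongoRelation referenced in the ServiceManager example earlier might look like this sketch (interface details hypothetical):

    from charmhelpers.core.services.helpers import RelationContext

    class MongoRelation(RelationContext):
        name = 'database'              # relation name from metadata.yaml
        interface = 'mongodb'
        required_keys = ['host', 'port']

    # bool(MongoRelation()) is True once some remote unit has set both
    # keys; the data is then available as ctx['database'][0]['host'], etc.
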
1034=== added file 'hooks/charmhelpers/core/templating.py'
1035--- hooks/charmhelpers/core/templating.py 1970-01-01 00:00:00 +0000
1036+++ hooks/charmhelpers/core/templating.py 2014-08-13 14:00:05 +0000
1037@@ -0,0 +1,51 @@
1038+import os
1039+
1040+from charmhelpers.core import host
1041+from charmhelpers.core import hookenv
1042+
1043+
1044+def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None):
1045+ """
1046+ Render a template.
1047+
1048+ The `source` path, if not absolute, is relative to the `templates_dir`.
1049+
1050+ The `target` path should be absolute.
1051+
1052+ The context should be a dict containing the values to be replaced in the
1053+ template.
1054+
1055+ The `owner`, `group`, and `perms` options will be passed to `write_file`.
1056+
1057+ If omitted, `templates_dir` defaults to the `templates` folder in the charm.
1058+
1059+ Note: Using this requires python-jinja2; if it is not installed, calling
1060+ this will attempt to use charmhelpers.fetch.apt_install to install it.
1061+ """
1062+ try:
1063+ from jinja2 import FileSystemLoader, Environment, exceptions
1064+ except ImportError:
1065+ try:
1066+ from charmhelpers.fetch import apt_install
1067+ except ImportError:
1068+ hookenv.log('Could not import jinja2, and could not import '
1069+ 'charmhelpers.fetch to install it',
1070+ level=hookenv.ERROR)
1071+ raise
1072+ apt_install('python-jinja2', fatal=True)
1073+ from jinja2 import FileSystemLoader, Environment, exceptions
1074+
1075+ if templates_dir is None:
1076+ templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
1077+ loader = Environment(loader=FileSystemLoader(templates_dir))
1078+ try:
1079+ source = source
1080+ template = loader.get_template(source)
1081+ except exceptions.TemplateNotFound as e:
1082+ hookenv.log('Could not load template %s from %s.' %
1083+ (source, templates_dir),
1084+ level=hookenv.ERROR)
1085+ raise e
1086+ content = template.render(context)
1087+ host.mkdir(os.path.dirname(target))
1088+ host.write_file(target, content, owner, group, perms)
1089
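Typical usage of the new render helper, assuming a Jinja2 template shipped in the charm's templates directory (file names and context keys below are hypothetical):

    from charmhelpers.core.templating import render

    render(source='myservice.conf',             # templates/myservice.conf
           target='/etc/myservice/myservice.conf',
           context={'listen_port': 8080},
           owner='root', group='root', perms=0444)
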
1090=== modified file 'hooks/charmhelpers/fetch/__init__.py'
1091--- hooks/charmhelpers/fetch/__init__.py 2014-06-27 13:50:04 +0000
1092+++ hooks/charmhelpers/fetch/__init__.py 2014-08-13 14:00:05 +0000
1093@@ -122,6 +122,7 @@
1094 # Tell apt to build an in-memory cache to prevent race conditions (if
1095 # another process is already building the cache).
1096 apt_pkg.config.set("Dir::Cache::pkgcache", "")
1097+ apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
1098
1099 cache = apt_pkg.Cache()
1100 _pkgs = []
