Merge lp:~gnuoy/charms/trusty/nova-compute/next-charm-sync into lp:~openstack-charmers-archive/charms/trusty/nova-compute/next

Proposed by Liam Young
Status: Merged
Merged at revision: 77
Proposed branch: lp:~gnuoy/charms/trusty/nova-compute/next-charm-sync
Merge into: lp:~openstack-charmers-archive/charms/trusty/nova-compute/next
Diff against target: 840 lines (+616/-25)
12 files modified
hooks/charmhelpers/contrib/hahelpers/cluster.py (+46/-13)
hooks/charmhelpers/contrib/network/ip.py (+19/-1)
hooks/charmhelpers/contrib/openstack/context.py (+20/-4)
hooks/charmhelpers/contrib/openstack/ip.py (+7/-3)
hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+3/-3)
hooks/charmhelpers/contrib/storage/linux/utils.py (+3/-0)
hooks/charmhelpers/core/host.py (+34/-1)
hooks/charmhelpers/core/services/__init__.py (+2/-0)
hooks/charmhelpers/core/services/base.py (+305/-0)
hooks/charmhelpers/core/services/helpers.py (+125/-0)
hooks/charmhelpers/core/templating.py (+51/-0)
hooks/charmhelpers/fetch/__init__.py (+1/-0)
To merge this branch: bzr merge lp:~gnuoy/charms/trusty/nova-compute/next-charm-sync
Reviewer: Liam Young (community)
Review status: Approve
Review via email: mp+230628@code.launchpad.net
Liam Young (gnuoy) wrote:

Approved by jamespage

review: Approve

Preview Diff

=== modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py'
--- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-07-30 15:16:25 +0000
+++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-08-13 13:54:38 +0000
@@ -6,6 +6,11 @@
 #  Adam Gandelman <adamg@ubuntu.com>
 #
 
+"""
+Helpers for clustering and determining "cluster leadership" and other
+clustering-related helpers.
+"""
+
 import subprocess
 import os
 
@@ -19,6 +24,7 @@
     config as config_get,
     INFO,
     ERROR,
+    WARNING,
     unit_get,
 )
 
@@ -27,6 +33,29 @@
     pass
 
 
+def is_elected_leader(resource):
+    """
+    Returns True if the charm executing this is the elected cluster leader.
+
+    It relies on two mechanisms to determine leadership:
+    1. If the charm is part of a corosync cluster, call corosync to
+       determine leadership.
+    2. If the charm is not part of a corosync cluster, the leader is
+       determined as being "the alive unit with the lowest unit number". In
+       other words, the oldest surviving unit.
+    """
+    if is_clustered():
+        if not is_crm_leader(resource):
+            log('Deferring action to CRM leader.', level=INFO)
+            return False
+    else:
+        peers = peer_units()
+        if peers and not oldest_peer(peers):
+            log('Deferring action to oldest service unit.', level=INFO)
+            return False
+    return True
+
+
 def is_clustered():
     for r_id in (relation_ids('ha') or []):
         for unit in (relation_list(r_id) or []):
@@ -38,7 +67,11 @@
     return False
 
 
-def is_leader(resource):
+def is_crm_leader(resource):
+    """
+    Returns True if the charm calling this is the elected corosync leader,
+    as returned by calling the external "crm" command.
+    """
     cmd = [
         "crm", "resource",
         "show", resource
@@ -54,9 +87,15 @@
         return False
 
 
-def peer_units():
+def is_leader(resource):
+    log("is_leader is deprecated. Please consider using is_crm_leader "
+        "instead.", level=WARNING)
+    return is_crm_leader(resource)
+
+
+def peer_units(peer_relation="cluster"):
     peers = []
-    for r_id in (relation_ids('cluster') or []):
+    for r_id in (relation_ids(peer_relation) or []):
         for unit in (relation_list(r_id) or []):
             peers.append(unit)
     return peers
@@ -72,6 +111,7 @@
 
 
 def oldest_peer(peers):
+    """Determines who the oldest peer is by comparing unit numbers."""
     local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
     for peer in peers:
         remote_unit_no = int(peer.split('/')[1])
@@ -81,16 +121,9 @@
 
 
 def eligible_leader(resource):
-    if is_clustered():
-        if not is_leader(resource):
-            log('Deferring action to CRM leader.', level=INFO)
-            return False
-    else:
-        peers = peer_units()
-        if peers and not oldest_peer(peers):
-            log('Deferring action to oldest service unit.', level=INFO)
-            return False
-    return True
+    log("eligible_leader is deprecated. Please consider using "
+        "is_elected_leader instead.", level=WARNING)
+    return is_elected_leader(resource)
 
 
 def https():

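As a usage sketch, a charm hook might gate one-off work on the new helper like this (the resource name and the migrate step are hypothetical, not part of this merge):

    from charmhelpers.contrib.hahelpers.cluster import is_elected_leader

    def db_changed():
        # Only the elected leader (the CRM leader if clustered, otherwise
        # the oldest surviving peer) performs one-off tasks.
        if is_elected_leader('res_nova_vip'):
            migrate_database()  # hypothetical one-off task
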
=== modified file 'hooks/charmhelpers/contrib/network/ip.py'
--- hooks/charmhelpers/contrib/network/ip.py 2014-07-28 12:06:56 +0000
+++ hooks/charmhelpers/contrib/network/ip.py 2014-08-13 13:54:38 +0000
@@ -4,7 +4,7 @@
 
 from charmhelpers.fetch import apt_install
 from charmhelpers.core.hookenv import (
-    ERROR, log,
+    ERROR, log, config,
 )
 
 try:
@@ -154,3 +154,21 @@
 get_iface_for_address = partial(_get_for_address, key='iface')
 
 get_netmask_for_address = partial(_get_for_address, key='netmask')
+
+
+def get_ipv6_addr(iface="eth0"):
+    try:
+        iface_addrs = netifaces.ifaddresses(iface)
+        if netifaces.AF_INET6 not in iface_addrs:
+            raise Exception("Interface '%s' doesn't have an ipv6 address." % iface)
+
+        addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6]
+        ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80')
+                     and config('vip') != a['addr']]
+        if not ipv6_addr:
+            raise Exception("Interface '%s' doesn't have global ipv6 address." % iface)
+
+        return ipv6_addr[0]
+
+    except ValueError:
+        raise ValueError("Invalid interface '%s'" % iface)

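A quick sketch of the new helper in use (the address shown is illustrative):

    from charmhelpers.contrib.network.ip import get_ipv6_addr

    # Returns the first global IPv6 address on the interface, skipping
    # link-local ('fe80...') addresses and the configured VIP; raises
    # if the interface has no usable IPv6 address.
    addr = get_ipv6_addr(iface='eth0')  # e.g. '2001:db8::10'
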
=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
--- hooks/charmhelpers/contrib/openstack/context.py 2014-07-28 14:38:51 +0000
+++ hooks/charmhelpers/contrib/openstack/context.py 2014-08-13 13:54:38 +0000
@@ -44,7 +44,10 @@
     neutron_plugin_attribute,
 )
 
-from charmhelpers.contrib.network.ip import get_address_in_network
+from charmhelpers.contrib.network.ip import (
+    get_address_in_network,
+    get_ipv6_addr,
+)
 
 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
 
@@ -401,9 +404,12 @@
 
         cluster_hosts = {}
         l_unit = local_unit().replace('/', '-')
-        cluster_hosts[l_unit] = \
-            get_address_in_network(config('os-internal-network'),
-                                   unit_get('private-address'))
+        if config('prefer-ipv6'):
+            addr = get_ipv6_addr()
+        else:
+            addr = unit_get('private-address')
+        cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'),
+                                                       addr)
 
         for rid in relation_ids('cluster'):
             for unit in related_units(rid):
@@ -414,6 +420,16 @@
         ctxt = {
             'units': cluster_hosts,
         }
+
+        if config('prefer-ipv6'):
+            ctxt['local_host'] = 'ip6-localhost'
+            ctxt['haproxy_host'] = '::'
+            ctxt['stat_port'] = ':::8888'
+        else:
+            ctxt['local_host'] = '127.0.0.1'
+            ctxt['haproxy_host'] = '0.0.0.0'
+            ctxt['stat_port'] = ':8888'
+
         if len(cluster_hosts.keys()) > 1:
             # Enable haproxy when we have enough peers.
             log('Ensuring haproxy enabled in /etc/default/haproxy.')

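For reference, a short annotation of the new context keys in the 'prefer-ipv6' case (values as in the diff above):

    # 'ip6-localhost' is the /etc/hosts alias for ::1 on Ubuntu;
    # '::' makes haproxy bind on all IPv6 addresses; and ':::8888'
    # is haproxy bind syntax for address '::', port 8888.
    ctxt['local_host'] = 'ip6-localhost'
    ctxt['haproxy_host'] = '::'
    ctxt['stat_port'] = ':::8888'
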
=== modified file 'hooks/charmhelpers/contrib/openstack/ip.py'
--- hooks/charmhelpers/contrib/openstack/ip.py 2014-07-28 11:39:21 +0000
+++ hooks/charmhelpers/contrib/openstack/ip.py 2014-08-13 13:54:38 +0000
@@ -7,6 +7,7 @@
     get_address_in_network,
     is_address_in_network,
     is_ipv6,
+    get_ipv6_addr,
 )
 
 from charmhelpers.contrib.hahelpers.cluster import is_clustered
@@ -64,10 +65,13 @@
                         vip):
                     resolved_address = vip
     else:
+        if config('prefer-ipv6'):
+            fallback_addr = get_ipv6_addr()
+        else:
+            fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
         resolved_address = get_address_in_network(
-            config(_address_map[endpoint_type]['config']),
-            unit_get(_address_map[endpoint_type]['fallback'])
-        )
+            config(_address_map[endpoint_type]['config']), fallback_addr)
+
     if resolved_address is None:
         raise ValueError('Unable to resolve a suitable IP address'
                          ' based on charm state and configuration')

=== modified file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg'
--- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-07-28 14:38:51 +0000
+++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-08-13 13:54:38 +0000
@@ -1,6 +1,6 @@
 global
-    log 127.0.0.1 local0
-    log 127.0.0.1 local1 notice
+    log {{ local_host }} local0
+    log {{ local_host }} local1 notice
     maxconn 20000
     user haproxy
     group haproxy
@@ -17,7 +17,7 @@
     timeout client 30000
     timeout server 30000
 
-listen stats :8888
+listen stats {{ stat_port }}
     mode http
     stats enable
     stats hide-version

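To see how the context keys land in this template, a minimal sketch (requires python-jinja2; the value is the IPv6 case from the context change above):

    from jinja2 import Template

    tmpl = Template("listen stats {{ stat_port }}")
    # Under 'prefer-ipv6' this renders 'listen stats :::8888', i.e.
    # haproxy listening on [::]:8888; otherwise ':8888' as before.
    print(tmpl.render(stat_port=':::8888'))
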
=== modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py'
--- hooks/charmhelpers/contrib/storage/linux/utils.py 2014-07-28 14:38:51 +0000
+++ hooks/charmhelpers/contrib/storage/linux/utils.py 2014-08-13 13:54:38 +0000
@@ -46,5 +46,8 @@
     :returns: boolean: True if the path represents a mounted device, False if
         it doesn't.
     '''
+    is_partition = bool(re.search(r".*[0-9]+\b", device))
     out = check_output(['mount'])
+    if is_partition:
+        return bool(re.search(device + r"\b", out))
     return bool(re.search(device + r"[0-9]+\b", out))

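A sketch of the behaviour this change adds (device paths are illustrative):

    from charmhelpers.contrib.storage.linux.utils import is_device_mounted

    # '/dev/vdb1' ends in a digit, so it is now treated as a partition
    # and matched exactly against the output of 'mount'; '/dev/vdb' is
    # still matched against any 'vdb<N>' partition entry, as before.
    is_device_mounted('/dev/vdb1')  # True only if vdb1 itself is mounted
    is_device_mounted('/dev/vdb')   # True if any vdb partition is mounted
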
=== modified file 'hooks/charmhelpers/core/host.py'
--- hooks/charmhelpers/core/host.py 2014-07-28 14:38:51 +0000
+++ hooks/charmhelpers/core/host.py 2014-08-13 13:54:38 +0000
@@ -12,6 +12,8 @@
 import string
 import subprocess
 import hashlib
+import shutil
+from contextlib import contextmanager
 
 from collections import OrderedDict
 
@@ -52,7 +54,7 @@
 def service_running(service):
     """Determine whether a system service is running"""
     try:
-        output = subprocess.check_output(['service', service, 'status'])
+        output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT)
     except subprocess.CalledProcessError:
         return False
     else:
@@ -62,6 +64,16 @@
         return False
 
 
+def service_available(service_name):
+    """Determine whether a system service is available"""
+    try:
+        subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
+    except subprocess.CalledProcessError:
+        return False
+    else:
+        return True
+
+
 def adduser(username, password=None, shell='/bin/bash', system_user=False):
     """Add a user to the system"""
     try:
@@ -329,3 +341,24 @@
     pkgcache = apt_pkg.Cache()
     pkg = pkgcache[package]
     return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
+
+
+@contextmanager
+def chdir(d):
+    cur = os.getcwd()
+    try:
+        yield os.chdir(d)
+    finally:
+        os.chdir(cur)
+
+
+def chownr(path, owner, group):
+    uid = pwd.getpwnam(owner).pw_uid
+    gid = grp.getgrnam(group).gr_gid
+
+    for root, dirs, files in os.walk(path):
+        for name in dirs + files:
+            full = os.path.join(root, name)
+            broken_symlink = os.path.lexists(full) and not os.path.exists(full)
+            if not broken_symlink:
+                os.chown(full, uid, gid)

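Usage sketches for the two new helpers (the paths are hypothetical):

    from charmhelpers.core.host import chdir, chownr

    # chdir is a context manager: the previous working directory is
    # restored on exit, even if the block raises.
    with chdir('/var/lib/juju'):
        pass  # run work relative to /var/lib/juju here

    # chownr recursively chowns a tree, skipping broken symlinks.
    chownr('/var/lib/nova', owner='nova', group='nova')
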
=== added directory 'hooks/charmhelpers/core/services'
=== added file 'hooks/charmhelpers/core/services/__init__.py'
--- hooks/charmhelpers/core/services/__init__.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/core/services/__init__.py 2014-08-13 13:54:38 +0000
@@ -0,0 +1,2 @@
+from .base import *
+from .helpers import *

=== added file 'hooks/charmhelpers/core/services/base.py'
--- hooks/charmhelpers/core/services/base.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/core/services/base.py 2014-08-13 13:54:38 +0000
@@ -0,0 +1,305 @@
+import os
+import re
+import json
+from collections import Iterable
+
+from charmhelpers.core import host
+from charmhelpers.core import hookenv
+
+
+__all__ = ['ServiceManager', 'ManagerCallback',
+           'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
+           'service_restart', 'service_stop']
+
+
+class ServiceManager(object):
+    def __init__(self, services=None):
+        """
+        Register a list of services, given their definitions.
+
+        Traditional charm authoring is focused on implementing hooks. That is,
+        the charm author is thinking in terms of "What hook am I handling; what
+        does this hook need to do?" However, in most cases, the real question
+        should be "Do I have the information I need to configure and start this
+        piece of software and, if so, what are the steps for doing so?" The
+        ServiceManager framework tries to bring the focus to the data and the
+        setup tasks, in the most declarative way possible.
+
+        Service definitions are dicts in the following formats (all keys except
+        'service' are optional)::
+
+            {
+                "service": <service name>,
+                "required_data": <list of required data contexts>,
+                "data_ready": <one or more callbacks>,
+                "data_lost": <one or more callbacks>,
+                "start": <one or more callbacks>,
+                "stop": <one or more callbacks>,
+                "ports": <list of ports to manage>,
+            }
+
+        The 'required_data' list should contain dicts of required data (or
+        dependency managers that act like dicts and know how to collect the data).
+        Only when all items in the 'required_data' list are populated are the
+        'data_ready' and 'start' callbacks executed. See `is_ready()` for more
+        information.
+
+        The 'data_ready' value should be either a single callback, or a list of
+        callbacks, to be called when all items in 'required_data' pass `is_ready()`.
+        Each callback will be called with the service name as the only parameter.
+        After all of the 'data_ready' callbacks are called, the 'start' callbacks
+        are fired.
+
+        The 'data_lost' value should be either a single callback, or a list of
+        callbacks, to be called when a 'required_data' item no longer passes
+        `is_ready()`. Each callback will be called with the service name as the
+        only parameter. After all of the 'data_lost' callbacks are called,
+        the 'stop' callbacks are fired.
+
+        The 'start' value should be either a single callback, or a list of
+        callbacks, to be called when starting the service, after the 'data_ready'
+        callbacks are complete. Each callback will be called with the service
+        name as the only parameter. This defaults to
+        `[host.service_start, services.open_ports]`.
+
+        The 'stop' value should be either a single callback, or a list of
+        callbacks, to be called when stopping the service. If the service is
+        being stopped because it no longer has all of its 'required_data', this
+        will be called after all of the 'data_lost' callbacks are complete.
+        Each callback will be called with the service name as the only parameter.
+        This defaults to `[services.close_ports, host.service_stop]`.
+
+        The 'ports' value should be a list of ports to manage. The default
+        'start' handler will open the ports after the service is started,
+        and the default 'stop' handler will close the ports prior to stopping
+        the service.
+
+
+        Examples:
+
+        The following registers an Upstart service called bingod that depends on
+        a mongodb relation and which runs a custom `db_migrate` function prior to
+        restarting the service, and a Runit service called spadesd::
+
+            manager = services.ServiceManager([
+                {
+                    'service': 'bingod',
+                    'ports': [80, 443],
+                    'required_data': [MongoRelation(), config(), {'my': 'data'}],
+                    'data_ready': [
+                        services.template(source='bingod.conf'),
+                        services.template(source='bingod.ini',
+                                          target='/etc/bingod.ini',
+                                          owner='bingo', perms=0400),
+                    ],
+                },
+                {
+                    'service': 'spadesd',
+                    'data_ready': services.template(source='spadesd_run.j2',
+                                                    target='/etc/sv/spadesd/run',
+                                                    perms=0555),
+                    'start': runit_start,
+                    'stop': runit_stop,
+                },
+            ])
+            manager.manage()
+        """
+        self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
+        self._ready = None
+        self.services = {}
+        for service in services or []:
+            service_name = service['service']
+            self.services[service_name] = service
+
+    def manage(self):
+        """
+        Handle the current hook by doing The Right Thing with the registered services.
+        """
+        hook_name = hookenv.hook_name()
+        if hook_name == 'stop':
+            self.stop_services()
+        else:
+            self.provide_data()
+            self.reconfigure_services()
+
+    def provide_data(self):
+        hook_name = hookenv.hook_name()
+        for service in self.services.values():
+            for provider in service.get('provided_data', []):
+                if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name):
+                    data = provider.provide_data()
+                    if provider._is_ready(data):
+                        hookenv.relation_set(None, data)
+
+    def reconfigure_services(self, *service_names):
+        """
+        Update all files for one or more registered services, and,
+        if ready, optionally restart them.
+
+        If no service names are given, reconfigures all registered services.
+        """
+        for service_name in service_names or self.services.keys():
+            if self.is_ready(service_name):
+                self.fire_event('data_ready', service_name)
+                self.fire_event('start', service_name, default=[
+                    service_restart,
+                    manage_ports])
+                self.save_ready(service_name)
+            else:
+                if self.was_ready(service_name):
+                    self.fire_event('data_lost', service_name)
+                self.fire_event('stop', service_name, default=[
+                    manage_ports,
+                    service_stop])
+                self.save_lost(service_name)
+
+    def stop_services(self, *service_names):
+        """
+        Stop one or more registered services, by name.
+
+        If no service names are given, stops all registered services.
+        """
+        for service_name in service_names or self.services.keys():
+            self.fire_event('stop', service_name, default=[
+                manage_ports,
+                service_stop])
+
+    def get_service(self, service_name):
+        """
+        Given the name of a registered service, return its service definition.
+        """
+        service = self.services.get(service_name)
+        if not service:
+            raise KeyError('Service not registered: %s' % service_name)
+        return service
+
+    def fire_event(self, event_name, service_name, default=None):
+        """
+        Fire a data_ready, data_lost, start, or stop event on a given service.
+        """
+        service = self.get_service(service_name)
+        callbacks = service.get(event_name, default)
+        if not callbacks:
+            return
+        if not isinstance(callbacks, Iterable):
+            callbacks = [callbacks]
+        for callback in callbacks:
+            if isinstance(callback, ManagerCallback):
+                callback(self, service_name, event_name)
+            else:
+                callback(service_name)
+
+    def is_ready(self, service_name):
+        """
+        Determine if a registered service is ready, by checking its 'required_data'.
+
+        A 'required_data' item can be any mapping type, and is considered ready
+        if `bool(item)` evaluates as True.
+        """
+        service = self.get_service(service_name)
+        reqs = service.get('required_data', [])
+        return all(bool(req) for req in reqs)
+
+    def _load_ready_file(self):
+        if self._ready is not None:
+            return
+        if os.path.exists(self._ready_file):
+            with open(self._ready_file) as fp:
+                self._ready = set(json.load(fp))
+        else:
+            self._ready = set()
+
+    def _save_ready_file(self):
+        if self._ready is None:
+            return
+        with open(self._ready_file, 'w') as fp:
+            json.dump(list(self._ready), fp)
+
+    def save_ready(self, service_name):
+        """
+        Save an indicator that the given service is now data_ready.
+        """
+        self._load_ready_file()
+        self._ready.add(service_name)
+        self._save_ready_file()
+
+    def save_lost(self, service_name):
+        """
+        Save an indicator that the given service is no longer data_ready.
+        """
+        self._load_ready_file()
+        self._ready.discard(service_name)
+        self._save_ready_file()
+
+    def was_ready(self, service_name):
+        """
+        Determine if the given service was previously data_ready.
+        """
+        self._load_ready_file()
+        return service_name in self._ready
+
+
+class ManagerCallback(object):
+    """
+    Special case of a callback that takes the `ServiceManager` instance
+    in addition to the service name.
+
+    Subclasses should implement `__call__` which should accept three parameters:
+
+        * `manager`       The `ServiceManager` instance
+        * `service_name`  The name of the service it's being triggered for
+        * `event_name`    The name of the event that this callback is handling
+    """
+    def __call__(self, manager, service_name, event_name):
+        raise NotImplementedError()
+
+
+class PortManagerCallback(ManagerCallback):
+    """
+    Callback class that will open or close ports, for use as either
+    a start or stop action.
+    """
+    def __call__(self, manager, service_name, event_name):
+        service = manager.get_service(service_name)
+        new_ports = service.get('ports', [])
+        port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
+        if os.path.exists(port_file):
+            with open(port_file) as fp:
+                old_ports = fp.read().split(',')
+            for old_port in old_ports:
+                if bool(old_port):
+                    old_port = int(old_port)
+                    if old_port not in new_ports:
+                        hookenv.close_port(old_port)
+        with open(port_file, 'w') as fp:
+            fp.write(','.join(str(port) for port in new_ports))
+        for port in new_ports:
+            if event_name == 'start':
+                hookenv.open_port(port)
+            elif event_name == 'stop':
+                hookenv.close_port(port)
+
+
+def service_stop(service_name):
+    """
+    Wrapper around host.service_stop to prevent spurious "unknown service"
+    messages in the logs.
+    """
+    if host.service_running(service_name):
+        host.service_stop(service_name)
+
+
+def service_restart(service_name):
+    """
+    Wrapper around host.service_restart to prevent spurious "unknown service"
+    messages in the logs.
+    """
+    if host.service_available(service_name):
+        if host.service_running(service_name):
+            host.service_restart(service_name)
+        else:
+            host.service_start(service_name)
+
+
+# Convenience aliases
+open_ports = close_ports = manage_ports = PortManagerCallback()

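As a sketch, a charm's hooks.py built on this framework might look like the following (the service definition and the MyRelation context are hypothetical, not part of this merge):

    from charmhelpers.core import services

    # A single definition drives every hook: when all required_data is
    # ready, templates are rendered and the service is (re)started with
    # its ports opened; when data is lost, it is stopped again.
    manager = services.ServiceManager([{
        'service': 'nova-compute',
        'ports': [8774],
        'required_data': [MyRelation()],  # hypothetical RelationContext
        'data_ready': services.template(source='nova.conf',
                                        target='/etc/nova/nova.conf'),
    }])
    manager.manage()
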
=== added file 'hooks/charmhelpers/core/services/helpers.py'
--- hooks/charmhelpers/core/services/helpers.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/core/services/helpers.py 2014-08-13 13:54:38 +0000
@@ -0,0 +1,125 @@
+from charmhelpers.core import hookenv
+from charmhelpers.core import templating
+
+from charmhelpers.core.services.base import ManagerCallback
+
+
+__all__ = ['RelationContext', 'TemplateCallback',
+           'render_template', 'template']
+
+
+class RelationContext(dict):
+    """
+    Base class for a context generator that gets relation data from juju.
+
+    Subclasses must provide the attributes `name`, which is the name of the
+    interface of interest, `interface`, which is the type of the interface of
+    interest, and `required_keys`, which is the set of keys required for the
+    relation to be considered complete. The data for all interfaces matching
+    the `name` attribute that are complete will be used to populate the
+    dictionary values (see `get_data`, below).
+
+    The generated context will be namespaced under the interface type, to prevent
+    potential naming conflicts.
+    """
+    name = None
+    interface = None
+    required_keys = []
+
+    def __init__(self, *args, **kwargs):
+        super(RelationContext, self).__init__(*args, **kwargs)
+        self.get_data()
+
+    def __bool__(self):
+        """
+        Returns True if all of the required_keys are available.
+        """
+        return self.is_ready()
+
+    __nonzero__ = __bool__
+
+    def __repr__(self):
+        return super(RelationContext, self).__repr__()
+
+    def is_ready(self):
+        """
+        Returns True if all of the `required_keys` are available from any units.
+        """
+        ready = len(self.get(self.name, [])) > 0
+        if not ready:
+            hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
+        return ready
+
+    def _is_ready(self, unit_data):
+        """
+        Helper method that tests a set of relation data and returns True if
+        all of the `required_keys` are present.
+        """
+        return set(unit_data.keys()).issuperset(set(self.required_keys))
+
+    def get_data(self):
+        """
+        Retrieve the relation data for each unit involved in a relation and,
+        if complete, store it in a list under `self[self.name]`. This
+        is automatically called when the RelationContext is instantiated.
+
+        The units are sorted lexicographically first by the service ID, then by
+        the unit ID. Thus, if an interface has two other services, 'db:1'
+        and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
+        and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
+        set of data, the relation data for the units will be stored in the
+        order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
+
+        If you only care about a single unit on the relation, you can just
+        access it as `{{ interface[0]['key'] }}`. However, if you can at all
+        support multiple units on a relation, you should iterate over the list,
+        like::
+
+            {% for unit in interface -%}
+                {{ unit['key'] }}{% if not loop.last %},{% endif %}
+            {%- endfor %}
+
+        Note that since all sets of relation data from all related services and
+        units are in a single list, if you need to know which service or unit a
+        set of data came from, you'll need to extend this class to preserve
+        that information.
+        """
+        if not hookenv.relation_ids(self.name):
+            return
+
+        ns = self.setdefault(self.name, [])
+        for rid in sorted(hookenv.relation_ids(self.name)):
+            for unit in sorted(hookenv.related_units(rid)):
+                reldata = hookenv.relation_get(rid=rid, unit=unit)
+                if self._is_ready(reldata):
+                    ns.append(reldata)
+
+    def provide_data(self):
+        """
+        Return data to be relation_set for this interface.
+        """
+        return {}
+
+
+class TemplateCallback(ManagerCallback):
+    """
+    Callback class that will render a template, for use as a ready action.
+    """
+    def __init__(self, source, target, owner='root', group='root', perms=0444):
+        self.source = source
+        self.target = target
+        self.owner = owner
+        self.group = group
+        self.perms = perms
+
+    def __call__(self, manager, service_name, event_name):
+        service = manager.get_service(service_name)
+        context = {}
+        for ctx in service.get('required_data', []):
+            context.update(ctx)
+        templating.render(self.source, self.target, context,
+                          self.owner, self.group, self.perms)
+
+
+# Convenience aliases for templates
+render_template = template = TemplateCallback

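For illustration, a minimal RelationContext subclass (the interface and key names are hypothetical):

    from charmhelpers.core.services.helpers import RelationContext

    class MyDatabaseRelation(RelationContext):
        # Ready once any related unit has set both keys; the collected
        # unit data is then available as a list under self['db'].
        name = 'db'
        interface = 'mysql'
        required_keys = ['host', 'password']
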
=== added file 'hooks/charmhelpers/core/templating.py'
--- hooks/charmhelpers/core/templating.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/core/templating.py 2014-08-13 13:54:38 +0000
@@ -0,0 +1,51 @@
+import os
+
+from charmhelpers.core import host
+from charmhelpers.core import hookenv
+
+
+def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None):
+    """
+    Render a template.
+
+    The `source` path, if not absolute, is relative to the `templates_dir`.
+
+    The `target` path should be absolute.
+
+    The context should be a dict containing the values to be replaced in the
+    template.
+
+    The `owner`, `group`, and `perms` options will be passed to `write_file`.
+
+    If omitted, `templates_dir` defaults to the `templates` folder in the charm.
+
+    Note: Using this requires python-jinja2; if it is not installed, calling
+    this will attempt to use charmhelpers.fetch.apt_install to install it.
+    """
+    try:
+        from jinja2 import FileSystemLoader, Environment, exceptions
+    except ImportError:
+        try:
+            from charmhelpers.fetch import apt_install
+        except ImportError:
+            hookenv.log('Could not import jinja2, and could not import '
+                        'charmhelpers.fetch to install it',
+                        level=hookenv.ERROR)
+            raise
+        apt_install('python-jinja2', fatal=True)
+        from jinja2 import FileSystemLoader, Environment, exceptions
+
+    if templates_dir is None:
+        templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
+    loader = Environment(loader=FileSystemLoader(templates_dir))
+    try:
+        source = source
+        template = loader.get_template(source)
+    except exceptions.TemplateNotFound as e:
+        hookenv.log('Could not load template %s from %s.' %
+                    (source, templates_dir),
+                    level=hookenv.ERROR)
+        raise e
+    content = template.render(context)
+    host.mkdir(os.path.dirname(target))
+    host.write_file(target, content, owner, group, perms)

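A quick usage sketch (the template name, target path, and context values are illustrative):

    from charmhelpers.core.templating import render

    # Renders <charm_dir>/templates/nova.conf with the given context and
    # writes the result to the target, creating parent directories and
    # applying the requested ownership and permissions.
    render('nova.conf', '/etc/nova/nova.conf',
           {'rabbit_host': '10.0.0.5'}, owner='nova', group='nova')
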
=== modified file 'hooks/charmhelpers/fetch/__init__.py'
--- hooks/charmhelpers/fetch/__init__.py 2014-07-28 14:38:51 +0000
+++ hooks/charmhelpers/fetch/__init__.py 2014-08-13 13:54:38 +0000
@@ -122,6 +122,7 @@
     # Tell apt to build an in-memory cache to prevent race conditions (if
     # another process is already building the cache).
     apt_pkg.config.set("Dir::Cache::pkgcache", "")
+    apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
 
     cache = apt_pkg.Cache()
     _pkgs = []
