Merge lp:~gnuoy/charms/trusty/nova-cloud-controller/next-charm-sync into lp:~openstack-charmers-archive/charms/trusty/nova-cloud-controller/next
- Trusty Tahr (14.04)
- next-charm-sync
- Merge into next
Proposed by
Liam Young
Status: | Merged |
---|---|
Merged at revision: | 94 |
Proposed branch: | lp:~gnuoy/charms/trusty/nova-cloud-controller/next-charm-sync |
Merge into: | lp:~openstack-charmers-archive/charms/trusty/nova-cloud-controller/next |
Diff against target: |
727 lines (+570/-12) 11 files modified
hooks/charmhelpers/contrib/network/ip.py (+19/-1) hooks/charmhelpers/contrib/openstack/context.py (+20/-4) hooks/charmhelpers/contrib/openstack/ip.py (+7/-3) hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+3/-3) hooks/charmhelpers/contrib/storage/linux/utils.py (+3/-0) hooks/charmhelpers/core/host.py (+34/-1) hooks/charmhelpers/core/services/__init__.py (+2/-0) hooks/charmhelpers/core/services/base.py (+305/-0) hooks/charmhelpers/core/services/helpers.py (+125/-0) hooks/charmhelpers/core/templating.py (+51/-0) hooks/charmhelpers/fetch/__init__.py (+1/-0) |
To merge this branch: | bzr merge lp:~gnuoy/charms/trusty/nova-cloud-controller/next-charm-sync |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Liam Young (community) | Approve | ||
Review via email: mp+230629@code.launchpad.net |
Commit message
Description of the change
To post a comment you must log in.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'hooks/charmhelpers/contrib/network/ip.py' |
2 | --- hooks/charmhelpers/contrib/network/ip.py 2014-07-28 12:05:42 +0000 |
3 | +++ hooks/charmhelpers/contrib/network/ip.py 2014-08-13 13:55:05 +0000 |
4 | @@ -4,7 +4,7 @@ |
5 | |
6 | from charmhelpers.fetch import apt_install |
7 | from charmhelpers.core.hookenv import ( |
8 | - ERROR, log, |
9 | + ERROR, log, config, |
10 | ) |
11 | |
12 | try: |
13 | @@ -154,3 +154,21 @@ |
14 | get_iface_for_address = partial(_get_for_address, key='iface') |
15 | |
16 | get_netmask_for_address = partial(_get_for_address, key='netmask') |
17 | + |
18 | + |
19 | +def get_ipv6_addr(iface="eth0"): |
20 | + try: |
21 | + iface_addrs = netifaces.ifaddresses(iface) |
22 | + if netifaces.AF_INET6 not in iface_addrs: |
23 | + raise Exception("Interface '%s' doesn't have an ipv6 address." % iface) |
24 | + |
25 | + addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6] |
26 | + ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80') |
27 | + and config('vip') != a['addr']] |
28 | + if not ipv6_addr: |
29 | + raise Exception("Interface '%s' doesn't have global ipv6 address." % iface) |
30 | + |
31 | + return ipv6_addr[0] |
32 | + |
33 | + except ValueError: |
34 | + raise ValueError("Invalid interface '%s'" % iface) |
35 | |
36 | === modified file 'hooks/charmhelpers/contrib/openstack/context.py' |
37 | --- hooks/charmhelpers/contrib/openstack/context.py 2014-07-28 14:41:41 +0000 |
38 | +++ hooks/charmhelpers/contrib/openstack/context.py 2014-08-13 13:55:05 +0000 |
39 | @@ -44,7 +44,10 @@ |
40 | neutron_plugin_attribute, |
41 | ) |
42 | |
43 | -from charmhelpers.contrib.network.ip import get_address_in_network |
44 | +from charmhelpers.contrib.network.ip import ( |
45 | + get_address_in_network, |
46 | + get_ipv6_addr, |
47 | +) |
48 | |
49 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' |
50 | |
51 | @@ -401,9 +404,12 @@ |
52 | |
53 | cluster_hosts = {} |
54 | l_unit = local_unit().replace('/', '-') |
55 | - cluster_hosts[l_unit] = \ |
56 | - get_address_in_network(config('os-internal-network'), |
57 | - unit_get('private-address')) |
58 | + if config('prefer-ipv6'): |
59 | + addr = get_ipv6_addr() |
60 | + else: |
61 | + addr = unit_get('private-address') |
62 | + cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'), |
63 | + addr) |
64 | |
65 | for rid in relation_ids('cluster'): |
66 | for unit in related_units(rid): |
67 | @@ -414,6 +420,16 @@ |
68 | ctxt = { |
69 | 'units': cluster_hosts, |
70 | } |
71 | + |
72 | + if config('prefer-ipv6'): |
73 | + ctxt['local_host'] = 'ip6-localhost' |
74 | + ctxt['haproxy_host'] = '::' |
75 | + ctxt['stat_port'] = ':::8888' |
76 | + else: |
77 | + ctxt['local_host'] = '127.0.0.1' |
78 | + ctxt['haproxy_host'] = '0.0.0.0' |
79 | + ctxt['stat_port'] = ':8888' |
80 | + |
81 | if len(cluster_hosts.keys()) > 1: |
82 | # Enable haproxy when we have enough peers. |
83 | log('Ensuring haproxy enabled in /etc/default/haproxy.') |
84 | |
85 | === modified file 'hooks/charmhelpers/contrib/openstack/ip.py' |
86 | --- hooks/charmhelpers/contrib/openstack/ip.py 2014-07-28 11:39:11 +0000 |
87 | +++ hooks/charmhelpers/contrib/openstack/ip.py 2014-08-13 13:55:05 +0000 |
88 | @@ -7,6 +7,7 @@ |
89 | get_address_in_network, |
90 | is_address_in_network, |
91 | is_ipv6, |
92 | + get_ipv6_addr, |
93 | ) |
94 | |
95 | from charmhelpers.contrib.hahelpers.cluster import is_clustered |
96 | @@ -64,10 +65,13 @@ |
97 | vip): |
98 | resolved_address = vip |
99 | else: |
100 | + if config('prefer-ipv6'): |
101 | + fallback_addr = get_ipv6_addr() |
102 | + else: |
103 | + fallback_addr = unit_get(_address_map[endpoint_type]['fallback']) |
104 | resolved_address = get_address_in_network( |
105 | - config(_address_map[endpoint_type]['config']), |
106 | - unit_get(_address_map[endpoint_type]['fallback']) |
107 | - ) |
108 | + config(_address_map[endpoint_type]['config']), fallback_addr) |
109 | + |
110 | if resolved_address is None: |
111 | raise ValueError('Unable to resolve a suitable IP address' |
112 | ' based on charm state and configuration') |
113 | |
114 | === modified file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg' |
115 | --- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-07-28 14:41:41 +0000 |
116 | +++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-08-13 13:55:05 +0000 |
117 | @@ -1,6 +1,6 @@ |
118 | global |
119 | - log 127.0.0.1 local0 |
120 | - log 127.0.0.1 local1 notice |
121 | + log {{ local_host }} local0 |
122 | + log {{ local_host }} local1 notice |
123 | maxconn 20000 |
124 | user haproxy |
125 | group haproxy |
126 | @@ -17,7 +17,7 @@ |
127 | timeout client 30000 |
128 | timeout server 30000 |
129 | |
130 | -listen stats :8888 |
131 | +listen stats {{ stat_port }} |
132 | mode http |
133 | stats enable |
134 | stats hide-version |
135 | |
136 | === modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py' |
137 | --- hooks/charmhelpers/contrib/storage/linux/utils.py 2014-07-28 14:41:41 +0000 |
138 | +++ hooks/charmhelpers/contrib/storage/linux/utils.py 2014-08-13 13:55:05 +0000 |
139 | @@ -46,5 +46,8 @@ |
140 | :returns: boolean: True if the path represents a mounted device, False if |
141 | it doesn't. |
142 | ''' |
143 | + is_partition = bool(re.search(r".*[0-9]+\b", device)) |
144 | out = check_output(['mount']) |
145 | + if is_partition: |
146 | + return bool(re.search(device + r"\b", out)) |
147 | return bool(re.search(device + r"[0-9]+\b", out)) |
148 | |
149 | === modified file 'hooks/charmhelpers/core/host.py' |
150 | --- hooks/charmhelpers/core/host.py 2014-07-28 14:41:41 +0000 |
151 | +++ hooks/charmhelpers/core/host.py 2014-08-13 13:55:05 +0000 |
152 | @@ -12,6 +12,8 @@ |
153 | import string |
154 | import subprocess |
155 | import hashlib |
156 | +import shutil |
157 | +from contextlib import contextmanager |
158 | |
159 | from collections import OrderedDict |
160 | |
161 | @@ -52,7 +54,7 @@ |
162 | def service_running(service): |
163 | """Determine whether a system service is running""" |
164 | try: |
165 | - output = subprocess.check_output(['service', service, 'status']) |
166 | + output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT) |
167 | except subprocess.CalledProcessError: |
168 | return False |
169 | else: |
170 | @@ -62,6 +64,16 @@ |
171 | return False |
172 | |
173 | |
174 | +def service_available(service_name): |
175 | + """Determine whether a system service is available""" |
176 | + try: |
177 | + subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) |
178 | + except subprocess.CalledProcessError: |
179 | + return False |
180 | + else: |
181 | + return True |
182 | + |
183 | + |
184 | def adduser(username, password=None, shell='/bin/bash', system_user=False): |
185 | """Add a user to the system""" |
186 | try: |
187 | @@ -329,3 +341,24 @@ |
188 | pkgcache = apt_pkg.Cache() |
189 | pkg = pkgcache[package] |
190 | return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) |
191 | + |
192 | + |
193 | +@contextmanager |
194 | +def chdir(d): |
195 | + cur = os.getcwd() |
196 | + try: |
197 | + yield os.chdir(d) |
198 | + finally: |
199 | + os.chdir(cur) |
200 | + |
201 | + |
202 | +def chownr(path, owner, group): |
203 | + uid = pwd.getpwnam(owner).pw_uid |
204 | + gid = grp.getgrnam(group).gr_gid |
205 | + |
206 | + for root, dirs, files in os.walk(path): |
207 | + for name in dirs + files: |
208 | + full = os.path.join(root, name) |
209 | + broken_symlink = os.path.lexists(full) and not os.path.exists(full) |
210 | + if not broken_symlink: |
211 | + os.chown(full, uid, gid) |
212 | |
213 | === added directory 'hooks/charmhelpers/core/services' |
214 | === added file 'hooks/charmhelpers/core/services/__init__.py' |
215 | --- hooks/charmhelpers/core/services/__init__.py 1970-01-01 00:00:00 +0000 |
216 | +++ hooks/charmhelpers/core/services/__init__.py 2014-08-13 13:55:05 +0000 |
217 | @@ -0,0 +1,2 @@ |
218 | +from .base import * |
219 | +from .helpers import * |
220 | |
221 | === added file 'hooks/charmhelpers/core/services/base.py' |
222 | --- hooks/charmhelpers/core/services/base.py 1970-01-01 00:00:00 +0000 |
223 | +++ hooks/charmhelpers/core/services/base.py 2014-08-13 13:55:05 +0000 |
224 | @@ -0,0 +1,305 @@ |
225 | +import os |
226 | +import re |
227 | +import json |
228 | +from collections import Iterable |
229 | + |
230 | +from charmhelpers.core import host |
231 | +from charmhelpers.core import hookenv |
232 | + |
233 | + |
234 | +__all__ = ['ServiceManager', 'ManagerCallback', |
235 | + 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports', |
236 | + 'service_restart', 'service_stop'] |
237 | + |
238 | + |
239 | +class ServiceManager(object): |
240 | + def __init__(self, services=None): |
241 | + """ |
242 | + Register a list of services, given their definitions. |
243 | + |
244 | + Traditional charm authoring is focused on implementing hooks. That is, |
245 | + the charm author is thinking in terms of "What hook am I handling; what |
246 | + does this hook need to do?" However, in most cases, the real question |
247 | + should be "Do I have the information I need to configure and start this |
248 | + piece of software and, if so, what are the steps for doing so?" The |
249 | + ServiceManager framework tries to bring the focus to the data and the |
250 | + setup tasks, in the most declarative way possible. |
251 | + |
252 | + Service definitions are dicts in the following formats (all keys except |
253 | + 'service' are optional):: |
254 | + |
255 | + { |
256 | + "service": <service name>, |
257 | + "required_data": <list of required data contexts>, |
258 | + "data_ready": <one or more callbacks>, |
259 | + "data_lost": <one or more callbacks>, |
260 | + "start": <one or more callbacks>, |
261 | + "stop": <one or more callbacks>, |
262 | + "ports": <list of ports to manage>, |
263 | + } |
264 | + |
265 | + The 'required_data' list should contain dicts of required data (or |
266 | + dependency managers that act like dicts and know how to collect the data). |
267 | + Only when all items in the 'required_data' list are populated are the lists |
268 | + of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more |
269 | + information. |
270 | + |
271 | + The 'data_ready' value should be either a single callback, or a list of |
272 | + callbacks, to be called when all items in 'required_data' pass `is_ready()`. |
273 | + Each callback will be called with the service name as the only parameter. |
274 | + After all of the 'data_ready' callbacks are called, the 'start' callbacks |
275 | + are fired. |
276 | + |
277 | + The 'data_lost' value should be either a single callback, or a list of |
278 | + callbacks, to be called when a 'required_data' item no longer passes |
279 | + `is_ready()`. Each callback will be called with the service name as the |
280 | + only parameter. After all of the 'data_lost' callbacks are called, |
281 | + the 'stop' callbacks are fired. |
282 | + |
283 | + The 'start' value should be either a single callback, or a list of |
284 | + callbacks, to be called when starting the service, after the 'data_ready' |
285 | + callbacks are complete. Each callback will be called with the service |
286 | + name as the only parameter. This defaults to |
287 | + `[host.service_start, services.open_ports]`. |
288 | + |
289 | + The 'stop' value should be either a single callback, or a list of |
290 | + callbacks, to be called when stopping the service. If the service is |
291 | + being stopped because it no longer has all of its 'required_data', this |
292 | + will be called after all of the 'data_lost' callbacks are complete. |
293 | + Each callback will be called with the service name as the only parameter. |
294 | + This defaults to `[services.close_ports, host.service_stop]`. |
295 | + |
296 | + The 'ports' value should be a list of ports to manage. The default |
297 | + 'start' handler will open the ports after the service is started, |
298 | + and the default 'stop' handler will close the ports prior to stopping |
299 | + the service. |
300 | + |
301 | + |
302 | + Examples: |
303 | + |
304 | + The following registers an Upstart service called bingod that depends on |
305 | + a mongodb relation and which runs a custom `db_migrate` function prior to |
306 | + restarting the service, and a Runit service called spadesd:: |
307 | + |
308 | + manager = services.ServiceManager([ |
309 | + { |
310 | + 'service': 'bingod', |
311 | + 'ports': [80, 443], |
312 | + 'required_data': [MongoRelation(), config(), {'my': 'data'}], |
313 | + 'data_ready': [ |
314 | + services.template(source='bingod.conf'), |
315 | + services.template(source='bingod.ini', |
316 | + target='/etc/bingod.ini', |
317 | + owner='bingo', perms=0400), |
318 | + ], |
319 | + }, |
320 | + { |
321 | + 'service': 'spadesd', |
322 | + 'data_ready': services.template(source='spadesd_run.j2', |
323 | + target='/etc/sv/spadesd/run', |
324 | + perms=0555), |
325 | + 'start': runit_start, |
326 | + 'stop': runit_stop, |
327 | + }, |
328 | + ]) |
329 | + manager.manage() |
330 | + """ |
331 | + self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') |
332 | + self._ready = None |
333 | + self.services = {} |
334 | + for service in services or []: |
335 | + service_name = service['service'] |
336 | + self.services[service_name] = service |
337 | + |
338 | + def manage(self): |
339 | + """ |
340 | + Handle the current hook by doing The Right Thing with the registered services. |
341 | + """ |
342 | + hook_name = hookenv.hook_name() |
343 | + if hook_name == 'stop': |
344 | + self.stop_services() |
345 | + else: |
346 | + self.provide_data() |
347 | + self.reconfigure_services() |
348 | + |
349 | + def provide_data(self): |
350 | + hook_name = hookenv.hook_name() |
351 | + for service in self.services.values(): |
352 | + for provider in service.get('provided_data', []): |
353 | + if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name): |
354 | + data = provider.provide_data() |
355 | + if provider._is_ready(data): |
356 | + hookenv.relation_set(None, data) |
357 | + |
358 | + def reconfigure_services(self, *service_names): |
359 | + """ |
360 | + Update all files for one or more registered services, and, |
361 | + if ready, optionally restart them. |
362 | + |
363 | + If no service names are given, reconfigures all registered services. |
364 | + """ |
365 | + for service_name in service_names or self.services.keys(): |
366 | + if self.is_ready(service_name): |
367 | + self.fire_event('data_ready', service_name) |
368 | + self.fire_event('start', service_name, default=[ |
369 | + service_restart, |
370 | + manage_ports]) |
371 | + self.save_ready(service_name) |
372 | + else: |
373 | + if self.was_ready(service_name): |
374 | + self.fire_event('data_lost', service_name) |
375 | + self.fire_event('stop', service_name, default=[ |
376 | + manage_ports, |
377 | + service_stop]) |
378 | + self.save_lost(service_name) |
379 | + |
380 | + def stop_services(self, *service_names): |
381 | + """ |
382 | + Stop one or more registered services, by name. |
383 | + |
384 | + If no service names are given, stops all registered services. |
385 | + """ |
386 | + for service_name in service_names or self.services.keys(): |
387 | + self.fire_event('stop', service_name, default=[ |
388 | + manage_ports, |
389 | + service_stop]) |
390 | + |
391 | + def get_service(self, service_name): |
392 | + """ |
393 | + Given the name of a registered service, return its service definition. |
394 | + """ |
395 | + service = self.services.get(service_name) |
396 | + if not service: |
397 | + raise KeyError('Service not registered: %s' % service_name) |
398 | + return service |
399 | + |
400 | + def fire_event(self, event_name, service_name, default=None): |
401 | + """ |
402 | + Fire a data_ready, data_lost, start, or stop event on a given service. |
403 | + """ |
404 | + service = self.get_service(service_name) |
405 | + callbacks = service.get(event_name, default) |
406 | + if not callbacks: |
407 | + return |
408 | + if not isinstance(callbacks, Iterable): |
409 | + callbacks = [callbacks] |
410 | + for callback in callbacks: |
411 | + if isinstance(callback, ManagerCallback): |
412 | + callback(self, service_name, event_name) |
413 | + else: |
414 | + callback(service_name) |
415 | + |
416 | + def is_ready(self, service_name): |
417 | + """ |
418 | + Determine if a registered service is ready, by checking its 'required_data'. |
419 | + |
420 | + A 'required_data' item can be any mapping type, and is considered ready |
421 | + if `bool(item)` evaluates as True. |
422 | + """ |
423 | + service = self.get_service(service_name) |
424 | + reqs = service.get('required_data', []) |
425 | + return all(bool(req) for req in reqs) |
426 | + |
427 | + def _load_ready_file(self): |
428 | + if self._ready is not None: |
429 | + return |
430 | + if os.path.exists(self._ready_file): |
431 | + with open(self._ready_file) as fp: |
432 | + self._ready = set(json.load(fp)) |
433 | + else: |
434 | + self._ready = set() |
435 | + |
436 | + def _save_ready_file(self): |
437 | + if self._ready is None: |
438 | + return |
439 | + with open(self._ready_file, 'w') as fp: |
440 | + json.dump(list(self._ready), fp) |
441 | + |
442 | + def save_ready(self, service_name): |
443 | + """ |
444 | + Save an indicator that the given service is now data_ready. |
445 | + """ |
446 | + self._load_ready_file() |
447 | + self._ready.add(service_name) |
448 | + self._save_ready_file() |
449 | + |
450 | + def save_lost(self, service_name): |
451 | + """ |
452 | + Save an indicator that the given service is no longer data_ready. |
453 | + """ |
454 | + self._load_ready_file() |
455 | + self._ready.discard(service_name) |
456 | + self._save_ready_file() |
457 | + |
458 | + def was_ready(self, service_name): |
459 | + """ |
460 | + Determine if the given service was previously data_ready. |
461 | + """ |
462 | + self._load_ready_file() |
463 | + return service_name in self._ready |
464 | + |
465 | + |
466 | +class ManagerCallback(object): |
467 | + """ |
468 | + Special case of a callback that takes the `ServiceManager` instance |
469 | + in addition to the service name. |
470 | + |
471 | + Subclasses should implement `__call__` which should accept three parameters: |
472 | + |
473 | + * `manager` The `ServiceManager` instance |
474 | + * `service_name` The name of the service it's being triggered for |
475 | + * `event_name` The name of the event that this callback is handling |
476 | + """ |
477 | + def __call__(self, manager, service_name, event_name): |
478 | + raise NotImplementedError() |
479 | + |
480 | + |
481 | +class PortManagerCallback(ManagerCallback): |
482 | + """ |
483 | + Callback class that will open or close ports, for use as either |
484 | + a start or stop action. |
485 | + """ |
486 | + def __call__(self, manager, service_name, event_name): |
487 | + service = manager.get_service(service_name) |
488 | + new_ports = service.get('ports', []) |
489 | + port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) |
490 | + if os.path.exists(port_file): |
491 | + with open(port_file) as fp: |
492 | + old_ports = fp.read().split(',') |
493 | + for old_port in old_ports: |
494 | + if bool(old_port): |
495 | + old_port = int(old_port) |
496 | + if old_port not in new_ports: |
497 | + hookenv.close_port(old_port) |
498 | + with open(port_file, 'w') as fp: |
499 | + fp.write(','.join(str(port) for port in new_ports)) |
500 | + for port in new_ports: |
501 | + if event_name == 'start': |
502 | + hookenv.open_port(port) |
503 | + elif event_name == 'stop': |
504 | + hookenv.close_port(port) |
505 | + |
506 | + |
507 | +def service_stop(service_name): |
508 | + """ |
509 | + Wrapper around host.service_stop to prevent spurious "unknown service" |
510 | + messages in the logs. |
511 | + """ |
512 | + if host.service_running(service_name): |
513 | + host.service_stop(service_name) |
514 | + |
515 | + |
516 | +def service_restart(service_name): |
517 | + """ |
518 | + Wrapper around host.service_restart to prevent spurious "unknown service" |
519 | + messages in the logs. |
520 | + """ |
521 | + if host.service_available(service_name): |
522 | + if host.service_running(service_name): |
523 | + host.service_restart(service_name) |
524 | + else: |
525 | + host.service_start(service_name) |
526 | + |
527 | + |
528 | +# Convenience aliases |
529 | +open_ports = close_ports = manage_ports = PortManagerCallback() |
530 | |
531 | === added file 'hooks/charmhelpers/core/services/helpers.py' |
532 | --- hooks/charmhelpers/core/services/helpers.py 1970-01-01 00:00:00 +0000 |
533 | +++ hooks/charmhelpers/core/services/helpers.py 2014-08-13 13:55:05 +0000 |
534 | @@ -0,0 +1,125 @@ |
535 | +from charmhelpers.core import hookenv |
536 | +from charmhelpers.core import templating |
537 | + |
538 | +from charmhelpers.core.services.base import ManagerCallback |
539 | + |
540 | + |
541 | +__all__ = ['RelationContext', 'TemplateCallback', |
542 | + 'render_template', 'template'] |
543 | + |
544 | + |
545 | +class RelationContext(dict): |
546 | + """ |
547 | + Base class for a context generator that gets relation data from juju. |
548 | + |
549 | + Subclasses must provide the attributes `name`, which is the name of the |
550 | + interface of interest, `interface`, which is the type of the interface of |
551 | + interest, and `required_keys`, which is the set of keys required for the |
552 | + relation to be considered complete. The data for all interfaces matching |
553 | + the `name` attribute that are complete will be used to populate the dictionary |
554 | + values (see `get_data`, below). |
555 | + |
556 | + The generated context will be namespaced under the interface type, to prevent |
557 | + potential naming conflicts. |
558 | + """ |
559 | + name = None |
560 | + interface = None |
561 | + required_keys = [] |
562 | + |
563 | + def __init__(self, *args, **kwargs): |
564 | + super(RelationContext, self).__init__(*args, **kwargs) |
565 | + self.get_data() |
566 | + |
567 | + def __bool__(self): |
568 | + """ |
569 | + Returns True if all of the required_keys are available. |
570 | + """ |
571 | + return self.is_ready() |
572 | + |
573 | + __nonzero__ = __bool__ |
574 | + |
575 | + def __repr__(self): |
576 | + return super(RelationContext, self).__repr__() |
577 | + |
578 | + def is_ready(self): |
579 | + """ |
580 | + Returns True if all of the `required_keys` are available from any units. |
581 | + """ |
582 | + ready = len(self.get(self.name, [])) > 0 |
583 | + if not ready: |
584 | + hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG) |
585 | + return ready |
586 | + |
587 | + def _is_ready(self, unit_data): |
588 | + """ |
589 | + Helper method that tests a set of relation data and returns True if |
590 | + all of the `required_keys` are present. |
591 | + """ |
592 | + return set(unit_data.keys()).issuperset(set(self.required_keys)) |
593 | + |
594 | + def get_data(self): |
595 | + """ |
596 | + Retrieve the relation data for each unit involved in a relation and, |
597 | + if complete, store it in a list under `self[self.name]`. This |
598 | + is automatically called when the RelationContext is instantiated. |
599 | + |
600 | + The units are sorted lexicographically first by the service ID, then by |
601 | + the unit ID. Thus, if an interface has two other services, 'db:1' |
602 | + and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1', |
603 | + and 'db:2' having one unit, 'mediawiki/0', all of which have a complete |
604 | + set of data, the relation data for the units will be stored in the |
605 | + order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'. |
606 | + |
607 | + If you only care about a single unit on the relation, you can just |
608 | + access it as `{{ interface[0]['key'] }}`. However, if you can at all |
609 | + support multiple units on a relation, you should iterate over the list, |
610 | + like:: |
611 | + |
612 | + {% for unit in interface -%} |
613 | + {{ unit['key'] }}{% if not loop.last %},{% endif %} |
614 | + {%- endfor %} |
615 | + |
616 | + Note that since all sets of relation data from all related services and |
617 | + units are in a single list, if you need to know which service or unit a |
618 | + set of data came from, you'll need to extend this class to preserve |
619 | + that information. |
620 | + """ |
621 | + if not hookenv.relation_ids(self.name): |
622 | + return |
623 | + |
624 | + ns = self.setdefault(self.name, []) |
625 | + for rid in sorted(hookenv.relation_ids(self.name)): |
626 | + for unit in sorted(hookenv.related_units(rid)): |
627 | + reldata = hookenv.relation_get(rid=rid, unit=unit) |
628 | + if self._is_ready(reldata): |
629 | + ns.append(reldata) |
630 | + |
631 | + def provide_data(self): |
632 | + """ |
633 | + Return data to be relation_set for this interface. |
634 | + """ |
635 | + return {} |
636 | + |
637 | + |
638 | +class TemplateCallback(ManagerCallback): |
639 | + """ |
640 | + Callback class that will render a template, for use as a ready action. |
641 | + """ |
642 | + def __init__(self, source, target, owner='root', group='root', perms=0444): |
643 | + self.source = source |
644 | + self.target = target |
645 | + self.owner = owner |
646 | + self.group = group |
647 | + self.perms = perms |
648 | + |
649 | + def __call__(self, manager, service_name, event_name): |
650 | + service = manager.get_service(service_name) |
651 | + context = {} |
652 | + for ctx in service.get('required_data', []): |
653 | + context.update(ctx) |
654 | + templating.render(self.source, self.target, context, |
655 | + self.owner, self.group, self.perms) |
656 | + |
657 | + |
658 | +# Convenience aliases for templates |
659 | +render_template = template = TemplateCallback |
660 | |
661 | === added file 'hooks/charmhelpers/core/templating.py' |
662 | --- hooks/charmhelpers/core/templating.py 1970-01-01 00:00:00 +0000 |
663 | +++ hooks/charmhelpers/core/templating.py 2014-08-13 13:55:05 +0000 |
664 | @@ -0,0 +1,51 @@ |
665 | +import os |
666 | + |
667 | +from charmhelpers.core import host |
668 | +from charmhelpers.core import hookenv |
669 | + |
670 | + |
671 | +def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None): |
672 | + """ |
673 | + Render a template. |
674 | + |
675 | + The `source` path, if not absolute, is relative to the `templates_dir`. |
676 | + |
677 | + The `target` path should be absolute. |
678 | + |
679 | + The context should be a dict containing the values to be replaced in the |
680 | + template. |
681 | + |
682 | + The `owner`, `group`, and `perms` options will be passed to `write_file`. |
683 | + |
684 | + If omitted, `templates_dir` defaults to the `templates` folder in the charm. |
685 | + |
686 | + Note: Using this requires python-jinja2; if it is not installed, calling |
687 | + this will attempt to use charmhelpers.fetch.apt_install to install it. |
688 | + """ |
689 | + try: |
690 | + from jinja2 import FileSystemLoader, Environment, exceptions |
691 | + except ImportError: |
692 | + try: |
693 | + from charmhelpers.fetch import apt_install |
694 | + except ImportError: |
695 | + hookenv.log('Could not import jinja2, and could not import ' |
696 | + 'charmhelpers.fetch to install it', |
697 | + level=hookenv.ERROR) |
698 | + raise |
699 | + apt_install('python-jinja2', fatal=True) |
700 | + from jinja2 import FileSystemLoader, Environment, exceptions |
701 | + |
702 | + if templates_dir is None: |
703 | + templates_dir = os.path.join(hookenv.charm_dir(), 'templates') |
704 | + loader = Environment(loader=FileSystemLoader(templates_dir)) |
705 | + try: |
706 | + source = source |
707 | + template = loader.get_template(source) |
708 | + except exceptions.TemplateNotFound as e: |
709 | + hookenv.log('Could not load template %s from %s.' % |
710 | + (source, templates_dir), |
711 | + level=hookenv.ERROR) |
712 | + raise e |
713 | + content = template.render(context) |
714 | + host.mkdir(os.path.dirname(target)) |
715 | + host.write_file(target, content, owner, group, perms) |
716 | |
717 | === modified file 'hooks/charmhelpers/fetch/__init__.py' |
718 | --- hooks/charmhelpers/fetch/__init__.py 2014-07-28 14:41:41 +0000 |
719 | +++ hooks/charmhelpers/fetch/__init__.py 2014-08-13 13:55:05 +0000 |
720 | @@ -122,6 +122,7 @@ |
721 | # Tell apt to build an in-memory cache to prevent race conditions (if |
722 | # another process is already building the cache). |
723 | apt_pkg.config.set("Dir::Cache::pkgcache", "") |
724 | + apt_pkg.config.set("Dir::Cache::srcpkgcache", "") |
725 | |
726 | cache = apt_pkg.Cache() |
727 | _pkgs = [] |
Approved by jamespage