Merge lp:~gnuoy/charms/trusty/nova-cloud-controller/next-charm-sync into lp:~openstack-charmers-archive/charms/trusty/nova-cloud-controller/next
- Trusty Tahr (14.04)
- next-charm-sync
- Merge into next
Proposed by
Liam Young
Status: | Merged |
---|---|
Merged at revision: | 94 |
Proposed branch: | lp:~gnuoy/charms/trusty/nova-cloud-controller/next-charm-sync |
Merge into: | lp:~openstack-charmers-archive/charms/trusty/nova-cloud-controller/next |
Diff against target: |
727 lines (+570/-12) 11 files modified
hooks/charmhelpers/contrib/network/ip.py (+19/-1) hooks/charmhelpers/contrib/openstack/context.py (+20/-4) hooks/charmhelpers/contrib/openstack/ip.py (+7/-3) hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+3/-3) hooks/charmhelpers/contrib/storage/linux/utils.py (+3/-0) hooks/charmhelpers/core/host.py (+34/-1) hooks/charmhelpers/core/services/__init__.py (+2/-0) hooks/charmhelpers/core/services/base.py (+305/-0) hooks/charmhelpers/core/services/helpers.py (+125/-0) hooks/charmhelpers/core/templating.py (+51/-0) hooks/charmhelpers/fetch/__init__.py (+1/-0) |
To merge this branch: | bzr merge lp:~gnuoy/charms/trusty/nova-cloud-controller/next-charm-sync |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Liam Young (community) | Approve | ||
Review via email: mp+230629@code.launchpad.net |
Commit message
Description of the change
To post a comment you must log in.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'hooks/charmhelpers/contrib/network/ip.py' | |||
2 | --- hooks/charmhelpers/contrib/network/ip.py 2014-07-28 12:05:42 +0000 | |||
3 | +++ hooks/charmhelpers/contrib/network/ip.py 2014-08-13 13:55:05 +0000 | |||
4 | @@ -4,7 +4,7 @@ | |||
5 | 4 | 4 | ||
6 | 5 | from charmhelpers.fetch import apt_install | 5 | from charmhelpers.fetch import apt_install |
7 | 6 | from charmhelpers.core.hookenv import ( | 6 | from charmhelpers.core.hookenv import ( |
9 | 7 | ERROR, log, | 7 | ERROR, log, config, |
10 | 8 | ) | 8 | ) |
11 | 9 | 9 | ||
12 | 10 | try: | 10 | try: |
13 | @@ -154,3 +154,21 @@ | |||
14 | 154 | get_iface_for_address = partial(_get_for_address, key='iface') | 154 | get_iface_for_address = partial(_get_for_address, key='iface') |
15 | 155 | 155 | ||
16 | 156 | get_netmask_for_address = partial(_get_for_address, key='netmask') | 156 | get_netmask_for_address = partial(_get_for_address, key='netmask') |
17 | 157 | |||
18 | 158 | |||
19 | 159 | def get_ipv6_addr(iface="eth0"): | ||
20 | 160 | try: | ||
21 | 161 | iface_addrs = netifaces.ifaddresses(iface) | ||
22 | 162 | if netifaces.AF_INET6 not in iface_addrs: | ||
23 | 163 | raise Exception("Interface '%s' doesn't have an ipv6 address." % iface) | ||
24 | 164 | |||
25 | 165 | addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6] | ||
26 | 166 | ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80') | ||
27 | 167 | and config('vip') != a['addr']] | ||
28 | 168 | if not ipv6_addr: | ||
29 | 169 | raise Exception("Interface '%s' doesn't have global ipv6 address." % iface) | ||
30 | 170 | |||
31 | 171 | return ipv6_addr[0] | ||
32 | 172 | |||
33 | 173 | except ValueError: | ||
34 | 174 | raise ValueError("Invalid interface '%s'" % iface) | ||
35 | 157 | 175 | ||
36 | === modified file 'hooks/charmhelpers/contrib/openstack/context.py' | |||
37 | --- hooks/charmhelpers/contrib/openstack/context.py 2014-07-28 14:41:41 +0000 | |||
38 | +++ hooks/charmhelpers/contrib/openstack/context.py 2014-08-13 13:55:05 +0000 | |||
39 | @@ -44,7 +44,10 @@ | |||
40 | 44 | neutron_plugin_attribute, | 44 | neutron_plugin_attribute, |
41 | 45 | ) | 45 | ) |
42 | 46 | 46 | ||
44 | 47 | from charmhelpers.contrib.network.ip import get_address_in_network | 47 | from charmhelpers.contrib.network.ip import ( |
45 | 48 | get_address_in_network, | ||
46 | 49 | get_ipv6_addr, | ||
47 | 50 | ) | ||
48 | 48 | 51 | ||
49 | 49 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' | 52 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' |
50 | 50 | 53 | ||
51 | @@ -401,9 +404,12 @@ | |||
52 | 401 | 404 | ||
53 | 402 | cluster_hosts = {} | 405 | cluster_hosts = {} |
54 | 403 | l_unit = local_unit().replace('/', '-') | 406 | l_unit = local_unit().replace('/', '-') |
58 | 404 | cluster_hosts[l_unit] = \ | 407 | if config('prefer-ipv6'): |
59 | 405 | get_address_in_network(config('os-internal-network'), | 408 | addr = get_ipv6_addr() |
60 | 406 | unit_get('private-address')) | 409 | else: |
61 | 410 | addr = unit_get('private-address') | ||
62 | 411 | cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'), | ||
63 | 412 | addr) | ||
64 | 407 | 413 | ||
65 | 408 | for rid in relation_ids('cluster'): | 414 | for rid in relation_ids('cluster'): |
66 | 409 | for unit in related_units(rid): | 415 | for unit in related_units(rid): |
67 | @@ -414,6 +420,16 @@ | |||
68 | 414 | ctxt = { | 420 | ctxt = { |
69 | 415 | 'units': cluster_hosts, | 421 | 'units': cluster_hosts, |
70 | 416 | } | 422 | } |
71 | 423 | |||
72 | 424 | if config('prefer-ipv6'): | ||
73 | 425 | ctxt['local_host'] = 'ip6-localhost' | ||
74 | 426 | ctxt['haproxy_host'] = '::' | ||
75 | 427 | ctxt['stat_port'] = ':::8888' | ||
76 | 428 | else: | ||
77 | 429 | ctxt['local_host'] = '127.0.0.1' | ||
78 | 430 | ctxt['haproxy_host'] = '0.0.0.0' | ||
79 | 431 | ctxt['stat_port'] = ':8888' | ||
80 | 432 | |||
81 | 417 | if len(cluster_hosts.keys()) > 1: | 433 | if len(cluster_hosts.keys()) > 1: |
82 | 418 | # Enable haproxy when we have enough peers. | 434 | # Enable haproxy when we have enough peers. |
83 | 419 | log('Ensuring haproxy enabled in /etc/default/haproxy.') | 435 | log('Ensuring haproxy enabled in /etc/default/haproxy.') |
84 | 420 | 436 | ||
85 | === modified file 'hooks/charmhelpers/contrib/openstack/ip.py' | |||
86 | --- hooks/charmhelpers/contrib/openstack/ip.py 2014-07-28 11:39:11 +0000 | |||
87 | +++ hooks/charmhelpers/contrib/openstack/ip.py 2014-08-13 13:55:05 +0000 | |||
88 | @@ -7,6 +7,7 @@ | |||
89 | 7 | get_address_in_network, | 7 | get_address_in_network, |
90 | 8 | is_address_in_network, | 8 | is_address_in_network, |
91 | 9 | is_ipv6, | 9 | is_ipv6, |
92 | 10 | get_ipv6_addr, | ||
93 | 10 | ) | 11 | ) |
94 | 11 | 12 | ||
95 | 12 | from charmhelpers.contrib.hahelpers.cluster import is_clustered | 13 | from charmhelpers.contrib.hahelpers.cluster import is_clustered |
96 | @@ -64,10 +65,13 @@ | |||
97 | 64 | vip): | 65 | vip): |
98 | 65 | resolved_address = vip | 66 | resolved_address = vip |
99 | 66 | else: | 67 | else: |
100 | 68 | if config('prefer-ipv6'): | ||
101 | 69 | fallback_addr = get_ipv6_addr() | ||
102 | 70 | else: | ||
103 | 71 | fallback_addr = unit_get(_address_map[endpoint_type]['fallback']) | ||
104 | 67 | resolved_address = get_address_in_network( | 72 | resolved_address = get_address_in_network( |
108 | 68 | config(_address_map[endpoint_type]['config']), | 73 | config(_address_map[endpoint_type]['config']), fallback_addr) |
109 | 69 | unit_get(_address_map[endpoint_type]['fallback']) | 74 | |
107 | 70 | ) | ||
110 | 71 | if resolved_address is None: | 75 | if resolved_address is None: |
111 | 72 | raise ValueError('Unable to resolve a suitable IP address' | 76 | raise ValueError('Unable to resolve a suitable IP address' |
112 | 73 | ' based on charm state and configuration') | 77 | ' based on charm state and configuration') |
113 | 74 | 78 | ||
114 | === modified file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg' | |||
115 | --- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-07-28 14:41:41 +0000 | |||
116 | +++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-08-13 13:55:05 +0000 | |||
117 | @@ -1,6 +1,6 @@ | |||
118 | 1 | global | 1 | global |
121 | 2 | log 127.0.0.1 local0 | 2 | log {{ local_host }} local0 |
122 | 3 | log 127.0.0.1 local1 notice | 3 | log {{ local_host }} local1 notice |
123 | 4 | maxconn 20000 | 4 | maxconn 20000 |
124 | 5 | user haproxy | 5 | user haproxy |
125 | 6 | group haproxy | 6 | group haproxy |
126 | @@ -17,7 +17,7 @@ | |||
127 | 17 | timeout client 30000 | 17 | timeout client 30000 |
128 | 18 | timeout server 30000 | 18 | timeout server 30000 |
129 | 19 | 19 | ||
131 | 20 | listen stats :8888 | 20 | listen stats {{ stat_port }} |
132 | 21 | mode http | 21 | mode http |
133 | 22 | stats enable | 22 | stats enable |
134 | 23 | stats hide-version | 23 | stats hide-version |
135 | 24 | 24 | ||
136 | === modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py' | |||
137 | --- hooks/charmhelpers/contrib/storage/linux/utils.py 2014-07-28 14:41:41 +0000 | |||
138 | +++ hooks/charmhelpers/contrib/storage/linux/utils.py 2014-08-13 13:55:05 +0000 | |||
139 | @@ -46,5 +46,8 @@ | |||
140 | 46 | :returns: boolean: True if the path represents a mounted device, False if | 46 | :returns: boolean: True if the path represents a mounted device, False if |
141 | 47 | it doesn't. | 47 | it doesn't. |
142 | 48 | ''' | 48 | ''' |
143 | 49 | is_partition = bool(re.search(r".*[0-9]+\b", device)) | ||
144 | 49 | out = check_output(['mount']) | 50 | out = check_output(['mount']) |
145 | 51 | if is_partition: | ||
146 | 52 | return bool(re.search(device + r"\b", out)) | ||
147 | 50 | return bool(re.search(device + r"[0-9]+\b", out)) | 53 | return bool(re.search(device + r"[0-9]+\b", out)) |
148 | 51 | 54 | ||
149 | === modified file 'hooks/charmhelpers/core/host.py' | |||
150 | --- hooks/charmhelpers/core/host.py 2014-07-28 14:41:41 +0000 | |||
151 | +++ hooks/charmhelpers/core/host.py 2014-08-13 13:55:05 +0000 | |||
152 | @@ -12,6 +12,8 @@ | |||
153 | 12 | import string | 12 | import string |
154 | 13 | import subprocess | 13 | import subprocess |
155 | 14 | import hashlib | 14 | import hashlib |
156 | 15 | import shutil | ||
157 | 16 | from contextlib import contextmanager | ||
158 | 15 | 17 | ||
159 | 16 | from collections import OrderedDict | 18 | from collections import OrderedDict |
160 | 17 | 19 | ||
161 | @@ -52,7 +54,7 @@ | |||
162 | 52 | def service_running(service): | 54 | def service_running(service): |
163 | 53 | """Determine whether a system service is running""" | 55 | """Determine whether a system service is running""" |
164 | 54 | try: | 56 | try: |
166 | 55 | output = subprocess.check_output(['service', service, 'status']) | 57 | output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT) |
167 | 56 | except subprocess.CalledProcessError: | 58 | except subprocess.CalledProcessError: |
168 | 57 | return False | 59 | return False |
169 | 58 | else: | 60 | else: |
170 | @@ -62,6 +64,16 @@ | |||
171 | 62 | return False | 64 | return False |
172 | 63 | 65 | ||
173 | 64 | 66 | ||
174 | 67 | def service_available(service_name): | ||
175 | 68 | """Determine whether a system service is available""" | ||
176 | 69 | try: | ||
177 | 70 | subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) | ||
178 | 71 | except subprocess.CalledProcessError: | ||
179 | 72 | return False | ||
180 | 73 | else: | ||
181 | 74 | return True | ||
182 | 75 | |||
183 | 76 | |||
184 | 65 | def adduser(username, password=None, shell='/bin/bash', system_user=False): | 77 | def adduser(username, password=None, shell='/bin/bash', system_user=False): |
185 | 66 | """Add a user to the system""" | 78 | """Add a user to the system""" |
186 | 67 | try: | 79 | try: |
187 | @@ -329,3 +341,24 @@ | |||
188 | 329 | pkgcache = apt_pkg.Cache() | 341 | pkgcache = apt_pkg.Cache() |
189 | 330 | pkg = pkgcache[package] | 342 | pkg = pkgcache[package] |
190 | 331 | return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) | 343 | return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) |
191 | 344 | |||
192 | 345 | |||
193 | 346 | @contextmanager | ||
194 | 347 | def chdir(d): | ||
195 | 348 | cur = os.getcwd() | ||
196 | 349 | try: | ||
197 | 350 | yield os.chdir(d) | ||
198 | 351 | finally: | ||
199 | 352 | os.chdir(cur) | ||
200 | 353 | |||
201 | 354 | |||
202 | 355 | def chownr(path, owner, group): | ||
203 | 356 | uid = pwd.getpwnam(owner).pw_uid | ||
204 | 357 | gid = grp.getgrnam(group).gr_gid | ||
205 | 358 | |||
206 | 359 | for root, dirs, files in os.walk(path): | ||
207 | 360 | for name in dirs + files: | ||
208 | 361 | full = os.path.join(root, name) | ||
209 | 362 | broken_symlink = os.path.lexists(full) and not os.path.exists(full) | ||
210 | 363 | if not broken_symlink: | ||
211 | 364 | os.chown(full, uid, gid) | ||
212 | 332 | 365 | ||
213 | === added directory 'hooks/charmhelpers/core/services' | |||
214 | === added file 'hooks/charmhelpers/core/services/__init__.py' | |||
215 | --- hooks/charmhelpers/core/services/__init__.py 1970-01-01 00:00:00 +0000 | |||
216 | +++ hooks/charmhelpers/core/services/__init__.py 2014-08-13 13:55:05 +0000 | |||
217 | @@ -0,0 +1,2 @@ | |||
218 | 1 | from .base import * | ||
219 | 2 | from .helpers import * | ||
220 | 0 | 3 | ||
221 | === added file 'hooks/charmhelpers/core/services/base.py' | |||
222 | --- hooks/charmhelpers/core/services/base.py 1970-01-01 00:00:00 +0000 | |||
223 | +++ hooks/charmhelpers/core/services/base.py 2014-08-13 13:55:05 +0000 | |||
224 | @@ -0,0 +1,305 @@ | |||
225 | 1 | import os | ||
226 | 2 | import re | ||
227 | 3 | import json | ||
228 | 4 | from collections import Iterable | ||
229 | 5 | |||
230 | 6 | from charmhelpers.core import host | ||
231 | 7 | from charmhelpers.core import hookenv | ||
232 | 8 | |||
233 | 9 | |||
234 | 10 | __all__ = ['ServiceManager', 'ManagerCallback', | ||
235 | 11 | 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports', | ||
236 | 12 | 'service_restart', 'service_stop'] | ||
237 | 13 | |||
238 | 14 | |||
239 | 15 | class ServiceManager(object): | ||
240 | 16 | def __init__(self, services=None): | ||
241 | 17 | """ | ||
242 | 18 | Register a list of services, given their definitions. | ||
243 | 19 | |||
244 | 20 | Traditional charm authoring is focused on implementing hooks. That is, | ||
245 | 21 | the charm author is thinking in terms of "What hook am I handling; what | ||
246 | 22 | does this hook need to do?" However, in most cases, the real question | ||
247 | 23 | should be "Do I have the information I need to configure and start this | ||
248 | 24 | piece of software and, if so, what are the steps for doing so?" The | ||
249 | 25 | ServiceManager framework tries to bring the focus to the data and the | ||
250 | 26 | setup tasks, in the most declarative way possible. | ||
251 | 27 | |||
252 | 28 | Service definitions are dicts in the following formats (all keys except | ||
253 | 29 | 'service' are optional):: | ||
254 | 30 | |||
255 | 31 | { | ||
256 | 32 | "service": <service name>, | ||
257 | 33 | "required_data": <list of required data contexts>, | ||
258 | 34 | "data_ready": <one or more callbacks>, | ||
259 | 35 | "data_lost": <one or more callbacks>, | ||
260 | 36 | "start": <one or more callbacks>, | ||
261 | 37 | "stop": <one or more callbacks>, | ||
262 | 38 | "ports": <list of ports to manage>, | ||
263 | 39 | } | ||
264 | 40 | |||
265 | 41 | The 'required_data' list should contain dicts of required data (or | ||
266 | 42 | dependency managers that act like dicts and know how to collect the data). | ||
267 | 43 | Only when all items in the 'required_data' list are populated are the list | ||
268 | 44 | of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more | ||
269 | 45 | information. | ||
270 | 46 | |||
271 | 47 | The 'data_ready' value should be either a single callback, or a list of | ||
272 | 48 | callbacks, to be called when all items in 'required_data' pass `is_ready()`. | ||
273 | 49 | Each callback will be called with the service name as the only parameter. | ||
274 | 50 | After all of the 'data_ready' callbacks are called, the 'start' callbacks | ||
275 | 51 | are fired. | ||
276 | 52 | |||
277 | 53 | The 'data_lost' value should be either a single callback, or a list of | ||
278 | 54 | callbacks, to be called when a 'required_data' item no longer passes | ||
279 | 55 | `is_ready()`. Each callback will be called with the service name as the | ||
280 | 56 | only parameter. After all of the 'data_lost' callbacks are called, | ||
281 | 57 | the 'stop' callbacks are fired. | ||
282 | 58 | |||
283 | 59 | The 'start' value should be either a single callback, or a list of | ||
284 | 60 | callbacks, to be called when starting the service, after the 'data_ready' | ||
285 | 61 | callbacks are complete. Each callback will be called with the service | ||
286 | 62 | name as the only parameter. This defaults to | ||
287 | 63 | `[host.service_start, services.open_ports]`. | ||
288 | 64 | |||
289 | 65 | The 'stop' value should be either a single callback, or a list of | ||
290 | 66 | callbacks, to be called when stopping the service. If the service is | ||
291 | 67 | being stopped because it no longer has all of its 'required_data', this | ||
292 | 68 | will be called after all of the 'data_lost' callbacks are complete. | ||
293 | 69 | Each callback will be called with the service name as the only parameter. | ||
294 | 70 | This defaults to `[services.close_ports, host.service_stop]`. | ||
295 | 71 | |||
296 | 72 | The 'ports' value should be a list of ports to manage. The default | ||
297 | 73 | 'start' handler will open the ports after the service is started, | ||
298 | 74 | and the default 'stop' handler will close the ports prior to stopping | ||
299 | 75 | the service. | ||
300 | 76 | |||
301 | 77 | |||
302 | 78 | Examples: | ||
303 | 79 | |||
304 | 80 | The following registers an Upstart service called bingod that depends on | ||
305 | 81 | a mongodb relation and which runs a custom `db_migrate` function prior to | ||
306 | 82 | restarting the service, and a Runit service called spadesd:: | ||
307 | 83 | |||
308 | 84 | manager = services.ServiceManager([ | ||
309 | 85 | { | ||
310 | 86 | 'service': 'bingod', | ||
311 | 87 | 'ports': [80, 443], | ||
312 | 88 | 'required_data': [MongoRelation(), config(), {'my': 'data'}], | ||
313 | 89 | 'data_ready': [ | ||
314 | 90 | services.template(source='bingod.conf'), | ||
315 | 91 | services.template(source='bingod.ini', | ||
316 | 92 | target='/etc/bingod.ini', | ||
317 | 93 | owner='bingo', perms=0400), | ||
318 | 94 | ], | ||
319 | 95 | }, | ||
320 | 96 | { | ||
321 | 97 | 'service': 'spadesd', | ||
322 | 98 | 'data_ready': services.template(source='spadesd_run.j2', | ||
323 | 99 | target='/etc/sv/spadesd/run', | ||
324 | 100 | perms=0555), | ||
325 | 101 | 'start': runit_start, | ||
326 | 102 | 'stop': runit_stop, | ||
327 | 103 | }, | ||
328 | 104 | ]) | ||
329 | 105 | manager.manage() | ||
330 | 106 | """ | ||
331 | 107 | self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') | ||
332 | 108 | self._ready = None | ||
333 | 109 | self.services = {} | ||
334 | 110 | for service in services or []: | ||
335 | 111 | service_name = service['service'] | ||
336 | 112 | self.services[service_name] = service | ||
337 | 113 | |||
338 | 114 | def manage(self): | ||
339 | 115 | """ | ||
340 | 116 | Handle the current hook by doing The Right Thing with the registered services. | ||
341 | 117 | """ | ||
342 | 118 | hook_name = hookenv.hook_name() | ||
343 | 119 | if hook_name == 'stop': | ||
344 | 120 | self.stop_services() | ||
345 | 121 | else: | ||
346 | 122 | self.provide_data() | ||
347 | 123 | self.reconfigure_services() | ||
348 | 124 | |||
349 | 125 | def provide_data(self): | ||
350 | 126 | hook_name = hookenv.hook_name() | ||
351 | 127 | for service in self.services.values(): | ||
352 | 128 | for provider in service.get('provided_data', []): | ||
353 | 129 | if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name): | ||
354 | 130 | data = provider.provide_data() | ||
355 | 131 | if provider._is_ready(data): | ||
356 | 132 | hookenv.relation_set(None, data) | ||
357 | 133 | |||
358 | 134 | def reconfigure_services(self, *service_names): | ||
359 | 135 | """ | ||
360 | 136 | Update all files for one or more registered services, and, | ||
361 | 137 | if ready, optionally restart them. | ||
362 | 138 | |||
363 | 139 | If no service names are given, reconfigures all registered services. | ||
364 | 140 | """ | ||
365 | 141 | for service_name in service_names or self.services.keys(): | ||
366 | 142 | if self.is_ready(service_name): | ||
367 | 143 | self.fire_event('data_ready', service_name) | ||
368 | 144 | self.fire_event('start', service_name, default=[ | ||
369 | 145 | service_restart, | ||
370 | 146 | manage_ports]) | ||
371 | 147 | self.save_ready(service_name) | ||
372 | 148 | else: | ||
373 | 149 | if self.was_ready(service_name): | ||
374 | 150 | self.fire_event('data_lost', service_name) | ||
375 | 151 | self.fire_event('stop', service_name, default=[ | ||
376 | 152 | manage_ports, | ||
377 | 153 | service_stop]) | ||
378 | 154 | self.save_lost(service_name) | ||
379 | 155 | |||
380 | 156 | def stop_services(self, *service_names): | ||
381 | 157 | """ | ||
382 | 158 | Stop one or more registered services, by name. | ||
383 | 159 | |||
384 | 160 | If no service names are given, stops all registered services. | ||
385 | 161 | """ | ||
386 | 162 | for service_name in service_names or self.services.keys(): | ||
387 | 163 | self.fire_event('stop', service_name, default=[ | ||
388 | 164 | manage_ports, | ||
389 | 165 | service_stop]) | ||
390 | 166 | |||
391 | 167 | def get_service(self, service_name): | ||
392 | 168 | """ | ||
393 | 169 | Given the name of a registered service, return its service definition. | ||
394 | 170 | """ | ||
395 | 171 | service = self.services.get(service_name) | ||
396 | 172 | if not service: | ||
397 | 173 | raise KeyError('Service not registered: %s' % service_name) | ||
398 | 174 | return service | ||
399 | 175 | |||
400 | 176 | def fire_event(self, event_name, service_name, default=None): | ||
401 | 177 | """ | ||
402 | 178 | Fire a data_ready, data_lost, start, or stop event on a given service. | ||
403 | 179 | """ | ||
404 | 180 | service = self.get_service(service_name) | ||
405 | 181 | callbacks = service.get(event_name, default) | ||
406 | 182 | if not callbacks: | ||
407 | 183 | return | ||
408 | 184 | if not isinstance(callbacks, Iterable): | ||
409 | 185 | callbacks = [callbacks] | ||
410 | 186 | for callback in callbacks: | ||
411 | 187 | if isinstance(callback, ManagerCallback): | ||
412 | 188 | callback(self, service_name, event_name) | ||
413 | 189 | else: | ||
414 | 190 | callback(service_name) | ||
415 | 191 | |||
416 | 192 | def is_ready(self, service_name): | ||
417 | 193 | """ | ||
418 | 194 | Determine if a registered service is ready, by checking its 'required_data'. | ||
419 | 195 | |||
420 | 196 | A 'required_data' item can be any mapping type, and is considered ready | ||
421 | 197 | if `bool(item)` evaluates as True. | ||
422 | 198 | """ | ||
423 | 199 | service = self.get_service(service_name) | ||
424 | 200 | reqs = service.get('required_data', []) | ||
425 | 201 | return all(bool(req) for req in reqs) | ||
426 | 202 | |||
427 | 203 | def _load_ready_file(self): | ||
428 | 204 | if self._ready is not None: | ||
429 | 205 | return | ||
430 | 206 | if os.path.exists(self._ready_file): | ||
431 | 207 | with open(self._ready_file) as fp: | ||
432 | 208 | self._ready = set(json.load(fp)) | ||
433 | 209 | else: | ||
434 | 210 | self._ready = set() | ||
435 | 211 | |||
436 | 212 | def _save_ready_file(self): | ||
437 | 213 | if self._ready is None: | ||
438 | 214 | return | ||
439 | 215 | with open(self._ready_file, 'w') as fp: | ||
440 | 216 | json.dump(list(self._ready), fp) | ||
441 | 217 | |||
442 | 218 | def save_ready(self, service_name): | ||
443 | 219 | """ | ||
444 | 220 | Save an indicator that the given service is now data_ready. | ||
445 | 221 | """ | ||
446 | 222 | self._load_ready_file() | ||
447 | 223 | self._ready.add(service_name) | ||
448 | 224 | self._save_ready_file() | ||
449 | 225 | |||
450 | 226 | def save_lost(self, service_name): | ||
451 | 227 | """ | ||
452 | 228 | Save an indicator that the given service is no longer data_ready. | ||
453 | 229 | """ | ||
454 | 230 | self._load_ready_file() | ||
455 | 231 | self._ready.discard(service_name) | ||
456 | 232 | self._save_ready_file() | ||
457 | 233 | |||
458 | 234 | def was_ready(self, service_name): | ||
459 | 235 | """ | ||
460 | 236 | Determine if the given service was previously data_ready. | ||
461 | 237 | """ | ||
462 | 238 | self._load_ready_file() | ||
463 | 239 | return service_name in self._ready | ||
464 | 240 | |||
465 | 241 | |||
466 | 242 | class ManagerCallback(object): | ||
467 | 243 | """ | ||
468 | 244 | Special case of a callback that takes the `ServiceManager` instance | ||
469 | 245 | in addition to the service name. | ||
470 | 246 | |||
471 | 247 | Subclasses should implement `__call__` which should accept three parameters: | ||
472 | 248 | |||
473 | 249 | * `manager` The `ServiceManager` instance | ||
474 | 250 | * `service_name` The name of the service it's being triggered for | ||
475 | 251 | * `event_name` The name of the event that this callback is handling | ||
476 | 252 | """ | ||
477 | 253 | def __call__(self, manager, service_name, event_name): | ||
478 | 254 | raise NotImplementedError() | ||
479 | 255 | |||
480 | 256 | |||
481 | 257 | class PortManagerCallback(ManagerCallback): | ||
482 | 258 | """ | ||
483 | 259 | Callback class that will open or close ports, for use as either | ||
484 | 260 | a start or stop action. | ||
485 | 261 | """ | ||
486 | 262 | def __call__(self, manager, service_name, event_name): | ||
487 | 263 | service = manager.get_service(service_name) | ||
488 | 264 | new_ports = service.get('ports', []) | ||
489 | 265 | port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) | ||
490 | 266 | if os.path.exists(port_file): | ||
491 | 267 | with open(port_file) as fp: | ||
492 | 268 | old_ports = fp.read().split(',') | ||
493 | 269 | for old_port in old_ports: | ||
494 | 270 | if bool(old_port): | ||
495 | 271 | old_port = int(old_port) | ||
496 | 272 | if old_port not in new_ports: | ||
497 | 273 | hookenv.close_port(old_port) | ||
498 | 274 | with open(port_file, 'w') as fp: | ||
499 | 275 | fp.write(','.join(str(port) for port in new_ports)) | ||
500 | 276 | for port in new_ports: | ||
501 | 277 | if event_name == 'start': | ||
502 | 278 | hookenv.open_port(port) | ||
503 | 279 | elif event_name == 'stop': | ||
504 | 280 | hookenv.close_port(port) | ||
505 | 281 | |||
506 | 282 | |||
507 | 283 | def service_stop(service_name): | ||
508 | 284 | """ | ||
509 | 285 | Wrapper around host.service_stop to prevent spurious "unknown service" | ||
510 | 286 | messages in the logs. | ||
511 | 287 | """ | ||
512 | 288 | if host.service_running(service_name): | ||
513 | 289 | host.service_stop(service_name) | ||
514 | 290 | |||
515 | 291 | |||
516 | 292 | def service_restart(service_name): | ||
517 | 293 | """ | ||
518 | 294 | Wrapper around host.service_restart to prevent spurious "unknown service" | ||
519 | 295 | messages in the logs. | ||
520 | 296 | """ | ||
521 | 297 | if host.service_available(service_name): | ||
522 | 298 | if host.service_running(service_name): | ||
523 | 299 | host.service_restart(service_name) | ||
524 | 300 | else: | ||
525 | 301 | host.service_start(service_name) | ||
526 | 302 | |||
527 | 303 | |||
528 | 304 | # Convenience aliases | ||
529 | 305 | open_ports = close_ports = manage_ports = PortManagerCallback() | ||
530 | 0 | 306 | ||
531 | === added file 'hooks/charmhelpers/core/services/helpers.py' | |||
532 | --- hooks/charmhelpers/core/services/helpers.py 1970-01-01 00:00:00 +0000 | |||
533 | +++ hooks/charmhelpers/core/services/helpers.py 2014-08-13 13:55:05 +0000 | |||
534 | @@ -0,0 +1,125 @@ | |||
535 | 1 | from charmhelpers.core import hookenv | ||
536 | 2 | from charmhelpers.core import templating | ||
537 | 3 | |||
538 | 4 | from charmhelpers.core.services.base import ManagerCallback | ||
539 | 5 | |||
540 | 6 | |||
541 | 7 | __all__ = ['RelationContext', 'TemplateCallback', | ||
542 | 8 | 'render_template', 'template'] | ||
543 | 9 | |||
544 | 10 | |||
545 | 11 | class RelationContext(dict): | ||
546 | 12 | """ | ||
547 | 13 | Base class for a context generator that gets relation data from juju. | ||
548 | 14 | |||
549 | 15 | Subclasses must provide the attributes `name`, which is the name of the | ||
550 | 16 | interface of interest, `interface`, which is the type of the interface of | ||
551 | 17 | interest, and `required_keys`, which is the set of keys required for the | ||
552 | 18 | relation to be considered complete. The data for all interfaces matching | ||
553 | 19 | the `name` attribute that are complete will used to populate the dictionary | ||
554 | 20 | values (see `get_data`, below). | ||
555 | 21 | |||
556 | 22 | The generated context will be namespaced under the interface type, to prevent | ||
557 | 23 | potential naming conflicts. | ||
558 | 24 | """ | ||
559 | 25 | name = None | ||
560 | 26 | interface = None | ||
561 | 27 | required_keys = [] | ||
562 | 28 | |||
563 | 29 | def __init__(self, *args, **kwargs): | ||
564 | 30 | super(RelationContext, self).__init__(*args, **kwargs) | ||
565 | 31 | self.get_data() | ||
566 | 32 | |||
567 | 33 | def __bool__(self): | ||
568 | 34 | """ | ||
569 | 35 | Returns True if all of the required_keys are available. | ||
570 | 36 | """ | ||
571 | 37 | return self.is_ready() | ||
572 | 38 | |||
573 | 39 | __nonzero__ = __bool__ | ||
574 | 40 | |||
575 | 41 | def __repr__(self): | ||
576 | 42 | return super(RelationContext, self).__repr__() | ||
577 | 43 | |||
578 | 44 | def is_ready(self): | ||
579 | 45 | """ | ||
580 | 46 | Returns True if all of the `required_keys` are available from any units. | ||
581 | 47 | """ | ||
582 | 48 | ready = len(self.get(self.name, [])) > 0 | ||
583 | 49 | if not ready: | ||
584 | 50 | hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG) | ||
585 | 51 | return ready | ||
586 | 52 | |||
587 | 53 | def _is_ready(self, unit_data): | ||
588 | 54 | """ | ||
589 | 55 | Helper method that tests a set of relation data and returns True if | ||
590 | 56 | all of the `required_keys` are present. | ||
591 | 57 | """ | ||
592 | 58 | return set(unit_data.keys()).issuperset(set(self.required_keys)) | ||
593 | 59 | |||
594 | 60 | def get_data(self): | ||
595 | 61 | """ | ||
596 | 62 | Retrieve the relation data for each unit involved in a relation and, | ||
597 | 63 | if complete, store it in a list under `self[self.name]`. This | ||
598 | 64 | is automatically called when the RelationContext is instantiated. | ||
599 | 65 | |||
600 | 66 | The units are sorted lexicographically first by the service ID, then by | ||
601 | 67 | the unit ID. Thus, if an interface has two other services, 'db:1' | ||
602 | 68 | and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1', | ||
603 | 69 | and 'db:2' having one unit, 'mediawiki/0', all of which have a complete | ||
604 | 70 | set of data, the relation data for the units will be stored in the | ||
605 | 71 | order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'. | ||
606 | 72 | |||
607 | 73 | If you only care about a single unit on the relation, you can just | ||
608 | 74 | access it as `{{ interface[0]['key'] }}`. However, if you can at all | ||
609 | 75 | support multiple units on a relation, you should iterate over the list, | ||
610 | 76 | like:: | ||
611 | 77 | |||
612 | 78 | {% for unit in interface -%} | ||
613 | 79 | {{ unit['key'] }}{% if not loop.last %},{% endif %} | ||
614 | 80 | {%- endfor %} | ||
615 | 81 | |||
616 | 82 | Note that since all sets of relation data from all related services and | ||
617 | 83 | units are in a single list, if you need to know which service or unit a | ||
618 | 84 | set of data came from, you'll need to extend this class to preserve | ||
619 | 85 | that information. | ||
620 | 86 | """ | ||
621 | 87 | if not hookenv.relation_ids(self.name): | ||
622 | 88 | return | ||
623 | 89 | |||
624 | 90 | ns = self.setdefault(self.name, []) | ||
625 | 91 | for rid in sorted(hookenv.relation_ids(self.name)): | ||
626 | 92 | for unit in sorted(hookenv.related_units(rid)): | ||
627 | 93 | reldata = hookenv.relation_get(rid=rid, unit=unit) | ||
628 | 94 | if self._is_ready(reldata): | ||
629 | 95 | ns.append(reldata) | ||
630 | 96 | |||
def provide_data(self):
    """
    Return a dict of data to be relation_set for this interface.

    The base implementation publishes nothing; subclasses override this
    to contribute data to the relation.
    """
    return {}
636 | 102 | |||
637 | 103 | |||
class TemplateCallback(ManagerCallback):
    """
    Callback class that will render a template, for use as a ready action.

    When invoked, merges all of the service's `required_data` contexts
    into a single dict and renders `source` to `target` with the given
    ownership and permissions.
    """
    # NOTE: 0o444 is the modern octal spelling (valid on Python 2.6+ and 3);
    # the legacy 0444 literal is a syntax error on Python 3. Same value.
    def __init__(self, source, target, owner='root', group='root', perms=0o444):
        self.source = source  # template name, resolved by templating.render
        self.target = target  # absolute path the rendered file is written to
        self.owner = owner
        self.group = group
        self.perms = perms    # octal file mode for the rendered file

    def __call__(self, manager, service_name, event_name):
        # Build the template context by merging every required_data dict
        # for the service; later contexts win on key collisions.
        service = manager.get_service(service_name)
        context = {}
        for ctx in service.get('required_data', []):
            context.update(ctx)
        templating.render(self.source, self.target, context,
                          self.owner, self.group, self.perms)


# Convenience aliases for templates
render_template = template = TemplateCallback
660 | 0 | 126 | ||
661 | === added file 'hooks/charmhelpers/core/templating.py' | |||
662 | --- hooks/charmhelpers/core/templating.py 1970-01-01 00:00:00 +0000 | |||
663 | +++ hooks/charmhelpers/core/templating.py 2014-08-13 13:55:05 +0000 | |||
664 | @@ -0,0 +1,51 @@ | |||
665 | 1 | import os | ||
666 | 2 | |||
667 | 3 | from charmhelpers.core import host | ||
668 | 4 | from charmhelpers.core import hookenv | ||
669 | 5 | |||
670 | 6 | |||
def render(source, target, context, owner='root', group='root', perms=0o444, templates_dir=None):
    """
    Render a template to a file on disk.

    The `source` path, if not absolute, is relative to the `templates_dir`.

    The `target` path should be absolute.

    The context should be a dict containing the values to be replaced in
    the template.

    The `owner`, `group`, and `perms` options will be passed to `write_file`.

    If omitted, `templates_dir` defaults to the `templates` folder in the
    charm.

    Note: Using this requires python-jinja2; if it is not installed,
    calling this will attempt to use charmhelpers.fetch.apt_install to
    install it.
    """
    try:
        from jinja2 import FileSystemLoader, Environment, exceptions
    except ImportError:
        try:
            from charmhelpers.fetch import apt_install
        except ImportError:
            hookenv.log('Could not import jinja2, and could not import '
                        'charmhelpers.fetch to install it',
                        level=hookenv.ERROR)
            raise
        apt_install('python-jinja2', fatal=True)
        from jinja2 import FileSystemLoader, Environment, exceptions

    if templates_dir is None:
        templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
    # Renamed from `loader`: this is the jinja2 Environment, not the loader.
    template_env = Environment(loader=FileSystemLoader(templates_dir))
    try:
        template = template_env.get_template(source)
    except exceptions.TemplateNotFound:
        hookenv.log('Could not load template %s from %s.' %
                    (source, templates_dir),
                    level=hookenv.ERROR)
        # Bare raise preserves the original traceback (`raise e` would not
        # on Python 2).
        raise
    content = template.render(context)
    # Ensure the parent directory exists before writing the rendered file.
    host.mkdir(os.path.dirname(target))
    host.write_file(target, content, owner, group, perms)
716 | 0 | 52 | ||
717 | === modified file 'hooks/charmhelpers/fetch/__init__.py' | |||
718 | --- hooks/charmhelpers/fetch/__init__.py 2014-07-28 14:41:41 +0000 | |||
719 | +++ hooks/charmhelpers/fetch/__init__.py 2014-08-13 13:55:05 +0000 | |||
720 | @@ -122,6 +122,7 @@ | |||
721 | 122 | # Tell apt to build an in-memory cache to prevent race conditions (if | 122 | # Tell apt to build an in-memory cache to prevent race conditions (if |
722 | 123 | # another process is already building the cache). | 123 | # another process is already building the cache). |
723 | 124 | apt_pkg.config.set("Dir::Cache::pkgcache", "") | 124 | apt_pkg.config.set("Dir::Cache::pkgcache", "") |
724 | 125 | apt_pkg.config.set("Dir::Cache::srcpkgcache", "") | ||
725 | 125 | 126 | ||
726 | 126 | cache = apt_pkg.Cache() | 127 | cache = apt_pkg.Cache() |
727 | 127 | _pkgs = [] | 128 | _pkgs = [] |
Approved by jamespage