Merge lp:~johnsca/charm-helpers/services-framework into lp:charm-helpers
- services-framework
- Merge into devel
Status: | Merged |
---|---|
Merged at revision: | 191 |
Proposed branch: | lp:~johnsca/charm-helpers/services-framework |
Merge into: | lp:charm-helpers |
Diff against target: |
1537 lines (+1445/-2) 12 files modified
charmhelpers/core/host.py (+34/-1) charmhelpers/core/services/__init__.py (+2/-0) charmhelpers/core/services/base.py (+305/-0) charmhelpers/core/services/helpers.py (+125/-0) charmhelpers/core/templating.py (+51/-0) test_requirements.txt (+0/-1) tests/core/templates/cloud_controller_ng.yml (+173/-0) tests/core/templates/fake_cc.yml (+3/-0) tests/core/templates/nginx.conf (+154/-0) tests/core/templates/test.conf (+3/-0) tests/core/test_services.py (+531/-0) tests/core/test_templating.py (+64/-0) |
To merge this branch: | bzr merge lp:~johnsca/charm-helpers/services-framework |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Charles Butler (community) | Approve | ||
Tim Van Steenburgh | Approve | ||
charmers | Pending | ||
Review via email:
|
Commit message
Description of the change
Split services framework off into separate merge proposal, and combined commits, for easier review.
![](/+icing/build/overlay/assets/skins/sam/images/close.gif)
Cory Johns (johnsca) wrote : | # |
![](/+icing/build/overlay/assets/skins/sam/images/close.gif)
James Troup (elmo) wrote : | # |
Comments inline
![](/+icing/build/overlay/assets/skins/sam/images/close.gif)
Cory Johns (johnsca) wrote : | # |
Replies / comments inline.
- 180. By Cory Johns
-
Cleanup based on review
![](/+icing/build/overlay/assets/skins/sam/images/close.gif)
Tim Van Steenburgh (tvansteenburgh) wrote : | # |
+1 LGTM.
This is awesome, I can't wait to try it out! I found a few typos (see inline diff comments), but other than that, this is good-to-go. Tests all pass.
Looking forward to implementing a charm with this to get a better feel for it. You might consider adding more examples to the published docs.
Very nice work!
- 181. By Cory Johns
-
Fixed documentation typos, per review
![](/+icing/build/overlay/assets/skins/sam/images/close.gif)
Charles Butler (lazypower) wrote : | # |
Looks good to me. I'm on the band wagon for using this as well.
I see nothing truly heinous in the code base, and considering there are already a slew of CF charms relying on this - I'd like to get more traction. Merging so we can pilot this officially in charms and get a charm-school published on this.
Make sure you're watching the charm-helpers bug tracker for any incoming bugs related to the services framework, and thank you for helping solve some long running issues with a very unique approach to service declaration, implementation, and constraints (on a per service level via relation even!)
+1
Preview Diff
1 | === modified file 'charmhelpers/core/host.py' |
2 | --- charmhelpers/core/host.py 2014-07-23 11:18:54 +0000 |
3 | +++ charmhelpers/core/host.py 2014-08-05 13:03:33 +0000 |
4 | @@ -12,6 +12,8 @@ |
5 | import string |
6 | import subprocess |
7 | import hashlib |
8 | +import shutil |
9 | +from contextlib import contextmanager |
10 | |
11 | from collections import OrderedDict |
12 | |
13 | @@ -52,7 +54,7 @@ |
14 | def service_running(service): |
15 | """Determine whether a system service is running""" |
16 | try: |
17 | - output = subprocess.check_output(['service', service, 'status']) |
18 | + output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT) |
19 | except subprocess.CalledProcessError: |
20 | return False |
21 | else: |
22 | @@ -62,6 +64,16 @@ |
23 | return False |
24 | |
25 | |
26 | +def service_available(service_name): |
27 | + """Determine whether a system service is available""" |
28 | + try: |
29 | + subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) |
30 | + except subprocess.CalledProcessError: |
31 | + return False |
32 | + else: |
33 | + return True |
34 | + |
35 | + |
36 | def adduser(username, password=None, shell='/bin/bash', system_user=False): |
37 | """Add a user to the system""" |
38 | try: |
39 | @@ -329,3 +341,24 @@ |
40 | pkgcache = apt_pkg.Cache() |
41 | pkg = pkgcache[package] |
42 | return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) |
43 | + |
44 | + |
45 | +@contextmanager |
46 | +def chdir(d): |
47 | + cur = os.getcwd() |
48 | + try: |
49 | + yield os.chdir(d) |
50 | + finally: |
51 | + os.chdir(cur) |
52 | + |
53 | + |
54 | +def chownr(path, owner, group): |
55 | + uid = pwd.getpwnam(owner).pw_uid |
56 | + gid = grp.getgrnam(group).gr_gid |
57 | + |
58 | + for root, dirs, files in os.walk(path): |
59 | + for name in dirs + files: |
60 | + full = os.path.join(root, name) |
61 | + broken_symlink = os.path.lexists(full) and not os.path.exists(full) |
62 | + if not broken_symlink: |
63 | + os.chown(full, uid, gid) |
64 | |
65 | === added directory 'charmhelpers/core/services' |
66 | === added file 'charmhelpers/core/services/__init__.py' |
67 | --- charmhelpers/core/services/__init__.py 1970-01-01 00:00:00 +0000 |
68 | +++ charmhelpers/core/services/__init__.py 2014-08-05 13:03:33 +0000 |
69 | @@ -0,0 +1,2 @@ |
70 | +from .base import * |
71 | +from .helpers import * |
72 | |
73 | === added file 'charmhelpers/core/services/base.py' |
74 | --- charmhelpers/core/services/base.py 1970-01-01 00:00:00 +0000 |
75 | +++ charmhelpers/core/services/base.py 2014-08-05 13:03:33 +0000 |
76 | @@ -0,0 +1,305 @@ |
77 | +import os |
78 | +import re |
79 | +import json |
80 | +from collections import Iterable |
81 | + |
82 | +from charmhelpers.core import host |
83 | +from charmhelpers.core import hookenv |
84 | + |
85 | + |
86 | +__all__ = ['ServiceManager', 'ManagerCallback', |
87 | + 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports', |
88 | + 'service_restart', 'service_stop'] |
89 | + |
90 | + |
91 | +class ServiceManager(object): |
92 | + def __init__(self, services=None): |
93 | + """ |
94 | + Register a list of services, given their definitions. |
95 | + |
96 | + Traditional charm authoring is focused on implementing hooks. That is, |
97 | + the charm author is thinking in terms of "What hook am I handling; what |
98 | + does this hook need to do?" However, in most cases, the real question |
99 | + should be "Do I have the information I need to configure and start this |
100 | + piece of software and, if so, what are the steps for doing so?" The |
101 | + ServiceManager framework tries to bring the focus to the data and the |
102 | + setup tasks, in the most declarative way possible. |
103 | + |
104 | + Service definitions are dicts in the following formats (all keys except |
105 | + 'service' are optional): |
106 | + |
107 | + { |
108 | + "service": <service name>, |
109 | + "required_data": <list of required data contexts>, |
110 | + "data_ready": <one or more callbacks>, |
111 | + "data_lost": <one or more callbacks>, |
112 | + "start": <one or more callbacks>, |
113 | + "stop": <one or more callbacks>, |
114 | + "ports": <list of ports to manage>, |
115 | + } |
116 | + |
117 | + The 'required_data' list should contain dicts of required data (or |
118 | + dependency managers that act like dicts and know how to collect the data). |
119 | + Only when all items in the 'required_data' list are populated are the list |
120 | + of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more |
121 | + information. |
122 | + |
123 | + The 'data_ready' value should be either a single callback, or a list of |
124 | + callbacks, to be called when all items in 'required_data' pass `is_ready()`. |
125 | + Each callback will be called with the service name as the only parameter. |
126 | + After all of the 'data_ready' callbacks are called, the 'start' callbacks |
127 | + are fired. |
128 | + |
129 | + The 'data_lost' value should be either a single callback, or a list of |
130 | + callbacks, to be called when a 'required_data' item no longer passes |
131 | + `is_ready()`. Each callback will be called with the service name as the |
132 | + only parameter. After all of the 'data_lost' callbacks are called, |
133 | + the 'stop' callbacks are fired. |
134 | + |
135 | + The 'start' value should be either a single callback, or a list of |
136 | + callbacks, to be called when starting the service, after the 'data_ready' |
137 | + callbacks are complete. Each callback will be called with the service |
138 | + name as the only parameter. This defaults to |
139 | + `[host.service_start, services.open_ports]`. |
140 | + |
141 | + The 'stop' value should be either a single callback, or a list of |
142 | + callbacks, to be called when stopping the service. If the service is |
143 | + being stopped because it no longer has all of its 'required_data', this |
144 | + will be called after all of the 'data_lost' callbacks are complete. |
145 | + Each callback will be called with the service name as the only parameter. |
146 | + This defaults to `[services.close_ports, host.service_stop]`. |
147 | + |
148 | + The 'ports' value should be a list of ports to manage. The default |
149 | + 'start' handler will open the ports after the service is started, |
150 | + and the default 'stop' handler will close the ports prior to stopping |
151 | + the service. |
152 | + |
153 | + |
154 | + Examples: |
155 | + |
156 | + The following registers an Upstart service called bingod that depends on |
157 | + a mongodb relation and which runs a custom `db_migrate` function prior to |
158 | + restarting the service, and a Runit service called spadesd. |
159 | + |
160 | + manager = services.ServiceManager([ |
161 | + { |
162 | + 'service': 'bingod', |
163 | + 'ports': [80, 443], |
164 | + 'required_data': [MongoRelation(), config(), {'my': 'data'}], |
165 | + 'data_ready': [ |
166 | + services.template(source='bingod.conf'), |
167 | + services.template(source='bingod.ini', |
168 | + target='/etc/bingod.ini', |
169 | + owner='bingo', perms=0400), |
170 | + ], |
171 | + }, |
172 | + { |
173 | + 'service': 'spadesd', |
174 | + 'data_ready': services.template(source='spadesd_run.j2', |
175 | + target='/etc/sv/spadesd/run', |
176 | + perms=0555), |
177 | + 'start': runit_start, |
178 | + 'stop': runit_stop, |
179 | + }, |
180 | + ]) |
181 | + manager.manage() |
182 | + """ |
183 | + self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') |
184 | + self._ready = None |
185 | + self.services = {} |
186 | + for service in services or []: |
187 | + service_name = service['service'] |
188 | + self.services[service_name] = service |
189 | + |
190 | + def manage(self): |
191 | + """ |
192 | + Handle the current hook by doing The Right Thing with the registered services. |
193 | + """ |
194 | + hook_name = hookenv.hook_name() |
195 | + if hook_name == 'stop': |
196 | + self.stop_services() |
197 | + else: |
198 | + self.provide_data() |
199 | + self.reconfigure_services() |
200 | + |
201 | + def provide_data(self): |
202 | + hook_name = hookenv.hook_name() |
203 | + for service in self.services.values(): |
204 | + for provider in service.get('provided_data', []): |
205 | + if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name): |
206 | + data = provider.provide_data() |
207 | + if provider._is_ready(data): |
208 | + hookenv.relation_set(None, data) |
209 | + |
210 | + def reconfigure_services(self, *service_names): |
211 | + """ |
212 | + Update all files for one or more registered services, and, |
213 | + if ready, optionally restart them. |
214 | + |
215 | + If no service names are given, reconfigures all registered services. |
216 | + """ |
217 | + for service_name in service_names or self.services.keys(): |
218 | + if self.is_ready(service_name): |
219 | + self.fire_event('data_ready', service_name) |
220 | + self.fire_event('start', service_name, default=[ |
221 | + service_restart, |
222 | + manage_ports]) |
223 | + self.save_ready(service_name) |
224 | + else: |
225 | + if self.was_ready(service_name): |
226 | + self.fire_event('data_lost', service_name) |
227 | + self.fire_event('stop', service_name, default=[ |
228 | + manage_ports, |
229 | + service_stop]) |
230 | + self.save_lost(service_name) |
231 | + |
232 | + def stop_services(self, *service_names): |
233 | + """ |
234 | + Stop one or more registered services, by name. |
235 | + |
236 | + If no service names are given, stops all registered services. |
237 | + """ |
238 | + for service_name in service_names or self.services.keys(): |
239 | + self.fire_event('stop', service_name, default=[ |
240 | + manage_ports, |
241 | + service_stop]) |
242 | + |
243 | + def get_service(self, service_name): |
244 | + """ |
245 | + Given the name of a registered service, return its service definition. |
246 | + """ |
247 | + service = self.services.get(service_name) |
248 | + if not service: |
249 | + raise KeyError('Service not registered: %s' % service_name) |
250 | + return service |
251 | + |
252 | + def fire_event(self, event_name, service_name, default=None): |
253 | + """ |
254 | + Fire a data_ready, data_lost, start, or stop event on a given service. |
255 | + """ |
256 | + service = self.get_service(service_name) |
257 | + callbacks = service.get(event_name, default) |
258 | + if not callbacks: |
259 | + return |
260 | + if not isinstance(callbacks, Iterable): |
261 | + callbacks = [callbacks] |
262 | + for callback in callbacks: |
263 | + if isinstance(callback, ManagerCallback): |
264 | + callback(self, service_name, event_name) |
265 | + else: |
266 | + callback(service_name) |
267 | + |
268 | + def is_ready(self, service_name): |
269 | + """ |
270 | + Determine if a registered service is ready, by checking its 'required_data'. |
271 | + |
272 | + A 'required_data' item can be any mapping type, and is considered ready |
273 | + if `bool(item)` evaluates as True. |
274 | + """ |
275 | + service = self.get_service(service_name) |
276 | + reqs = service.get('required_data', []) |
277 | + return all(bool(req) for req in reqs) |
278 | + |
279 | + def _load_ready_file(self): |
280 | + if self._ready is not None: |
281 | + return |
282 | + if os.path.exists(self._ready_file): |
283 | + with open(self._ready_file) as fp: |
284 | + self._ready = set(json.load(fp)) |
285 | + else: |
286 | + self._ready = set() |
287 | + |
288 | + def _save_ready_file(self): |
289 | + if self._ready is None: |
290 | + return |
291 | + with open(self._ready_file, 'w') as fp: |
292 | + json.dump(list(self._ready), fp) |
293 | + |
294 | + def save_ready(self, service_name): |
295 | + """ |
296 | + Save an indicator that the given service is now data_ready. |
297 | + """ |
298 | + self._load_ready_file() |
299 | + self._ready.add(service_name) |
300 | + self._save_ready_file() |
301 | + |
302 | + def save_lost(self, service_name): |
303 | + """ |
304 | + Save an indicator that the given service is no longer data_ready. |
305 | + """ |
306 | + self._load_ready_file() |
307 | + self._ready.discard(service_name) |
308 | + self._save_ready_file() |
309 | + |
310 | + def was_ready(self, service_name): |
311 | + """ |
312 | + Determine if the given service was previously data_ready. |
313 | + """ |
314 | + self._load_ready_file() |
315 | + return service_name in self._ready |
316 | + |
317 | + |
318 | +class ManagerCallback(object): |
319 | + """ |
320 | + Special case of a callback that takes the `ServiceManager` instance |
321 | + in addition to the service name. |
322 | + |
323 | + Subclasses should implement `__call__` which should accept three parameters: |
324 | + |
325 | + * `manager` The `ServiceManager` instance |
326 | + * `service_name` The name of the service it's being triggered for |
327 | + * `event_name` The name of the event that this callback is handling |
328 | + """ |
329 | + def __call__(self, manager, service_name, event_name): |
330 | + raise NotImplementedError() |
331 | + |
332 | + |
333 | +class PortManagerCallback(ManagerCallback): |
334 | + """ |
335 | + Callback class that will open or close ports, for use as either |
336 | + a start or stop action. |
337 | + """ |
338 | + def __call__(self, manager, service_name, event_name): |
339 | + service = manager.get_service(service_name) |
340 | + new_ports = service.get('ports', []) |
341 | + port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) |
342 | + if os.path.exists(port_file): |
343 | + with open(port_file) as fp: |
344 | + old_ports = fp.read().split(',') |
345 | + for old_port in old_ports: |
346 | + if bool(old_port): |
347 | + old_port = int(old_port) |
348 | + if old_port not in new_ports: |
349 | + hookenv.close_port(old_port) |
350 | + with open(port_file, 'w') as fp: |
351 | + fp.write(','.join(str(port) for port in new_ports)) |
352 | + for port in new_ports: |
353 | + if event_name == 'start': |
354 | + hookenv.open_port(port) |
355 | + elif event_name == 'stop': |
356 | + hookenv.close_port(port) |
357 | + |
358 | + |
359 | +def service_stop(service_name): |
360 | + """ |
361 | + Wrapper around host.service_stop to prevent spurious "unknown service" |
362 | + messages in the logs. |
363 | + """ |
364 | + if host.service_running(service_name): |
365 | + host.service_stop(service_name) |
366 | + |
367 | + |
368 | +def service_restart(service_name): |
369 | + """ |
370 | + Wrapper around host.service_restart to prevent spurious "unknown service" |
371 | + messages in the logs. |
372 | + """ |
373 | + if host.service_available(service_name): |
374 | + if host.service_running(service_name): |
375 | + host.service_restart(service_name) |
376 | + else: |
377 | + host.service_start(service_name) |
378 | + |
379 | + |
380 | +# Convenience aliases |
381 | +open_ports = close_ports = manage_ports = PortManagerCallback() |
382 | |
383 | === added file 'charmhelpers/core/services/helpers.py' |
384 | --- charmhelpers/core/services/helpers.py 1970-01-01 00:00:00 +0000 |
385 | +++ charmhelpers/core/services/helpers.py 2014-08-05 13:03:33 +0000 |
386 | @@ -0,0 +1,125 @@ |
387 | +from charmhelpers.core import hookenv |
388 | +from charmhelpers.core import templating |
389 | + |
390 | +from charmhelpers.core.services.base import ManagerCallback |
391 | + |
392 | + |
393 | +__all__ = ['RelationContext', 'TemplateCallback', |
394 | + 'render_template', 'template'] |
395 | + |
396 | + |
397 | +class RelationContext(dict): |
398 | + """ |
399 | + Base class for a context generator that gets relation data from juju. |
400 | + |
401 | + Subclasses must provide the attributes `name`, which is the name of the |
402 | + interface of interest, `interface`, which is the type of the interface of |
403 | + interest, and `required_keys`, which is the set of keys required for the |
404 | + relation to be considered complete. The data for all interfaces matching |
97 | 405 | + the `name` attribute that are complete will be used to populate the dictionary |
406 | + values (see `get_data`, below). |
407 | + |
408 | + The generated context will be namespaced under the interface type, to prevent |
409 | + potential naming conflicts. |
410 | + """ |
411 | + name = None |
412 | + interface = None |
413 | + required_keys = [] |
414 | + |
415 | + def __init__(self, *args, **kwargs): |
416 | + super(RelationContext, self).__init__(*args, **kwargs) |
417 | + self.get_data() |
418 | + |
419 | + def __bool__(self): |
420 | + """ |
421 | + Returns True if all of the required_keys are available. |
422 | + """ |
423 | + return self.is_ready() |
424 | + |
425 | + __nonzero__ = __bool__ |
426 | + |
427 | + def __repr__(self): |
428 | + return super(RelationContext, self).__repr__() |
429 | + |
430 | + def is_ready(self): |
431 | + """ |
432 | + Returns True if all of the `required_keys` are available from any units. |
433 | + """ |
434 | + ready = len(self.get(self.name, [])) > 0 |
435 | + if not ready: |
436 | + hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG) |
437 | + return ready |
438 | + |
439 | + def _is_ready(self, unit_data): |
440 | + """ |
441 | + Helper method that tests a set of relation data and returns True if |
442 | + all of the `required_keys` are present. |
443 | + """ |
444 | + return set(unit_data.keys()).issuperset(set(self.required_keys)) |
445 | + |
446 | + def get_data(self): |
447 | + """ |
448 | + Retrieve the relation data for each unit involved in a relation and, |
449 | + if complete, store it in a list under `self[self.name]`. This |
450 | + is automatically called when the RelationContext is instantiated. |
451 | + |
144 | 452 | + The units are sorted lexicographically first by the service ID, then by |
453 | + the unit ID. Thus, if an interface has two other services, 'db:1' |
454 | + and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1', |
455 | + and 'db:2' having one unit, 'mediawiki/0', all of which have a complete |
456 | + set of data, the relation data for the units will be stored in the |
457 | + order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'. |
458 | + |
459 | + If you only care about a single unit on the relation, you can just |
460 | + access it as `{{ interface[0]['key'] }}`. However, if you can at all |
461 | + support multiple units on a relation, you should iterate over the list, |
462 | + like: |
463 | + |
464 | + {% for unit in interface -%} |
465 | + {{ unit['key'] }}{% if not loop.last %},{% endif %} |
466 | + {%- endfor %} |
467 | + |
468 | + Note that since all sets of relation data from all related services and |
469 | + units are in a single list, if you need to know which service or unit a |
470 | + set of data came from, you'll need to extend this class to preserve |
471 | + that information. |
472 | + """ |
473 | + if not hookenv.relation_ids(self.name): |
474 | + return |
475 | + |
476 | + ns = self.setdefault(self.name, []) |
477 | + for rid in sorted(hookenv.relation_ids(self.name)): |
478 | + for unit in sorted(hookenv.related_units(rid)): |
479 | + reldata = hookenv.relation_get(rid=rid, unit=unit) |
480 | + if self._is_ready(reldata): |
481 | + ns.append(reldata) |
482 | + |
483 | + def provide_data(self): |
484 | + """ |
485 | + Return data to be relation_set for this interface. |
486 | + """ |
487 | + return {} |
488 | + |
489 | + |
490 | +class TemplateCallback(ManagerCallback): |
491 | + """ |
492 | + Callback class that will render a template, for use as a ready action. |
493 | + """ |
494 | + def __init__(self, source, target, owner='root', group='root', perms=0444): |
495 | + self.source = source |
496 | + self.target = target |
497 | + self.owner = owner |
498 | + self.group = group |
499 | + self.perms = perms |
500 | + |
501 | + def __call__(self, manager, service_name, event_name): |
502 | + service = manager.get_service(service_name) |
503 | + context = {} |
504 | + for ctx in service.get('required_data', []): |
505 | + context.update(ctx) |
506 | + templating.render(self.source, self.target, context, |
507 | + self.owner, self.group, self.perms) |
508 | + |
509 | + |
510 | +# Convenience aliases for templates |
511 | +render_template = template = TemplateCallback |
512 | |
513 | === added file 'charmhelpers/core/templating.py' |
514 | --- charmhelpers/core/templating.py 1970-01-01 00:00:00 +0000 |
515 | +++ charmhelpers/core/templating.py 2014-08-05 13:03:33 +0000 |
516 | @@ -0,0 +1,51 @@ |
517 | +import os |
518 | + |
519 | +from charmhelpers.core import host |
520 | +from charmhelpers.core import hookenv |
521 | + |
522 | + |
523 | +def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None): |
524 | + """ |
525 | + Render a template. |
526 | + |
527 | + The `source` path, if not absolute, is relative to the `templates_dir`. |
528 | + |
529 | + The `target` path should be absolute. |
530 | + |
531 | + The context should be a dict containing the values to be replaced in the |
532 | + template. |
533 | + |
534 | + The `owner`, `group`, and `perms` options will be passed to `write_file`. |
535 | + |
536 | + If omitted, `templates_dir` defaults to the `templates` folder in the charm. |
537 | + |
538 | + Note: Using this requires python-jinja2; if it is not installed, calling |
539 | + this will attempt to use charmhelpers.fetch.apt_install to install it. |
540 | + """ |
541 | + try: |
542 | + from jinja2 import FileSystemLoader, Environment, exceptions |
543 | + except ImportError: |
544 | + try: |
545 | + from charmhelpers.fetch import apt_install |
546 | + except ImportError: |
547 | + hookenv.log('Could not import jinja2, and could not import ' |
548 | + 'charmhelpers.fetch to install it', |
549 | + level=hookenv.ERROR) |
550 | + raise |
551 | + apt_install('python-jinja2', fatal=True) |
552 | + from jinja2 import FileSystemLoader, Environment, exceptions |
553 | + |
554 | + if templates_dir is None: |
555 | + templates_dir = os.path.join(hookenv.charm_dir(), 'templates') |
556 | + loader = Environment(loader=FileSystemLoader(templates_dir)) |
557 | + try: |
558 | + source = source |
559 | + template = loader.get_template(source) |
560 | + except exceptions.TemplateNotFound as e: |
561 | + hookenv.log('Could not load template %s from %s.' % |
562 | + (source, templates_dir), |
563 | + level=hookenv.ERROR) |
564 | + raise e |
565 | + content = template.render(context) |
566 | + host.mkdir(os.path.dirname(target)) |
567 | + host.write_file(target, content, owner, group, perms) |
568 | |
569 | === modified file 'test_requirements.txt' |
570 | --- test_requirements.txt 2014-06-02 12:18:22 +0000 |
571 | +++ test_requirements.txt 2014-08-05 13:03:33 +0000 |
572 | @@ -17,6 +17,5 @@ |
573 | Tempita==0.5.1 |
574 | bzr+http://bazaar.launchpad.net/~yellow/python-shelltoolbox/trunk@17#egg=shelltoolbox |
575 | http://alastairs-place.net/projects/netifaces/netifaces-0.6.tar.gz |
576 | -netaddr==0.7.5 |
577 | bzr==2.6.0 |
578 | Jinja2==2.7.2 |
579 | |
580 | === added directory 'tests/core/templates' |
581 | === added file 'tests/core/templates/cloud_controller_ng.yml' |
582 | --- tests/core/templates/cloud_controller_ng.yml 1970-01-01 00:00:00 +0000 |
583 | +++ tests/core/templates/cloud_controller_ng.yml 2014-08-05 13:03:33 +0000 |
584 | @@ -0,0 +1,173 @@ |
585 | +--- |
586 | +# TODO cc_ip cc public ip |
587 | +local_route: {{ domain }} |
588 | +port: {{ cc_port }} |
589 | +pid_filename: /var/vcap/sys/run/cloud_controller_ng/cloud_controller_ng.pid |
590 | +development_mode: false |
591 | + |
592 | +message_bus_servers: |
593 | + - nats://{{ nats['user'] }}:{{ nats['password'] }}@{{ nats['address'] }}:{{ nats['port'] }} |
594 | + |
595 | +external_domain: |
596 | + - api.{{ domain }} |
597 | + |
598 | +system_domain_organization: {{ default_organization }} |
599 | +system_domain: {{ domain }} |
600 | +app_domains: [ {{ domain }} ] |
601 | +srv_api_uri: http://api.{{ domain }} |
602 | + |
603 | +default_app_memory: 1024 |
604 | + |
605 | +cc_partition: default |
606 | + |
607 | +bootstrap_admin_email: admin@{{ default_organization }} |
608 | + |
609 | +bulk_api: |
610 | + auth_user: bulk_api |
611 | + auth_password: "Password" |
612 | + |
613 | +nginx: |
614 | + use_nginx: false |
615 | + instance_socket: "/var/vcap/sys/run/cloud_controller_ng/cloud_controller.sock" |
616 | + |
617 | +index: 1 |
618 | +name: cloud_controller_ng |
619 | + |
620 | +info: |
621 | + name: vcap |
622 | + build: "2222" |
623 | + version: 2 |
624 | + support_address: http://support.cloudfoundry.com |
625 | + description: Cloud Foundry sponsored by Pivotal |
626 | + api_version: 2.0.0 |
627 | + |
628 | + |
629 | +directories: |
630 | + tmpdir: /var/vcap/data/cloud_controller_ng/tmp |
631 | + |
632 | + |
633 | +logging: |
634 | + file: /var/vcap/sys/log/cloud_controller_ng/cloud_controller_ng.log |
635 | + |
636 | + syslog: vcap.cloud_controller_ng |
637 | + |
638 | + level: debug2 |
639 | + max_retries: 1 |
640 | + |
641 | + |
642 | + |
643 | + |
644 | + |
645 | +db: &db |
646 | + database: sqlite:///var/lib/cloudfoundry/cfcloudcontroller/db/cc.db |
647 | + max_connections: 25 |
648 | + pool_timeout: 10 |
649 | + log_level: debug2 |
650 | + |
651 | + |
652 | +login: |
653 | + url: http://uaa.{{ domain }} |
654 | + |
655 | +uaa: |
656 | + url: http://uaa.{{ domain }} |
657 | + resource_id: cloud_controller |
658 | + #symmetric_secret: cc-secret |
659 | + verification_key: | |
660 | + -----BEGIN PUBLIC KEY----- |
661 | + MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDHFr+KICms+tuT1OXJwhCUmR2d |
662 | + KVy7psa8xzElSyzqx7oJyfJ1JZyOzToj9T5SfTIq396agbHJWVfYphNahvZ/7uMX |
663 | + qHxf+ZH9BL1gk9Y6kCnbM5R60gfwjyW1/dQPjOzn9N394zd2FJoFHwdq9Qs0wBug |
664 | + spULZVNRxq7veq/fzwIDAQAB |
665 | + -----END PUBLIC KEY----- |
666 | + |
667 | +# App staging parameters |
668 | +staging: |
669 | + max_staging_runtime: 900 |
670 | + auth: |
671 | + user: |
672 | + password: "Password" |
673 | + |
674 | +maximum_health_check_timeout: 180 |
675 | + |
676 | +runtimes_file: /var/lib/cloudfoundry/cfcloudcontroller/jobs/config/runtimes.yml |
677 | +stacks_file: /var/lib/cloudfoundry/cfcloudcontroller/jobs/config/stacks.yml |
678 | + |
679 | +quota_definitions: |
680 | + free: |
681 | + non_basic_services_allowed: false |
682 | + total_services: 2 |
683 | + total_routes: 1000 |
684 | + memory_limit: 1024 |
685 | + paid: |
686 | + non_basic_services_allowed: true |
687 | + total_services: 32 |
688 | + total_routes: 1000 |
689 | + memory_limit: 204800 |
690 | + runaway: |
691 | + non_basic_services_allowed: true |
692 | + total_services: 500 |
693 | + total_routes: 1000 |
694 | + memory_limit: 204800 |
695 | + trial: |
696 | + non_basic_services_allowed: false |
697 | + total_services: 10 |
698 | + memory_limit: 2048 |
699 | + total_routes: 1000 |
700 | + trial_db_allowed: true |
701 | + |
702 | +default_quota_definition: free |
703 | + |
704 | +resource_pool: |
705 | + minimum_size: 65536 |
706 | + maximum_size: 536870912 |
707 | + resource_directory_key: cc-resources |
708 | + |
709 | + cdn: |
710 | + uri: |
711 | + key_pair_id: |
712 | + private_key: "" |
713 | + |
714 | + fog_connection: {"provider":"Local","local_root":"/var/vcap/nfs/store"} |
715 | + |
716 | +packages: |
717 | + app_package_directory_key: cc-packages |
718 | + |
719 | + cdn: |
720 | + uri: |
721 | + key_pair_id: |
722 | + private_key: "" |
723 | + |
724 | + fog_connection: {"provider":"Local","local_root":"/var/vcap/nfs/store"} |
725 | + |
726 | +droplets: |
727 | + droplet_directory_key: cc-droplets |
728 | + |
729 | + cdn: |
730 | + uri: |
731 | + key_pair_id: |
732 | + private_key: "" |
733 | + |
734 | + fog_connection: {"provider":"Local","local_root":"/var/vcap/nfs/store"} |
735 | + |
736 | +buildpacks: |
737 | + buildpack_directory_key: cc-buildpacks |
738 | + |
739 | + cdn: |
740 | + uri: |
741 | + key_pair_id: |
742 | + private_key: "" |
743 | + |
744 | + fog_connection: {"provider":"Local","local_root":"/var/vcap/nfs/store"} |
745 | + |
746 | +db_encryption_key: Password |
747 | + |
748 | +trial_db: |
749 | + guid: "78ad16cf-3c22-4427-a982-b9d35d746914" |
750 | + |
751 | +tasks_disabled: false |
752 | +hm9000_noop: true |
753 | +flapping_crash_count_threshold: 3 |
754 | + |
755 | +disable_custom_buildpacks: false |
756 | + |
757 | +broker_client_timeout_seconds: 60 |
758 | |
759 | === added file 'tests/core/templates/fake_cc.yml' |
760 | --- tests/core/templates/fake_cc.yml 1970-01-01 00:00:00 +0000 |
761 | +++ tests/core/templates/fake_cc.yml 2014-08-05 13:03:33 +0000 |
762 | @@ -0,0 +1,3 @@ |
763 | +host: {{nats['host']}} |
764 | +port: {{nats['port']}} |
765 | +domain: {{router['domain']}} |
766 | |
767 | === added file 'tests/core/templates/nginx.conf' |
768 | --- tests/core/templates/nginx.conf 1970-01-01 00:00:00 +0000 |
769 | +++ tests/core/templates/nginx.conf 2014-08-05 13:03:33 +0000 |
770 | @@ -0,0 +1,154 @@ |
771 | +# deployment cloudcontroller nginx.conf |
772 | +#user vcap vcap; |
773 | + |
774 | +error_log /var/vcap/sys/log/nginx_ccng/nginx.error.log; |
775 | +pid /var/vcap/sys/run/nginx_ccng/nginx.pid; |
776 | + |
777 | +events { |
778 | + worker_connections 8192; |
779 | + use epoll; |
780 | +} |
781 | + |
782 | +http { |
783 | + include mime.types; |
784 | + default_type text/html; |
785 | + server_tokens off; |
786 | + variables_hash_max_size 1024; |
787 | + |
788 | + log_format main '$host - [$time_local] ' |
789 | + '"$request" $status $bytes_sent ' |
790 | + '"$http_referer" "$http_#user_agent" ' |
791 | + '$proxy_add_x_forwarded_for response_time:$upstream_response_time'; |
792 | + |
793 | + access_log /var/vcap/sys/log/nginx_ccng/nginx.access.log main; |
794 | + |
795 | + sendfile on; #enable use of sendfile() |
796 | + tcp_nopush on; |
797 | + tcp_nodelay on; #disable nagel's algorithm |
798 | + |
799 | + keepalive_timeout 75 20; #inherited from router |
800 | + |
801 | + client_max_body_size 256M; #already enforced upstream/but doesn't hurt. |
802 | + |
803 | + upstream cloud_controller { |
804 | + server unix:/var/vcap/sys/run/cloud_controller_ng/cloud_controller.sock; |
805 | + } |
806 | + |
807 | + server { |
808 | + listen {{ nginx_port }}; |
809 | + server_name _; |
810 | + server_name_in_redirect off; |
811 | + proxy_send_timeout 300; |
812 | + proxy_read_timeout 300; |
813 | + |
814 | + # proxy and log all CC traffic |
815 | + location / { |
816 | + access_log /var/vcap/sys/log/nginx_ccng/nginx.access.log main; |
817 | + proxy_buffering off; |
818 | + proxy_set_header Host $host; |
819 | + proxy_set_header X-Real_IP $remote_addr; |
820 | + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; |
821 | + proxy_redirect off; |
822 | + proxy_connect_timeout 10; |
823 | + proxy_pass http://cloud_controller; |
824 | + } |
825 | + |
826 | + |
827 | + # used for x-accel-redirect uri://location/foo.txt |
828 | + # nginx will serve the file root || location || foo.txt |
829 | + location /droplets/ { |
830 | + internal; |
831 | + root /var/vcap/nfs/store; |
832 | + } |
833 | + |
834 | + |
835 | + |
836 | + # used for x-accel-redirect uri://location/foo.txt |
837 | + # nginx will serve the file root || location || foo.txt |
838 | + location /cc-packages/ { |
839 | + internal; |
840 | + root /var/vcap/nfs/store; |
841 | + } |
842 | + |
843 | + |
844 | + # used for x-accel-redirect uri://location/foo.txt |
845 | + # nginx will serve the file root || location || foo.txt |
846 | + location /cc-droplets/ { |
847 | + internal; |
848 | + root /var/vcap/nfs/store; |
849 | + } |
850 | + |
851 | + |
852 | + location ~ (/apps/.*/application|/v2/apps/.*/bits|/services/v\d+/configurations/.*/serialized/data|/v2/buildpacks/.*/bits) { |
853 | + # Pass altered request body to this location |
854 | + upload_pass @cc_uploads; |
855 | + upload_pass_args on; |
856 | + |
857 | + # Store files to this directory |
858 | + upload_store /var/vcap/data/cloud_controller_ng/tmp/uploads; |
859 | + |
860 | + # No limit for output body forwarded to CC |
861 | + upload_max_output_body_len 0; |
862 | + |
863 | + # Allow uploaded files to be read only by #user |
864 | + #upload_store_access #user:r; |
865 | + |
866 | + # Set specified fields in request body |
867 | + upload_set_form_field "${upload_field_name}_name" $upload_file_name; |
868 | + upload_set_form_field "${upload_field_name}_path" $upload_tmp_path; |
869 | + |
870 | + #forward the following fields from existing body |
871 | + upload_pass_form_field "^resources$"; |
872 | + upload_pass_form_field "^_method$"; |
873 | + |
874 | + #on any error, delete uploaded files. |
875 | + upload_cleanup 400-505; |
876 | + } |
877 | + |
878 | + location ~ /staging/(buildpack_cache|droplets)/.*/upload { |
879 | + |
880 | + # Allow download the droplets and buildpacks |
881 | + if ($request_method = GET){ |
882 | + proxy_pass http://cloud_controller; |
883 | + } |
884 | + |
885 | + # Pass along auth header |
886 | + set $auth_header $upstream_http_x_auth; |
887 | + proxy_set_header Authorization $auth_header; |
888 | + |
889 | + # Pass altered request body to this location |
890 | + upload_pass @cc_uploads; |
891 | + |
892 | + # Store files to this directory |
893 | + upload_store /var/vcap/data/cloud_controller_ng/tmp/staged_droplet_uploads; |
894 | + |
895 | + # Allow uploaded files to be read only by #user |
896 | + upload_store_access user:r; |
897 | + |
898 | + # Set specified fields in request body |
899 | + upload_set_form_field "droplet_path" $upload_tmp_path; |
900 | + |
901 | + #on any error, delete uploaded files. |
902 | + upload_cleanup 400-505; |
903 | + } |
904 | + |
905 | + # Pass altered request body to a backend |
906 | + location @cc_uploads { |
907 | + proxy_pass http://unix:/var/vcap/sys/run/cloud_controller_ng/cloud_controller.sock; |
908 | + } |
909 | + |
910 | + location ~ ^/internal_redirect/(.*){ |
911 | + # only allow internal redirects |
912 | + internal; |
913 | + |
914 | + set $download_url $1; |
915 | + |
916 | + #have to manualy pass along auth header |
917 | + set $auth_header $upstream_http_x_auth; |
918 | + proxy_set_header Authorization $auth_header; |
919 | + |
920 | + # Download the file and send it to client |
921 | + proxy_pass $download_url; |
922 | + } |
923 | + } |
924 | +} |
925 | |
926 | === added file 'tests/core/templates/test.conf' |
927 | --- tests/core/templates/test.conf 1970-01-01 00:00:00 +0000 |
928 | +++ tests/core/templates/test.conf 2014-08-05 13:03:33 +0000 |
929 | @@ -0,0 +1,3 @@ |
930 | +something |
931 | +listen {{nginx_port}} |
932 | +something else |
933 | |
934 | === added file 'tests/core/test_services.py' |
935 | --- tests/core/test_services.py 1970-01-01 00:00:00 +0000 |
936 | +++ tests/core/test_services.py 2014-08-05 13:03:33 +0000 |
937 | @@ -0,0 +1,531 @@ |
938 | +import mock |
939 | +import unittest |
940 | +from charmhelpers.core import hookenv |
941 | +from charmhelpers.core import services |
942 | + |
943 | + |
944 | +class TestServiceManager(unittest.TestCase): |
945 | + def setUp(self): |
946 | + self.pcharm_dir = mock.patch.object(hookenv, 'charm_dir') |
947 | + self.mcharm_dir = self.pcharm_dir.start() |
948 | + self.mcharm_dir.return_value = 'charm_dir' |
949 | + |
950 | + def tearDown(self): |
951 | + self.pcharm_dir.stop() |
952 | + |
953 | + def test_register(self): |
954 | + manager = services.ServiceManager([ |
955 | + {'service': 'service1', |
956 | + 'foo': 'bar'}, |
957 | + {'service': 'service2', |
958 | + 'qux': 'baz'}, |
959 | + ]) |
960 | + self.assertEqual(manager.services, { |
961 | + 'service1': {'service': 'service1', |
962 | + 'foo': 'bar'}, |
963 | + 'service2': {'service': 'service2', |
964 | + 'qux': 'baz'}, |
965 | + }) |
966 | + |
967 | + @mock.patch.object(services.ServiceManager, 'reconfigure_services') |
968 | + @mock.patch.object(services.ServiceManager, 'stop_services') |
969 | + @mock.patch.object(hookenv, 'hook_name') |
970 | + def test_manage_stop(self, hook_name, stop_services, reconfigure_services): |
971 | + manager = services.ServiceManager() |
972 | + hook_name.return_value = 'stop' |
973 | + manager.manage() |
974 | + stop_services.assert_called_once_with() |
975 | + assert not reconfigure_services.called |
976 | + |
977 | + @mock.patch.object(services.ServiceManager, 'provide_data') |
978 | + @mock.patch.object(services.ServiceManager, 'reconfigure_services') |
979 | + @mock.patch.object(services.ServiceManager, 'stop_services') |
980 | + @mock.patch.object(hookenv, 'hook_name') |
981 | + def test_manage_other(self, hook_name, stop_services, reconfigure_services, provide_data): |
982 | + manager = services.ServiceManager() |
983 | + hook_name.return_value = 'config-changed' |
984 | + manager.manage() |
985 | + assert not stop_services.called |
986 | + reconfigure_services.assert_called_once_with() |
987 | + provide_data.assert_called_once_with() |
988 | + |
989 | + @mock.patch.object(services.ServiceManager, 'save_ready') |
990 | + @mock.patch.object(services.ServiceManager, 'fire_event') |
991 | + @mock.patch.object(services.ServiceManager, 'is_ready') |
992 | + def test_reconfigure_ready(self, is_ready, fire_event, save_ready): |
993 | + manager = services.ServiceManager([ |
994 | + {'service': 'service1'}, {'service': 'service2'}]) |
995 | + is_ready.return_value = True |
996 | + manager.reconfigure_services() |
997 | + is_ready.assert_has_calls([ |
998 | + mock.call('service1'), |
999 | + mock.call('service2'), |
1000 | + ], any_order=True) |
1001 | + fire_event.assert_has_calls([ |
1002 | + mock.call('data_ready', 'service1'), |
1003 | + mock.call('start', 'service1', default=[ |
1004 | + services.service_restart, |
1005 | + services.manage_ports]), |
1006 | + ], any_order=False) |
1007 | + fire_event.assert_has_calls([ |
1008 | + mock.call('data_ready', 'service2'), |
1009 | + mock.call('start', 'service2', default=[ |
1010 | + services.service_restart, |
1011 | + services.manage_ports]), |
1012 | + ], any_order=False) |
1013 | + save_ready.assert_has_calls([ |
1014 | + mock.call('service1'), |
1015 | + mock.call('service2'), |
1016 | + ], any_order=True) |
1017 | + |
1018 | + @mock.patch.object(services.ServiceManager, 'save_ready') |
1019 | + @mock.patch.object(services.ServiceManager, 'fire_event') |
1020 | + @mock.patch.object(services.ServiceManager, 'is_ready') |
1021 | + def test_reconfigure_ready_list(self, is_ready, fire_event, save_ready): |
1022 | + manager = services.ServiceManager([ |
1023 | + {'service': 'service1'}, {'service': 'service2'}]) |
1024 | + is_ready.return_value = True |
1025 | + manager.reconfigure_services('service3', 'service4') |
1026 | + self.assertEqual(is_ready.call_args_list, [ |
1027 | + mock.call('service3'), |
1028 | + mock.call('service4'), |
1029 | + ]) |
1030 | + self.assertEqual(fire_event.call_args_list, [ |
1031 | + mock.call('data_ready', 'service3'), |
1032 | + mock.call('start', 'service3', default=[ |
1033 | + services.service_restart, |
1034 | + services.open_ports]), |
1035 | + mock.call('data_ready', 'service4'), |
1036 | + mock.call('start', 'service4', default=[ |
1037 | + services.service_restart, |
1038 | + services.open_ports]), |
1039 | + ]) |
1040 | + self.assertEqual(save_ready.call_args_list, [ |
1041 | + mock.call('service3'), |
1042 | + mock.call('service4'), |
1043 | + ]) |
1044 | + |
1045 | + @mock.patch.object(services.ServiceManager, 'save_lost') |
1046 | + @mock.patch.object(services.ServiceManager, 'fire_event') |
1047 | + @mock.patch.object(services.ServiceManager, 'was_ready') |
1048 | + @mock.patch.object(services.ServiceManager, 'is_ready') |
1049 | + def test_reconfigure_not_ready(self, is_ready, was_ready, fire_event, save_lost): |
1050 | + manager = services.ServiceManager([ |
1051 | + {'service': 'service1'}, {'service': 'service2'}]) |
1052 | + is_ready.return_value = False |
1053 | + was_ready.return_value = False |
1054 | + manager.reconfigure_services() |
1055 | + is_ready.assert_has_calls([ |
1056 | + mock.call('service1'), |
1057 | + mock.call('service2'), |
1058 | + ], any_order=True) |
1059 | + fire_event.assert_has_calls([ |
1060 | + mock.call('stop', 'service1', default=[ |
1061 | + services.close_ports, |
1062 | + services.service_stop]), |
1063 | + mock.call('stop', 'service2', default=[ |
1064 | + services.close_ports, |
1065 | + services.service_stop]), |
1066 | + ], any_order=True) |
1067 | + save_lost.assert_has_calls([ |
1068 | + mock.call('service1'), |
1069 | + mock.call('service2'), |
1070 | + ], any_order=True) |
1071 | + |
1072 | + @mock.patch.object(services.ServiceManager, 'save_lost') |
1073 | + @mock.patch.object(services.ServiceManager, 'fire_event') |
1074 | + @mock.patch.object(services.ServiceManager, 'was_ready') |
1075 | + @mock.patch.object(services.ServiceManager, 'is_ready') |
1076 | + def test_reconfigure_no_longer_ready(self, is_ready, was_ready, fire_event, save_lost): |
1077 | + manager = services.ServiceManager([ |
1078 | + {'service': 'service1'}, {'service': 'service2'}]) |
1079 | + is_ready.return_value = False |
1080 | + was_ready.return_value = True |
1081 | + manager.reconfigure_services() |
1082 | + is_ready.assert_has_calls([ |
1083 | + mock.call('service1'), |
1084 | + mock.call('service2'), |
1085 | + ], any_order=True) |
1086 | + fire_event.assert_has_calls([ |
1087 | + mock.call('data_lost', 'service1'), |
1088 | + mock.call('stop', 'service1', default=[ |
1089 | + services.close_ports, |
1090 | + services.service_stop]), |
1091 | + ], any_order=False) |
1092 | + fire_event.assert_has_calls([ |
1093 | + mock.call('data_lost', 'service2'), |
1094 | + mock.call('stop', 'service2', default=[ |
1095 | + services.close_ports, |
1096 | + services.service_stop]), |
1097 | + ], any_order=False) |
1098 | + save_lost.assert_has_calls([ |
1099 | + mock.call('service1'), |
1100 | + mock.call('service2'), |
1101 | + ], any_order=True) |
1102 | + |
1103 | + @mock.patch.object(services.ServiceManager, 'fire_event') |
1104 | + def test_stop_services(self, fire_event): |
1105 | + manager = services.ServiceManager([ |
1106 | + {'service': 'service1'}, {'service': 'service2'}]) |
1107 | + manager.stop_services() |
1108 | + fire_event.assert_has_calls([ |
1109 | + mock.call('stop', 'service1', default=[ |
1110 | + services.close_ports, |
1111 | + services.service_stop]), |
1112 | + mock.call('stop', 'service2', default=[ |
1113 | + services.close_ports, |
1114 | + services.service_stop]), |
1115 | + ], any_order=True) |
1116 | + |
1117 | + @mock.patch.object(services.ServiceManager, 'fire_event') |
1118 | + def test_stop_services_list(self, fire_event): |
1119 | + manager = services.ServiceManager([ |
1120 | + {'service': 'service1'}, {'service': 'service2'}]) |
1121 | + manager.stop_services('service3', 'service4') |
1122 | + self.assertEqual(fire_event.call_args_list, [ |
1123 | + mock.call('stop', 'service3', default=[ |
1124 | + services.close_ports, |
1125 | + services.service_stop]), |
1126 | + mock.call('stop', 'service4', default=[ |
1127 | + services.close_ports, |
1128 | + services.service_stop]), |
1129 | + ]) |
1130 | + |
1131 | + def test_get_service(self): |
1132 | + service = {'service': 'test', 'test': 'test_service'} |
1133 | + manager = services.ServiceManager([service]) |
1134 | + self.assertEqual(manager.get_service('test'), service) |
1135 | + |
1136 | + def test_get_service_not_registered(self): |
1137 | + service = {'service': 'test', 'test': 'test_service'} |
1138 | + manager = services.ServiceManager([service]) |
1139 | + self.assertRaises(KeyError, manager.get_service, 'foo') |
1140 | + |
1141 | + @mock.patch.object(services.ServiceManager, 'get_service') |
1142 | + def test_fire_event_default(self, get_service): |
1143 | + get_service.return_value = {} |
1144 | + cb = mock.Mock() |
1145 | + manager = services.ServiceManager() |
1146 | + manager.fire_event('event', 'service', cb) |
1147 | + cb.assert_called_once_with('service') |
1148 | + |
1149 | + @mock.patch.object(services.ServiceManager, 'get_service') |
1150 | + def test_fire_event_default_list(self, get_service): |
1151 | + get_service.return_value = {} |
1152 | + cb = mock.Mock() |
1153 | + manager = services.ServiceManager() |
1154 | + manager.fire_event('event', 'service', [cb]) |
1155 | + cb.assert_called_once_with('service') |
1156 | + |
1157 | + @mock.patch.object(services.ServiceManager, 'get_service') |
1158 | + def test_fire_event_simple_callback(self, get_service): |
1159 | + cb = mock.Mock() |
1160 | + dcb = mock.Mock() |
1161 | + get_service.return_value = {'event': cb} |
1162 | + manager = services.ServiceManager() |
1163 | + manager.fire_event('event', 'service', dcb) |
1164 | + assert not dcb.called |
1165 | + cb.assert_called_once_with('service') |
1166 | + |
1167 | + @mock.patch.object(services.ServiceManager, 'get_service') |
1168 | + def test_fire_event_simple_callback_list(self, get_service): |
1169 | + cb = mock.Mock() |
1170 | + dcb = mock.Mock() |
1171 | + get_service.return_value = {'event': [cb]} |
1172 | + manager = services.ServiceManager() |
1173 | + manager.fire_event('event', 'service', dcb) |
1174 | + assert not dcb.called |
1175 | + cb.assert_called_once_with('service') |
1176 | + |
1177 | + @mock.patch.object(services.ManagerCallback, '__call__') |
1178 | + @mock.patch.object(services.ServiceManager, 'get_service') |
1179 | + def test_fire_event_manager_callback(self, get_service, mcall): |
1180 | + cb = services.ManagerCallback() |
1181 | + dcb = mock.Mock() |
1182 | + get_service.return_value = {'event': cb} |
1183 | + manager = services.ServiceManager() |
1184 | + manager.fire_event('event', 'service', dcb) |
1185 | + assert not dcb.called |
1186 | + mcall.assert_called_once_with(manager, 'service', 'event') |
1187 | + |
1188 | + @mock.patch.object(services.ManagerCallback, '__call__') |
1189 | + @mock.patch.object(services.ServiceManager, 'get_service') |
1190 | + def test_fire_event_manager_callback_list(self, get_service, mcall): |
1191 | + cb = services.ManagerCallback() |
1192 | + dcb = mock.Mock() |
1193 | + get_service.return_value = {'event': [cb]} |
1194 | + manager = services.ServiceManager() |
1195 | + manager.fire_event('event', 'service', dcb) |
1196 | + assert not dcb.called |
1197 | + mcall.assert_called_once_with(manager, 'service', 'event') |
1198 | + |
1199 | + @mock.patch.object(services.ServiceManager, 'get_service') |
1200 | + def test_is_ready(self, get_service): |
1201 | + get_service.side_effect = [ |
1202 | + {}, |
1203 | + {'required_data': [True]}, |
1204 | + {'required_data': [False]}, |
1205 | + {'required_data': [True, False]}, |
1206 | + ] |
1207 | + manager = services.ServiceManager() |
1208 | + assert manager.is_ready('foo') |
1209 | + assert manager.is_ready('bar') |
1210 | + assert not manager.is_ready('foo') |
1211 | + assert not manager.is_ready('foo') |
1212 | + get_service.assert_has_calls([mock.call('foo'), mock.call('bar')]) |
1213 | + |
1214 | + def test_load_ready_file_short_circuit(self): |
1215 | + manager = services.ServiceManager() |
1216 | + manager._ready = 'foo' |
1217 | + manager._load_ready_file() |
1218 | + self.assertEqual(manager._ready, 'foo') |
1219 | + |
1220 | + @mock.patch('os.path.exists') |
1221 | + @mock.patch.object(services.base, 'open', create=True) |
1222 | + def test_load_ready_file_new(self, mopen, exists): |
1223 | + manager = services.ServiceManager() |
1224 | + exists.return_value = False |
1225 | + manager._load_ready_file() |
1226 | + self.assertEqual(manager._ready, set()) |
1227 | + assert not mopen.called |
1228 | + |
1229 | + @mock.patch('json.load') |
1230 | + @mock.patch('os.path.exists') |
1231 | + @mock.patch.object(services.base, 'open', create=True) |
1232 | + def test_load_ready_file(self, mopen, exists, jload): |
1233 | + manager = services.ServiceManager() |
1234 | + exists.return_value = True |
1235 | + jload.return_value = ['bar'] |
1236 | + manager._load_ready_file() |
1237 | + self.assertEqual(manager._ready, set(['bar'])) |
1238 | + exists.assert_called_once_with('charm_dir/READY-SERVICES.json') |
1239 | + mopen.assert_called_once_with('charm_dir/READY-SERVICES.json') |
1240 | + |
1241 | + @mock.patch('json.dump') |
1242 | + @mock.patch.object(services.base, 'open', create=True) |
1243 | + def test_save_ready_file(self, mopen, jdump): |
1244 | + manager = services.ServiceManager() |
1245 | + manager._save_ready_file() |
1246 | + assert not mopen.called |
1247 | + manager._ready = set(['foo']) |
1248 | + manager._save_ready_file() |
1249 | + mopen.assert_called_once_with('charm_dir/READY-SERVICES.json', 'w') |
1250 | + jdump.assert_called_once_with(['foo'], mopen.return_value.__enter__()) |
1251 | + |
1252 | + @mock.patch.object(services.base.ServiceManager, '_save_ready_file') |
1253 | + @mock.patch.object(services.base.ServiceManager, '_load_ready_file') |
1254 | + def test_save_ready(self, _lrf, _srf): |
1255 | + manager = services.ServiceManager() |
1256 | + manager._ready = set(['foo']) |
1257 | + manager.save_ready('bar') |
1258 | + _lrf.assert_called_once_with() |
1259 | + self.assertEqual(manager._ready, set(['foo', 'bar'])) |
1260 | + _srf.assert_called_once_with() |
1261 | + |
1262 | + @mock.patch.object(services.base.ServiceManager, '_save_ready_file') |
1263 | + @mock.patch.object(services.base.ServiceManager, '_load_ready_file') |
1264 | + def test_save_lost(self, _lrf, _srf): |
1265 | + manager = services.ServiceManager() |
1266 | + manager._ready = set(['foo', 'bar']) |
1267 | + manager.save_lost('bar') |
1268 | + _lrf.assert_called_once_with() |
1269 | + self.assertEqual(manager._ready, set(['foo'])) |
1270 | + _srf.assert_called_once_with() |
1271 | + manager.save_lost('bar') |
1272 | + self.assertEqual(manager._ready, set(['foo'])) |
1273 | + |
1274 | + @mock.patch.object(services.base.ServiceManager, '_save_ready_file') |
1275 | + @mock.patch.object(services.base.ServiceManager, '_load_ready_file') |
1276 | + def test_was_ready(self, _lrf, _srf): |
1277 | + manager = services.ServiceManager() |
1278 | + manager._ready = set() |
1279 | + manager.save_ready('foo') |
1280 | + manager.save_ready('bar') |
1281 | + assert manager.was_ready('foo') |
1282 | + assert manager.was_ready('bar') |
1283 | + manager.save_lost('bar') |
1284 | + assert manager.was_ready('foo') |
1285 | + assert not manager.was_ready('bar') |
1286 | + |
1287 | + @mock.patch.object(services.base.hookenv, 'relation_set') |
1288 | + @mock.patch.object(services.base.hookenv, 'hook_name') |
1289 | + def test_provide_data_no_match(self, hook_name, relation_set): |
1290 | + provider = mock.Mock() |
1291 | + provider.name = 'provided' |
1292 | + manager = services.ServiceManager([ |
1293 | + {'service': 'service', 'provided_data': [provider]} |
1294 | + ]) |
1295 | + hook_name.return_value = 'not-provided-relation-joined' |
1296 | + manager.provide_data() |
1297 | + assert not provider.provide_data.called |
1298 | + |
1299 | + hook_name.return_value = 'provided-relation-broken' |
1300 | + manager.provide_data() |
1301 | + assert not provider.provide_data.called |
1302 | + |
1303 | + @mock.patch.object(services.base.hookenv, 'relation_set') |
1304 | + @mock.patch.object(services.base.hookenv, 'hook_name') |
1305 | + def test_provide_data_not_ready(self, hook_name, relation_set): |
1306 | + provider = mock.Mock() |
1307 | + provider.name = 'provided' |
1308 | + data = provider.provide_data.return_value = {'data': True} |
1309 | + provider._is_ready.return_value = False |
1310 | + manager = services.ServiceManager([ |
1311 | + {'service': 'service', 'provided_data': [provider]} |
1312 | + ]) |
1313 | + hook_name.return_value = 'provided-relation-joined' |
1314 | + manager.provide_data() |
1315 | + assert not relation_set.called |
1316 | + provider._is_ready.assert_called_once_with(data) |
1317 | + |
1318 | + @mock.patch.object(services.base.hookenv, 'relation_set') |
1319 | + @mock.patch.object(services.base.hookenv, 'hook_name') |
1320 | + def test_provide_data_ready(self, hook_name, relation_set): |
1321 | + provider = mock.Mock() |
1322 | + provider.name = 'provided' |
1323 | + data = provider.provide_data.return_value = {'data': True} |
1324 | + provider._is_ready.return_value = True |
1325 | + manager = services.ServiceManager([ |
1326 | + {'service': 'service', 'provided_data': [provider]} |
1327 | + ]) |
1328 | + hook_name.return_value = 'provided-relation-changed' |
1329 | + manager.provide_data() |
1330 | + relation_set.assert_called_once_with(None, data) |
1331 | + |
1332 | + |
1333 | +class TestRelationContext(unittest.TestCase): |
1334 | + def setUp(self): |
1335 | + self.phookenv = mock.patch.object(services.helpers, 'hookenv') |
1336 | + self.mhookenv = self.phookenv.start() |
1337 | + self.mhookenv.relation_ids.return_value = [] |
1338 | + self.context = services.RelationContext() |
1339 | + self.context.name = 'http' |
1340 | + self.context.interface = 'http' |
1341 | + self.context.required_keys = ['foo', 'bar'] |
1342 | + self.mhookenv.reset_mock() |
1343 | + |
1344 | + def tearDown(self): |
1345 | + self.phookenv.stop() |
1346 | + |
1347 | + def test_no_relations(self): |
1348 | + self.context.get_data() |
1349 | + self.assertFalse(self.context.is_ready()) |
1350 | + self.assertEqual(self.context, {}) |
1351 | + self.mhookenv.relation_ids.assert_called_once_with('http') |
1352 | + |
1353 | + def test_no_units(self): |
1354 | + self.mhookenv.relation_ids.return_value = ['nginx'] |
1355 | + self.mhookenv.related_units.return_value = [] |
1356 | + self.context.get_data() |
1357 | + self.assertFalse(self.context.is_ready()) |
1358 | + self.assertEqual(self.context, {'http': []}) |
1359 | + |
1360 | + def test_incomplete(self): |
1361 | + self.mhookenv.relation_ids.return_value = ['nginx', 'apache'] |
1362 | + self.mhookenv.related_units.side_effect = lambda i: [i+'/0'] |
1363 | + self.mhookenv.relation_get.side_effect = [{}, {'foo': '1'}] |
1364 | + self.context.get_data() |
1365 | + self.assertFalse(bool(self.context)) |
1366 | + self.assertEqual(self.mhookenv.relation_get.call_args_list, [ |
1367 | + mock.call(rid='apache', unit='apache/0'), |
1368 | + mock.call(rid='nginx', unit='nginx/0'), |
1369 | + ]) |
1370 | + |
1371 | + def test_complete(self): |
1372 | + self.mhookenv.relation_ids.return_value = ['nginx', 'apache', 'tomcat'] |
1373 | + self.mhookenv.related_units.side_effect = lambda i: [i+'/0'] |
1374 | + self.mhookenv.relation_get.side_effect = [{'foo': '1'}, {'foo': '2', 'bar': '3'}, {}] |
1375 | + self.context.get_data() |
1376 | + self.assertTrue(self.context.is_ready()) |
1377 | + self.assertEqual(self.context, {'http': [ |
1378 | + { |
1379 | + 'foo': '2', |
1380 | + 'bar': '3', |
1381 | + }, |
1382 | + ]}) |
1383 | + self.mhookenv.relation_ids.assert_called_with('http') |
1384 | + self.assertEqual(self.mhookenv.relation_get.call_args_list, [ |
1385 | + mock.call(rid='apache', unit='apache/0'), |
1386 | + mock.call(rid='nginx', unit='nginx/0'), |
1387 | + mock.call(rid='tomcat', unit='tomcat/0'), |
1388 | + ]) |
1389 | + |
1390 | + def test_provide(self): |
1391 | + self.assertEqual(self.context.provide_data(), {}) |
1392 | + |
1393 | + |
1394 | +class TestTemplateCallback(unittest.TestCase): |
1395 | + @mock.patch.object(services.helpers, 'templating') |
1396 | + def test_template_defaults(self, mtemplating): |
1397 | + manager = mock.Mock(**{'get_service.return_value': { |
1398 | + 'required_data': [{'foo': 'bar'}]}}) |
1399 | + self.assertRaises(TypeError, services.template, source='foo.yml') |
1400 | + callback = services.template(source='foo.yml', target='bar.yml') |
1401 | + assert isinstance(callback, services.ManagerCallback) |
1402 | + assert not mtemplating.render.called |
1403 | + callback(manager, 'test', 'event') |
1404 | + mtemplating.render.assert_called_once_with( |
1405 | + 'foo.yml', 'bar.yml', {'foo': 'bar'}, |
1406 | + 'root', 'root', 0444) |
1407 | + |
1408 | + @mock.patch.object(services.helpers, 'templating') |
1409 | + def test_template_explicit(self, mtemplating): |
1410 | + manager = mock.Mock(**{'get_service.return_value': { |
1411 | + 'required_data': [{'foo': 'bar'}]}}) |
1412 | + callback = services.template( |
1413 | + source='foo.yml', target='bar.yml', |
1414 | + owner='user', group='group', perms=0555 |
1415 | + ) |
1416 | + assert isinstance(callback, services.ManagerCallback) |
1417 | + assert not mtemplating.render.called |
1418 | + callback(manager, 'test', 'event') |
1419 | + mtemplating.render.assert_called_once_with( |
1420 | + 'foo.yml', 'bar.yml', {'foo': 'bar'}, |
1421 | + 'user', 'group', 0555) |
1422 | + |
1423 | + |
1424 | +class TestPortsCallback(unittest.TestCase): |
1425 | + def setUp(self): |
1426 | + self.phookenv = mock.patch.object(services.base, 'hookenv') |
1427 | + self.mhookenv = self.phookenv.start() |
1428 | + self.mhookenv.relation_ids.return_value = [] |
1429 | + self.mhookenv.charm_dir.return_value = 'charm_dir' |
1430 | + self.popen = mock.patch.object(services.base, 'open', create=True) |
1431 | + self.mopen = self.popen.start() |
1432 | + |
1433 | + def tearDown(self): |
1434 | + self.phookenv.stop() |
1435 | + self.popen.stop() |
1436 | + |
1437 | + def test_no_ports(self): |
1438 | + manager = mock.Mock(**{'get_service.return_value': {}}) |
1439 | + services.PortManagerCallback()(manager, 'service', 'event') |
1440 | + assert not self.mhookenv.open_port.called |
1441 | + assert not self.mhookenv.close_port.called |
1442 | + |
1443 | + def test_open_ports(self): |
1444 | + manager = mock.Mock(**{'get_service.return_value': {'ports': [1, 2]}}) |
1445 | + services.open_ports(manager, 'service', 'start') |
1446 | + self.mhookenv.open_port.has_calls([mock.call(1), mock.call(2)]) |
1447 | + assert not self.mhookenv.close_port.called |
1448 | + |
1449 | + def test_close_ports(self): |
1450 | + manager = mock.Mock(**{'get_service.return_value': {'ports': [1, 2]}}) |
1451 | + services.close_ports(manager, 'service', 'stop') |
1452 | + assert not self.mhookenv.open_port.called |
1453 | + self.mhookenv.close_port.has_calls([mock.call(1), mock.call(2)]) |
1454 | + |
1455 | + def test_close_old_ports(self): |
1456 | + self.mopen.return_value.read.return_value = '10,20' |
1457 | + manager = mock.Mock(**{'get_service.return_value': {'ports': [1, 2]}}) |
1458 | + services.close_ports(manager, 'service', 'stop') |
1459 | + assert not self.mhookenv.open_port.called |
1460 | + self.mhookenv.close_port.has_calls([ |
1461 | + mock.call(10), |
1462 | + mock.call(20), |
1463 | + mock.call(1), |
1464 | + mock.call(2)]) |
1465 | + |
1466 | + |
1467 | +if __name__ == '__main__': |
1468 | + unittest.main() |
1469 | |
1470 | === added file 'tests/core/test_templating.py' |
1471 | --- tests/core/test_templating.py 1970-01-01 00:00:00 +0000 |
1472 | +++ tests/core/test_templating.py 2014-08-05 13:03:33 +0000 |
1473 | @@ -0,0 +1,64 @@ |
1474 | +import os |
1475 | +import pkg_resources |
1476 | +import tempfile |
1477 | +import unittest |
1478 | +import jinja2 |
1479 | + |
1480 | +import mock |
1481 | +from charmhelpers.core import templating |
1482 | + |
1483 | + |
1484 | +TEMPLATES_DIR = pkg_resources.resource_filename(__name__, 'templates') |
1485 | + |
1486 | + |
1487 | +class TestTemplating(unittest.TestCase): |
1488 | + def setUp(self): |
1489 | + self.charm_dir = pkg_resources.resource_filename(__name__, '') |
1490 | + self._charm_dir_patch = mock.patch.object(templating.hookenv, 'charm_dir') |
1491 | + self._charm_dir_mock = self._charm_dir_patch.start() |
1492 | + self._charm_dir_mock.side_effect = lambda: self.charm_dir |
1493 | + |
1494 | + def tearDown(self): |
1495 | + self._charm_dir_patch.stop() |
1496 | + |
1497 | + @mock.patch.object(templating.host.os, 'fchown') |
1498 | + @mock.patch.object(templating.host, 'mkdir') |
1499 | + @mock.patch.object(templating.host, 'log') |
1500 | + def test_render(self, log, mkdir, fchown): |
1501 | + _, fn1 = tempfile.mkstemp() |
1502 | + _, fn2 = tempfile.mkstemp() |
1503 | + try: |
1504 | + context = { |
1505 | + 'nats': { |
1506 | + 'port': '1234', |
1507 | + 'host': 'example.com', |
1508 | + }, |
1509 | + 'router': { |
1510 | + 'domain': 'api.foo.com' |
1511 | + }, |
1512 | + 'nginx_port': 80, |
1513 | + } |
1514 | + templating.render('fake_cc.yml', fn1, context, templates_dir=TEMPLATES_DIR) |
1515 | + contents = open(fn1).read() |
1516 | + self.assertRegexpMatches(contents, 'port: 1234') |
1517 | + self.assertRegexpMatches(contents, 'host: example.com') |
1518 | + self.assertRegexpMatches(contents, 'domain: api.foo.com') |
1519 | + |
1520 | + templating.render('test.conf', fn2, context, templates_dir=TEMPLATES_DIR) |
1521 | + contents = open(fn2).read() |
1522 | + self.assertRegexpMatches(contents, 'listen 80') |
1523 | + self.assertEqual(fchown.call_count, 2) |
1524 | + self.assertEqual(mkdir.call_count, 2) |
1525 | + finally: |
1526 | + for fn in (fn1, fn2): |
1527 | + if os.path.exists(fn): |
1528 | + os.remove(fn) |
1529 | + |
1530 | + @mock.patch.object(templating, 'hookenv') |
1531 | + @mock.patch('jinja2.Environment') |
1532 | + def test_load_error(self, Env, hookenv): |
1533 | + Env().get_template.side_effect = jinja2.exceptions.TemplateNotFound('fake_cc.yml') |
1534 | + self.assertRaises( |
1535 | + jinja2.exceptions.TemplateNotFound, templating.render, |
1536 | + 'fake.src', 'fake.tgt', {}, templates_dir='tmpl') |
1537 | + hookenv.log.assert_called_once_with('Could not load template fake.src from tmpl.', level=hookenv.ERROR) |
This MP includes the most recent version of the Services framework, which changes the focus of charms from handling charm events (hooks) to describing the data (and sources of that data) required to set up the software and the actions to take when all of the required data is available. It also creates a standard for rendering config and services jobs based on Jinja templates, and manages re-rendering the files and restarting the services when the data changes.
This relieves the charm author of having to do things like keep track of a bunch of .foo flag files for indicating whether or not such-and-such file has been written or such-and-such service has been started.
The docstrings below are fairly complete, and some (somewhat simple) real-world example usages can be found in the Apache Allura charm (http://bazaar.launchpad.net/~johnsca/charms/precise/apache-allura/refactoring-with-tests/files) and the RethinkDB Docker charm (https://github.com/bcsaller/juju-docker/).