Merge lp:~gnuoy/charms/trusty/nova-compute/1453940 into lp:~openstack-charmers-archive/charms/trusty/nova-compute/next
- Trusty Tahr (14.04)
- 1453940
- Merge into next
Status: | Merged |
---|---|
Merged at revision: | 157 |
Proposed branch: | lp:~gnuoy/charms/trusty/nova-compute/1453940 |
Merge into: | lp:~openstack-charmers-archive/charms/trusty/nova-compute/next |
Diff against target: |
1252 lines (+866/-91) 7 files modified
hooks/charmhelpers/contrib/openstack/context.py (+8/-9) hooks/charmhelpers/contrib/storage/linux/ceph.py (+224/-2) hooks/nova_compute_hooks.py (+14/-23) tests/charmhelpers/contrib/amulet/utils.py (+234/-52) tests/charmhelpers/contrib/openstack/amulet/deployment.py (+20/-5) tests/charmhelpers/contrib/openstack/amulet/utils.py (+359/-0) unit_tests/test_nova_compute_hooks.py (+7/-0) |
To merge this branch: | bzr merge lp:~gnuoy/charms/trusty/nova-compute/1453940 |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Edward Hope-Morley | Approve | ||
Review via email: mp+269377@code.launchpad.net |
Commit message
Description of the change
uosci-testing-bot (uosci-testing-bot) wrote : | # |
uosci-testing-bot (uosci-testing-bot) wrote : | # |
charm_unit_test #8830 nova-compute-next for gnuoy mp269377
UNIT OK: passed
uosci-testing-bot (uosci-testing-bot) wrote : | # |
charm_amulet_test #6315 nova-compute-next for gnuoy mp269377
AMULET OK: passed
Build: http://
- 159. By Liam Young
-
Charm helper sync
uosci-testing-bot (uosci-testing-bot) wrote : | # |
charm_lint_check #9709 nova-compute-next for gnuoy mp269377
LINT FAIL: lint-test failed
LINT FAIL: charm-proof failed
LINT Results (max last 2 lines):
make: *** [lint] Error 100
ERROR:root:Make target returned non-zero.
Full lint test output: http://
Build: http://
uosci-testing-bot (uosci-testing-bot) wrote : | # |
charm_unit_test #8941 nova-compute-next for gnuoy mp269377
UNIT OK: passed
uosci-testing-bot (uosci-testing-bot) wrote : | # |
charm_amulet_test #6337 nova-compute-next for gnuoy mp269377
AMULET FAIL: amulet-test failed
AMULET Results (max last 2 lines):
make: *** [test] Error 1
ERROR:root:Make target returned non-zero.
Full amulet test output: http://
Build: http://
Edward Hope-Morley (hopem) wrote : | # |
LGTM +1 (think amulet fail is unrelated)
Preview Diff
1 | === modified file 'hooks/charmhelpers/contrib/openstack/context.py' |
2 | --- hooks/charmhelpers/contrib/openstack/context.py 2015-09-02 15:03:16 +0000 |
3 | +++ hooks/charmhelpers/contrib/openstack/context.py 2015-09-10 09:35:32 +0000 |
4 | @@ -485,13 +485,15 @@ |
5 | |
6 | log('Generating template context for ceph', level=DEBUG) |
7 | mon_hosts = [] |
8 | - auth = None |
9 | - key = None |
10 | - use_syslog = str(config('use-syslog')).lower() |
11 | + ctxt = { |
12 | + 'use_syslog': str(config('use-syslog')).lower() |
13 | + } |
14 | for rid in relation_ids('ceph'): |
15 | for unit in related_units(rid): |
16 | - auth = relation_get('auth', rid=rid, unit=unit) |
17 | - key = relation_get('key', rid=rid, unit=unit) |
18 | + if not ctxt.get('auth'): |
19 | + ctxt['auth'] = relation_get('auth', rid=rid, unit=unit) |
20 | + if not ctxt.get('key'): |
21 | + ctxt['key'] = relation_get('key', rid=rid, unit=unit) |
22 | ceph_pub_addr = relation_get('ceph-public-address', rid=rid, |
23 | unit=unit) |
24 | unit_priv_addr = relation_get('private-address', rid=rid, |
25 | @@ -500,10 +502,7 @@ |
26 | ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr |
27 | mon_hosts.append(ceph_addr) |
28 | |
29 | - ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)), |
30 | - 'auth': auth, |
31 | - 'key': key, |
32 | - 'use_syslog': use_syslog} |
33 | + ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts)) |
34 | |
35 | if not os.path.isdir('/etc/ceph'): |
36 | os.mkdir('/etc/ceph') |
37 | |
38 | === modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py' |
39 | --- hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-07-16 20:18:54 +0000 |
40 | +++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-09-10 09:35:32 +0000 |
41 | @@ -28,6 +28,7 @@ |
42 | import shutil |
43 | import json |
44 | import time |
45 | +import uuid |
46 | |
47 | from subprocess import ( |
48 | check_call, |
49 | @@ -35,8 +36,10 @@ |
50 | CalledProcessError, |
51 | ) |
52 | from charmhelpers.core.hookenv import ( |
53 | + local_unit, |
54 | relation_get, |
55 | relation_ids, |
56 | + relation_set, |
57 | related_units, |
58 | log, |
59 | DEBUG, |
60 | @@ -411,17 +414,52 @@ |
61 | |
62 | The API is versioned and defaults to version 1. |
63 | """ |
64 | - def __init__(self, api_version=1): |
65 | + def __init__(self, api_version=1, request_id=None): |
66 | self.api_version = api_version |
67 | + if request_id: |
68 | + self.request_id = request_id |
69 | + else: |
70 | + self.request_id = str(uuid.uuid1()) |
71 | self.ops = [] |
72 | |
73 | def add_op_create_pool(self, name, replica_count=3): |
74 | self.ops.append({'op': 'create-pool', 'name': name, |
75 | 'replicas': replica_count}) |
76 | |
77 | + def set_ops(self, ops): |
78 | + """Set request ops to provided value. |
79 | + |
80 | + Useful for injecting ops that come from a previous request |
81 | + to allow comparisons to ensure validity. |
82 | + """ |
83 | + self.ops = ops |
84 | + |
85 | @property |
86 | def request(self): |
87 | - return json.dumps({'api-version': self.api_version, 'ops': self.ops}) |
88 | + return json.dumps({'api-version': self.api_version, 'ops': self.ops, |
89 | + 'request-id': self.request_id}) |
90 | + |
91 | + def _ops_equal(self, other): |
92 | + if len(self.ops) == len(other.ops): |
93 | + for req_no in range(0, len(self.ops)): |
94 | + for key in ['replicas', 'name', 'op']: |
95 | + if self.ops[req_no][key] != other.ops[req_no][key]: |
96 | + return False |
97 | + else: |
98 | + return False |
99 | + return True |
100 | + |
101 | + def __eq__(self, other): |
102 | + if not isinstance(other, self.__class__): |
103 | + return False |
104 | + if self.api_version == other.api_version and \ |
105 | + self._ops_equal(other): |
106 | + return True |
107 | + else: |
108 | + return False |
109 | + |
110 | + def __ne__(self, other): |
111 | + return not self.__eq__(other) |
112 | |
113 | |
114 | class CephBrokerRsp(object): |
115 | @@ -431,14 +469,198 @@ |
116 | |
117 | The API is versioned and defaults to version 1. |
118 | """ |
119 | + |
120 | def __init__(self, encoded_rsp): |
121 | self.api_version = None |
122 | self.rsp = json.loads(encoded_rsp) |
123 | |
124 | @property |
125 | + def request_id(self): |
126 | + return self.rsp.get('request-id') |
127 | + |
128 | + @property |
129 | def exit_code(self): |
130 | return self.rsp.get('exit-code') |
131 | |
132 | @property |
133 | def exit_msg(self): |
134 | return self.rsp.get('stderr') |
135 | + |
136 | + |
137 | +# Ceph Broker Conversation: |
138 | +# If a charm needs an action to be taken by ceph it can create a CephBrokerRq |
139 | +# and send that request to ceph via the ceph relation. The CephBrokerRq has a |
140 | +# unique id so that the client can identify which CephBrokerRsp is associated |
141 | +# with the request. Ceph will also respond to each client unit individually |
142 | +# creating a response key per client unit eg glance/0 will get a CephBrokerRsp |
143 | +# via key broker-rsp-glance-0 |
144 | +# |
145 | +# To use this the charm can just do something like: |
146 | +# |
147 | +# from charmhelpers.contrib.storage.linux.ceph import ( |
148 | +# send_request_if_needed, |
149 | +# is_request_complete, |
150 | +# CephBrokerRq, |
151 | +# ) |
152 | +# |
153 | +# @hooks.hook('ceph-relation-changed') |
154 | +# def ceph_changed(): |
155 | +# rq = CephBrokerRq() |
156 | +# rq.add_op_create_pool(name='poolname', replica_count=3) |
157 | +# |
158 | +# if is_request_complete(rq): |
159 | +# <Request complete actions> |
160 | +# else: |
161 | +# send_request_if_needed(get_ceph_request()) |
162 | +# |
163 | +# CephBrokerRq and CephBrokerRsp are serialized into JSON. Below is an example |
164 | +# of glance having sent a request to ceph which ceph has successfully processed |
165 | +# 'ceph:8': { |
166 | +# 'ceph/0': { |
167 | +# 'auth': 'cephx', |
168 | +# 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}', |
169 | +# 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}', |
170 | +# 'ceph-public-address': '10.5.44.103', |
171 | +# 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==', |
172 | +# 'private-address': '10.5.44.103', |
173 | +# }, |
174 | +# 'glance/0': { |
175 | +# 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", ' |
176 | +# '"ops": [{"replicas": 3, "name": "glance", ' |
177 | +# '"op": "create-pool"}]}'), |
178 | +# 'private-address': '10.5.44.109', |
179 | +# }, |
180 | +# } |
181 | + |
182 | +def get_previous_request(rid): |
183 | + """Return the last ceph broker request sent on a given relation |
184 | + |
185 | + @param rid: Relation id to query for request |
186 | + """ |
187 | + request = None |
188 | + broker_req = relation_get(attribute='broker_req', rid=rid, |
189 | + unit=local_unit()) |
190 | + if broker_req: |
191 | + request_data = json.loads(broker_req) |
192 | + request = CephBrokerRq(api_version=request_data['api-version'], |
193 | + request_id=request_data['request-id']) |
194 | + request.set_ops(request_data['ops']) |
195 | + |
196 | + return request |
197 | + |
198 | + |
199 | +def get_request_states(request): |
200 | + """Return a dict of requests per relation id with their corresponding |
201 | + completion state. |
202 | + |
203 | + This allows a charm, which has a request for ceph, to see whether there is |
204 | + an equivalent request already being processed and if so what state that |
205 | + request is in. |
206 | + |
207 | + @param request: A CephBrokerRq object |
208 | + """ |
209 | + complete = [] |
210 | + requests = {} |
211 | + for rid in relation_ids('ceph'): |
212 | + complete = False |
213 | + previous_request = get_previous_request(rid) |
214 | + if request == previous_request: |
215 | + sent = True |
216 | + complete = is_request_complete_for_rid(previous_request, rid) |
217 | + else: |
218 | + sent = False |
219 | + complete = False |
220 | + |
221 | + requests[rid] = { |
222 | + 'sent': sent, |
223 | + 'complete': complete, |
224 | + } |
225 | + |
226 | + return requests |
227 | + |
228 | + |
229 | +def is_request_sent(request): |
230 | + """Check to see if a functionally equivalent request has already been sent |
231 | + |
232 | + Returns True if a similar request has been sent |
233 | + |
234 | + @param request: A CephBrokerRq object |
235 | + """ |
236 | + states = get_request_states(request) |
237 | + for rid in states.keys(): |
238 | + if not states[rid]['sent']: |
239 | + return False |
240 | + |
241 | + return True |
242 | + |
243 | + |
244 | +def is_request_complete(request): |
245 | + """Check to see if a functionally equivalent request has already been |
246 | + completed |
247 | + |
248 | + Returns True if a similar request has been completed |
249 | + |
250 | + @param request: A CephBrokerRq object |
251 | + """ |
252 | + states = get_request_states(request) |
253 | + for rid in states.keys(): |
254 | + if not states[rid]['complete']: |
255 | + return False |
256 | + |
257 | + return True |
258 | + |
259 | + |
260 | +def is_request_complete_for_rid(request, rid): |
261 | + """Check if a given request has been completed on the given relation |
262 | + |
263 | + @param request: A CephBrokerRq object |
264 | + @param rid: Relation ID |
265 | + """ |
266 | + broker_key = get_broker_rsp_key() |
267 | + for unit in related_units(rid): |
268 | + rdata = relation_get(rid=rid, unit=unit) |
269 | + if rdata.get(broker_key): |
270 | + rsp = CephBrokerRsp(rdata.get(broker_key)) |
271 | + if rsp.request_id == request.request_id: |
272 | + if not rsp.exit_code: |
273 | + return True |
274 | + else: |
275 | + # The remote unit sent no reply targeted at this unit so either the |
276 | + # remote ceph cluster does not support unit targeted replies or it |
277 | + # has not processed our request yet. |
278 | + if rdata.get('broker_rsp'): |
279 | + request_data = json.loads(rdata['broker_rsp']) |
280 | + if request_data.get('request-id'): |
281 | + log('Ignoring legacy broker_rsp without unit key as remote ' |
282 | + 'service supports unit specific replies', level=DEBUG) |
283 | + else: |
284 | + log('Using legacy broker_rsp as remote service does not ' |
285 | + 'supports unit specific replies', level=DEBUG) |
286 | + rsp = CephBrokerRsp(rdata['broker_rsp']) |
287 | + if not rsp.exit_code: |
288 | + return True |
289 | + |
290 | + return False |
291 | + |
292 | + |
293 | +def get_broker_rsp_key(): |
294 | + """Return broker response key for this unit |
295 | + |
296 | + This is the key that ceph is going to use to pass request status |
297 | + information back to this unit |
298 | + """ |
299 | + return 'broker-rsp-' + local_unit().replace('/', '-') |
300 | + |
301 | + |
302 | +def send_request_if_needed(request): |
303 | + """Send broker request if an equivalent request has not already been sent |
304 | + |
305 | + @param request: A CephBrokerRq object |
306 | + """ |
307 | + if is_request_sent(request): |
308 | + log('Request already sent but not complete, not sending new request', |
309 | + level=DEBUG) |
310 | + else: |
311 | + for rid in relation_ids('ceph'): |
312 | + log('Sending request {}'.format(request.request_id), level=DEBUG) |
313 | + relation_set(relation_id=rid, broker_req=request.request) |
314 | |
315 | === modified file 'hooks/nova_compute_hooks.py' |
316 | --- hooks/nova_compute_hooks.py 2015-08-19 15:07:10 +0000 |
317 | +++ hooks/nova_compute_hooks.py 2015-09-10 09:35:32 +0000 |
318 | @@ -6,7 +6,6 @@ |
319 | config, |
320 | is_relation_made, |
321 | log, |
322 | - INFO, |
323 | ERROR, |
324 | relation_ids, |
325 | relation_get, |
326 | @@ -38,8 +37,9 @@ |
327 | from charmhelpers.contrib.storage.linux.ceph import ( |
328 | ensure_ceph_keyring, |
329 | CephBrokerRq, |
330 | - CephBrokerRsp, |
331 | delete_keyring, |
332 | + send_request_if_needed, |
333 | + is_request_complete, |
334 | ) |
335 | from charmhelpers.payload.execd import execd_preinstall |
336 | from nova_compute_utils import ( |
337 | @@ -264,6 +264,13 @@ |
338 | service_restart('libvirt-bin') |
339 | |
340 | |
341 | +def get_ceph_request(): |
342 | + rq = CephBrokerRq() |
343 | + replicas = config('ceph-osd-replication-count') |
344 | + rq.add_op_create_pool(name=config('rbd-pool'), replica_count=replicas) |
345 | + return rq |
346 | + |
347 | + |
348 | @hooks.hook('ceph-relation-changed') |
349 | @restart_on_change(restart_map()) |
350 | def ceph_changed(): |
351 | @@ -282,36 +289,20 @@ |
352 | |
353 | # With some refactoring, this can move into NovaComputeCephContext |
354 | # and allow easily extended to support other compute flavors. |
355 | - if config('virt-type') in ['kvm', 'qemu', 'lxc']: |
356 | + if config('virt-type') in ['kvm', 'qemu', 'lxc'] and relation_get('key'): |
357 | create_libvirt_secret(secret_file=CEPH_SECRET, |
358 | secret_uuid=CEPH_SECRET_UUID, |
359 | key=relation_get('key')) |
360 | |
361 | if (config('libvirt-image-backend') == 'rbd' and |
362 | assert_libvirt_imagebackend_allowed()): |
363 | - settings = relation_get() |
364 | - if settings and 'broker_rsp' in settings: |
365 | - rsp = CephBrokerRsp(settings['broker_rsp']) |
366 | - # Non-zero return code implies failure |
367 | - if rsp.exit_code: |
368 | - log("Ceph broker request failed (rc=%s, msg=%s)" % |
369 | - (rsp.exit_code, rsp.exit_msg), level=ERROR) |
370 | - return |
371 | - |
372 | - log("Ceph broker request succeeded (rc=%s, msg=%s)" % |
373 | - (rsp.exit_code, rsp.exit_msg), level=INFO) |
374 | + if is_request_complete(get_ceph_request()): |
375 | + log('Request complete') |
376 | # Ensure that nova-compute is restarted since only now can we |
377 | # guarantee that ceph resources are ready. |
378 | - if config('libvirt-image-backend') == 'rbd': |
379 | - service_restart('nova-compute') |
380 | + service_restart('nova-compute') |
381 | else: |
382 | - rq = CephBrokerRq() |
383 | - replicas = config('ceph-osd-replication-count') |
384 | - rq.add_op_create_pool(name=config('rbd-pool'), |
385 | - replica_count=replicas) |
386 | - for rid in relation_ids('ceph'): |
387 | - relation_set(broker_req=rq.request) |
388 | - log("Request(s) sent to Ceph broker (rid=%s)" % (rid)) |
389 | + send_request_if_needed(get_ceph_request()) |
390 | |
391 | |
392 | @hooks.hook('ceph-relation-broken') |
393 | |
394 | === modified file 'tests/charmhelpers/contrib/amulet/utils.py' |
395 | --- tests/charmhelpers/contrib/amulet/utils.py 2015-08-19 14:19:48 +0000 |
396 | +++ tests/charmhelpers/contrib/amulet/utils.py 2015-09-10 09:35:32 +0000 |
397 | @@ -19,9 +19,11 @@ |
398 | import logging |
399 | import os |
400 | import re |
401 | +import socket |
402 | import subprocess |
403 | import sys |
404 | import time |
405 | +import uuid |
406 | |
407 | import amulet |
408 | import distro_info |
409 | @@ -114,7 +116,7 @@ |
410 | # /!\ DEPRECATION WARNING (beisner): |
411 | # New and existing tests should be rewritten to use |
412 | # validate_services_by_name() as it is aware of init systems. |
413 | - self.log.warn('/!\\ DEPRECATION WARNING: use ' |
414 | + self.log.warn('DEPRECATION WARNING: use ' |
415 | 'validate_services_by_name instead of validate_services ' |
416 | 'due to init system differences.') |
417 | |
418 | @@ -269,33 +271,52 @@ |
419 | """Get last modification time of directory.""" |
420 | return sentry_unit.directory_stat(directory)['mtime'] |
421 | |
422 | - def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False): |
423 | - """Get process' start time. |
424 | - |
425 | - Determine start time of the process based on the last modification |
426 | - time of the /proc/pid directory. If pgrep_full is True, the process |
427 | - name is matched against the full command line. |
428 | - """ |
429 | - if pgrep_full: |
430 | - cmd = 'pgrep -o -f {}'.format(service) |
431 | - else: |
432 | - cmd = 'pgrep -o {}'.format(service) |
433 | - cmd = cmd + ' | grep -v pgrep || exit 0' |
434 | - cmd_out = sentry_unit.run(cmd) |
435 | - self.log.debug('CMDout: ' + str(cmd_out)) |
436 | - if cmd_out[0]: |
437 | - self.log.debug('Pid for %s %s' % (service, str(cmd_out[0]))) |
438 | - proc_dir = '/proc/{}'.format(cmd_out[0].strip()) |
439 | - return self._get_dir_mtime(sentry_unit, proc_dir) |
440 | + def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None): |
441 | + """Get start time of a process based on the last modification time |
442 | + of the /proc/pid directory. |
443 | + |
444 | + :sentry_unit: The sentry unit to check for the service on |
445 | + :service: service name to look for in process table |
446 | + :pgrep_full: [Deprecated] Use full command line search mode with pgrep |
447 | + :returns: epoch time of service process start |
448 | + :param commands: list of bash commands |
449 | + :param sentry_units: list of sentry unit pointers |
450 | + :returns: None if successful; Failure message otherwise |
451 | + """ |
452 | + if pgrep_full is not None: |
453 | + # /!\ DEPRECATION WARNING (beisner): |
454 | + # No longer implemented, as pidof is now used instead of pgrep. |
455 | + # https://bugs.launchpad.net/charm-helpers/+bug/1474030 |
456 | + self.log.warn('DEPRECATION WARNING: pgrep_full bool is no ' |
457 | + 'longer implemented re: lp 1474030.') |
458 | + |
459 | + pid_list = self.get_process_id_list(sentry_unit, service) |
460 | + pid = pid_list[0] |
461 | + proc_dir = '/proc/{}'.format(pid) |
462 | + self.log.debug('Pid for {} on {}: {}'.format( |
463 | + service, sentry_unit.info['unit_name'], pid)) |
464 | + |
465 | + return self._get_dir_mtime(sentry_unit, proc_dir) |
466 | |
467 | def service_restarted(self, sentry_unit, service, filename, |
468 | - pgrep_full=False, sleep_time=20): |
469 | + pgrep_full=None, sleep_time=20): |
470 | """Check if service was restarted. |
471 | |
472 | Compare a service's start time vs a file's last modification time |
473 | (such as a config file for that service) to determine if the service |
474 | has been restarted. |
475 | """ |
476 | + # /!\ DEPRECATION WARNING (beisner): |
477 | + # This method is prone to races in that no before-time is known. |
478 | + # Use validate_service_config_changed instead. |
479 | + |
480 | + # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now |
481 | + # used instead of pgrep. pgrep_full is still passed through to ensure |
482 | + # deprecation WARNS. lp1474030 |
483 | + self.log.warn('DEPRECATION WARNING: use ' |
484 | + 'validate_service_config_changed instead of ' |
485 | + 'service_restarted due to known races.') |
486 | + |
487 | time.sleep(sleep_time) |
488 | if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= |
489 | self._get_file_mtime(sentry_unit, filename)): |
490 | @@ -304,15 +325,15 @@ |
491 | return False |
492 | |
493 | def service_restarted_since(self, sentry_unit, mtime, service, |
494 | - pgrep_full=False, sleep_time=20, |
495 | - retry_count=2): |
496 | + pgrep_full=None, sleep_time=20, |
497 | + retry_count=2, retry_sleep_time=30): |
498 | """Check if service was been started after a given time. |
499 | |
500 | Args: |
501 | sentry_unit (sentry): The sentry unit to check for the service on |
502 | mtime (float): The epoch time to check against |
503 | service (string): service name to look for in process table |
504 | - pgrep_full (boolean): Use full command line search mode with pgrep |
505 | + pgrep_full: [Deprecated] Use full command line search mode with pgrep |
506 | sleep_time (int): Seconds to sleep before looking for process |
507 | retry_count (int): If service is not found, how many times to retry |
508 | |
509 | @@ -321,30 +342,44 @@ |
510 | False if service is older than mtime or if service was |
511 | not found. |
512 | """ |
513 | - self.log.debug('Checking %s restarted since %s' % (service, mtime)) |
514 | + # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now |
515 | + # used instead of pgrep. pgrep_full is still passed through to ensure |
516 | + # deprecation WARNS. lp1474030 |
517 | + |
518 | + unit_name = sentry_unit.info['unit_name'] |
519 | + self.log.debug('Checking that %s service restarted since %s on ' |
520 | + '%s' % (service, mtime, unit_name)) |
521 | time.sleep(sleep_time) |
522 | - proc_start_time = self._get_proc_start_time(sentry_unit, service, |
523 | - pgrep_full) |
524 | - while retry_count > 0 and not proc_start_time: |
525 | - self.log.debug('No pid file found for service %s, will retry %i ' |
526 | - 'more times' % (service, retry_count)) |
527 | - time.sleep(30) |
528 | - proc_start_time = self._get_proc_start_time(sentry_unit, service, |
529 | - pgrep_full) |
530 | - retry_count = retry_count - 1 |
531 | + proc_start_time = None |
532 | + tries = 0 |
533 | + while tries <= retry_count and not proc_start_time: |
534 | + try: |
535 | + proc_start_time = self._get_proc_start_time(sentry_unit, |
536 | + service, |
537 | + pgrep_full) |
538 | + self.log.debug('Attempt {} to get {} proc start time on {} ' |
539 | + 'OK'.format(tries, service, unit_name)) |
540 | + except IOError: |
541 | + # NOTE(beisner) - race avoidance, proc may not exist yet. |
542 | + # https://bugs.launchpad.net/charm-helpers/+bug/1474030 |
543 | + self.log.debug('Attempt {} to get {} proc start time on {} ' |
544 | + 'failed'.format(tries, service, unit_name)) |
545 | + time.sleep(retry_sleep_time) |
546 | + tries += 1 |
547 | |
548 | if not proc_start_time: |
549 | self.log.warn('No proc start time found, assuming service did ' |
550 | 'not start') |
551 | return False |
552 | if proc_start_time >= mtime: |
553 | - self.log.debug('proc start time is newer than provided mtime' |
554 | - '(%s >= %s)' % (proc_start_time, mtime)) |
555 | + self.log.debug('Proc start time is newer than provided mtime' |
556 | + '(%s >= %s) on %s (OK)' % (proc_start_time, |
557 | + mtime, unit_name)) |
558 | return True |
559 | else: |
560 | - self.log.warn('proc start time (%s) is older than provided mtime ' |
561 | - '(%s), service did not restart' % (proc_start_time, |
562 | - mtime)) |
563 | + self.log.warn('Proc start time (%s) is older than provided mtime ' |
564 | + '(%s) on %s, service did not ' |
565 | + 'restart' % (proc_start_time, mtime, unit_name)) |
566 | return False |
567 | |
568 | def config_updated_since(self, sentry_unit, filename, mtime, |
569 | @@ -374,8 +409,9 @@ |
570 | return False |
571 | |
572 | def validate_service_config_changed(self, sentry_unit, mtime, service, |
573 | - filename, pgrep_full=False, |
574 | - sleep_time=20, retry_count=2): |
575 | + filename, pgrep_full=None, |
576 | + sleep_time=20, retry_count=2, |
577 | + retry_sleep_time=30): |
578 | """Check service and file were updated after mtime |
579 | |
580 | Args: |
581 | @@ -383,9 +419,10 @@ |
582 | mtime (float): The epoch time to check against |
583 | service (string): service name to look for in process table |
584 | filename (string): The file to check mtime of |
585 | - pgrep_full (boolean): Use full command line search mode with pgrep |
586 | - sleep_time (int): Seconds to sleep before looking for process |
587 | + pgrep_full: [Deprecated] Use full command line search mode with pgrep |
588 | + sleep_time (int): Initial sleep in seconds to pass to test helpers |
589 | retry_count (int): If service is not found, how many times to retry |
590 | + retry_sleep_time (int): Time in seconds to wait between retries |
591 | |
592 | Typical Usage: |
593 | u = OpenStackAmuletUtils(ERROR) |
594 | @@ -402,15 +439,25 @@ |
595 | mtime, False if service is older than mtime or if service was |
596 | not found or if filename was modified before mtime. |
597 | """ |
598 | - self.log.debug('Checking %s restarted since %s' % (service, mtime)) |
599 | - time.sleep(sleep_time) |
600 | - service_restart = self.service_restarted_since(sentry_unit, mtime, |
601 | - service, |
602 | - pgrep_full=pgrep_full, |
603 | - sleep_time=0, |
604 | - retry_count=retry_count) |
605 | - config_update = self.config_updated_since(sentry_unit, filename, mtime, |
606 | - sleep_time=0) |
607 | + |
608 | + # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now |
609 | + # used instead of pgrep. pgrep_full is still passed through to ensure |
610 | + # deprecation WARNS. lp1474030 |
611 | + |
612 | + service_restart = self.service_restarted_since( |
613 | + sentry_unit, mtime, |
614 | + service, |
615 | + pgrep_full=pgrep_full, |
616 | + sleep_time=sleep_time, |
617 | + retry_count=retry_count, |
618 | + retry_sleep_time=retry_sleep_time) |
619 | + |
620 | + config_update = self.config_updated_since( |
621 | + sentry_unit, |
622 | + filename, |
623 | + mtime, |
624 | + sleep_time=0) |
625 | + |
626 | return service_restart and config_update |
627 | |
628 | def get_sentry_time(self, sentry_unit): |
629 | @@ -428,7 +475,6 @@ |
630 | """Return a list of all Ubuntu releases in order of release.""" |
631 | _d = distro_info.UbuntuDistroInfo() |
632 | _release_list = _d.all |
633 | - self.log.debug('Ubuntu release list: {}'.format(_release_list)) |
634 | return _release_list |
635 | |
636 | def file_to_url(self, file_rel_path): |
637 | @@ -568,6 +614,142 @@ |
638 | |
639 | return None |
640 | |
641 | + def validate_sectionless_conf(self, file_contents, expected): |
642 | + """A crude conf parser. Useful to inspect configuration files which |
643 | + do not have section headers (as would be necessary in order to use |
644 | + the configparser). Such as openstack-dashboard or rabbitmq confs.""" |
645 | + for line in file_contents.split('\n'): |
646 | + if '=' in line: |
647 | + args = line.split('=') |
648 | + if len(args) <= 1: |
649 | + continue |
650 | + key = args[0].strip() |
651 | + value = args[1].strip() |
652 | + if key in expected.keys(): |
653 | + if expected[key] != value: |
654 | + msg = ('Config mismatch. Expected, actual: {}, ' |
655 | + '{}'.format(expected[key], value)) |
656 | + amulet.raise_status(amulet.FAIL, msg=msg) |
657 | + |
658 | + def get_unit_hostnames(self, units): |
659 | + """Return a dict of juju unit names to hostnames.""" |
660 | + host_names = {} |
661 | + for unit in units: |
662 | + host_names[unit.info['unit_name']] = \ |
663 | + str(unit.file_contents('/etc/hostname').strip()) |
664 | + self.log.debug('Unit host names: {}'.format(host_names)) |
665 | + return host_names |
666 | + |
667 | + def run_cmd_unit(self, sentry_unit, cmd): |
668 | + """Run a command on a unit, return the output and exit code.""" |
669 | + output, code = sentry_unit.run(cmd) |
670 | + if code == 0: |
671 | + self.log.debug('{} `{}` command returned {} ' |
672 | + '(OK)'.format(sentry_unit.info['unit_name'], |
673 | + cmd, code)) |
674 | + else: |
675 | + msg = ('{} `{}` command returned {} ' |
676 | + '{}'.format(sentry_unit.info['unit_name'], |
677 | + cmd, code, output)) |
678 | + amulet.raise_status(amulet.FAIL, msg=msg) |
679 | + return str(output), code |
680 | + |
681 | + def file_exists_on_unit(self, sentry_unit, file_name): |
682 | + """Check if a file exists on a unit.""" |
683 | + try: |
684 | + sentry_unit.file_stat(file_name) |
685 | + return True |
686 | + except IOError: |
687 | + return False |
688 | + except Exception as e: |
689 | + msg = 'Error checking file {}: {}'.format(file_name, e) |
690 | + amulet.raise_status(amulet.FAIL, msg=msg) |
691 | + |
692 | + def file_contents_safe(self, sentry_unit, file_name, |
693 | + max_wait=60, fatal=False): |
694 | + """Get file contents from a sentry unit. Wrap amulet file_contents |
695 | + with retry logic to address races where a file checks as existing, |
696 | + but no longer exists by the time file_contents is called. |
697 | + Return None if file not found. Optionally raise if fatal is True.""" |
698 | + unit_name = sentry_unit.info['unit_name'] |
699 | + file_contents = False |
700 | + tries = 0 |
701 | + while not file_contents and tries < (max_wait / 4): |
702 | + try: |
703 | + file_contents = sentry_unit.file_contents(file_name) |
704 | + except IOError: |
705 | + self.log.debug('Attempt {} to open file {} from {} ' |
706 | + 'failed'.format(tries, file_name, |
707 | + unit_name)) |
708 | + time.sleep(4) |
709 | + tries += 1 |
710 | + |
711 | + if file_contents: |
712 | + return file_contents |
713 | + elif not fatal: |
714 | + return None |
715 | + elif fatal: |
716 | + msg = 'Failed to get file contents from unit.' |
717 | + amulet.raise_status(amulet.FAIL, msg) |
718 | + |
719 | + def port_knock_tcp(self, host="localhost", port=22, timeout=15): |
720 | + """Open a TCP socket to check for a listening service on a host. |
721 | + |
722 | + :param host: host name or IP address, default to localhost |
723 | + :param port: TCP port number, default to 22 |
724 | + :param timeout: Connect timeout, default to 15 seconds |
725 | + :returns: True if successful, False if connect failed |
726 | + """ |
727 | + |
728 | + # Resolve host name if possible |
729 | + try: |
730 | + connect_host = socket.gethostbyname(host) |
731 | + host_human = "{} ({})".format(connect_host, host) |
732 | + except socket.error as e: |
733 | + self.log.warn('Unable to resolve address: ' |
734 | + '{} ({}) Trying anyway!'.format(host, e)) |
735 | + connect_host = host |
736 | + host_human = connect_host |
737 | + |
738 | + # Attempt socket connection |
739 | + try: |
740 | + knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) |
741 | + knock.settimeout(timeout) |
742 | + knock.connect((connect_host, port)) |
743 | + knock.close() |
744 | + self.log.debug('Socket connect OK for host ' |
745 | + '{} on port {}.'.format(host_human, port)) |
746 | + return True |
747 | + except socket.error as e: |
748 | + self.log.debug('Socket connect FAIL for' |
749 | + ' {} port {} ({})'.format(host_human, port, e)) |
750 | + return False |
751 | + |
752 | + def port_knock_units(self, sentry_units, port=22, |
753 | + timeout=15, expect_success=True): |
754 | + """Open a TCP socket to check for a listening service on each |
755 | + listed juju unit. |
756 | + |
757 | + :param sentry_units: list of sentry unit pointers |
758 | + :param port: TCP port number, default to 22 |
759 | + :param timeout: Connect timeout, default to 15 seconds |
760 | + :expect_success: True by default, set False to invert logic |
761 | + :returns: None if successful, Failure message otherwise |
762 | + """ |
763 | + for unit in sentry_units: |
764 | + host = unit.info['public-address'] |
765 | + connected = self.port_knock_tcp(host, port, timeout) |
766 | + if not connected and expect_success: |
767 | + return 'Socket connect failed.' |
768 | + elif connected and not expect_success: |
769 | + return 'Socket connected unexpectedly.' |
770 | + |
771 | + def get_uuid_epoch_stamp(self): |
772 | + """Returns a stamp string based on uuid4 and epoch time. Useful in |
773 | + generating test messages which need to be unique-ish.""" |
774 | + return '[{}-{}]'.format(uuid.uuid4(), time.time()) |
775 | + |
776 | +# amulet juju action helpers: |
777 | def run_action(self, unit_sentry, action, |
778 | _check_output=subprocess.check_output): |
779 | """Run the named action on a given unit sentry. |
780 | |
781 | === modified file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py' |
782 | --- tests/charmhelpers/contrib/openstack/amulet/deployment.py 2015-08-19 14:19:48 +0000 |
783 | +++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2015-09-10 09:35:32 +0000 |
784 | @@ -44,8 +44,15 @@ |
785 | Determine if the local branch being tested is derived from its |
786 | stable or next (dev) branch, and based on this, use the corresonding |
787 | stable or next branches for the other_services.""" |
788 | + |
789 | + # Charms outside the lp:~openstack-charmers namespace |
790 | base_charms = ['mysql', 'mongodb', 'nrpe'] |
791 | |
792 | + # Force these charms to current series even when using an older series. |
793 | + # ie. Use trusty/nrpe even when series is precise, as the P charm |
794 | + # does not possess the necessary external master config and hooks. |
795 | + force_series_current = ['nrpe'] |
796 | + |
797 | if self.series in ['precise', 'trusty']: |
798 | base_series = self.series |
799 | else: |
800 | @@ -53,11 +60,17 @@ |
801 | |
802 | if self.stable: |
803 | for svc in other_services: |
804 | + if svc['name'] in force_series_current: |
805 | + base_series = self.current_next |
806 | + |
807 | temp = 'lp:charms/{}/{}' |
808 | svc['location'] = temp.format(base_series, |
809 | svc['name']) |
810 | else: |
811 | for svc in other_services: |
812 | + if svc['name'] in force_series_current: |
813 | + base_series = self.current_next |
814 | + |
815 | if svc['name'] in base_charms: |
816 | temp = 'lp:charms/{}/{}' |
817 | svc['location'] = temp.format(base_series, |
818 | @@ -77,21 +90,23 @@ |
819 | |
820 | services = other_services |
821 | services.append(this_service) |
822 | + |
823 | + # Charms which should use the source config option |
824 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', |
825 | 'ceph-osd', 'ceph-radosgw'] |
826 | - # Most OpenStack subordinate charms do not expose an origin option |
827 | - # as that is controlled by the principle. |
828 | - ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] |
829 | + |
830 | + # Charms which can not use openstack-origin, ie. many subordinates |
831 | + no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] |
832 | |
833 | if self.openstack: |
834 | for svc in services: |
835 | - if svc['name'] not in use_source + ignore: |
836 | + if svc['name'] not in use_source + no_origin: |
837 | config = {'openstack-origin': self.openstack} |
838 | self.d.configure(svc['name'], config) |
839 | |
840 | if self.source: |
841 | for svc in services: |
842 | - if svc['name'] in use_source and svc['name'] not in ignore: |
843 | + if svc['name'] in use_source and svc['name'] not in no_origin: |
844 | config = {'source': self.source} |
845 | self.d.configure(svc['name'], config) |
846 | |
847 | |
848 | === modified file 'tests/charmhelpers/contrib/openstack/amulet/utils.py' |
849 | --- tests/charmhelpers/contrib/openstack/amulet/utils.py 2015-07-16 20:18:54 +0000 |
850 | +++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2015-09-10 09:35:32 +0000 |
851 | @@ -27,6 +27,7 @@ |
852 | import heatclient.v1.client as heat_client |
853 | import keystoneclient.v2_0 as keystone_client |
854 | import novaclient.v1_1.client as nova_client |
855 | +import pika |
856 | import swiftclient |
857 | |
858 | from charmhelpers.contrib.amulet.utils import ( |
859 | @@ -602,3 +603,361 @@ |
860 | self.log.debug('Ceph {} samples (OK): ' |
861 | '{}'.format(sample_type, samples)) |
862 | return None |
863 | + |
864 | +# rabbitmq/amqp specific helpers: |
865 | + def add_rmq_test_user(self, sentry_units, |
866 | + username="testuser1", password="changeme"): |
867 | + """Add a test user via the first rmq juju unit, check connection as |
868 | + the new user against all sentry units. |
869 | + |
870 | + :param sentry_units: list of sentry unit pointers |
871 | + :param username: amqp user name, default to testuser1 |
872 | + :param password: amqp user password |
873 | + :returns: None if successful. Raise on error. |
874 | + """ |
875 | + self.log.debug('Adding rmq user ({})...'.format(username)) |
876 | + |
877 | + # Check that user does not already exist |
878 | + cmd_user_list = 'rabbitmqctl list_users' |
879 | + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) |
880 | + if username in output: |
881 | + self.log.warning('User ({}) already exists, returning ' |
882 | + 'gracefully.'.format(username)) |
883 | + return |
884 | + |
885 | + perms = '".*" ".*" ".*"' |
886 | + cmds = ['rabbitmqctl add_user {} {}'.format(username, password), |
887 | + 'rabbitmqctl set_permissions {} {}'.format(username, perms)] |
888 | + |
889 | + # Add user via first unit |
890 | + for cmd in cmds: |
891 | + output, _ = self.run_cmd_unit(sentry_units[0], cmd) |
892 | + |
893 | + # Check connection against the other sentry_units |
894 | + self.log.debug('Checking user connect against units...') |
895 | + for sentry_unit in sentry_units: |
896 | + connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, |
897 | + username=username, |
898 | + password=password) |
899 | + connection.close() |
900 | + |
901 | + def delete_rmq_test_user(self, sentry_units, username="testuser1"): |
902 | + """Delete a rabbitmq user via the first rmq juju unit. |
903 | + |
904 | + :param sentry_units: list of sentry unit pointers |
905 | + :param username: amqp user name, default to testuser1 |
906 | + :param password: amqp user password |
907 | + :returns: None if successful or no such user. |
908 | + """ |
909 | + self.log.debug('Deleting rmq user ({})...'.format(username)) |
910 | + |
911 | + # Check that the user exists |
912 | + cmd_user_list = 'rabbitmqctl list_users' |
913 | + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) |
914 | + |
915 | + if username not in output: |
916 | + self.log.warning('User ({}) does not exist, returning ' |
917 | + 'gracefully.'.format(username)) |
918 | + return |
919 | + |
920 | + # Delete the user |
921 | + cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) |
922 | + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) |
923 | + |
924 | + def get_rmq_cluster_status(self, sentry_unit): |
925 | + """Execute rabbitmq cluster status command on a unit and return |
926 | + the full output. |
927 | + |
928 | + :param sentry_unit: sentry unit |
929 | + :returns: String containing console output of cluster status command |
930 | + """ |
931 | + cmd = 'rabbitmqctl cluster_status' |
932 | + output, _ = self.run_cmd_unit(sentry_unit, cmd) |
933 | + self.log.debug('{} cluster_status:\n{}'.format( |
934 | + sentry_unit.info['unit_name'], output)) |
935 | + return str(output) |
936 | + |
937 | + def get_rmq_cluster_running_nodes(self, sentry_unit): |
938 | + """Parse rabbitmqctl cluster_status output string, return list of |
939 | + running rabbitmq cluster nodes. |
940 | + |
941 | + :param sentry_unit: sentry unit |
942 | + :returns: List containing node names of running nodes |
943 | + """ |
944 | + # NOTE(beisner): rabbitmqctl cluster_status output is not |
945 | + # json-parsable, do string chop foo, then json.loads that. |
946 | + str_stat = self.get_rmq_cluster_status(sentry_unit) |
947 | + if 'running_nodes' in str_stat: |
948 | + pos_start = str_stat.find("{running_nodes,") + 15 |
949 | + pos_end = str_stat.find("]},", pos_start) + 1 |
950 | + str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') |
951 | + run_nodes = json.loads(str_run_nodes) |
952 | + return run_nodes |
953 | + else: |
954 | + return [] |
955 | + |
956 | + def validate_rmq_cluster_running_nodes(self, sentry_units): |
957 | + """Check that all rmq unit hostnames are represented in the |
958 | + cluster_status output of all units. |
959 | + |
960 | + :param sentry_units: list of sentry unit pointers |
961 | + (all rmq units) |
962 | + :returns: None if successful, otherwise return error message |
963 | + """ |
964 | + host_names = self.get_unit_hostnames(sentry_units) |
965 | + errors = [] |
966 | + |
967 | + # Query every unit for cluster_status running nodes |
968 | + for query_unit in sentry_units: |
969 | + query_unit_name = query_unit.info['unit_name'] |
970 | + running_nodes = self.get_rmq_cluster_running_nodes(query_unit) |
971 | + |
972 | + # Confirm that every unit is represented in the queried unit's |
973 | + # cluster_status running nodes output. |
974 | + for validate_unit in sentry_units: |
975 | + val_host_name = host_names[validate_unit.info['unit_name']] |
976 | + val_node_name = 'rabbit@{}'.format(val_host_name) |
977 | + |
978 | + if val_node_name not in running_nodes: |
979 | + errors.append('Cluster member check failed on {}: {} not ' |
980 | + 'in {}\n'.format(query_unit_name, |
981 | + val_node_name, |
982 | + running_nodes)) |
983 | + if errors: |
984 | + return ''.join(errors) |
985 | + |
986 | + def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): |
987 | + """Check a single juju rmq unit for ssl and port in the config file.""" |
988 | + host = sentry_unit.info['public-address'] |
989 | + unit_name = sentry_unit.info['unit_name'] |
990 | + |
991 | + conf_file = '/etc/rabbitmq/rabbitmq.config' |
992 | + conf_contents = str(self.file_contents_safe(sentry_unit, |
993 | + conf_file, max_wait=16)) |
994 | + # Checks |
995 | + conf_ssl = 'ssl' in conf_contents |
996 | + conf_port = str(port) in conf_contents |
997 | + |
998 | + # Port explicitly checked in config |
999 | + if port and conf_port and conf_ssl: |
1000 | + self.log.debug('SSL is enabled @{}:{} ' |
1001 | + '({})'.format(host, port, unit_name)) |
1002 | + return True |
1003 | + elif port and not conf_port and conf_ssl: |
1004 | + self.log.debug('SSL is enabled @{} but not on port {} ' |
1005 | + '({})'.format(host, port, unit_name)) |
1006 | + return False |
1007 | + # Port not checked (useful when checking that ssl is disabled) |
1008 | + elif not port and conf_ssl: |
1009 | + self.log.debug('SSL is enabled @{}:{} ' |
1010 | + '({})'.format(host, port, unit_name)) |
1011 | + return True |
1012 | + elif not port and not conf_ssl: |
1013 | + self.log.debug('SSL not enabled @{}:{} ' |
1014 | + '({})'.format(host, port, unit_name)) |
1015 | + return False |
1016 | + else: |
1017 | + msg = ('Unknown condition when checking SSL status @{}:{} ' |
1018 | + '({})'.format(host, port, unit_name)) |
1019 | + amulet.raise_status(amulet.FAIL, msg) |
1020 | + |
1021 | + def validate_rmq_ssl_enabled_units(self, sentry_units, port=None): |
1022 | + """Check that ssl is enabled on rmq juju sentry units. |
1023 | + |
1024 | + :param sentry_units: list of all rmq sentry units |
1025 | + :param port: optional ssl port override to validate |
1026 | + :returns: None if successful, otherwise return error message |
1027 | + """ |
1028 | + for sentry_unit in sentry_units: |
1029 | + if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port): |
1030 | + return ('Unexpected condition: ssl is disabled on unit ' |
1031 | + '({})'.format(sentry_unit.info['unit_name'])) |
1032 | + return None |
1033 | + |
1034 | + def validate_rmq_ssl_disabled_units(self, sentry_units): |
1035 | + """Check that ssl is disabled on listed rmq juju sentry units. |
1036 | + |
1037 | + :param sentry_units: list of all rmq sentry units |
1038 | + :returns: True if successful. Raise on error. |
1039 | + """ |
1040 | + for sentry_unit in sentry_units: |
1041 | + if self.rmq_ssl_is_enabled_on_unit(sentry_unit): |
1042 | + return ('Unexpected condition: ssl is enabled on unit ' |
1043 | + '({})'.format(sentry_unit.info['unit_name'])) |
1044 | + return None |
1045 | + |
1046 | + def configure_rmq_ssl_on(self, sentry_units, deployment, |
1047 | + port=None, max_wait=60): |
1048 | + """Turn ssl charm config option on, with optional non-default |
1049 | + ssl port specification. Confirm that it is enabled on every |
1050 | + unit. |
1051 | + |
1052 | + :param sentry_units: list of sentry units |
1053 | + :param deployment: amulet deployment object pointer |
1054 | + :param port: amqp port, use defaults if None |
1055 | + :param max_wait: maximum time to wait in seconds to confirm |
1056 | + :returns: None if successful. Raise on error. |
1057 | + """ |
1058 | + self.log.debug('Setting ssl charm config option: on') |
1059 | + |
1060 | + # Enable RMQ SSL |
1061 | + config = {'ssl': 'on'} |
1062 | + if port: |
1063 | + config['ssl_port'] = port |
1064 | + |
1065 | + deployment.configure('rabbitmq-server', config) |
1066 | + |
1067 | + # Confirm |
1068 | + tries = 0 |
1069 | + ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) |
1070 | + while ret and tries < (max_wait / 4): |
1071 | + time.sleep(4) |
1072 | + self.log.debug('Attempt {}: {}'.format(tries, ret)) |
1073 | + ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) |
1074 | + tries += 1 |
1075 | + |
1076 | + if ret: |
1077 | + amulet.raise_status(amulet.FAIL, ret) |
1078 | + |
1079 | + def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): |
1080 | + """Turn ssl charm config option off, confirm that it is disabled |
1081 | + on every unit. |
1082 | + |
1083 | + :param sentry_units: list of sentry units |
1084 | + :param deployment: amulet deployment object pointer |
1085 | + :param max_wait: maximum time to wait in seconds to confirm |
1086 | + :returns: None if successful. Raise on error. |
1087 | + """ |
1088 | + self.log.debug('Setting ssl charm config option: off') |
1089 | + |
1090 | + # Disable RMQ SSL |
1091 | + config = {'ssl': 'off'} |
1092 | + deployment.configure('rabbitmq-server', config) |
1093 | + |
1094 | + # Confirm |
1095 | + tries = 0 |
1096 | + ret = self.validate_rmq_ssl_disabled_units(sentry_units) |
1097 | + while ret and tries < (max_wait / 4): |
1098 | + time.sleep(4) |
1099 | + self.log.debug('Attempt {}: {}'.format(tries, ret)) |
1100 | + ret = self.validate_rmq_ssl_disabled_units(sentry_units) |
1101 | + tries += 1 |
1102 | + |
1103 | + if ret: |
1104 | + amulet.raise_status(amulet.FAIL, ret) |
1105 | + |
1106 | + def connect_amqp_by_unit(self, sentry_unit, ssl=False, |
1107 | + port=None, fatal=True, |
1108 | + username="testuser1", password="changeme"): |
1109 | + """Establish and return a pika amqp connection to the rabbitmq service |
1110 | + running on a rmq juju unit. |
1111 | + |
1112 | + :param sentry_unit: sentry unit pointer |
1113 | + :param ssl: boolean, default to False |
1114 | + :param port: amqp port, use defaults if None |
1115 | + :param fatal: boolean, default to True (raises on connect error) |
1116 | + :param username: amqp user name, default to testuser1 |
1117 | + :param password: amqp user password |
1118 | + :returns: pika amqp connection pointer or None if failed and non-fatal |
1119 | + """ |
1120 | + host = sentry_unit.info['public-address'] |
1121 | + unit_name = sentry_unit.info['unit_name'] |
1122 | + |
1123 | + # Default port logic if port is not specified |
1124 | + if ssl and not port: |
1125 | + port = 5671 |
1126 | + elif not ssl and not port: |
1127 | + port = 5672 |
1128 | + |
1129 | + self.log.debug('Connecting to amqp on {}:{} ({}) as ' |
1130 | + '{}...'.format(host, port, unit_name, username)) |
1131 | + |
1132 | + try: |
1133 | + credentials = pika.PlainCredentials(username, password) |
1134 | + parameters = pika.ConnectionParameters(host=host, port=port, |
1135 | + credentials=credentials, |
1136 | + ssl=ssl, |
1137 | + connection_attempts=3, |
1138 | + retry_delay=5, |
1139 | + socket_timeout=1) |
1140 | + connection = pika.BlockingConnection(parameters) |
1141 | + assert connection.server_properties['product'] == 'RabbitMQ' |
1142 | + self.log.debug('Connect OK') |
1143 | + return connection |
1144 | + except Exception as e: |
1145 | + msg = ('amqp connection failed to {}:{} as ' |
1146 | + '{} ({})'.format(host, port, username, str(e))) |
1147 | + if fatal: |
1148 | + amulet.raise_status(amulet.FAIL, msg) |
1149 | + else: |
1150 | + self.log.warn(msg) |
1151 | + return None |
1152 | + |
1153 | + def publish_amqp_message_by_unit(self, sentry_unit, message, |
1154 | + queue="test", ssl=False, |
1155 | + username="testuser1", |
1156 | + password="changeme", |
1157 | + port=None): |
1158 | + """Publish an amqp message to a rmq juju unit. |
1159 | + |
1160 | + :param sentry_unit: sentry unit pointer |
1161 | + :param message: amqp message string |
1162 | + :param queue: message queue, default to test |
1163 | + :param username: amqp user name, default to testuser1 |
1164 | + :param password: amqp user password |
1165 | + :param ssl: boolean, default to False |
1166 | + :param port: amqp port, use defaults if None |
1167 | + :returns: None. Raises exception if publish failed. |
1168 | + """ |
1169 | + self.log.debug('Publishing message to {} queue:\n{}'.format(queue, |
1170 | + message)) |
1171 | + connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, |
1172 | + port=port, |
1173 | + username=username, |
1174 | + password=password) |
1175 | + |
1176 | + # NOTE(beisner): extra debug here re: pika hang potential: |
1177 | + # https://github.com/pika/pika/issues/297 |
1178 | + # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw |
1179 | + self.log.debug('Defining channel...') |
1180 | + channel = connection.channel() |
1181 | + self.log.debug('Declaring queue...') |
1182 | + channel.queue_declare(queue=queue, auto_delete=False, durable=True) |
1183 | + self.log.debug('Publishing message...') |
1184 | + channel.basic_publish(exchange='', routing_key=queue, body=message) |
1185 | + self.log.debug('Closing channel...') |
1186 | + channel.close() |
1187 | + self.log.debug('Closing connection...') |
1188 | + connection.close() |
1189 | + |
1190 | + def get_amqp_message_by_unit(self, sentry_unit, queue="test", |
1191 | + username="testuser1", |
1192 | + password="changeme", |
1193 | + ssl=False, port=None): |
1194 | + """Get an amqp message from a rmq juju unit. |
1195 | + |
1196 | + :param sentry_unit: sentry unit pointer |
1197 | + :param queue: message queue, default to test |
1198 | + :param username: amqp user name, default to testuser1 |
1199 | + :param password: amqp user password |
1200 | + :param ssl: boolean, default to False |
1201 | + :param port: amqp port, use defaults if None |
1202 | + :returns: amqp message body as string. Raise if get fails. |
1203 | + """ |
1204 | + connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, |
1205 | + port=port, |
1206 | + username=username, |
1207 | + password=password) |
1208 | + channel = connection.channel() |
1209 | + method_frame, _, body = channel.basic_get(queue) |
1210 | + |
1211 | + if method_frame: |
1212 | + self.log.debug('Retreived message from {} queue:\n{}'.format(queue, |
1213 | + body)) |
1214 | + channel.basic_ack(method_frame.delivery_tag) |
1215 | + channel.close() |
1216 | + connection.close() |
1217 | + return body |
1218 | + else: |
1219 | + msg = 'No message retrieved.' |
1220 | + amulet.raise_status(amulet.FAIL, msg) |
1221 | |
1222 | === modified file 'unit_tests/test_nova_compute_hooks.py' |
1223 | --- unit_tests/test_nova_compute_hooks.py 2015-08-13 10:54:25 +0000 |
1224 | +++ unit_tests/test_nova_compute_hooks.py 2015-09-10 09:35:32 +0000 |
1225 | @@ -54,6 +54,9 @@ |
1226 | # misc_utils |
1227 | 'ensure_ceph_keyring', |
1228 | 'execd_preinstall', |
1229 | + 'assert_libvirt_imagebackend_allowed', |
1230 | + 'is_request_complete', |
1231 | + 'send_request_if_needed', |
1232 | # socket |
1233 | 'gethostname', |
1234 | 'create_sysctl', |
1235 | @@ -450,6 +453,9 @@ |
1236 | @patch.object(hooks, 'CONFIGS') |
1237 | def test_ceph_changed_with_key_and_relation_data(self, configs, |
1238 | service_name): |
1239 | + self.test_config.set('libvirt-image-backend', 'rbd') |
1240 | + self.is_request_complete.return_value = True |
1241 | + self.assert_libvirt_imagebackend_allowed.return_value = True |
1242 | configs.complete_contexts = MagicMock() |
1243 | configs.complete_contexts.return_value = ['ceph'] |
1244 | configs.write = MagicMock() |
1245 | @@ -462,6 +468,7 @@ |
1246 | call('/etc/nova/nova.conf'), |
1247 | ] |
1248 | self.assertEquals(ex, configs.write.call_args_list) |
1249 | + self.service_restart.assert_called_with('nova-compute') |
1250 | |
1251 | @patch.object(hooks, 'CONFIGS') |
1252 | def test_neutron_plugin_changed(self, configs): |
charm_lint_check #9592 nova-compute-next for gnuoy mp269377
LINT OK: passed
Build: http:// 10.245. 162.77: 8080/job/ charm_lint_ check/9592/