Merge lp:~gnuoy/charms/trusty/ceph/1453940 into lp:~openstack-charmers-archive/charms/trusty/ceph/next
- Trusty Tahr (14.04)
- 1453940
- Merge into next
Status: | Merged |
---|---|
Merged at revision: | 117 |
Proposed branch: | lp:~gnuoy/charms/trusty/ceph/1453940 |
Merge into: | lp:~openstack-charmers-archive/charms/trusty/ceph/next |
Diff against target: |
1195 lines (+885/-62) 7 files modified
hooks/ceph_broker.py (+12/-2) hooks/charmhelpers/contrib/storage/linux/ceph.py (+224/-2) hooks/hooks.py (+9/-1) tests/charmhelpers/contrib/amulet/utils.py (+234/-52) tests/charmhelpers/contrib/openstack/amulet/deployment.py (+20/-5) tests/charmhelpers/contrib/openstack/amulet/utils.py (+359/-0) unit_tests/test_ceph_broker.py (+27/-0) |
To merge this branch: | bzr merge lp:~gnuoy/charms/trusty/ceph/1453940 |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Edward Hope-Morley | Approve | ||
Review via email: mp+268614@code.launchpad.net |
Commit message
Description of the change
Edward Hope-Morley (hopem) : | # |
Liam Young (gnuoy) : | # |
Edward Hope-Morley (hopem) : | # |
uosci-testing-bot (uosci-testing-bot) wrote : | # |
charm_unit_test #8678 ceph-next for gnuoy mp268614
UNIT OK: passed
uosci-testing-bot (uosci-testing-bot) wrote : | # |
charm_lint_check #9384 ceph-next for gnuoy mp268614
LINT OK: passed
uosci-testing-bot (uosci-testing-bot) wrote : | # |
charm_amulet_test #6247 ceph-next for gnuoy mp268614
AMULET OK: passed
Build: http://
- 138. By Liam Young
-
Charm helper sync
uosci-testing-bot (uosci-testing-bot) wrote : | # |
charm_unit_test #8778 ceph-next for gnuoy mp268614
UNIT OK: passed
uosci-testing-bot (uosci-testing-bot) wrote : | # |
charm_lint_check #9538 ceph-next for gnuoy mp268614
LINT OK: passed
uosci-testing-bot (uosci-testing-bot) wrote : | # |
charm_amulet_test #6308 ceph-next for gnuoy mp268614
AMULET OK: passed
Build: http://
- 139. By Liam Young
-
Charm helper sync
uosci-testing-bot (uosci-testing-bot) wrote : | # |
charm_lint_check #9710 ceph-next for gnuoy mp268614
LINT OK: passed
uosci-testing-bot (uosci-testing-bot) wrote : | # |
charm_unit_test #8943 ceph-next for gnuoy mp268614
UNIT OK: passed
uosci-testing-bot (uosci-testing-bot) wrote : | # |
charm_amulet_test #6344 ceph-next for gnuoy mp268614
AMULET OK: passed
Build: http://
Preview Diff
1 | === modified file 'hooks/ceph_broker.py' | |||
2 | --- hooks/ceph_broker.py 2014-11-19 21:33:12 +0000 | |||
3 | +++ hooks/ceph_broker.py 2015-09-10 09:35:21 +0000 | |||
4 | @@ -31,10 +31,16 @@ | |||
5 | 31 | This is a versioned api. API version must be supplied by the client making | 31 | This is a versioned api. API version must be supplied by the client making |
6 | 32 | the request. | 32 | the request. |
7 | 33 | """ | 33 | """ |
8 | 34 | request_id = reqs.get('request-id') | ||
9 | 34 | try: | 35 | try: |
10 | 35 | version = reqs.get('api-version') | 36 | version = reqs.get('api-version') |
11 | 36 | if version == 1: | 37 | if version == 1: |
13 | 37 | return process_requests_v1(reqs['ops']) | 38 | log('Processing request {}'.format(request_id), level=DEBUG) |
14 | 39 | resp = process_requests_v1(reqs['ops']) | ||
15 | 40 | if request_id: | ||
16 | 41 | resp['request-id'] = request_id | ||
17 | 42 | |||
18 | 43 | return resp | ||
19 | 38 | 44 | ||
20 | 39 | except Exception as exc: | 45 | except Exception as exc: |
21 | 40 | log(str(exc), level=ERROR) | 46 | log(str(exc), level=ERROR) |
22 | @@ -44,7 +50,11 @@ | |||
23 | 44 | return {'exit-code': 1, 'stderr': msg} | 50 | return {'exit-code': 1, 'stderr': msg} |
24 | 45 | 51 | ||
25 | 46 | msg = ("Missing or invalid api version (%s)" % (version)) | 52 | msg = ("Missing or invalid api version (%s)" % (version)) |
27 | 47 | return {'exit-code': 1, 'stderr': msg} | 53 | resp = {'exit-code': 1, 'stderr': msg} |
28 | 54 | if request_id: | ||
29 | 55 | resp['request-id'] = request_id | ||
30 | 56 | |||
31 | 57 | return resp | ||
32 | 48 | 58 | ||
33 | 49 | 59 | ||
34 | 50 | def process_requests_v1(reqs): | 60 | def process_requests_v1(reqs): |
35 | 51 | 61 | ||
36 | === modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py' | |||
37 | --- hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-07-29 10:48:21 +0000 | |||
38 | +++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-09-10 09:35:21 +0000 | |||
39 | @@ -28,6 +28,7 @@ | |||
40 | 28 | import shutil | 28 | import shutil |
41 | 29 | import json | 29 | import json |
42 | 30 | import time | 30 | import time |
43 | 31 | import uuid | ||
44 | 31 | 32 | ||
45 | 32 | from subprocess import ( | 33 | from subprocess import ( |
46 | 33 | check_call, | 34 | check_call, |
47 | @@ -35,8 +36,10 @@ | |||
48 | 35 | CalledProcessError, | 36 | CalledProcessError, |
49 | 36 | ) | 37 | ) |
50 | 37 | from charmhelpers.core.hookenv import ( | 38 | from charmhelpers.core.hookenv import ( |
51 | 39 | local_unit, | ||
52 | 38 | relation_get, | 40 | relation_get, |
53 | 39 | relation_ids, | 41 | relation_ids, |
54 | 42 | relation_set, | ||
55 | 40 | related_units, | 43 | related_units, |
56 | 41 | log, | 44 | log, |
57 | 42 | DEBUG, | 45 | DEBUG, |
58 | @@ -411,17 +414,52 @@ | |||
59 | 411 | 414 | ||
60 | 412 | The API is versioned and defaults to version 1. | 415 | The API is versioned and defaults to version 1. |
61 | 413 | """ | 416 | """ |
63 | 414 | def __init__(self, api_version=1): | 417 | def __init__(self, api_version=1, request_id=None): |
64 | 415 | self.api_version = api_version | 418 | self.api_version = api_version |
65 | 419 | if request_id: | ||
66 | 420 | self.request_id = request_id | ||
67 | 421 | else: | ||
68 | 422 | self.request_id = str(uuid.uuid1()) | ||
69 | 416 | self.ops = [] | 423 | self.ops = [] |
70 | 417 | 424 | ||
71 | 418 | def add_op_create_pool(self, name, replica_count=3): | 425 | def add_op_create_pool(self, name, replica_count=3): |
72 | 419 | self.ops.append({'op': 'create-pool', 'name': name, | 426 | self.ops.append({'op': 'create-pool', 'name': name, |
73 | 420 | 'replicas': replica_count}) | 427 | 'replicas': replica_count}) |
74 | 421 | 428 | ||
75 | 429 | def set_ops(self, ops): | ||
76 | 430 | """Set request ops to provided value. | ||
77 | 431 | |||
78 | 432 | Useful for injecting ops that come from a previous request | ||
79 | 433 | to allow comparisons to ensure validity. | ||
80 | 434 | """ | ||
81 | 435 | self.ops = ops | ||
82 | 436 | |||
83 | 422 | @property | 437 | @property |
84 | 423 | def request(self): | 438 | def request(self): |
86 | 424 | return json.dumps({'api-version': self.api_version, 'ops': self.ops}) | 439 | return json.dumps({'api-version': self.api_version, 'ops': self.ops, |
87 | 440 | 'request-id': self.request_id}) | ||
88 | 441 | |||
89 | 442 | def _ops_equal(self, other): | ||
90 | 443 | if len(self.ops) == len(other.ops): | ||
91 | 444 | for req_no in range(0, len(self.ops)): | ||
92 | 445 | for key in ['replicas', 'name', 'op']: | ||
93 | 446 | if self.ops[req_no][key] != other.ops[req_no][key]: | ||
94 | 447 | return False | ||
95 | 448 | else: | ||
96 | 449 | return False | ||
97 | 450 | return True | ||
98 | 451 | |||
99 | 452 | def __eq__(self, other): | ||
100 | 453 | if not isinstance(other, self.__class__): | ||
101 | 454 | return False | ||
102 | 455 | if self.api_version == other.api_version and \ | ||
103 | 456 | self._ops_equal(other): | ||
104 | 457 | return True | ||
105 | 458 | else: | ||
106 | 459 | return False | ||
107 | 460 | |||
108 | 461 | def __ne__(self, other): | ||
109 | 462 | return not self.__eq__(other) | ||
110 | 425 | 463 | ||
111 | 426 | 464 | ||
112 | 427 | class CephBrokerRsp(object): | 465 | class CephBrokerRsp(object): |
113 | @@ -431,14 +469,198 @@ | |||
114 | 431 | 469 | ||
115 | 432 | The API is versioned and defaults to version 1. | 470 | The API is versioned and defaults to version 1. |
116 | 433 | """ | 471 | """ |
117 | 472 | |||
118 | 434 | def __init__(self, encoded_rsp): | 473 | def __init__(self, encoded_rsp): |
119 | 435 | self.api_version = None | 474 | self.api_version = None |
120 | 436 | self.rsp = json.loads(encoded_rsp) | 475 | self.rsp = json.loads(encoded_rsp) |
121 | 437 | 476 | ||
122 | 438 | @property | 477 | @property |
123 | 478 | def request_id(self): | ||
124 | 479 | return self.rsp.get('request-id') | ||
125 | 480 | |||
126 | 481 | @property | ||
127 | 439 | def exit_code(self): | 482 | def exit_code(self): |
128 | 440 | return self.rsp.get('exit-code') | 483 | return self.rsp.get('exit-code') |
129 | 441 | 484 | ||
130 | 442 | @property | 485 | @property |
131 | 443 | def exit_msg(self): | 486 | def exit_msg(self): |
132 | 444 | return self.rsp.get('stderr') | 487 | return self.rsp.get('stderr') |
133 | 488 | |||
134 | 489 | |||
135 | 490 | # Ceph Broker Conversation: | ||
136 | 491 | # If a charm needs an action to be taken by ceph it can create a CephBrokerRq | ||
137 | 492 | # and send that request to ceph via the ceph relation. The CephBrokerRq has a | ||
138 | 493 | # unique id so that the client can identify which CephBrokerRsp is associated | ||
139 | 494 | # with the request. Ceph will also respond to each client unit individually | ||
140 | 495 | # creating a response key per client unit eg glance/0 will get a CephBrokerRsp | ||
141 | 496 | # via key broker-rsp-glance-0 | ||
142 | 497 | # | ||
143 | 498 | # To use this the charm can just do something like: | ||
144 | 499 | # | ||
145 | 500 | # from charmhelpers.contrib.storage.linux.ceph import ( | ||
146 | 501 | # send_request_if_needed, | ||
147 | 502 | # is_request_complete, | ||
148 | 503 | # CephBrokerRq, | ||
149 | 504 | # ) | ||
150 | 505 | # | ||
151 | 506 | # @hooks.hook('ceph-relation-changed') | ||
152 | 507 | # def ceph_changed(): | ||
153 | 508 | # rq = CephBrokerRq() | ||
154 | 509 | # rq.add_op_create_pool(name='poolname', replica_count=3) | ||
155 | 510 | # | ||
156 | 511 | # if is_request_complete(rq): | ||
157 | 512 | # <Request complete actions> | ||
158 | 513 | # else: | ||
159 | 514 | # send_request_if_needed(get_ceph_request()) | ||
160 | 515 | # | ||
161 | 516 | # CephBrokerRq and CephBrokerRsp are serialized into JSON. Below is an example | ||
162 | 517 | # of glance having sent a request to ceph which ceph has successfully processed | ||
163 | 518 | # 'ceph:8': { | ||
164 | 519 | # 'ceph/0': { | ||
165 | 520 | # 'auth': 'cephx', | ||
166 | 521 | # 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}', | ||
167 | 522 | # 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}', | ||
168 | 523 | # 'ceph-public-address': '10.5.44.103', | ||
169 | 524 | # 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==', | ||
170 | 525 | # 'private-address': '10.5.44.103', | ||
171 | 526 | # }, | ||
172 | 527 | # 'glance/0': { | ||
173 | 528 | # 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", ' | ||
174 | 529 | # '"ops": [{"replicas": 3, "name": "glance", ' | ||
175 | 530 | # '"op": "create-pool"}]}'), | ||
176 | 531 | # 'private-address': '10.5.44.109', | ||
177 | 532 | # }, | ||
178 | 533 | # } | ||
179 | 534 | |||
180 | 535 | def get_previous_request(rid): | ||
181 | 536 | """Return the last ceph broker request sent on a given relation | ||
182 | 537 | |||
183 | 538 | @param rid: Relation id to query for request | ||
184 | 539 | """ | ||
185 | 540 | request = None | ||
186 | 541 | broker_req = relation_get(attribute='broker_req', rid=rid, | ||
187 | 542 | unit=local_unit()) | ||
188 | 543 | if broker_req: | ||
189 | 544 | request_data = json.loads(broker_req) | ||
190 | 545 | request = CephBrokerRq(api_version=request_data['api-version'], | ||
191 | 546 | request_id=request_data['request-id']) | ||
192 | 547 | request.set_ops(request_data['ops']) | ||
193 | 548 | |||
194 | 549 | return request | ||
195 | 550 | |||
196 | 551 | |||
197 | 552 | def get_request_states(request): | ||
198 | 553 | """Return a dict of requests per relation id with their corresponding | ||
199 | 554 | completion state. | ||
200 | 555 | |||
201 | 556 | This allows a charm, which has a request for ceph, to see whether there is | ||
202 | 557 | an equivalent request already being processed and if so what state that | ||
203 | 558 | request is in. | ||
204 | 559 | |||
205 | 560 | @param request: A CephBrokerRq object | ||
206 | 561 | """ | ||
207 | 562 | complete = [] | ||
208 | 563 | requests = {} | ||
209 | 564 | for rid in relation_ids('ceph'): | ||
210 | 565 | complete = False | ||
211 | 566 | previous_request = get_previous_request(rid) | ||
212 | 567 | if request == previous_request: | ||
213 | 568 | sent = True | ||
214 | 569 | complete = is_request_complete_for_rid(previous_request, rid) | ||
215 | 570 | else: | ||
216 | 571 | sent = False | ||
217 | 572 | complete = False | ||
218 | 573 | |||
219 | 574 | requests[rid] = { | ||
220 | 575 | 'sent': sent, | ||
221 | 576 | 'complete': complete, | ||
222 | 577 | } | ||
223 | 578 | |||
224 | 579 | return requests | ||
225 | 580 | |||
226 | 581 | |||
227 | 582 | def is_request_sent(request): | ||
228 | 583 | """Check to see if a functionally equivalent request has already been sent | ||
229 | 584 | |||
230 | 585 | Returns True if a similar request has been sent | ||
231 | 586 | |||
232 | 587 | @param request: A CephBrokerRq object | ||
233 | 588 | """ | ||
234 | 589 | states = get_request_states(request) | ||
235 | 590 | for rid in states.keys(): | ||
236 | 591 | if not states[rid]['sent']: | ||
237 | 592 | return False | ||
238 | 593 | |||
239 | 594 | return True | ||
240 | 595 | |||
241 | 596 | |||
242 | 597 | def is_request_complete(request): | ||
243 | 598 | """Check to see if a functionally equivalent request has already been | ||
244 | 599 | completed | ||
245 | 600 | |||
246 | 601 | Returns True if a similar request has been completed | ||
247 | 602 | |||
248 | 603 | @param request: A CephBrokerRq object | ||
249 | 604 | """ | ||
250 | 605 | states = get_request_states(request) | ||
251 | 606 | for rid in states.keys(): | ||
252 | 607 | if not states[rid]['complete']: | ||
253 | 608 | return False | ||
254 | 609 | |||
255 | 610 | return True | ||
256 | 611 | |||
257 | 612 | |||
258 | 613 | def is_request_complete_for_rid(request, rid): | ||
259 | 614 | """Check if a given request has been completed on the given relation | ||
260 | 615 | |||
261 | 616 | @param request: A CephBrokerRq object | ||
262 | 617 | @param rid: Relation ID | ||
263 | 618 | """ | ||
264 | 619 | broker_key = get_broker_rsp_key() | ||
265 | 620 | for unit in related_units(rid): | ||
266 | 621 | rdata = relation_get(rid=rid, unit=unit) | ||
267 | 622 | if rdata.get(broker_key): | ||
268 | 623 | rsp = CephBrokerRsp(rdata.get(broker_key)) | ||
269 | 624 | if rsp.request_id == request.request_id: | ||
270 | 625 | if not rsp.exit_code: | ||
271 | 626 | return True | ||
272 | 627 | else: | ||
273 | 628 | # The remote unit sent no reply targeted at this unit so either the | ||
274 | 629 | # remote ceph cluster does not support unit targeted replies or it | ||
275 | 630 | # has not processed our request yet. | ||
276 | 631 | if rdata.get('broker_rsp'): | ||
277 | 632 | request_data = json.loads(rdata['broker_rsp']) | ||
278 | 633 | if request_data.get('request-id'): | ||
279 | 634 | log('Ignoring legacy broker_rsp without unit key as remote ' | ||
280 | 635 | 'service supports unit specific replies', level=DEBUG) | ||
281 | 636 | else: | ||
282 | 637 | log('Using legacy broker_rsp as remote service does not ' | ||
283 | 638 | 'supports unit specific replies', level=DEBUG) | ||
284 | 639 | rsp = CephBrokerRsp(rdata['broker_rsp']) | ||
285 | 640 | if not rsp.exit_code: | ||
286 | 641 | return True | ||
287 | 642 | |||
288 | 643 | return False | ||
289 | 644 | |||
290 | 645 | |||
291 | 646 | def get_broker_rsp_key(): | ||
292 | 647 | """Return broker response key for this unit | ||
293 | 648 | |||
294 | 649 | This is the key that ceph is going to use to pass request status | ||
295 | 650 | information back to this unit | ||
296 | 651 | """ | ||
297 | 652 | return 'broker-rsp-' + local_unit().replace('/', '-') | ||
298 | 653 | |||
299 | 654 | |||
300 | 655 | def send_request_if_needed(request): | ||
301 | 656 | """Send broker request if an equivalent request has not already been sent | ||
302 | 657 | |||
303 | 658 | @param request: A CephBrokerRq object | ||
304 | 659 | """ | ||
305 | 660 | if is_request_sent(request): | ||
306 | 661 | log('Request already sent but not complete, not sending new request', | ||
307 | 662 | level=DEBUG) | ||
308 | 663 | else: | ||
309 | 664 | for rid in relation_ids('ceph'): | ||
310 | 665 | log('Sending request {}'.format(request.request_id), level=DEBUG) | ||
311 | 666 | relation_set(relation_id=rid, broker_req=request.request) | ||
312 | 445 | 667 | ||
313 | === modified file 'hooks/hooks.py' | |||
314 | --- hooks/hooks.py 2015-03-23 17:40:42 +0000 | |||
315 | +++ hooks/hooks.py 2015-09-10 09:35:21 +0000 | |||
316 | @@ -319,7 +319,15 @@ | |||
317 | 319 | log("Not leader - ignoring broker request", level=DEBUG) | 319 | log("Not leader - ignoring broker request", level=DEBUG) |
318 | 320 | else: | 320 | else: |
319 | 321 | rsp = process_requests(settings['broker_req']) | 321 | rsp = process_requests(settings['broker_req']) |
321 | 322 | relation_set(relation_settings={'broker_rsp': rsp}) | 322 | unit_id = remote_unit().replace('/', '-') |
322 | 323 | unit_response_key = 'broker-rsp-' + unit_id | ||
323 | 324 | # broker_rsp is being left for backward compatibility, | ||
324 | 325 | # unit_response_key superscedes it | ||
325 | 326 | data = { | ||
326 | 327 | 'broker_rsp': rsp, | ||
327 | 328 | unit_response_key: rsp, | ||
328 | 329 | } | ||
329 | 330 | relation_set(relation_settings=data) | ||
330 | 323 | else: | 331 | else: |
331 | 324 | log('mon cluster not in quorum', level=DEBUG) | 332 | log('mon cluster not in quorum', level=DEBUG) |
332 | 325 | 333 | ||
333 | 326 | 334 | ||
334 | === modified file 'tests/charmhelpers/contrib/amulet/utils.py' | |||
335 | --- tests/charmhelpers/contrib/amulet/utils.py 2015-08-19 00:51:43 +0000 | |||
336 | +++ tests/charmhelpers/contrib/amulet/utils.py 2015-09-10 09:35:21 +0000 | |||
337 | @@ -19,9 +19,11 @@ | |||
338 | 19 | import logging | 19 | import logging |
339 | 20 | import os | 20 | import os |
340 | 21 | import re | 21 | import re |
341 | 22 | import socket | ||
342 | 22 | import subprocess | 23 | import subprocess |
343 | 23 | import sys | 24 | import sys |
344 | 24 | import time | 25 | import time |
345 | 26 | import uuid | ||
346 | 25 | 27 | ||
347 | 26 | import amulet | 28 | import amulet |
348 | 27 | import distro_info | 29 | import distro_info |
349 | @@ -114,7 +116,7 @@ | |||
350 | 114 | # /!\ DEPRECATION WARNING (beisner): | 116 | # /!\ DEPRECATION WARNING (beisner): |
351 | 115 | # New and existing tests should be rewritten to use | 117 | # New and existing tests should be rewritten to use |
352 | 116 | # validate_services_by_name() as it is aware of init systems. | 118 | # validate_services_by_name() as it is aware of init systems. |
354 | 117 | self.log.warn('/!\\ DEPRECATION WARNING: use ' | 119 | self.log.warn('DEPRECATION WARNING: use ' |
355 | 118 | 'validate_services_by_name instead of validate_services ' | 120 | 'validate_services_by_name instead of validate_services ' |
356 | 119 | 'due to init system differences.') | 121 | 'due to init system differences.') |
357 | 120 | 122 | ||
358 | @@ -269,33 +271,52 @@ | |||
359 | 269 | """Get last modification time of directory.""" | 271 | """Get last modification time of directory.""" |
360 | 270 | return sentry_unit.directory_stat(directory)['mtime'] | 272 | return sentry_unit.directory_stat(directory)['mtime'] |
361 | 271 | 273 | ||
380 | 272 | def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False): | 274 | def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None): |
381 | 273 | """Get process' start time. | 275 | """Get start time of a process based on the last modification time |
382 | 274 | 276 | of the /proc/pid directory. | |
383 | 275 | Determine start time of the process based on the last modification | 277 | |
384 | 276 | time of the /proc/pid directory. If pgrep_full is True, the process | 278 | :sentry_unit: The sentry unit to check for the service on |
385 | 277 | name is matched against the full command line. | 279 | :service: service name to look for in process table |
386 | 278 | """ | 280 | :pgrep_full: [Deprecated] Use full command line search mode with pgrep |
387 | 279 | if pgrep_full: | 281 | :returns: epoch time of service process start |
388 | 280 | cmd = 'pgrep -o -f {}'.format(service) | 282 | :param commands: list of bash commands |
389 | 281 | else: | 283 | :param sentry_units: list of sentry unit pointers |
390 | 282 | cmd = 'pgrep -o {}'.format(service) | 284 | :returns: None if successful; Failure message otherwise |
391 | 283 | cmd = cmd + ' | grep -v pgrep || exit 0' | 285 | """ |
392 | 284 | cmd_out = sentry_unit.run(cmd) | 286 | if pgrep_full is not None: |
393 | 285 | self.log.debug('CMDout: ' + str(cmd_out)) | 287 | # /!\ DEPRECATION WARNING (beisner): |
394 | 286 | if cmd_out[0]: | 288 | # No longer implemented, as pidof is now used instead of pgrep. |
395 | 287 | self.log.debug('Pid for %s %s' % (service, str(cmd_out[0]))) | 289 | # https://bugs.launchpad.net/charm-helpers/+bug/1474030 |
396 | 288 | proc_dir = '/proc/{}'.format(cmd_out[0].strip()) | 290 | self.log.warn('DEPRECATION WARNING: pgrep_full bool is no ' |
397 | 289 | return self._get_dir_mtime(sentry_unit, proc_dir) | 291 | 'longer implemented re: lp 1474030.') |
398 | 292 | |||
399 | 293 | pid_list = self.get_process_id_list(sentry_unit, service) | ||
400 | 294 | pid = pid_list[0] | ||
401 | 295 | proc_dir = '/proc/{}'.format(pid) | ||
402 | 296 | self.log.debug('Pid for {} on {}: {}'.format( | ||
403 | 297 | service, sentry_unit.info['unit_name'], pid)) | ||
404 | 298 | |||
405 | 299 | return self._get_dir_mtime(sentry_unit, proc_dir) | ||
406 | 290 | 300 | ||
407 | 291 | def service_restarted(self, sentry_unit, service, filename, | 301 | def service_restarted(self, sentry_unit, service, filename, |
409 | 292 | pgrep_full=False, sleep_time=20): | 302 | pgrep_full=None, sleep_time=20): |
410 | 293 | """Check if service was restarted. | 303 | """Check if service was restarted. |
411 | 294 | 304 | ||
412 | 295 | Compare a service's start time vs a file's last modification time | 305 | Compare a service's start time vs a file's last modification time |
413 | 296 | (such as a config file for that service) to determine if the service | 306 | (such as a config file for that service) to determine if the service |
414 | 297 | has been restarted. | 307 | has been restarted. |
415 | 298 | """ | 308 | """ |
416 | 309 | # /!\ DEPRECATION WARNING (beisner): | ||
417 | 310 | # This method is prone to races in that no before-time is known. | ||
418 | 311 | # Use validate_service_config_changed instead. | ||
419 | 312 | |||
420 | 313 | # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now | ||
421 | 314 | # used instead of pgrep. pgrep_full is still passed through to ensure | ||
422 | 315 | # deprecation WARNS. lp1474030 | ||
423 | 316 | self.log.warn('DEPRECATION WARNING: use ' | ||
424 | 317 | 'validate_service_config_changed instead of ' | ||
425 | 318 | 'service_restarted due to known races.') | ||
426 | 319 | |||
427 | 299 | time.sleep(sleep_time) | 320 | time.sleep(sleep_time) |
428 | 300 | if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= | 321 | if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= |
429 | 301 | self._get_file_mtime(sentry_unit, filename)): | 322 | self._get_file_mtime(sentry_unit, filename)): |
430 | @@ -304,15 +325,15 @@ | |||
431 | 304 | return False | 325 | return False |
432 | 305 | 326 | ||
433 | 306 | def service_restarted_since(self, sentry_unit, mtime, service, | 327 | def service_restarted_since(self, sentry_unit, mtime, service, |
436 | 307 | pgrep_full=False, sleep_time=20, | 328 | pgrep_full=None, sleep_time=20, |
437 | 308 | retry_count=2): | 329 | retry_count=2, retry_sleep_time=30): |
438 | 309 | """Check if service has been started after a given time. | 330 | """Check if service has been started after a given time. |
439 | 310 | 331 | ||
440 | 311 | Args: | 332 | Args: |
441 | 312 | sentry_unit (sentry): The sentry unit to check for the service on | 333 | sentry_unit (sentry): The sentry unit to check for the service on |
442 | 313 | mtime (float): The epoch time to check against | 334 | mtime (float): The epoch time to check against |
443 | 314 | service (string): service name to look for in process table | 335 | service (string): service name to look for in process table |
445 | 315 | pgrep_full (boolean): Use full command line search mode with pgrep | 336 | pgrep_full: [Deprecated] Use full command line search mode with pgrep |
446 | 316 | sleep_time (int): Seconds to sleep before looking for process | 337 | sleep_time (int): Seconds to sleep before looking for process |
447 | 317 | retry_count (int): If service is not found, how many times to retry | 338 | retry_count (int): If service is not found, how many times to retry |
448 | 318 | 339 | ||
449 | @@ -321,30 +342,44 @@ | |||
450 | 321 | False if service is older than mtime or if service was | 342 | False if service is older than mtime or if service was |
451 | 322 | not found. | 343 | not found. |
452 | 323 | """ | 344 | """ |
454 | 324 | self.log.debug('Checking %s restarted since %s' % (service, mtime)) | 345 | # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now |
455 | 346 | # used instead of pgrep. pgrep_full is still passed through to ensure | ||
456 | 347 | # deprecation WARNS. lp1474030 | ||
457 | 348 | |||
458 | 349 | unit_name = sentry_unit.info['unit_name'] | ||
459 | 350 | self.log.debug('Checking that %s service restarted since %s on ' | ||
460 | 351 | '%s' % (service, mtime, unit_name)) | ||
461 | 325 | time.sleep(sleep_time) | 352 | time.sleep(sleep_time) |
471 | 326 | proc_start_time = self._get_proc_start_time(sentry_unit, service, | 353 | proc_start_time = None |
472 | 327 | pgrep_full) | 354 | tries = 0 |
473 | 328 | while retry_count > 0 and not proc_start_time: | 355 | while tries <= retry_count and not proc_start_time: |
474 | 329 | self.log.debug('No pid file found for service %s, will retry %i ' | 356 | try: |
475 | 330 | 'more times' % (service, retry_count)) | 357 | proc_start_time = self._get_proc_start_time(sentry_unit, |
476 | 331 | time.sleep(30) | 358 | service, |
477 | 332 | proc_start_time = self._get_proc_start_time(sentry_unit, service, | 359 | pgrep_full) |
478 | 333 | pgrep_full) | 360 | self.log.debug('Attempt {} to get {} proc start time on {} ' |
479 | 334 | retry_count = retry_count - 1 | 361 | 'OK'.format(tries, service, unit_name)) |
480 | 362 | except IOError: | ||
481 | 363 | # NOTE(beisner) - race avoidance, proc may not exist yet. | ||
482 | 364 | # https://bugs.launchpad.net/charm-helpers/+bug/1474030 | ||
483 | 365 | self.log.debug('Attempt {} to get {} proc start time on {} ' | ||
484 | 366 | 'failed'.format(tries, service, unit_name)) | ||
485 | 367 | time.sleep(retry_sleep_time) | ||
486 | 368 | tries += 1 | ||
487 | 335 | 369 | ||
488 | 336 | if not proc_start_time: | 370 | if not proc_start_time: |
489 | 337 | self.log.warn('No proc start time found, assuming service did ' | 371 | self.log.warn('No proc start time found, assuming service did ' |
490 | 338 | 'not start') | 372 | 'not start') |
491 | 339 | return False | 373 | return False |
492 | 340 | if proc_start_time >= mtime: | 374 | if proc_start_time >= mtime: |
495 | 341 | self.log.debug('proc start time is newer than provided mtime' | 375 | self.log.debug('Proc start time is newer than provided mtime' |
496 | 342 | '(%s >= %s)' % (proc_start_time, mtime)) | 376 | '(%s >= %s) on %s (OK)' % (proc_start_time, |
497 | 377 | mtime, unit_name)) | ||
498 | 343 | return True | 378 | return True |
499 | 344 | else: | 379 | else: |
503 | 345 | self.log.warn('proc start time (%s) is older than provided mtime ' | 380 | self.log.warn('Proc start time (%s) is older than provided mtime ' |
504 | 346 | '(%s), service did not restart' % (proc_start_time, | 381 | '(%s) on %s, service did not ' |
505 | 347 | mtime)) | 382 | 'restart' % (proc_start_time, mtime, unit_name)) |
506 | 348 | return False | 383 | return False |
507 | 349 | 384 | ||
508 | 350 | def config_updated_since(self, sentry_unit, filename, mtime, | 385 | def config_updated_since(self, sentry_unit, filename, mtime, |
509 | @@ -374,8 +409,9 @@ | |||
510 | 374 | return False | 409 | return False |
511 | 375 | 410 | ||
512 | 376 | def validate_service_config_changed(self, sentry_unit, mtime, service, | 411 | def validate_service_config_changed(self, sentry_unit, mtime, service, |
515 | 377 | filename, pgrep_full=False, | 412 | filename, pgrep_full=None, |
516 | 378 | sleep_time=20, retry_count=2): | 413 | sleep_time=20, retry_count=2, |
517 | 414 | retry_sleep_time=30): | ||
518 | 379 | """Check service and file were updated after mtime | 415 | """Check service and file were updated after mtime |
519 | 380 | 416 | ||
520 | 381 | Args: | 417 | Args: |
521 | @@ -383,9 +419,10 @@ | |||
522 | 383 | mtime (float): The epoch time to check against | 419 | mtime (float): The epoch time to check against |
523 | 384 | service (string): service name to look for in process table | 420 | service (string): service name to look for in process table |
524 | 385 | filename (string): The file to check mtime of | 421 | filename (string): The file to check mtime of |
527 | 386 | pgrep_full (boolean): Use full command line search mode with pgrep | 422 | pgrep_full: [Deprecated] Use full command line search mode with pgrep |
528 | 387 | sleep_time (int): Seconds to sleep before looking for process | 423 | sleep_time (int): Initial sleep in seconds to pass to test helpers |
529 | 388 | retry_count (int): If service is not found, how many times to retry | 424 | retry_count (int): If service is not found, how many times to retry |
530 | 425 | retry_sleep_time (int): Time in seconds to wait between retries | ||
531 | 389 | 426 | ||
532 | 390 | Typical Usage: | 427 | Typical Usage: |
533 | 391 | u = OpenStackAmuletUtils(ERROR) | 428 | u = OpenStackAmuletUtils(ERROR) |
534 | @@ -402,15 +439,25 @@ | |||
535 | 402 | mtime, False if service is older than mtime or if service was | 439 | mtime, False if service is older than mtime or if service was |
536 | 403 | not found or if filename was modified before mtime. | 440 | not found or if filename was modified before mtime. |
537 | 404 | """ | 441 | """ |
547 | 405 | self.log.debug('Checking %s restarted since %s' % (service, mtime)) | 442 | |
548 | 406 | time.sleep(sleep_time) | 443 | # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now |
549 | 407 | service_restart = self.service_restarted_since(sentry_unit, mtime, | 444 | # used instead of pgrep. pgrep_full is still passed through to ensure |
550 | 408 | service, | 445 | # deprecation WARNS. lp1474030 |
551 | 409 | pgrep_full=pgrep_full, | 446 | |
552 | 410 | sleep_time=0, | 447 | service_restart = self.service_restarted_since( |
553 | 411 | retry_count=retry_count) | 448 | sentry_unit, mtime, |
554 | 412 | config_update = self.config_updated_since(sentry_unit, filename, mtime, | 449 | service, |
555 | 413 | sleep_time=0) | 450 | pgrep_full=pgrep_full, |
556 | 451 | sleep_time=sleep_time, | ||
557 | 452 | retry_count=retry_count, | ||
558 | 453 | retry_sleep_time=retry_sleep_time) | ||
559 | 454 | |||
560 | 455 | config_update = self.config_updated_since( | ||
561 | 456 | sentry_unit, | ||
562 | 457 | filename, | ||
563 | 458 | mtime, | ||
564 | 459 | sleep_time=0) | ||
565 | 460 | |||
566 | 414 | return service_restart and config_update | 461 | return service_restart and config_update |
567 | 415 | 462 | ||
568 | 416 | def get_sentry_time(self, sentry_unit): | 463 | def get_sentry_time(self, sentry_unit): |
569 | @@ -428,7 +475,6 @@ | |||
570 | 428 | """Return a list of all Ubuntu releases in order of release.""" | 475 | """Return a list of all Ubuntu releases in order of release.""" |
571 | 429 | _d = distro_info.UbuntuDistroInfo() | 476 | _d = distro_info.UbuntuDistroInfo() |
572 | 430 | _release_list = _d.all | 477 | _release_list = _d.all |
573 | 431 | self.log.debug('Ubuntu release list: {}'.format(_release_list)) | ||
574 | 432 | return _release_list | 478 | return _release_list |
575 | 433 | 479 | ||
576 | 434 | def file_to_url(self, file_rel_path): | 480 | def file_to_url(self, file_rel_path): |
577 | @@ -568,6 +614,142 @@ | |||
578 | 568 | 614 | ||
579 | 569 | return None | 615 | return None |
580 | 570 | 616 | ||
581 | 617 | def validate_sectionless_conf(self, file_contents, expected): | ||
582 | 618 | """A crude conf parser. Useful to inspect configuration files which | ||
583 | 619 | do not have section headers (as would be necessary in order to use | ||
584 | 620 | the configparser). Such as openstack-dashboard or rabbitmq confs.""" | ||
585 | 621 | for line in file_contents.split('\n'): | ||
586 | 622 | if '=' in line: | ||
587 | 623 | args = line.split('=') | ||
588 | 624 | if len(args) <= 1: | ||
589 | 625 | continue | ||
590 | 626 | key = args[0].strip() | ||
591 | 627 | value = args[1].strip() | ||
592 | 628 | if key in expected.keys(): | ||
593 | 629 | if expected[key] != value: | ||
594 | 630 | msg = ('Config mismatch. Expected, actual: {}, ' | ||
595 | 631 | '{}'.format(expected[key], value)) | ||
596 | 632 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
597 | 633 | |||
598 | 634 | def get_unit_hostnames(self, units): | ||
599 | 635 | """Return a dict of juju unit names to hostnames.""" | ||
600 | 636 | host_names = {} | ||
601 | 637 | for unit in units: | ||
602 | 638 | host_names[unit.info['unit_name']] = \ | ||
603 | 639 | str(unit.file_contents('/etc/hostname').strip()) | ||
604 | 640 | self.log.debug('Unit host names: {}'.format(host_names)) | ||
605 | 641 | return host_names | ||
606 | 642 | |||
607 | 643 | def run_cmd_unit(self, sentry_unit, cmd): | ||
608 | 644 | """Run a command on a unit, return the output and exit code.""" | ||
609 | 645 | output, code = sentry_unit.run(cmd) | ||
610 | 646 | if code == 0: | ||
611 | 647 | self.log.debug('{} `{}` command returned {} ' | ||
612 | 648 | '(OK)'.format(sentry_unit.info['unit_name'], | ||
613 | 649 | cmd, code)) | ||
614 | 650 | else: | ||
615 | 651 | msg = ('{} `{}` command returned {} ' | ||
616 | 652 | '{}'.format(sentry_unit.info['unit_name'], | ||
617 | 653 | cmd, code, output)) | ||
618 | 654 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
619 | 655 | return str(output), code | ||
620 | 656 | |||
621 | 657 | def file_exists_on_unit(self, sentry_unit, file_name): | ||
622 | 658 | """Check if a file exists on a unit.""" | ||
623 | 659 | try: | ||
624 | 660 | sentry_unit.file_stat(file_name) | ||
625 | 661 | return True | ||
626 | 662 | except IOError: | ||
627 | 663 | return False | ||
628 | 664 | except Exception as e: | ||
629 | 665 | msg = 'Error checking file {}: {}'.format(file_name, e) | ||
630 | 666 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
631 | 667 | |||
632 | 668 | def file_contents_safe(self, sentry_unit, file_name, | ||
633 | 669 | max_wait=60, fatal=False): | ||
634 | 670 | """Get file contents from a sentry unit. Wrap amulet file_contents | ||
635 | 671 | with retry logic to address races where a file checks as existing, | ||
636 | 672 | but no longer exists by the time file_contents is called. | ||
637 | 673 | Return None if file not found. Optionally raise if fatal is True.""" | ||
638 | 674 | unit_name = sentry_unit.info['unit_name'] | ||
639 | 675 | file_contents = False | ||
640 | 676 | tries = 0 | ||
641 | 677 | while not file_contents and tries < (max_wait / 4): | ||
642 | 678 | try: | ||
643 | 679 | file_contents = sentry_unit.file_contents(file_name) | ||
644 | 680 | except IOError: | ||
645 | 681 | self.log.debug('Attempt {} to open file {} from {} ' | ||
646 | 682 | 'failed'.format(tries, file_name, | ||
647 | 683 | unit_name)) | ||
648 | 684 | time.sleep(4) | ||
649 | 685 | tries += 1 | ||
650 | 686 | |||
651 | 687 | if file_contents: | ||
652 | 688 | return file_contents | ||
653 | 689 | elif not fatal: | ||
654 | 690 | return None | ||
655 | 691 | elif fatal: | ||
656 | 692 | msg = 'Failed to get file contents from unit.' | ||
657 | 693 | amulet.raise_status(amulet.FAIL, msg) | ||
658 | 694 | |||
659 | 695 | def port_knock_tcp(self, host="localhost", port=22, timeout=15): | ||
660 | 696 | """Open a TCP socket to check for a listening service on a host. | ||
661 | 697 | |||
662 | 698 | :param host: host name or IP address, default to localhost | ||
663 | 699 | :param port: TCP port number, default to 22 | ||
664 | 700 | :param timeout: Connect timeout, default to 15 seconds | ||
665 | 701 | :returns: True if successful, False if connect failed | ||
666 | 702 | """ | ||
667 | 703 | |||
668 | 704 | # Resolve host name if possible | ||
669 | 705 | try: | ||
670 | 706 | connect_host = socket.gethostbyname(host) | ||
671 | 707 | host_human = "{} ({})".format(connect_host, host) | ||
672 | 708 | except socket.error as e: | ||
673 | 709 | self.log.warn('Unable to resolve address: ' | ||
674 | 710 | '{} ({}) Trying anyway!'.format(host, e)) | ||
675 | 711 | connect_host = host | ||
676 | 712 | host_human = connect_host | ||
677 | 713 | |||
678 | 714 | # Attempt socket connection | ||
679 | 715 | try: | ||
680 | 716 | knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) | ||
681 | 717 | knock.settimeout(timeout) | ||
682 | 718 | knock.connect((connect_host, port)) | ||
683 | 719 | knock.close() | ||
684 | 720 | self.log.debug('Socket connect OK for host ' | ||
685 | 721 | '{} on port {}.'.format(host_human, port)) | ||
686 | 722 | return True | ||
687 | 723 | except socket.error as e: | ||
688 | 724 | self.log.debug('Socket connect FAIL for' | ||
689 | 725 | ' {} port {} ({})'.format(host_human, port, e)) | ||
690 | 726 | return False | ||
691 | 727 | |||
692 | 728 | def port_knock_units(self, sentry_units, port=22, | ||
693 | 729 | timeout=15, expect_success=True): | ||
694 | 730 | """Open a TCP socket to check for a listening service on each | ||
695 | 731 | listed juju unit. | ||
696 | 732 | |||
697 | 733 | :param sentry_units: list of sentry unit pointers | ||
698 | 734 | :param port: TCP port number, default to 22 | ||
699 | 735 | :param timeout: Connect timeout, default to 15 seconds | ||
700 | 736 | :expect_success: True by default, set False to invert logic | ||
701 | 737 | :returns: None if successful, Failure message otherwise | ||
702 | 738 | """ | ||
703 | 739 | for unit in sentry_units: | ||
704 | 740 | host = unit.info['public-address'] | ||
705 | 741 | connected = self.port_knock_tcp(host, port, timeout) | ||
706 | 742 | if not connected and expect_success: | ||
707 | 743 | return 'Socket connect failed.' | ||
708 | 744 | elif connected and not expect_success: | ||
709 | 745 | return 'Socket connected unexpectedly.' | ||
710 | 746 | |||
711 | 747 | def get_uuid_epoch_stamp(self): | ||
712 | 748 | """Returns a stamp string based on uuid4 and epoch time. Useful in | ||
713 | 749 | generating test messages which need to be unique-ish.""" | ||
714 | 750 | return '[{}-{}]'.format(uuid.uuid4(), time.time()) | ||
715 | 751 | |||
716 | 752 | # amulet juju action helpers: | ||
717 | 571 | def run_action(self, unit_sentry, action, | 753 | def run_action(self, unit_sentry, action, |
718 | 572 | _check_output=subprocess.check_output): | 754 | _check_output=subprocess.check_output): |
719 | 573 | """Run the named action on a given unit sentry. | 755 | """Run the named action on a given unit sentry. |
720 | 574 | 756 | ||
721 | === modified file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py' | |||
722 | --- tests/charmhelpers/contrib/openstack/amulet/deployment.py 2015-08-19 00:51:43 +0000 | |||
723 | +++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2015-09-10 09:35:21 +0000 | |||
724 | @@ -44,8 +44,15 @@ | |||
725 | 44 | Determine if the local branch being tested is derived from its | 44 | Determine if the local branch being tested is derived from its |
726 | 45 | stable or next (dev) branch, and based on this, use the corresponding | 45 | stable or next (dev) branch, and based on this, use the corresponding |
727 | 46 | stable or next branches for the other_services.""" | 46 | stable or next branches for the other_services.""" |
728 | 47 | |||
729 | 48 | # Charms outside the lp:~openstack-charmers namespace | ||
730 | 47 | base_charms = ['mysql', 'mongodb', 'nrpe'] | 49 | base_charms = ['mysql', 'mongodb', 'nrpe'] |
731 | 48 | 50 | ||
732 | 51 | # Force these charms to current series even when using an older series. | ||
733 | 52 | # ie. Use trusty/nrpe even when series is precise, as the P charm | ||
734 | 53 | # does not possess the necessary external master config and hooks. | ||
735 | 54 | force_series_current = ['nrpe'] | ||
736 | 55 | |||
737 | 49 | if self.series in ['precise', 'trusty']: | 56 | if self.series in ['precise', 'trusty']: |
738 | 50 | base_series = self.series | 57 | base_series = self.series |
739 | 51 | else: | 58 | else: |
740 | @@ -53,11 +60,17 @@ | |||
741 | 53 | 60 | ||
742 | 54 | if self.stable: | 61 | if self.stable: |
743 | 55 | for svc in other_services: | 62 | for svc in other_services: |
744 | 63 | if svc['name'] in force_series_current: | ||
745 | 64 | base_series = self.current_next | ||
746 | 65 | |||
747 | 56 | temp = 'lp:charms/{}/{}' | 66 | temp = 'lp:charms/{}/{}' |
748 | 57 | svc['location'] = temp.format(base_series, | 67 | svc['location'] = temp.format(base_series, |
749 | 58 | svc['name']) | 68 | svc['name']) |
750 | 59 | else: | 69 | else: |
751 | 60 | for svc in other_services: | 70 | for svc in other_services: |
752 | 71 | if svc['name'] in force_series_current: | ||
753 | 72 | base_series = self.current_next | ||
754 | 73 | |||
755 | 61 | if svc['name'] in base_charms: | 74 | if svc['name'] in base_charms: |
756 | 62 | temp = 'lp:charms/{}/{}' | 75 | temp = 'lp:charms/{}/{}' |
757 | 63 | svc['location'] = temp.format(base_series, | 76 | svc['location'] = temp.format(base_series, |
758 | @@ -77,21 +90,23 @@ | |||
759 | 77 | 90 | ||
760 | 78 | services = other_services | 91 | services = other_services |
761 | 79 | services.append(this_service) | 92 | services.append(this_service) |
762 | 93 | |||
763 | 94 | # Charms which should use the source config option | ||
764 | 80 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', | 95 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', |
765 | 81 | 'ceph-osd', 'ceph-radosgw'] | 96 | 'ceph-osd', 'ceph-radosgw'] |
769 | 82 | # Most OpenStack subordinate charms do not expose an origin option | 97 | |
770 | 83 | # as that is controlled by the principle. | 98 | # Charms which can not use openstack-origin, ie. many subordinates |
771 | 84 | ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] | 99 | no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] |
772 | 85 | 100 | ||
773 | 86 | if self.openstack: | 101 | if self.openstack: |
774 | 87 | for svc in services: | 102 | for svc in services: |
776 | 88 | if svc['name'] not in use_source + ignore: | 103 | if svc['name'] not in use_source + no_origin: |
777 | 89 | config = {'openstack-origin': self.openstack} | 104 | config = {'openstack-origin': self.openstack} |
778 | 90 | self.d.configure(svc['name'], config) | 105 | self.d.configure(svc['name'], config) |
779 | 91 | 106 | ||
780 | 92 | if self.source: | 107 | if self.source: |
781 | 93 | for svc in services: | 108 | for svc in services: |
783 | 94 | if svc['name'] in use_source and svc['name'] not in ignore: | 109 | if svc['name'] in use_source and svc['name'] not in no_origin: |
784 | 95 | config = {'source': self.source} | 110 | config = {'source': self.source} |
785 | 96 | self.d.configure(svc['name'], config) | 111 | self.d.configure(svc['name'], config) |
786 | 97 | 112 | ||
787 | 98 | 113 | ||
788 | === modified file 'tests/charmhelpers/contrib/openstack/amulet/utils.py' | |||
789 | --- tests/charmhelpers/contrib/openstack/amulet/utils.py 2015-06-29 14:25:54 +0000 | |||
790 | +++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2015-09-10 09:35:21 +0000 | |||
791 | @@ -27,6 +27,7 @@ | |||
792 | 27 | import heatclient.v1.client as heat_client | 27 | import heatclient.v1.client as heat_client |
793 | 28 | import keystoneclient.v2_0 as keystone_client | 28 | import keystoneclient.v2_0 as keystone_client |
794 | 29 | import novaclient.v1_1.client as nova_client | 29 | import novaclient.v1_1.client as nova_client |
795 | 30 | import pika | ||
796 | 30 | import swiftclient | 31 | import swiftclient |
797 | 31 | 32 | ||
798 | 32 | from charmhelpers.contrib.amulet.utils import ( | 33 | from charmhelpers.contrib.amulet.utils import ( |
799 | @@ -602,3 +603,361 @@ | |||
800 | 602 | self.log.debug('Ceph {} samples (OK): ' | 603 | self.log.debug('Ceph {} samples (OK): ' |
801 | 603 | '{}'.format(sample_type, samples)) | 604 | '{}'.format(sample_type, samples)) |
802 | 604 | return None | 605 | return None |
803 | 606 | |||
804 | 607 | # rabbitmq/amqp specific helpers: | ||
805 | 608 | def add_rmq_test_user(self, sentry_units, | ||
806 | 609 | username="testuser1", password="changeme"): | ||
807 | 610 | """Add a test user via the first rmq juju unit, check connection as | ||
808 | 611 | the new user against all sentry units. | ||
809 | 612 | |||
810 | 613 | :param sentry_units: list of sentry unit pointers | ||
811 | 614 | :param username: amqp user name, default to testuser1 | ||
812 | 615 | :param password: amqp user password | ||
813 | 616 | :returns: None if successful. Raise on error. | ||
814 | 617 | """ | ||
815 | 618 | self.log.debug('Adding rmq user ({})...'.format(username)) | ||
816 | 619 | |||
817 | 620 | # Check that user does not already exist | ||
818 | 621 | cmd_user_list = 'rabbitmqctl list_users' | ||
819 | 622 | output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) | ||
820 | 623 | if username in output: | ||
821 | 624 | self.log.warning('User ({}) already exists, returning ' | ||
822 | 625 | 'gracefully.'.format(username)) | ||
823 | 626 | return | ||
824 | 627 | |||
825 | 628 | perms = '".*" ".*" ".*"' | ||
826 | 629 | cmds = ['rabbitmqctl add_user {} {}'.format(username, password), | ||
827 | 630 | 'rabbitmqctl set_permissions {} {}'.format(username, perms)] | ||
828 | 631 | |||
829 | 632 | # Add user via first unit | ||
830 | 633 | for cmd in cmds: | ||
831 | 634 | output, _ = self.run_cmd_unit(sentry_units[0], cmd) | ||
832 | 635 | |||
833 | 636 | # Check connection against the other sentry_units | ||
834 | 637 | self.log.debug('Checking user connect against units...') | ||
835 | 638 | for sentry_unit in sentry_units: | ||
836 | 639 | connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, | ||
837 | 640 | username=username, | ||
838 | 641 | password=password) | ||
839 | 642 | connection.close() | ||
840 | 643 | |||
841 | 644 | def delete_rmq_test_user(self, sentry_units, username="testuser1"): | ||
842 | 645 | """Delete a rabbitmq user via the first rmq juju unit. | ||
843 | 646 | |||
844 | 647 | :param sentry_units: list of sentry unit pointers | ||
845 | 648 | :param username: amqp user name, default to testuser1 | ||
846 | 649 | :param password: amqp user password | ||
847 | 650 | :returns: None if successful or no such user. | ||
848 | 651 | """ | ||
849 | 652 | self.log.debug('Deleting rmq user ({})...'.format(username)) | ||
850 | 653 | |||
851 | 654 | # Check that the user exists | ||
852 | 655 | cmd_user_list = 'rabbitmqctl list_users' | ||
853 | 656 | output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) | ||
854 | 657 | |||
855 | 658 | if username not in output: | ||
856 | 659 | self.log.warning('User ({}) does not exist, returning ' | ||
857 | 660 | 'gracefully.'.format(username)) | ||
858 | 661 | return | ||
859 | 662 | |||
860 | 663 | # Delete the user | ||
861 | 664 | cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) | ||
862 | 665 | output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) | ||
863 | 666 | |||
864 | 667 | def get_rmq_cluster_status(self, sentry_unit): | ||
865 | 668 | """Execute rabbitmq cluster status command on a unit and return | ||
866 | 669 | the full output. | ||
867 | 670 | |||
868 | 671 | :param unit: sentry unit | ||
869 | 672 | :returns: String containing console output of cluster status command | ||
870 | 673 | """ | ||
871 | 674 | cmd = 'rabbitmqctl cluster_status' | ||
872 | 675 | output, _ = self.run_cmd_unit(sentry_unit, cmd) | ||
873 | 676 | self.log.debug('{} cluster_status:\n{}'.format( | ||
874 | 677 | sentry_unit.info['unit_name'], output)) | ||
875 | 678 | return str(output) | ||
876 | 679 | |||
877 | 680 | def get_rmq_cluster_running_nodes(self, sentry_unit): | ||
878 | 681 | """Parse rabbitmqctl cluster_status output string, return list of | ||
879 | 682 | running rabbitmq cluster nodes. | ||
880 | 683 | |||
881 | 684 | :param unit: sentry unit | ||
882 | 685 | :returns: List containing node names of running nodes | ||
883 | 686 | """ | ||
884 | 687 | # NOTE(beisner): rabbitmqctl cluster_status output is not | ||
885 | 688 | # json-parsable, do string chop foo, then json.loads that. | ||
886 | 689 | str_stat = self.get_rmq_cluster_status(sentry_unit) | ||
887 | 690 | if 'running_nodes' in str_stat: | ||
888 | 691 | pos_start = str_stat.find("{running_nodes,") + 15 | ||
889 | 692 | pos_end = str_stat.find("]},", pos_start) + 1 | ||
890 | 693 | str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') | ||
891 | 694 | run_nodes = json.loads(str_run_nodes) | ||
892 | 695 | return run_nodes | ||
893 | 696 | else: | ||
894 | 697 | return [] | ||
895 | 698 | |||
896 | 699 | def validate_rmq_cluster_running_nodes(self, sentry_units): | ||
897 | 700 | """Check that all rmq unit hostnames are represented in the | ||
898 | 701 | cluster_status output of all units. | ||
899 | 702 | |||
900 | 703 | :param host_names: dict of juju unit names to host names | ||
901 | 704 | :param units: list of sentry unit pointers (all rmq units) | ||
902 | 705 | :returns: None if successful, otherwise return error message | ||
903 | 706 | """ | ||
904 | 707 | host_names = self.get_unit_hostnames(sentry_units) | ||
905 | 708 | errors = [] | ||
906 | 709 | |||
907 | 710 | # Query every unit for cluster_status running nodes | ||
908 | 711 | for query_unit in sentry_units: | ||
909 | 712 | query_unit_name = query_unit.info['unit_name'] | ||
910 | 713 | running_nodes = self.get_rmq_cluster_running_nodes(query_unit) | ||
911 | 714 | |||
912 | 715 | # Confirm that every unit is represented in the queried unit's | ||
913 | 716 | # cluster_status running nodes output. | ||
914 | 717 | for validate_unit in sentry_units: | ||
915 | 718 | val_host_name = host_names[validate_unit.info['unit_name']] | ||
916 | 719 | val_node_name = 'rabbit@{}'.format(val_host_name) | ||
917 | 720 | |||
918 | 721 | if val_node_name not in running_nodes: | ||
919 | 722 | errors.append('Cluster member check failed on {}: {} not ' | ||
920 | 723 | 'in {}\n'.format(query_unit_name, | ||
921 | 724 | val_node_name, | ||
922 | 725 | running_nodes)) | ||
923 | 726 | if errors: | ||
924 | 727 | return ''.join(errors) | ||
925 | 728 | |||
926 | 729 | def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): | ||
927 | 730 | """Check a single juju rmq unit for ssl and port in the config file.""" | ||
928 | 731 | host = sentry_unit.info['public-address'] | ||
929 | 732 | unit_name = sentry_unit.info['unit_name'] | ||
930 | 733 | |||
931 | 734 | conf_file = '/etc/rabbitmq/rabbitmq.config' | ||
932 | 735 | conf_contents = str(self.file_contents_safe(sentry_unit, | ||
933 | 736 | conf_file, max_wait=16)) | ||
934 | 737 | # Checks | ||
935 | 738 | conf_ssl = 'ssl' in conf_contents | ||
936 | 739 | conf_port = str(port) in conf_contents | ||
937 | 740 | |||
938 | 741 | # Port explicitly checked in config | ||
939 | 742 | if port and conf_port and conf_ssl: | ||
940 | 743 | self.log.debug('SSL is enabled @{}:{} ' | ||
941 | 744 | '({})'.format(host, port, unit_name)) | ||
942 | 745 | return True | ||
943 | 746 | elif port and not conf_port and conf_ssl: | ||
944 | 747 | self.log.debug('SSL is enabled @{} but not on port {} ' | ||
945 | 748 | '({})'.format(host, port, unit_name)) | ||
946 | 749 | return False | ||
947 | 750 | # Port not checked (useful when checking that ssl is disabled) | ||
948 | 751 | elif not port and conf_ssl: | ||
949 | 752 | self.log.debug('SSL is enabled @{}:{} ' | ||
950 | 753 | '({})'.format(host, port, unit_name)) | ||
951 | 754 | return True | ||
952 | 755 | elif not port and not conf_ssl: | ||
953 | 756 | self.log.debug('SSL not enabled @{}:{} ' | ||
954 | 757 | '({})'.format(host, port, unit_name)) | ||
955 | 758 | return False | ||
956 | 759 | else: | ||
957 | 760 | msg = ('Unknown condition when checking SSL status @{}:{} ' | ||
958 | 761 | '({})'.format(host, port, unit_name)) | ||
959 | 762 | amulet.raise_status(amulet.FAIL, msg) | ||
960 | 763 | |||
961 | 764 | def validate_rmq_ssl_enabled_units(self, sentry_units, port=None): | ||
962 | 765 | """Check that ssl is enabled on rmq juju sentry units. | ||
963 | 766 | |||
964 | 767 | :param sentry_units: list of all rmq sentry units | ||
965 | 768 | :param port: optional ssl port override to validate | ||
966 | 769 | :returns: None if successful, otherwise return error message | ||
967 | 770 | """ | ||
968 | 771 | for sentry_unit in sentry_units: | ||
969 | 772 | if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port): | ||
970 | 773 | return ('Unexpected condition: ssl is disabled on unit ' | ||
971 | 774 | '({})'.format(sentry_unit.info['unit_name'])) | ||
972 | 775 | return None | ||
973 | 776 | |||
974 | 777 | def validate_rmq_ssl_disabled_units(self, sentry_units): | ||
975 | 778 | """Check that ssl is enabled on listed rmq juju sentry units. | ||
976 | 779 | |||
977 | 780 | :param sentry_units: list of all rmq sentry units | ||
978 | 781 | :returns: True if successful. Raise on error. | ||
979 | 782 | """ | ||
980 | 783 | for sentry_unit in sentry_units: | ||
981 | 784 | if self.rmq_ssl_is_enabled_on_unit(sentry_unit): | ||
982 | 785 | return ('Unexpected condition: ssl is enabled on unit ' | ||
983 | 786 | '({})'.format(sentry_unit.info['unit_name'])) | ||
984 | 787 | return None | ||
985 | 788 | |||
986 | 789 | def configure_rmq_ssl_on(self, sentry_units, deployment, | ||
987 | 790 | port=None, max_wait=60): | ||
988 | 791 | """Turn ssl charm config option on, with optional non-default | ||
989 | 792 | ssl port specification. Confirm that it is enabled on every | ||
990 | 793 | unit. | ||
991 | 794 | |||
992 | 795 | :param sentry_units: list of sentry units | ||
993 | 796 | :param deployment: amulet deployment object pointer | ||
994 | 797 | :param port: amqp port, use defaults if None | ||
995 | 798 | :param max_wait: maximum time to wait in seconds to confirm | ||
996 | 799 | :returns: None if successful. Raise on error. | ||
997 | 800 | """ | ||
998 | 801 | self.log.debug('Setting ssl charm config option: on') | ||
999 | 802 | |||
1000 | 803 | # Enable RMQ SSL | ||
1001 | 804 | config = {'ssl': 'on'} | ||
1002 | 805 | if port: | ||
1003 | 806 | config['ssl_port'] = port | ||
1004 | 807 | |||
1005 | 808 | deployment.configure('rabbitmq-server', config) | ||
1006 | 809 | |||
1007 | 810 | # Confirm | ||
1008 | 811 | tries = 0 | ||
1009 | 812 | ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) | ||
1010 | 813 | while ret and tries < (max_wait / 4): | ||
1011 | 814 | time.sleep(4) | ||
1012 | 815 | self.log.debug('Attempt {}: {}'.format(tries, ret)) | ||
1013 | 816 | ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) | ||
1014 | 817 | tries += 1 | ||
1015 | 818 | |||
1016 | 819 | if ret: | ||
1017 | 820 | amulet.raise_status(amulet.FAIL, ret) | ||
1018 | 821 | |||
1019 | 822 | def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): | ||
1020 | 823 | """Turn ssl charm config option off, confirm that it is disabled | ||
1021 | 824 | on every unit. | ||
1022 | 825 | |||
1023 | 826 | :param sentry_units: list of sentry units | ||
1024 | 827 | :param deployment: amulet deployment object pointer | ||
1025 | 828 | :param max_wait: maximum time to wait in seconds to confirm | ||
1026 | 829 | :returns: None if successful. Raise on error. | ||
1027 | 830 | """ | ||
1028 | 831 | self.log.debug('Setting ssl charm config option: off') | ||
1029 | 832 | |||
1030 | 833 | # Disable RMQ SSL | ||
1031 | 834 | config = {'ssl': 'off'} | ||
1032 | 835 | deployment.configure('rabbitmq-server', config) | ||
1033 | 836 | |||
1034 | 837 | # Confirm | ||
1035 | 838 | tries = 0 | ||
1036 | 839 | ret = self.validate_rmq_ssl_disabled_units(sentry_units) | ||
1037 | 840 | while ret and tries < (max_wait / 4): | ||
1038 | 841 | time.sleep(4) | ||
1039 | 842 | self.log.debug('Attempt {}: {}'.format(tries, ret)) | ||
1040 | 843 | ret = self.validate_rmq_ssl_disabled_units(sentry_units) | ||
1041 | 844 | tries += 1 | ||
1042 | 845 | |||
1043 | 846 | if ret: | ||
1044 | 847 | amulet.raise_status(amulet.FAIL, ret) | ||
1045 | 848 | |||
1046 | 849 | def connect_amqp_by_unit(self, sentry_unit, ssl=False, | ||
1047 | 850 | port=None, fatal=True, | ||
1048 | 851 | username="testuser1", password="changeme"): | ||
1049 | 852 | """Establish and return a pika amqp connection to the rabbitmq service | ||
1050 | 853 | running on a rmq juju unit. | ||
1051 | 854 | |||
1052 | 855 | :param sentry_unit: sentry unit pointer | ||
1053 | 856 | :param ssl: boolean, default to False | ||
1054 | 857 | :param port: amqp port, use defaults if None | ||
1055 | 858 | :param fatal: boolean, default to True (raises on connect error) | ||
1056 | 859 | :param username: amqp user name, default to testuser1 | ||
1057 | 860 | :param password: amqp user password | ||
1058 | 861 | :returns: pika amqp connection pointer or None if failed and non-fatal | ||
1059 | 862 | """ | ||
1060 | 863 | host = sentry_unit.info['public-address'] | ||
1061 | 864 | unit_name = sentry_unit.info['unit_name'] | ||
1062 | 865 | |||
1063 | 866 | # Default port logic if port is not specified | ||
1064 | 867 | if ssl and not port: | ||
1065 | 868 | port = 5671 | ||
1066 | 869 | elif not ssl and not port: | ||
1067 | 870 | port = 5672 | ||
1068 | 871 | |||
1069 | 872 | self.log.debug('Connecting to amqp on {}:{} ({}) as ' | ||
1070 | 873 | '{}...'.format(host, port, unit_name, username)) | ||
1071 | 874 | |||
1072 | 875 | try: | ||
1073 | 876 | credentials = pika.PlainCredentials(username, password) | ||
1074 | 877 | parameters = pika.ConnectionParameters(host=host, port=port, | ||
1075 | 878 | credentials=credentials, | ||
1076 | 879 | ssl=ssl, | ||
1077 | 880 | connection_attempts=3, | ||
1078 | 881 | retry_delay=5, | ||
1079 | 882 | socket_timeout=1) | ||
1080 | 883 | connection = pika.BlockingConnection(parameters) | ||
1081 | 884 | assert connection.server_properties['product'] == 'RabbitMQ' | ||
1082 | 885 | self.log.debug('Connect OK') | ||
1083 | 886 | return connection | ||
1084 | 887 | except Exception as e: | ||
1085 | 888 | msg = ('amqp connection failed to {}:{} as ' | ||
1086 | 889 | '{} ({})'.format(host, port, username, str(e))) | ||
1087 | 890 | if fatal: | ||
1088 | 891 | amulet.raise_status(amulet.FAIL, msg) | ||
1089 | 892 | else: | ||
1090 | 893 | self.log.warn(msg) | ||
1091 | 894 | return None | ||
1092 | 895 | |||
1093 | 896 | def publish_amqp_message_by_unit(self, sentry_unit, message, | ||
1094 | 897 | queue="test", ssl=False, | ||
1095 | 898 | username="testuser1", | ||
1096 | 899 | password="changeme", | ||
1097 | 900 | port=None): | ||
1098 | 901 | """Publish an amqp message to a rmq juju unit. | ||
1099 | 902 | |||
1100 | 903 | :param sentry_unit: sentry unit pointer | ||
1101 | 904 | :param message: amqp message string | ||
1102 | 905 | :param queue: message queue, default to test | ||
1103 | 906 | :param username: amqp user name, default to testuser1 | ||
1104 | 907 | :param password: amqp user password | ||
1105 | 908 | :param ssl: boolean, default to False | ||
1106 | 909 | :param port: amqp port, use defaults if None | ||
1107 | 910 | :returns: None. Raises exception if publish failed. | ||
1108 | 911 | """ | ||
1109 | 912 | self.log.debug('Publishing message to {} queue:\n{}'.format(queue, | ||
1110 | 913 | message)) | ||
1111 | 914 | connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, | ||
1112 | 915 | port=port, | ||
1113 | 916 | username=username, | ||
1114 | 917 | password=password) | ||
1115 | 918 | |||
1116 | 919 | # NOTE(beisner): extra debug here re: pika hang potential: | ||
1117 | 920 | # https://github.com/pika/pika/issues/297 | ||
1118 | 921 | # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw | ||
1119 | 922 | self.log.debug('Defining channel...') | ||
1120 | 923 | channel = connection.channel() | ||
1121 | 924 | self.log.debug('Declaring queue...') | ||
1122 | 925 | channel.queue_declare(queue=queue, auto_delete=False, durable=True) | ||
1123 | 926 | self.log.debug('Publishing message...') | ||
1124 | 927 | channel.basic_publish(exchange='', routing_key=queue, body=message) | ||
1125 | 928 | self.log.debug('Closing channel...') | ||
1126 | 929 | channel.close() | ||
1127 | 930 | self.log.debug('Closing connection...') | ||
1128 | 931 | connection.close() | ||
1129 | 932 | |||
1130 | 933 | def get_amqp_message_by_unit(self, sentry_unit, queue="test", | ||
1131 | 934 | username="testuser1", | ||
1132 | 935 | password="changeme", | ||
1133 | 936 | ssl=False, port=None): | ||
1134 | 937 | """Get an amqp message from a rmq juju unit. | ||
1135 | 938 | |||
1136 | 939 | :param sentry_unit: sentry unit pointer | ||
1137 | 940 | :param queue: message queue, default to test | ||
1138 | 941 | :param username: amqp user name, default to testuser1 | ||
1139 | 942 | :param password: amqp user password | ||
1140 | 943 | :param ssl: boolean, default to False | ||
1141 | 944 | :param port: amqp port, use defaults if None | ||
1142 | 945 | :returns: amqp message body as string. Raise if get fails. | ||
1143 | 946 | """ | ||
1144 | 947 | connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, | ||
1145 | 948 | port=port, | ||
1146 | 949 | username=username, | ||
1147 | 950 | password=password) | ||
1148 | 951 | channel = connection.channel() | ||
1149 | 952 | method_frame, _, body = channel.basic_get(queue) | ||
1150 | 953 | |||
1151 | 954 | if method_frame: | ||
1152 | 955 | self.log.debug('Retreived message from {} queue:\n{}'.format(queue, | ||
1153 | 956 | body)) | ||
1154 | 957 | channel.basic_ack(method_frame.delivery_tag) | ||
1155 | 958 | channel.close() | ||
1156 | 959 | connection.close() | ||
1157 | 960 | return body | ||
1158 | 961 | else: | ||
1159 | 962 | msg = 'No message retrieved.' | ||
1160 | 963 | amulet.raise_status(amulet.FAIL, msg) | ||
1161 | 605 | 964 | ||
1162 | === modified file 'unit_tests/test_ceph_broker.py' | |||
1163 | --- unit_tests/test_ceph_broker.py 2014-11-09 12:58:04 +0000 | |||
1164 | +++ unit_tests/test_ceph_broker.py 2015-09-10 09:35:21 +0000 | |||
1165 | @@ -70,3 +70,30 @@ | |||
1166 | 70 | mock_pool_exists.assert_called_with(service='admin', name='foo') | 70 | mock_pool_exists.assert_called_with(service='admin', name='foo') |
1167 | 71 | self.assertFalse(mock_create_pool.called) | 71 | self.assertFalse(mock_create_pool.called) |
1168 | 72 | self.assertEqual(json.loads(rc), {'exit-code': 0}) | 72 | self.assertEqual(json.loads(rc), {'exit-code': 0}) |
1169 | 73 | |||
1170 | 74 | @mock.patch('ceph_broker.create_pool') | ||
1171 | 75 | @mock.patch('ceph_broker.pool_exists') | ||
1172 | 76 | @mock.patch('ceph_broker.log') | ||
1173 | 77 | def test_process_requests_create_pool_rid(self, mock_log, mock_pool_exists, | ||
1174 | 78 | mock_create_pool): | ||
1175 | 79 | mock_pool_exists.return_value = False | ||
1176 | 80 | reqs = json.dumps({'api-version': 1, | ||
1177 | 81 | 'request-id': '1ef5aede', | ||
1178 | 82 | 'ops': [{'op': 'create-pool', 'name': | ||
1179 | 83 | 'foo', 'replicas': 3}]}) | ||
1180 | 84 | rc = ceph_broker.process_requests(reqs) | ||
1181 | 85 | mock_pool_exists.assert_called_with(service='admin', name='foo') | ||
1182 | 86 | mock_create_pool.assert_called_with(service='admin', name='foo', | ||
1183 | 87 | replicas=3) | ||
1184 | 88 | self.assertEqual(json.loads(rc)['exit-code'], 0) | ||
1185 | 89 | self.assertEqual(json.loads(rc)['request-id'], '1ef5aede') | ||
1186 | 90 | |||
1187 | 91 | @mock.patch('ceph_broker.log') | ||
1188 | 92 | def test_process_requests_invalid_api_rid(self, mock_log): | ||
1189 | 93 | reqs = json.dumps({'api-version': 0, 'request-id': '1ef5aede', | ||
1190 | 94 | 'ops': [{'op': 'create-pool'}]}) | ||
1191 | 95 | rc = ceph_broker.process_requests(reqs) | ||
1192 | 96 | self.assertEqual(json.loads(rc)['exit-code'], 1) | ||
1193 | 97 | self.assertEqual(json.loads(rc)['stderr'], | ||
1194 | 98 | "Missing or invalid api version (0)") | ||
1195 | 99 | self.assertEqual(json.loads(rc)['request-id'], '1ef5aede') |
I've tested this out and it seems good. Tried deploying then scaling for
ceph, cinder, glance and nova-compute and all had updated ceph.conf on the
client side. The use of request-id combined with unit-name scoped responses
seems to nicely avoid any collisions as well. I have a few comments on some
minor fixups inline.