Merge lp:~corey.bryant/charms/trusty/neutron-gateway/sync-ch into lp:~openstack-charmers-archive/charms/trusty/neutron-gateway/next
- Trusty Tahr (14.04)
- sync-ch
- Merge into next
Proposed by
Corey Bryant
Status: | Merged |
---|---|
Merged at revision: | 128 |
Proposed branch: | lp:~corey.bryant/charms/trusty/neutron-gateway/sync-ch |
Merge into: | lp:~openstack-charmers-archive/charms/trusty/neutron-gateway/next |
Diff against target: |
1651 lines (+924/-202) 17 files modified
hooks/charmhelpers/contrib/network/ufw.py (+46/-3) hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+36/-3) hooks/charmhelpers/contrib/openstack/amulet/utils.py (+240/-49) hooks/charmhelpers/contrib/openstack/context.py (+8/-7) hooks/charmhelpers/contrib/openstack/templates/ceph.conf (+6/-6) hooks/charmhelpers/contrib/openstack/utils.py (+9/-5) hooks/charmhelpers/contrib/storage/linux/ceph.py (+6/-6) hooks/charmhelpers/core/hookenv.py (+93/-36) hooks/charmhelpers/core/host.py (+31/-5) hooks/charmhelpers/core/services/base.py (+12/-9) hooks/charmhelpers/core/services/helpers.py (+2/-2) hooks/charmhelpers/fetch/__init__.py (+23/-14) hooks/charmhelpers/fetch/archiveurl.py (+7/-1) hooks/charmhelpers/fetch/giturl.py (+1/-1) tests/charmhelpers/contrib/amulet/utils.py (+128/-3) tests/charmhelpers/contrib/openstack/amulet/deployment.py (+36/-3) tests/charmhelpers/contrib/openstack/amulet/utils.py (+240/-49) |
To merge this branch: | bzr merge lp:~corey.bryant/charms/trusty/neutron-gateway/sync-ch |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
OpenStack Charmers | Pending | ||
Review via email: mp+265049@code.launchpad.net |
Commit message
Description of the change
To post a comment you must log in.
Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote : | # |
Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote : | # |
charm_unit_test #5919 neutron-gateway-next [truncated in page capture — presumably the full target is the neutron-gateway next branch]
UNIT OK: passed
Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote : | # |
charm_amulet_test #5142 neutron-gateway-next [truncated in page capture — presumably the full target is the neutron-gateway next branch]
AMULET OK: passed
Build: http://
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'hooks/charmhelpers/contrib/network/ufw.py' |
2 | --- hooks/charmhelpers/contrib/network/ufw.py 2015-03-23 18:25:01 +0000 |
3 | +++ hooks/charmhelpers/contrib/network/ufw.py 2015-07-16 20:31:59 +0000 |
4 | @@ -180,7 +180,43 @@ |
5 | return True |
6 | |
7 | |
8 | -def modify_access(src, dst='any', port=None, proto=None, action='allow'): |
9 | +def default_policy(policy='deny', direction='incoming'): |
10 | + """ |
11 | + Changes the default policy for traffic `direction` |
12 | + |
13 | + :param policy: allow, deny or reject |
14 | + :param direction: traffic direction, possible values: incoming, outgoing, |
15 | + routed |
16 | + """ |
17 | + if policy not in ['allow', 'deny', 'reject']: |
18 | + raise UFWError(('Unknown policy %s, valid values: ' |
19 | + 'allow, deny, reject') % policy) |
20 | + |
21 | + if direction not in ['incoming', 'outgoing', 'routed']: |
22 | + raise UFWError(('Unknown direction %s, valid values: ' |
23 | + 'incoming, outgoing, routed') % direction) |
24 | + |
25 | + output = subprocess.check_output(['ufw', 'default', policy, direction], |
26 | + universal_newlines=True, |
27 | + env={'LANG': 'en_US', |
28 | + 'PATH': os.environ['PATH']}) |
29 | + hookenv.log(output, level='DEBUG') |
30 | + |
31 | + m = re.findall("^Default %s policy changed to '%s'\n" % (direction, |
32 | + policy), |
33 | + output, re.M) |
34 | + if len(m) == 0: |
35 | + hookenv.log("ufw couldn't change the default policy to %s for %s" |
36 | + % (policy, direction), level='WARN') |
37 | + return False |
38 | + else: |
39 | + hookenv.log("ufw default policy for %s changed to %s" |
40 | + % (direction, policy), level='INFO') |
41 | + return True |
42 | + |
43 | + |
44 | +def modify_access(src, dst='any', port=None, proto=None, action='allow', |
45 | + index=None): |
46 | """ |
47 | Grant access to an address or subnet |
48 | |
49 | @@ -192,6 +228,8 @@ |
50 | :param port: destiny port |
51 | :param proto: protocol (tcp or udp) |
52 | :param action: `allow` or `delete` |
53 | + :param index: if different from None the rule is inserted at the given |
54 | + `index`. |
55 | """ |
56 | if not is_enabled(): |
57 | hookenv.log('ufw is disabled, skipping modify_access()', level='WARN') |
58 | @@ -199,6 +237,8 @@ |
59 | |
60 | if action == 'delete': |
61 | cmd = ['ufw', 'delete', 'allow'] |
62 | + elif index is not None: |
63 | + cmd = ['ufw', 'insert', str(index), action] |
64 | else: |
65 | cmd = ['ufw', action] |
66 | |
67 | @@ -227,7 +267,7 @@ |
68 | level='ERROR') |
69 | |
70 | |
71 | -def grant_access(src, dst='any', port=None, proto=None): |
72 | +def grant_access(src, dst='any', port=None, proto=None, index=None): |
73 | """ |
74 | Grant access to an address or subnet |
75 | |
76 | @@ -238,8 +278,11 @@ |
77 | field has to be set. |
78 | :param port: destiny port |
79 | :param proto: protocol (tcp or udp) |
80 | + :param index: if different from None the rule is inserted at the given |
81 | + `index`. |
82 | """ |
83 | - return modify_access(src, dst=dst, port=port, proto=proto, action='allow') |
84 | + return modify_access(src, dst=dst, port=port, proto=proto, action='allow', |
85 | + index=index) |
86 | |
87 | |
88 | def revoke_access(src, dst='any', port=None, proto=None): |
89 | |
90 | === modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py' |
91 | --- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-06-19 15:09:21 +0000 |
92 | +++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-07-16 20:31:59 +0000 |
93 | @@ -79,9 +79,9 @@ |
94 | services.append(this_service) |
95 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', |
96 | 'ceph-osd', 'ceph-radosgw'] |
97 | - # Openstack subordinate charms do not expose an origin option as that |
98 | - # is controlled by the principle |
99 | - ignore = ['neutron-openvswitch'] |
100 | + # Most OpenStack subordinate charms do not expose an origin option |
101 | + # as that is controlled by the principle. |
102 | + ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch'] |
103 | |
104 | if self.openstack: |
105 | for svc in services: |
106 | @@ -148,3 +148,36 @@ |
107 | return os_origin.split('%s-' % self.series)[1].split('/')[0] |
108 | else: |
109 | return releases[self.series] |
110 | + |
111 | + def get_ceph_expected_pools(self, radosgw=False): |
112 | + """Return a list of expected ceph pools in a ceph + cinder + glance |
113 | + test scenario, based on OpenStack release and whether ceph radosgw |
114 | + is flagged as present or not.""" |
115 | + |
116 | + if self._get_openstack_release() >= self.trusty_kilo: |
117 | + # Kilo or later |
118 | + pools = [ |
119 | + 'rbd', |
120 | + 'cinder', |
121 | + 'glance' |
122 | + ] |
123 | + else: |
124 | + # Juno or earlier |
125 | + pools = [ |
126 | + 'data', |
127 | + 'metadata', |
128 | + 'rbd', |
129 | + 'cinder', |
130 | + 'glance' |
131 | + ] |
132 | + |
133 | + if radosgw: |
134 | + pools.extend([ |
135 | + '.rgw.root', |
136 | + '.rgw.control', |
137 | + '.rgw', |
138 | + '.rgw.gc', |
139 | + '.users.uid' |
140 | + ]) |
141 | + |
142 | + return pools |
143 | |
144 | === modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py' |
145 | --- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-06-19 15:09:21 +0000 |
146 | +++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-07-16 20:31:59 +0000 |
147 | @@ -14,16 +14,20 @@ |
148 | # You should have received a copy of the GNU Lesser General Public License |
149 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
150 | |
151 | +import amulet |
152 | +import json |
153 | import logging |
154 | import os |
155 | import six |
156 | import time |
157 | import urllib |
158 | |
159 | +import cinderclient.v1.client as cinder_client |
160 | import glanceclient.v1.client as glance_client |
161 | import heatclient.v1.client as heat_client |
162 | import keystoneclient.v2_0 as keystone_client |
163 | import novaclient.v1_1.client as nova_client |
164 | +import swiftclient |
165 | |
166 | from charmhelpers.contrib.amulet.utils import ( |
167 | AmuletUtils |
168 | @@ -171,6 +175,16 @@ |
169 | self.log.debug('Checking if tenant exists ({})...'.format(tenant)) |
170 | return tenant in [t.name for t in keystone.tenants.list()] |
171 | |
172 | + def authenticate_cinder_admin(self, keystone_sentry, username, |
173 | + password, tenant): |
174 | + """Authenticates admin user with cinder.""" |
175 | + # NOTE(beisner): cinder python client doesn't accept tokens. |
176 | + service_ip = \ |
177 | + keystone_sentry.relation('shared-db', |
178 | + 'mysql:shared-db')['private-address'] |
179 | + ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8')) |
180 | + return cinder_client.Client(username, password, tenant, ept) |
181 | + |
182 | def authenticate_keystone_admin(self, keystone_sentry, user, password, |
183 | tenant): |
184 | """Authenticates admin user with the keystone admin endpoint.""" |
185 | @@ -212,9 +226,29 @@ |
186 | return nova_client.Client(username=user, api_key=password, |
187 | project_id=tenant, auth_url=ep) |
188 | |
189 | + def authenticate_swift_user(self, keystone, user, password, tenant): |
190 | + """Authenticates a regular user with swift api.""" |
191 | + self.log.debug('Authenticating swift user ({})...'.format(user)) |
192 | + ep = keystone.service_catalog.url_for(service_type='identity', |
193 | + endpoint_type='publicURL') |
194 | + return swiftclient.Connection(authurl=ep, |
195 | + user=user, |
196 | + key=password, |
197 | + tenant_name=tenant, |
198 | + auth_version='2.0') |
199 | + |
200 | def create_cirros_image(self, glance, image_name): |
201 | - """Download the latest cirros image and upload it to glance.""" |
202 | - self.log.debug('Creating glance image ({})...'.format(image_name)) |
203 | + """Download the latest cirros image and upload it to glance, |
204 | + validate and return a resource pointer. |
205 | + |
206 | + :param glance: pointer to authenticated glance connection |
207 | + :param image_name: display name for new image |
208 | + :returns: glance image pointer |
209 | + """ |
210 | + self.log.debug('Creating glance cirros image ' |
211 | + '({})...'.format(image_name)) |
212 | + |
213 | + # Download cirros image |
214 | http_proxy = os.getenv('AMULET_HTTP_PROXY') |
215 | self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) |
216 | if http_proxy: |
217 | @@ -223,33 +257,51 @@ |
218 | else: |
219 | opener = urllib.FancyURLopener() |
220 | |
221 | - f = opener.open("http://download.cirros-cloud.net/version/released") |
222 | + f = opener.open('http://download.cirros-cloud.net/version/released') |
223 | version = f.read().strip() |
224 | - cirros_img = "cirros-{}-x86_64-disk.img".format(version) |
225 | + cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) |
226 | local_path = os.path.join('tests', cirros_img) |
227 | |
228 | if not os.path.exists(local_path): |
229 | - cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", |
230 | + cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', |
231 | version, cirros_img) |
232 | opener.retrieve(cirros_url, local_path) |
233 | f.close() |
234 | |
235 | + # Create glance image |
236 | with open(local_path) as f: |
237 | image = glance.images.create(name=image_name, is_public=True, |
238 | disk_format='qcow2', |
239 | container_format='bare', data=f) |
240 | - count = 1 |
241 | - status = image.status |
242 | - while status != 'active' and count < 10: |
243 | - time.sleep(3) |
244 | - image = glance.images.get(image.id) |
245 | - status = image.status |
246 | - self.log.debug('image status: {}'.format(status)) |
247 | - count += 1 |
248 | - |
249 | - if status != 'active': |
250 | - self.log.error('image creation timed out') |
251 | - return None |
252 | + |
253 | + # Wait for image to reach active status |
254 | + img_id = image.id |
255 | + ret = self.resource_reaches_status(glance.images, img_id, |
256 | + expected_stat='active', |
257 | + msg='Image status wait') |
258 | + if not ret: |
259 | + msg = 'Glance image failed to reach expected state.' |
260 | + amulet.raise_status(amulet.FAIL, msg=msg) |
261 | + |
262 | + # Re-validate new image |
263 | + self.log.debug('Validating image attributes...') |
264 | + val_img_name = glance.images.get(img_id).name |
265 | + val_img_stat = glance.images.get(img_id).status |
266 | + val_img_pub = glance.images.get(img_id).is_public |
267 | + val_img_cfmt = glance.images.get(img_id).container_format |
268 | + val_img_dfmt = glance.images.get(img_id).disk_format |
269 | + msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' |
270 | + 'container fmt:{} disk fmt:{}'.format( |
271 | + val_img_name, val_img_pub, img_id, |
272 | + val_img_stat, val_img_cfmt, val_img_dfmt)) |
273 | + |
274 | + if val_img_name == image_name and val_img_stat == 'active' \ |
275 | + and val_img_pub is True and val_img_cfmt == 'bare' \ |
276 | + and val_img_dfmt == 'qcow2': |
277 | + self.log.debug(msg_attr) |
278 | + else: |
279 | + msg = ('Volume validation failed, {}'.format(msg_attr)) |
280 | + amulet.raise_status(amulet.FAIL, msg=msg) |
281 | |
282 | return image |
283 | |
284 | @@ -260,22 +312,7 @@ |
285 | self.log.warn('/!\\ DEPRECATION WARNING: use ' |
286 | 'delete_resource instead of delete_image.') |
287 | self.log.debug('Deleting glance image ({})...'.format(image)) |
288 | - num_before = len(list(glance.images.list())) |
289 | - glance.images.delete(image) |
290 | - |
291 | - count = 1 |
292 | - num_after = len(list(glance.images.list())) |
293 | - while num_after != (num_before - 1) and count < 10: |
294 | - time.sleep(3) |
295 | - num_after = len(list(glance.images.list())) |
296 | - self.log.debug('number of images: {}'.format(num_after)) |
297 | - count += 1 |
298 | - |
299 | - if num_after != (num_before - 1): |
300 | - self.log.error('image deletion timed out') |
301 | - return False |
302 | - |
303 | - return True |
304 | + return self.delete_resource(glance.images, image, msg='glance image') |
305 | |
306 | def create_instance(self, nova, image_name, instance_name, flavor): |
307 | """Create the specified instance.""" |
308 | @@ -308,22 +345,8 @@ |
309 | self.log.warn('/!\\ DEPRECATION WARNING: use ' |
310 | 'delete_resource instead of delete_instance.') |
311 | self.log.debug('Deleting instance ({})...'.format(instance)) |
312 | - num_before = len(list(nova.servers.list())) |
313 | - nova.servers.delete(instance) |
314 | - |
315 | - count = 1 |
316 | - num_after = len(list(nova.servers.list())) |
317 | - while num_after != (num_before - 1) and count < 10: |
318 | - time.sleep(3) |
319 | - num_after = len(list(nova.servers.list())) |
320 | - self.log.debug('number of instances: {}'.format(num_after)) |
321 | - count += 1 |
322 | - |
323 | - if num_after != (num_before - 1): |
324 | - self.log.error('instance deletion timed out') |
325 | - return False |
326 | - |
327 | - return True |
328 | + return self.delete_resource(nova.servers, instance, |
329 | + msg='nova instance') |
330 | |
331 | def create_or_get_keypair(self, nova, keypair_name="testkey"): |
332 | """Create a new keypair, or return pointer if it already exists.""" |
333 | @@ -339,6 +362,88 @@ |
334 | _keypair = nova.keypairs.create(name=keypair_name) |
335 | return _keypair |
336 | |
337 | + def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, |
338 | + img_id=None, src_vol_id=None, snap_id=None): |
339 | + """Create cinder volume, optionally from a glance image, OR |
340 | + optionally as a clone of an existing volume, OR optionally |
341 | + from a snapshot. Wait for the new volume status to reach |
342 | + the expected status, validate and return a resource pointer. |
343 | + |
344 | + :param vol_name: cinder volume display name |
345 | + :param vol_size: size in gigabytes |
346 | + :param img_id: optional glance image id |
347 | + :param src_vol_id: optional source volume id to clone |
348 | + :param snap_id: optional snapshot id to use |
349 | + :returns: cinder volume pointer |
350 | + """ |
351 | + # Handle parameter input and avoid impossible combinations |
352 | + if img_id and not src_vol_id and not snap_id: |
353 | + # Create volume from image |
354 | + self.log.debug('Creating cinder volume from glance image...') |
355 | + bootable = 'true' |
356 | + elif src_vol_id and not img_id and not snap_id: |
357 | + # Clone an existing volume |
358 | + self.log.debug('Cloning cinder volume...') |
359 | + bootable = cinder.volumes.get(src_vol_id).bootable |
360 | + elif snap_id and not src_vol_id and not img_id: |
361 | + # Create volume from snapshot |
362 | + self.log.debug('Creating cinder volume from snapshot...') |
363 | + snap = cinder.volume_snapshots.find(id=snap_id) |
364 | + vol_size = snap.size |
365 | + snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id |
366 | + bootable = cinder.volumes.get(snap_vol_id).bootable |
367 | + elif not img_id and not src_vol_id and not snap_id: |
368 | + # Create volume |
369 | + self.log.debug('Creating cinder volume...') |
370 | + bootable = 'false' |
371 | + else: |
372 | + # Impossible combination of parameters |
373 | + msg = ('Invalid method use - name:{} size:{} img_id:{} ' |
374 | + 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, |
375 | + img_id, src_vol_id, |
376 | + snap_id)) |
377 | + amulet.raise_status(amulet.FAIL, msg=msg) |
378 | + |
379 | + # Create new volume |
380 | + try: |
381 | + vol_new = cinder.volumes.create(display_name=vol_name, |
382 | + imageRef=img_id, |
383 | + size=vol_size, |
384 | + source_volid=src_vol_id, |
385 | + snapshot_id=snap_id) |
386 | + vol_id = vol_new.id |
387 | + except Exception as e: |
388 | + msg = 'Failed to create volume: {}'.format(e) |
389 | + amulet.raise_status(amulet.FAIL, msg=msg) |
390 | + |
391 | + # Wait for volume to reach available status |
392 | + ret = self.resource_reaches_status(cinder.volumes, vol_id, |
393 | + expected_stat="available", |
394 | + msg="Volume status wait") |
395 | + if not ret: |
396 | + msg = 'Cinder volume failed to reach expected state.' |
397 | + amulet.raise_status(amulet.FAIL, msg=msg) |
398 | + |
399 | + # Re-validate new volume |
400 | + self.log.debug('Validating volume attributes...') |
401 | + val_vol_name = cinder.volumes.get(vol_id).display_name |
402 | + val_vol_boot = cinder.volumes.get(vol_id).bootable |
403 | + val_vol_stat = cinder.volumes.get(vol_id).status |
404 | + val_vol_size = cinder.volumes.get(vol_id).size |
405 | + msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' |
406 | + '{} size:{}'.format(val_vol_name, vol_id, |
407 | + val_vol_stat, val_vol_boot, |
408 | + val_vol_size)) |
409 | + |
410 | + if val_vol_boot == bootable and val_vol_stat == 'available' \ |
411 | + and val_vol_name == vol_name and val_vol_size == vol_size: |
412 | + self.log.debug(msg_attr) |
413 | + else: |
414 | + msg = ('Volume validation failed, {}'.format(msg_attr)) |
415 | + amulet.raise_status(amulet.FAIL, msg=msg) |
416 | + |
417 | + return vol_new |
418 | + |
419 | def delete_resource(self, resource, resource_id, |
420 | msg="resource", max_wait=120): |
421 | """Delete one openstack resource, such as one instance, keypair, |
422 | @@ -350,6 +455,8 @@ |
423 | :param max_wait: maximum wait time in seconds |
424 | :returns: True if successful, otherwise False |
425 | """ |
426 | + self.log.debug('Deleting OpenStack resource ' |
427 | + '{} ({})'.format(resource_id, msg)) |
428 | num_before = len(list(resource.list())) |
429 | resource.delete(resource_id) |
430 | |
431 | @@ -411,3 +518,87 @@ |
432 | self.log.debug('{} never reached expected status: ' |
433 | '{}'.format(resource_id, expected_stat)) |
434 | return False |
435 | + |
436 | + def get_ceph_osd_id_cmd(self, index): |
437 | + """Produce a shell command that will return a ceph-osd id.""" |
438 | + return ("`initctl list | grep 'ceph-osd ' | " |
439 | + "awk 'NR=={} {{ print $2 }}' | " |
440 | + "grep -o '[0-9]*'`".format(index + 1)) |
441 | + |
442 | + def get_ceph_pools(self, sentry_unit): |
443 | + """Return a dict of ceph pools from a single ceph unit, with |
444 | + pool name as keys, pool id as vals.""" |
445 | + pools = {} |
446 | + cmd = 'sudo ceph osd lspools' |
447 | + output, code = sentry_unit.run(cmd) |
448 | + if code != 0: |
449 | + msg = ('{} `{}` returned {} ' |
450 | + '{}'.format(sentry_unit.info['unit_name'], |
451 | + cmd, code, output)) |
452 | + amulet.raise_status(amulet.FAIL, msg=msg) |
453 | + |
454 | + # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, |
455 | + for pool in str(output).split(','): |
456 | + pool_id_name = pool.split(' ') |
457 | + if len(pool_id_name) == 2: |
458 | + pool_id = pool_id_name[0] |
459 | + pool_name = pool_id_name[1] |
460 | + pools[pool_name] = int(pool_id) |
461 | + |
462 | + self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], |
463 | + pools)) |
464 | + return pools |
465 | + |
466 | + def get_ceph_df(self, sentry_unit): |
467 | + """Return dict of ceph df json output, including ceph pool state. |
468 | + |
469 | + :param sentry_unit: Pointer to amulet sentry instance (juju unit) |
470 | + :returns: Dict of ceph df output |
471 | + """ |
472 | + cmd = 'sudo ceph df --format=json' |
473 | + output, code = sentry_unit.run(cmd) |
474 | + if code != 0: |
475 | + msg = ('{} `{}` returned {} ' |
476 | + '{}'.format(sentry_unit.info['unit_name'], |
477 | + cmd, code, output)) |
478 | + amulet.raise_status(amulet.FAIL, msg=msg) |
479 | + return json.loads(output) |
480 | + |
481 | + def get_ceph_pool_sample(self, sentry_unit, pool_id=0): |
482 | + """Take a sample of attributes of a ceph pool, returning ceph |
483 | + pool name, object count and disk space used for the specified |
484 | + pool ID number. |
485 | + |
486 | + :param sentry_unit: Pointer to amulet sentry instance (juju unit) |
487 | + :param pool_id: Ceph pool ID |
488 | + :returns: List of pool name, object count, kb disk space used |
489 | + """ |
490 | + df = self.get_ceph_df(sentry_unit) |
491 | + pool_name = df['pools'][pool_id]['name'] |
492 | + obj_count = df['pools'][pool_id]['stats']['objects'] |
493 | + kb_used = df['pools'][pool_id]['stats']['kb_used'] |
494 | + self.log.debug('Ceph {} pool (ID {}): {} objects, ' |
495 | + '{} kb used'.format(pool_name, pool_id, |
496 | + obj_count, kb_used)) |
497 | + return pool_name, obj_count, kb_used |
498 | + |
499 | + def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): |
500 | + """Validate ceph pool samples taken over time, such as pool |
501 | + object counts or pool kb used, before adding, after adding, and |
502 | + after deleting items which affect those pool attributes. The |
503 | + 2nd element is expected to be greater than the 1st; 3rd is expected |
504 | + to be less than the 2nd. |
505 | + |
506 | + :param samples: List containing 3 data samples |
507 | + :param sample_type: String for logging and usage context |
508 | + :returns: None if successful, Failure message otherwise |
509 | + """ |
510 | + original, created, deleted = range(3) |
511 | + if samples[created] <= samples[original] or \ |
512 | + samples[deleted] >= samples[created]: |
513 | + return ('Ceph {} samples ({}) ' |
514 | + 'unexpected.'.format(sample_type, samples)) |
515 | + else: |
516 | + self.log.debug('Ceph {} samples (OK): ' |
517 | + '{}'.format(sample_type, samples)) |
518 | + return None |
519 | |
520 | === modified file 'hooks/charmhelpers/contrib/openstack/context.py' |
521 | --- hooks/charmhelpers/contrib/openstack/context.py 2015-06-19 15:09:21 +0000 |
522 | +++ hooks/charmhelpers/contrib/openstack/context.py 2015-07-16 20:31:59 +0000 |
523 | @@ -122,21 +122,24 @@ |
524 | of specifying multiple key value pairs within the same string. For |
525 | example, a string in the format of 'key1=value1, key2=value2' will |
526 | return a dict of: |
527 | - {'key1': 'value1', |
528 | - 'key2': 'value2'}. |
529 | + |
530 | + {'key1': 'value1', |
531 | + 'key2': 'value2'}. |
532 | |
533 | 2. A string in the above format, but supporting a comma-delimited list |
534 | of values for the same key. For example, a string in the format of |
535 | 'key1=value1, key2=value3,value4,value5' will return a dict of: |
536 | - {'key1', 'value1', |
537 | - 'key2', 'value2,value3,value4'} |
538 | + |
539 | + {'key1', 'value1', |
540 | + 'key2', 'value2,value3,value4'} |
541 | |
542 | 3. A string containing a colon character (:) prior to an equal |
543 | character (=) will be treated as yaml and parsed as such. This can be |
544 | used to specify more complex key value pairs. For example, |
545 | a string in the format of 'key1: subkey1=value1, subkey2=value2' will |
546 | return a dict of: |
547 | - {'key1', 'subkey1=value1, subkey2=value2'} |
548 | + |
549 | + {'key1', 'subkey1=value1, subkey2=value2'} |
550 | |
551 | The provided config_flags string may be a list of comma-separated values |
552 | which themselves may be comma-separated list of values. |
553 | @@ -891,8 +894,6 @@ |
554 | return ctxt |
555 | |
556 | def __call__(self): |
557 | - self._ensure_packages() |
558 | - |
559 | if self.network_manager not in ['quantum', 'neutron']: |
560 | return {} |
561 | |
562 | |
563 | === modified file 'hooks/charmhelpers/contrib/openstack/templates/ceph.conf' |
564 | --- hooks/charmhelpers/contrib/openstack/templates/ceph.conf 2015-02-25 23:34:09 +0000 |
565 | +++ hooks/charmhelpers/contrib/openstack/templates/ceph.conf 2015-07-16 20:31:59 +0000 |
566 | @@ -5,11 +5,11 @@ |
567 | ############################################################################### |
568 | [global] |
569 | {% if auth -%} |
570 | - auth_supported = {{ auth }} |
571 | - keyring = /etc/ceph/$cluster.$name.keyring |
572 | - mon host = {{ mon_hosts }} |
573 | +auth_supported = {{ auth }} |
574 | +keyring = /etc/ceph/$cluster.$name.keyring |
575 | +mon host = {{ mon_hosts }} |
576 | {% endif -%} |
577 | - log to syslog = {{ use_syslog }} |
578 | - err to syslog = {{ use_syslog }} |
579 | - clog to syslog = {{ use_syslog }} |
580 | +log to syslog = {{ use_syslog }} |
581 | +err to syslog = {{ use_syslog }} |
582 | +clog to syslog = {{ use_syslog }} |
583 | |
584 | |
585 | === modified file 'hooks/charmhelpers/contrib/openstack/utils.py' |
586 | --- hooks/charmhelpers/contrib/openstack/utils.py 2015-06-19 15:09:21 +0000 |
587 | +++ hooks/charmhelpers/contrib/openstack/utils.py 2015-07-16 20:31:59 +0000 |
588 | @@ -522,6 +522,7 @@ |
589 | Clone/install all specified OpenStack repositories. |
590 | |
591 | The expected format of projects_yaml is: |
592 | + |
593 | repositories: |
594 | - {name: keystone, |
595 | repository: 'git://git.openstack.org/openstack/keystone.git', |
596 | @@ -529,11 +530,13 @@ |
597 | - {name: requirements, |
598 | repository: 'git://git.openstack.org/openstack/requirements.git', |
599 | branch: 'stable/icehouse'} |
600 | + |
601 | directory: /mnt/openstack-git |
602 | http_proxy: squid-proxy-url |
603 | https_proxy: squid-proxy-url |
604 | |
605 | - The directory, http_proxy, and https_proxy keys are optional. |
606 | + The directory, http_proxy, and https_proxy keys are optional. |
607 | + |
608 | """ |
609 | global requirements_dir |
610 | parent_dir = '/mnt/openstack-git' |
611 | @@ -555,10 +558,11 @@ |
612 | |
613 | pip_create_virtualenv(os.path.join(parent_dir, 'venv')) |
614 | |
615 | - # Upgrade setuptools from default virtualenv version. The default version |
616 | - # in trusty breaks update.py in global requirements master branch. |
617 | - pip_install('setuptools', upgrade=True, proxy=http_proxy, |
618 | - venv=os.path.join(parent_dir, 'venv')) |
619 | + # Upgrade setuptools and pip from default virtualenv versions. The default |
620 | + # versions in trusty break master OpenStack branch deployments. |
621 | + for p in ['pip', 'setuptools']: |
622 | + pip_install(p, upgrade=True, proxy=http_proxy, |
623 | + venv=os.path.join(parent_dir, 'venv')) |
624 | |
625 | for p in projects['repositories']: |
626 | repo = p['repository'] |
627 | |
628 | === modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py' |
629 | --- hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-03-31 15:13:53 +0000 |
630 | +++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-07-16 20:31:59 +0000 |
631 | @@ -60,12 +60,12 @@ |
632 | KEYFILE = '/etc/ceph/ceph.client.{}.key' |
633 | |
634 | CEPH_CONF = """[global] |
635 | - auth supported = {auth} |
636 | - keyring = {keyring} |
637 | - mon host = {mon_hosts} |
638 | - log to syslog = {use_syslog} |
639 | - err to syslog = {use_syslog} |
640 | - clog to syslog = {use_syslog} |
641 | +auth supported = {auth} |
642 | +keyring = {keyring} |
643 | +mon host = {mon_hosts} |
644 | +log to syslog = {use_syslog} |
645 | +err to syslog = {use_syslog} |
646 | +clog to syslog = {use_syslog} |
647 | """ |
648 | |
649 | |
650 | |
651 | === modified file 'hooks/charmhelpers/core/hookenv.py' |
652 | --- hooks/charmhelpers/core/hookenv.py 2015-06-19 15:09:21 +0000 |
653 | +++ hooks/charmhelpers/core/hookenv.py 2015-07-16 20:31:59 +0000 |
654 | @@ -21,7 +21,9 @@ |
655 | # Charm Helpers Developers <juju@lists.ubuntu.com> |
656 | |
657 | from __future__ import print_function |
658 | +from distutils.version import LooseVersion |
659 | from functools import wraps |
660 | +import glob |
661 | import os |
662 | import json |
663 | import yaml |
664 | @@ -242,29 +244,7 @@ |
665 | self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) |
666 | if os.path.exists(self.path): |
667 | self.load_previous() |
668 | - |
669 | - def __getitem__(self, key): |
670 | - """For regular dict lookups, check the current juju config first, |
671 | - then the previous (saved) copy. This ensures that user-saved values |
672 | - will be returned by a dict lookup. |
673 | - |
674 | - """ |
675 | - try: |
676 | - return dict.__getitem__(self, key) |
677 | - except KeyError: |
678 | - return (self._prev_dict or {})[key] |
679 | - |
680 | - def get(self, key, default=None): |
681 | - try: |
682 | - return self[key] |
683 | - except KeyError: |
684 | - return default |
685 | - |
686 | - def keys(self): |
687 | - prev_keys = [] |
688 | - if self._prev_dict is not None: |
689 | - prev_keys = self._prev_dict.keys() |
690 | - return list(set(prev_keys + list(dict.keys(self)))) |
691 | + atexit(self._implicit_save) |
692 | |
693 | def load_previous(self, path=None): |
694 | """Load previous copy of config from disk. |
695 | @@ -283,6 +263,9 @@ |
696 | self.path = path or self.path |
697 | with open(self.path) as f: |
698 | self._prev_dict = json.load(f) |
699 | + for k, v in self._prev_dict.items(): |
700 | + if k not in self: |
701 | + self[k] = v |
702 | |
703 | def changed(self, key): |
704 | """Return True if the current value for this key is different from |
705 | @@ -314,13 +297,13 @@ |
706 | instance. |
707 | |
708 | """ |
709 | - if self._prev_dict: |
710 | - for k, v in six.iteritems(self._prev_dict): |
711 | - if k not in self: |
712 | - self[k] = v |
713 | with open(self.path, 'w') as f: |
714 | json.dump(self, f) |
715 | |
716 | + def _implicit_save(self): |
717 | + if self.implicit_save: |
718 | + self.save() |
719 | + |
720 | |
721 | @cached |
722 | def config(scope=None): |
723 | @@ -587,10 +570,14 @@ |
724 | hooks.execute(sys.argv) |
725 | """ |
726 | |
727 | - def __init__(self, config_save=True): |
728 | + def __init__(self, config_save=None): |
729 | super(Hooks, self).__init__() |
730 | self._hooks = {} |
731 | - self._config_save = config_save |
732 | + |
733 | + # For unknown reasons, we allow the Hooks constructor to override |
734 | + # config().implicit_save. |
735 | + if config_save is not None: |
736 | + config().implicit_save = config_save |
737 | |
738 | def register(self, name, function): |
739 | """Register a hook""" |
740 | @@ -598,13 +585,16 @@ |
741 | |
742 | def execute(self, args): |
743 | """Execute a registered hook based on args[0]""" |
744 | + _run_atstart() |
745 | hook_name = os.path.basename(args[0]) |
746 | if hook_name in self._hooks: |
747 | - self._hooks[hook_name]() |
748 | - if self._config_save: |
749 | - cfg = config() |
750 | - if cfg.implicit_save: |
751 | - cfg.save() |
752 | + try: |
753 | + self._hooks[hook_name]() |
754 | + except SystemExit as x: |
755 | + if x.code is None or x.code == 0: |
756 | + _run_atexit() |
757 | + raise |
758 | + _run_atexit() |
759 | else: |
760 | raise UnregisteredHookError(hook_name) |
761 | |
762 | @@ -732,13 +722,80 @@ |
763 | @translate_exc(from_exc=OSError, to_exc=NotImplementedError) |
764 | def leader_set(settings=None, **kwargs): |
765 | """Juju leader set value(s)""" |
766 | - log("Juju leader-set '%s'" % (settings), level=DEBUG) |
767 | + # Don't log secrets. |
768 | + # log("Juju leader-set '%s'" % (settings), level=DEBUG) |
769 | cmd = ['leader-set'] |
770 | settings = settings or {} |
771 | settings.update(kwargs) |
772 | - for k, v in settings.iteritems(): |
773 | + for k, v in settings.items(): |
774 | if v is None: |
775 | cmd.append('{}='.format(k)) |
776 | else: |
777 | cmd.append('{}={}'.format(k, v)) |
778 | subprocess.check_call(cmd) |
779 | + |
780 | + |
781 | +@cached |
782 | +def juju_version(): |
783 | + """Full version string (eg. '1.23.3.1-trusty-amd64')""" |
784 | + # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1 |
785 | + jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0] |
786 | + return subprocess.check_output([jujud, 'version'], |
787 | + universal_newlines=True).strip() |
788 | + |
789 | + |
790 | +@cached |
791 | +def has_juju_version(minimum_version): |
792 | + """Return True if the Juju version is at least the provided version""" |
793 | + return LooseVersion(juju_version()) >= LooseVersion(minimum_version) |
794 | + |
795 | + |
796 | +_atexit = [] |
797 | +_atstart = [] |
798 | + |
799 | + |
800 | +def atstart(callback, *args, **kwargs): |
801 | + '''Schedule a callback to run before the main hook. |
802 | + |
803 | + Callbacks are run in the order they were added. |
804 | + |
805 | + This is useful for modules and classes to perform initialization |
806 | + and inject behavior. In particular: |
807 | + |
808 | + - Run common code before all of your hooks, such as logging |
809 | + the hook name or interesting relation data. |
810 | + - Defer object or module initialization that requires a hook |
811 | + context until we know there actually is a hook context, |
812 | + making testing easier. |
813 | + - Rather than requiring charm authors to include boilerplate to |
814 | + invoke your helper's behavior, have it run automatically if |
815 | + your object is instantiated or module imported. |
816 | + |
817 | + This is not at all useful after your hook framework has been launched. 
818 | + ''' |
819 | + global _atstart |
820 | + _atstart.append((callback, args, kwargs)) |
821 | + |
822 | + |
823 | +def atexit(callback, *args, **kwargs): |
824 | + '''Schedule a callback to run on successful hook completion. |
825 | + |
826 | + Callbacks are run in the reverse order that they were added.''' |
827 | + _atexit.append((callback, args, kwargs)) |
828 | + |
829 | + |
830 | +def _run_atstart(): |
831 | + '''Hook frameworks must invoke this before running the main hook body.''' |
832 | + global _atstart |
833 | + for callback, args, kwargs in _atstart: |
834 | + callback(*args, **kwargs) |
835 | + del _atstart[:] |
836 | + |
837 | + |
838 | +def _run_atexit(): |
839 | + '''Hook frameworks must invoke this after the main hook body has |
840 | + successfully completed. Do not invoke it if the hook fails.''' |
841 | + global _atexit |
842 | + for callback, args, kwargs in reversed(_atexit): |
843 | + callback(*args, **kwargs) |
844 | + del _atexit[:] |
845 | |
846 | === modified file 'hooks/charmhelpers/core/host.py' |
847 | --- hooks/charmhelpers/core/host.py 2015-06-19 15:09:21 +0000 |
848 | +++ hooks/charmhelpers/core/host.py 2015-07-16 20:31:59 +0000 |
849 | @@ -63,6 +63,36 @@ |
850 | return service_result |
851 | |
852 | |
853 | +def service_pause(service_name, init_dir=None): |
854 | + """Pause a system service. |
855 | + |
856 | + Stop it, and prevent it from starting again at boot.""" |
857 | + if init_dir is None: |
858 | + init_dir = "/etc/init" |
859 | + stopped = service_stop(service_name) |
860 | + # XXX: Support systemd too |
861 | + override_path = os.path.join( |
862 | + init_dir, '{}.conf.override'.format(service_name)) |
863 | + with open(override_path, 'w') as fh: |
864 | + fh.write("manual\n") |
865 | + return stopped |
866 | + |
867 | + |
868 | +def service_resume(service_name, init_dir=None): |
869 | + """Resume a system service. |
870 | + |
871 | + Reenable starting again at boot. Start the service""" |
872 | + # XXX: Support systemd too |
873 | + if init_dir is None: |
874 | + init_dir = "/etc/init" |
875 | + override_path = os.path.join( |
876 | + init_dir, '{}.conf.override'.format(service_name)) |
877 | + if os.path.exists(override_path): |
878 | + os.unlink(override_path) |
879 | + started = service_start(service_name) |
880 | + return started |
881 | + |
882 | + |
883 | def service(action, service_name): |
884 | """Control a system service""" |
885 | cmd = ['service', service_name, action] |
886 | @@ -140,11 +170,7 @@ |
887 | |
888 | def add_user_to_group(username, group): |
889 | """Add a user to a group""" |
890 | - cmd = [ |
891 | - 'gpasswd', '-a', |
892 | - username, |
893 | - group |
894 | - ] |
895 | + cmd = ['gpasswd', '-a', username, group] |
896 | log("Adding user {} to group {}".format(username, group)) |
897 | subprocess.check_call(cmd) |
898 | |
899 | |
900 | === modified file 'hooks/charmhelpers/core/services/base.py' |
901 | --- hooks/charmhelpers/core/services/base.py 2015-06-19 15:09:21 +0000 |
902 | +++ hooks/charmhelpers/core/services/base.py 2015-07-16 20:31:59 +0000 |
903 | @@ -128,15 +128,18 @@ |
904 | """ |
905 | Handle the current hook by doing The Right Thing with the registered services. |
906 | """ |
907 | - hook_name = hookenv.hook_name() |
908 | - if hook_name == 'stop': |
909 | - self.stop_services() |
910 | - else: |
911 | - self.reconfigure_services() |
912 | - self.provide_data() |
913 | - cfg = hookenv.config() |
914 | - if cfg.implicit_save: |
915 | - cfg.save() |
916 | + hookenv._run_atstart() |
917 | + try: |
918 | + hook_name = hookenv.hook_name() |
919 | + if hook_name == 'stop': |
920 | + self.stop_services() |
921 | + else: |
922 | + self.reconfigure_services() |
923 | + self.provide_data() |
924 | + except SystemExit as x: |
925 | + if x.code is None or x.code == 0: |
926 | + hookenv._run_atexit() |
927 | + hookenv._run_atexit() |
928 | |
929 | def provide_data(self): |
930 | """ |
931 | |
932 | === modified file 'hooks/charmhelpers/core/services/helpers.py' |
933 | --- hooks/charmhelpers/core/services/helpers.py 2015-03-31 15:13:53 +0000 |
934 | +++ hooks/charmhelpers/core/services/helpers.py 2015-07-16 20:31:59 +0000 |
935 | @@ -239,12 +239,12 @@ |
936 | action. |
937 | |
938 | :param str source: The template source file, relative to |
939 | - `$CHARM_DIR/templates` |
940 | - |
941 | + `$CHARM_DIR/templates` |
942 | :param str target: The target to write the rendered template to |
943 | :param str owner: The owner of the rendered file |
944 | :param str group: The group of the rendered file |
945 | :param int perms: The permissions of the rendered file |
946 | + |
947 | """ |
948 | def __init__(self, source, target, |
949 | owner='root', group='root', perms=0o444): |
950 | |
951 | === modified file 'hooks/charmhelpers/fetch/__init__.py' |
952 | --- hooks/charmhelpers/fetch/__init__.py 2015-06-10 14:09:04 +0000 |
953 | +++ hooks/charmhelpers/fetch/__init__.py 2015-07-16 20:31:59 +0000 |
954 | @@ -215,19 +215,27 @@ |
955 | _run_apt_command(cmd, fatal) |
956 | |
957 | |
958 | +def apt_mark(packages, mark, fatal=False): |
959 | + """Flag one or more packages using apt-mark""" |
960 | + cmd = ['apt-mark', mark] |
961 | + if isinstance(packages, six.string_types): |
962 | + cmd.append(packages) |
963 | + else: |
964 | + cmd.extend(packages) |
965 | + log("Holding {}".format(packages)) |
966 | + |
967 | + if fatal: |
968 | + subprocess.check_call(cmd, universal_newlines=True) |
969 | + else: |
970 | + subprocess.call(cmd, universal_newlines=True) |
971 | + |
972 | + |
973 | def apt_hold(packages, fatal=False): |
974 | - """Hold one or more packages""" |
975 | - cmd = ['apt-mark', 'hold'] |
976 | - if isinstance(packages, six.string_types): |
977 | - cmd.append(packages) |
978 | - else: |
979 | - cmd.extend(packages) |
980 | - log("Holding {}".format(packages)) |
981 | - |
982 | - if fatal: |
983 | - subprocess.check_call(cmd) |
984 | - else: |
985 | - subprocess.call(cmd) |
986 | + return apt_mark(packages, 'hold', fatal=fatal) |
987 | + |
988 | + |
989 | +def apt_unhold(packages, fatal=False): |
990 | + return apt_mark(packages, 'unhold', fatal=fatal) |
991 | |
992 | |
993 | def add_source(source, key=None): |
994 | @@ -370,8 +378,9 @@ |
995 | for handler in handlers: |
996 | try: |
997 | installed_to = handler.install(source, *args, **kwargs) |
998 | - except UnhandledSource: |
999 | - pass |
1000 | + except UnhandledSource as e: |
1001 | + log('Install source attempt unsuccessful: {}'.format(e), |
1002 | + level='WARNING') |
1003 | if not installed_to: |
1004 | raise UnhandledSource("No handler found for source {}".format(source)) |
1005 | return installed_to |
1006 | |
1007 | === modified file 'hooks/charmhelpers/fetch/archiveurl.py' |
1008 | --- hooks/charmhelpers/fetch/archiveurl.py 2015-03-31 15:13:53 +0000 |
1009 | +++ hooks/charmhelpers/fetch/archiveurl.py 2015-07-16 20:31:59 +0000 |
1010 | @@ -77,6 +77,8 @@ |
1011 | def can_handle(self, source): |
1012 | url_parts = self.parse_url(source) |
1013 | if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): |
1014 | + # XXX: Why is this returning a boolean and a string? It's |
1015 | + # doomed to fail since "bool(can_handle('foo://'))" will be True. |
1016 | return "Wrong source type" |
1017 | if get_archive_handler(self.base_url(source)): |
1018 | return True |
1019 | @@ -155,7 +157,11 @@ |
1020 | else: |
1021 | algorithms = hashlib.algorithms_available |
1022 | if key in algorithms: |
1023 | - check_hash(dld_file, value, key) |
1024 | + if len(value) != 1: |
1025 | + raise TypeError( |
1026 | + "Expected 1 hash value, not %d" % len(value)) |
1027 | + expected = value[0] |
1028 | + check_hash(dld_file, expected, key) |
1029 | if checksum: |
1030 | check_hash(dld_file, checksum, hash_type) |
1031 | return extract(dld_file, dest) |
1032 | |
1033 | === modified file 'hooks/charmhelpers/fetch/giturl.py' |
1034 | --- hooks/charmhelpers/fetch/giturl.py 2015-05-27 13:02:35 +0000 |
1035 | +++ hooks/charmhelpers/fetch/giturl.py 2015-07-16 20:31:59 +0000 |
1036 | @@ -67,7 +67,7 @@ |
1037 | try: |
1038 | self.clone(source, dest_dir, branch, depth) |
1039 | except GitCommandError as e: |
1040 | - raise UnhandledSource(e.message) |
1041 | + raise UnhandledSource(e) |
1042 | except OSError as e: |
1043 | raise UnhandledSource(e.strerror) |
1044 | return dest_dir |
1045 | |
1046 | === modified file 'tests/charmhelpers/contrib/amulet/utils.py' |
1047 | --- tests/charmhelpers/contrib/amulet/utils.py 2015-06-19 15:09:21 +0000 |
1048 | +++ tests/charmhelpers/contrib/amulet/utils.py 2015-07-16 20:31:59 +0000 |
1049 | @@ -14,6 +14,7 @@ |
1050 | # You should have received a copy of the GNU Lesser General Public License |
1051 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
1052 | |
1053 | +import amulet |
1054 | import ConfigParser |
1055 | import distro_info |
1056 | import io |
1057 | @@ -173,6 +174,11 @@ |
1058 | |
1059 | Verify that the specified section of the config file contains |
1060 | the expected option key:value pairs. |
1061 | + |
1062 | + Compare expected dictionary data vs actual dictionary data. |
1063 | + The values in the 'expected' dictionary can be strings, bools, ints, |
1064 | + longs, or can be a function that evaluates a variable and returns a |
1065 | + bool. |
1066 | """ |
1067 | self.log.debug('Validating config file data ({} in {} on {})' |
1068 | '...'.format(section, config_file, |
1069 | @@ -185,9 +191,20 @@ |
1070 | for k in expected.keys(): |
1071 | if not config.has_option(section, k): |
1072 | return "section [{}] is missing option {}".format(section, k) |
1073 | - if config.get(section, k) != expected[k]: |
1074 | + |
1075 | + actual = config.get(section, k) |
1076 | + v = expected[k] |
1077 | + if (isinstance(v, six.string_types) or |
1078 | + isinstance(v, bool) or |
1079 | + isinstance(v, six.integer_types)): |
1080 | + # handle explicit values |
1081 | + if actual != v: |
1082 | + return "section [{}] {}:{} != expected {}:{}".format( |
1083 | + section, k, actual, k, expected[k]) |
1084 | + # handle function pointers, such as not_null or valid_ip |
1085 | + elif not v(actual): |
1086 | return "section [{}] {}:{} != expected {}:{}".format( |
1087 | - section, k, config.get(section, k), k, expected[k]) |
1088 | + section, k, actual, k, expected[k]) |
1089 | return None |
1090 | |
1091 | def _validate_dict_data(self, expected, actual): |
1092 | @@ -195,7 +212,7 @@ |
1093 | |
1094 | Compare expected dictionary data vs actual dictionary data. |
1095 | The values in the 'expected' dictionary can be strings, bools, ints, |
1096 | - longs, or can be a function that evaluate a variable and returns a |
1097 | + longs, or can be a function that evaluates a variable and returns a |
1098 | bool. |
1099 | """ |
1100 | self.log.debug('actual: {}'.format(repr(actual))) |
1101 | @@ -206,8 +223,10 @@ |
1102 | if (isinstance(v, six.string_types) or |
1103 | isinstance(v, bool) or |
1104 | isinstance(v, six.integer_types)): |
1105 | + # handle explicit values |
1106 | if v != actual[k]: |
1107 | return "{}:{}".format(k, actual[k]) |
1108 | + # handle function pointers, such as not_null or valid_ip |
1109 | elif not v(actual[k]): |
1110 | return "{}:{}".format(k, actual[k]) |
1111 | else: |
1112 | @@ -406,3 +425,109 @@ |
1113 | """Convert a relative file path to a file URL.""" |
1114 | _abs_path = os.path.abspath(file_rel_path) |
1115 | return urlparse.urlparse(_abs_path, scheme='file').geturl() |
1116 | + |
1117 | + def check_commands_on_units(self, commands, sentry_units): |
1118 | + """Check that all commands in a list exit zero on all |
1119 | + sentry units in a list. |
1120 | + |
1121 | + :param commands: list of bash commands |
1122 | + :param sentry_units: list of sentry unit pointers |
1123 | + :returns: None if successful; Failure message otherwise |
1124 | + """ |
1125 | + self.log.debug('Checking exit codes for {} commands on {} ' |
1126 | + 'sentry units...'.format(len(commands), |
1127 | + len(sentry_units))) |
1128 | + for sentry_unit in sentry_units: |
1129 | + for cmd in commands: |
1130 | + output, code = sentry_unit.run(cmd) |
1131 | + if code == 0: |
1132 | + self.log.debug('{} `{}` returned {} ' |
1133 | + '(OK)'.format(sentry_unit.info['unit_name'], |
1134 | + cmd, code)) |
1135 | + else: |
1136 | + return ('{} `{}` returned {} ' |
1137 | + '{}'.format(sentry_unit.info['unit_name'], |
1138 | + cmd, code, output)) |
1139 | + return None |
1140 | + |
1141 | + def get_process_id_list(self, sentry_unit, process_name): |
1142 | + """Get a list of process ID(s) from a single sentry juju unit |
1143 | + for a single process name. |
1144 | + |
1145 | + :param sentry_unit: Pointer to amulet sentry instance (juju unit) |
1146 | + :param process_name: Process name |
1147 | + :returns: List of process IDs |
1148 | + """ |
1149 | + cmd = 'pidof {}'.format(process_name) |
1150 | + output, code = sentry_unit.run(cmd) |
1151 | + if code != 0: |
1152 | + msg = ('{} `{}` returned {} ' |
1153 | + '{}'.format(sentry_unit.info['unit_name'], |
1154 | + cmd, code, output)) |
1155 | + amulet.raise_status(amulet.FAIL, msg=msg) |
1156 | + return str(output).split() |
1157 | + |
1158 | + def get_unit_process_ids(self, unit_processes): |
1159 | + """Construct a dict containing unit sentries, process names, and |
1160 | + process IDs.""" |
1161 | + pid_dict = {} |
1162 | + for sentry_unit, process_list in unit_processes.iteritems(): |
1163 | + pid_dict[sentry_unit] = {} |
1164 | + for process in process_list: |
1165 | + pids = self.get_process_id_list(sentry_unit, process) |
1166 | + pid_dict[sentry_unit].update({process: pids}) |
1167 | + return pid_dict |
1168 | + |
1169 | + def validate_unit_process_ids(self, expected, actual): |
1170 | + """Validate process id quantities for services on units.""" |
1171 | + self.log.debug('Checking units for running processes...') |
1172 | + self.log.debug('Expected PIDs: {}'.format(expected)) |
1173 | + self.log.debug('Actual PIDs: {}'.format(actual)) |
1174 | + |
1175 | + if len(actual) != len(expected): |
1176 | + return ('Unit count mismatch. expected, actual: {}, ' |
1177 | + '{} '.format(len(expected), len(actual))) |
1178 | + |
1179 | + for (e_sentry, e_proc_names) in expected.iteritems(): |
1180 | + e_sentry_name = e_sentry.info['unit_name'] |
1181 | + if e_sentry in actual.keys(): |
1182 | + a_proc_names = actual[e_sentry] |
1183 | + else: |
1184 | + return ('Expected sentry ({}) not found in actual dict data.' |
1185 | + '{}'.format(e_sentry_name, e_sentry)) |
1186 | + |
1187 | + if len(e_proc_names.keys()) != len(a_proc_names.keys()): |
1188 | + return ('Process name count mismatch. expected, actual: {}, ' |
1189 | + '{}'.format(len(expected), len(actual))) |
1190 | + |
1191 | + for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \ |
1192 | + zip(e_proc_names.items(), a_proc_names.items()): |
1193 | + if e_proc_name != a_proc_name: |
1194 | + return ('Process name mismatch. expected, actual: {}, ' |
1195 | + '{}'.format(e_proc_name, a_proc_name)) |
1196 | + |
1197 | + a_pids_length = len(a_pids) |
1198 | + if e_pids_length != a_pids_length: |
1199 | + return ('PID count mismatch. {} ({}) expected, actual: ' |
1200 | + '{}, {} ({})'.format(e_sentry_name, e_proc_name, |
1201 | + e_pids_length, a_pids_length, |
1202 | + a_pids)) |
1203 | + else: |
1204 | + self.log.debug('PID check OK: {} {} {}: ' |
1205 | + '{}'.format(e_sentry_name, e_proc_name, |
1206 | + e_pids_length, a_pids)) |
1207 | + return None |
1208 | + |
1209 | + def validate_list_of_identical_dicts(self, list_of_dicts): |
1210 | + """Check that all dicts within a list are identical.""" |
1211 | + hashes = [] |
1212 | + for _dict in list_of_dicts: |
1213 | + hashes.append(hash(frozenset(_dict.items()))) |
1214 | + |
1215 | + self.log.debug('Hashes: {}'.format(hashes)) |
1216 | + if len(set(hashes)) == 1: |
1217 | + self.log.debug('Dicts within list are identical') |
1218 | + else: |
1219 | + return 'Dicts within list are not identical' |
1220 | + |
1221 | + return None |
1222 | |
1223 | === modified file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py' |
1224 | --- tests/charmhelpers/contrib/openstack/amulet/deployment.py 2015-06-19 15:09:21 +0000 |
1225 | +++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2015-07-16 20:31:59 +0000 |
1226 | @@ -79,9 +79,9 @@ |
1227 | services.append(this_service) |
1228 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', |
1229 | 'ceph-osd', 'ceph-radosgw'] |
1230 | - # Openstack subordinate charms do not expose an origin option as that |
1231 | - # is controlled by the principle |
1232 | - ignore = ['neutron-openvswitch'] |
1233 | + # Most OpenStack subordinate charms do not expose an origin option |
1234 | + # as that is controlled by the principal. 
1235 | + ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch'] |
1236 | |
1237 | if self.openstack: |
1238 | for svc in services: |
1239 | @@ -148,3 +148,36 @@ |
1240 | return os_origin.split('%s-' % self.series)[1].split('/')[0] |
1241 | else: |
1242 | return releases[self.series] |
1243 | + |
1244 | + def get_ceph_expected_pools(self, radosgw=False): |
1245 | + """Return a list of expected ceph pools in a ceph + cinder + glance |
1246 | + test scenario, based on OpenStack release and whether ceph radosgw |
1247 | + is flagged as present or not.""" |
1248 | + |
1249 | + if self._get_openstack_release() >= self.trusty_kilo: |
1250 | + # Kilo or later |
1251 | + pools = [ |
1252 | + 'rbd', |
1253 | + 'cinder', |
1254 | + 'glance' |
1255 | + ] |
1256 | + else: |
1257 | + # Juno or earlier |
1258 | + pools = [ |
1259 | + 'data', |
1260 | + 'metadata', |
1261 | + 'rbd', |
1262 | + 'cinder', |
1263 | + 'glance' |
1264 | + ] |
1265 | + |
1266 | + if radosgw: |
1267 | + pools.extend([ |
1268 | + '.rgw.root', |
1269 | + '.rgw.control', |
1270 | + '.rgw', |
1271 | + '.rgw.gc', |
1272 | + '.users.uid' |
1273 | + ]) |
1274 | + |
1275 | + return pools |
1276 | |
1277 | === modified file 'tests/charmhelpers/contrib/openstack/amulet/utils.py' |
1278 | --- tests/charmhelpers/contrib/openstack/amulet/utils.py 2015-06-19 15:09:21 +0000 |
1279 | +++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2015-07-16 20:31:59 +0000 |
1280 | @@ -14,16 +14,20 @@ |
1281 | # You should have received a copy of the GNU Lesser General Public License |
1282 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
1283 | |
1284 | +import amulet |
1285 | +import json |
1286 | import logging |
1287 | import os |
1288 | import six |
1289 | import time |
1290 | import urllib |
1291 | |
1292 | +import cinderclient.v1.client as cinder_client |
1293 | import glanceclient.v1.client as glance_client |
1294 | import heatclient.v1.client as heat_client |
1295 | import keystoneclient.v2_0 as keystone_client |
1296 | import novaclient.v1_1.client as nova_client |
1297 | +import swiftclient |
1298 | |
1299 | from charmhelpers.contrib.amulet.utils import ( |
1300 | AmuletUtils |
1301 | @@ -171,6 +175,16 @@ |
1302 | self.log.debug('Checking if tenant exists ({})...'.format(tenant)) |
1303 | return tenant in [t.name for t in keystone.tenants.list()] |
1304 | |
1305 | + def authenticate_cinder_admin(self, keystone_sentry, username, |
1306 | + password, tenant): |
1307 | + """Authenticates admin user with cinder.""" |
1308 | + # NOTE(beisner): cinder python client doesn't accept tokens. |
1309 | + service_ip = \ |
1310 | + keystone_sentry.relation('shared-db', |
1311 | + 'mysql:shared-db')['private-address'] |
1312 | + ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8')) |
1313 | + return cinder_client.Client(username, password, tenant, ept) |
1314 | + |
1315 | def authenticate_keystone_admin(self, keystone_sentry, user, password, |
1316 | tenant): |
1317 | """Authenticates admin user with the keystone admin endpoint.""" |
1318 | @@ -212,9 +226,29 @@ |
1319 | return nova_client.Client(username=user, api_key=password, |
1320 | project_id=tenant, auth_url=ep) |
1321 | |
1322 | + def authenticate_swift_user(self, keystone, user, password, tenant): |
1323 | + """Authenticates a regular user with swift api.""" |
1324 | + self.log.debug('Authenticating swift user ({})...'.format(user)) |
1325 | + ep = keystone.service_catalog.url_for(service_type='identity', |
1326 | + endpoint_type='publicURL') |
1327 | + return swiftclient.Connection(authurl=ep, |
1328 | + user=user, |
1329 | + key=password, |
1330 | + tenant_name=tenant, |
1331 | + auth_version='2.0') |
1332 | + |
1333 | def create_cirros_image(self, glance, image_name): |
1334 | - """Download the latest cirros image and upload it to glance.""" |
1335 | - self.log.debug('Creating glance image ({})...'.format(image_name)) |
1336 | + """Download the latest cirros image and upload it to glance, |
1337 | + validate and return a resource pointer. |
1338 | + |
1339 | + :param glance: pointer to authenticated glance connection |
1340 | + :param image_name: display name for new image |
1341 | + :returns: glance image pointer |
1342 | + """ |
1343 | + self.log.debug('Creating glance cirros image ' |
1344 | + '({})...'.format(image_name)) |
1345 | + |
1346 | + # Download cirros image |
1347 | http_proxy = os.getenv('AMULET_HTTP_PROXY') |
1348 | self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) |
1349 | if http_proxy: |
1350 | @@ -223,33 +257,51 @@ |
1351 | else: |
1352 | opener = urllib.FancyURLopener() |
1353 | |
1354 | - f = opener.open("http://download.cirros-cloud.net/version/released") |
1355 | + f = opener.open('http://download.cirros-cloud.net/version/released') |
1356 | version = f.read().strip() |
1357 | - cirros_img = "cirros-{}-x86_64-disk.img".format(version) |
1358 | + cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) |
1359 | local_path = os.path.join('tests', cirros_img) |
1360 | |
1361 | if not os.path.exists(local_path): |
1362 | - cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", |
1363 | + cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', |
1364 | version, cirros_img) |
1365 | opener.retrieve(cirros_url, local_path) |
1366 | f.close() |
1367 | |
1368 | + # Create glance image |
1369 | with open(local_path) as f: |
1370 | image = glance.images.create(name=image_name, is_public=True, |
1371 | disk_format='qcow2', |
1372 | container_format='bare', data=f) |
1373 | - count = 1 |
1374 | - status = image.status |
1375 | - while status != 'active' and count < 10: |
1376 | - time.sleep(3) |
1377 | - image = glance.images.get(image.id) |
1378 | - status = image.status |
1379 | - self.log.debug('image status: {}'.format(status)) |
1380 | - count += 1 |
1381 | - |
1382 | - if status != 'active': |
1383 | - self.log.error('image creation timed out') |
1384 | - return None |
1385 | + |
1386 | + # Wait for image to reach active status |
1387 | + img_id = image.id |
1388 | + ret = self.resource_reaches_status(glance.images, img_id, |
1389 | + expected_stat='active', |
1390 | + msg='Image status wait') |
1391 | + if not ret: |
1392 | + msg = 'Glance image failed to reach expected state.' |
1393 | + amulet.raise_status(amulet.FAIL, msg=msg) |
1394 | + |
1395 | + # Re-validate new image |
1396 | + self.log.debug('Validating image attributes...') |
1397 | + val_img_name = glance.images.get(img_id).name |
1398 | + val_img_stat = glance.images.get(img_id).status |
1399 | + val_img_pub = glance.images.get(img_id).is_public |
1400 | + val_img_cfmt = glance.images.get(img_id).container_format |
1401 | + val_img_dfmt = glance.images.get(img_id).disk_format |
1402 | + msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' |
1403 | + 'container fmt:{} disk fmt:{}'.format( |
1404 | + val_img_name, val_img_pub, img_id, |
1405 | + val_img_stat, val_img_cfmt, val_img_dfmt)) |
1406 | + |
1407 | + if val_img_name == image_name and val_img_stat == 'active' \ |
1408 | + and val_img_pub is True and val_img_cfmt == 'bare' \ |
1409 | + and val_img_dfmt == 'qcow2': |
1410 | + self.log.debug(msg_attr) |
1411 | + else: |
1412 | + msg = ('Volume validation failed, {}'.format(msg_attr)) |
1413 | + amulet.raise_status(amulet.FAIL, msg=msg) |
1414 | |
1415 | return image |
1416 | |
1417 | @@ -260,22 +312,7 @@ |
1418 | self.log.warn('/!\\ DEPRECATION WARNING: use ' |
1419 | 'delete_resource instead of delete_image.') |
1420 | self.log.debug('Deleting glance image ({})...'.format(image)) |
1421 | - num_before = len(list(glance.images.list())) |
1422 | - glance.images.delete(image) |
1423 | - |
1424 | - count = 1 |
1425 | - num_after = len(list(glance.images.list())) |
1426 | - while num_after != (num_before - 1) and count < 10: |
1427 | - time.sleep(3) |
1428 | - num_after = len(list(glance.images.list())) |
1429 | - self.log.debug('number of images: {}'.format(num_after)) |
1430 | - count += 1 |
1431 | - |
1432 | - if num_after != (num_before - 1): |
1433 | - self.log.error('image deletion timed out') |
1434 | - return False |
1435 | - |
1436 | - return True |
1437 | + return self.delete_resource(glance.images, image, msg='glance image') |
1438 | |
1439 | def create_instance(self, nova, image_name, instance_name, flavor): |
1440 | """Create the specified instance.""" |
1441 | @@ -308,22 +345,8 @@ |
1442 | self.log.warn('/!\\ DEPRECATION WARNING: use ' |
1443 | 'delete_resource instead of delete_instance.') |
1444 | self.log.debug('Deleting instance ({})...'.format(instance)) |
1445 | - num_before = len(list(nova.servers.list())) |
1446 | - nova.servers.delete(instance) |
1447 | - |
1448 | - count = 1 |
1449 | - num_after = len(list(nova.servers.list())) |
1450 | - while num_after != (num_before - 1) and count < 10: |
1451 | - time.sleep(3) |
1452 | - num_after = len(list(nova.servers.list())) |
1453 | - self.log.debug('number of instances: {}'.format(num_after)) |
1454 | - count += 1 |
1455 | - |
1456 | - if num_after != (num_before - 1): |
1457 | - self.log.error('instance deletion timed out') |
1458 | - return False |
1459 | - |
1460 | - return True |
1461 | + return self.delete_resource(nova.servers, instance, |
1462 | + msg='nova instance') |
1463 | |
1464 | def create_or_get_keypair(self, nova, keypair_name="testkey"): |
1465 | """Create a new keypair, or return pointer if it already exists.""" |
1466 | @@ -339,6 +362,88 @@ |
1467 | _keypair = nova.keypairs.create(name=keypair_name) |
1468 | return _keypair |
1469 | |
1470 | + def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, |
1471 | + img_id=None, src_vol_id=None, snap_id=None): |
1472 | + """Create cinder volume, optionally from a glance image, OR |
1473 | + optionally as a clone of an existing volume, OR optionally |
1474 | + from a snapshot. Wait for the new volume status to reach |
1475 | + the expected status, validate and return a resource pointer. |
1476 | + |
1477 | + :param vol_name: cinder volume display name |
1478 | + :param vol_size: size in gigabytes |
1479 | + :param img_id: optional glance image id |
1480 | + :param src_vol_id: optional source volume id to clone |
1481 | + :param snap_id: optional snapshot id to use |
1482 | + :returns: cinder volume pointer |
1483 | + """ |
1484 | + # Handle parameter input and avoid impossible combinations |
1485 | + if img_id and not src_vol_id and not snap_id: |
1486 | + # Create volume from image |
1487 | + self.log.debug('Creating cinder volume from glance image...') |
1488 | + bootable = 'true' |
1489 | + elif src_vol_id and not img_id and not snap_id: |
1490 | + # Clone an existing volume |
1491 | + self.log.debug('Cloning cinder volume...') |
1492 | + bootable = cinder.volumes.get(src_vol_id).bootable |
1493 | + elif snap_id and not src_vol_id and not img_id: |
1494 | + # Create volume from snapshot |
1495 | + self.log.debug('Creating cinder volume from snapshot...') |
1496 | + snap = cinder.volume_snapshots.find(id=snap_id) |
1497 | + vol_size = snap.size |
1498 | + snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id |
1499 | + bootable = cinder.volumes.get(snap_vol_id).bootable |
1500 | + elif not img_id and not src_vol_id and not snap_id: |
1501 | + # Create volume |
1502 | + self.log.debug('Creating cinder volume...') |
1503 | + bootable = 'false' |
1504 | + else: |
1505 | + # Impossible combination of parameters |
1506 | + msg = ('Invalid method use - name:{} size:{} img_id:{} ' |
1507 | + 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, |
1508 | + img_id, src_vol_id, |
1509 | + snap_id)) |
1510 | + amulet.raise_status(amulet.FAIL, msg=msg) |
1511 | + |
1512 | + # Create new volume |
1513 | + try: |
1514 | + vol_new = cinder.volumes.create(display_name=vol_name, |
1515 | + imageRef=img_id, |
1516 | + size=vol_size, |
1517 | + source_volid=src_vol_id, |
1518 | + snapshot_id=snap_id) |
1519 | + vol_id = vol_new.id |
1520 | + except Exception as e: |
1521 | + msg = 'Failed to create volume: {}'.format(e) |
1522 | + amulet.raise_status(amulet.FAIL, msg=msg) |
1523 | + |
1524 | + # Wait for volume to reach available status |
1525 | + ret = self.resource_reaches_status(cinder.volumes, vol_id, |
1526 | + expected_stat="available", |
1527 | + msg="Volume status wait") |
1528 | + if not ret: |
1529 | + msg = 'Cinder volume failed to reach expected state.' |
1530 | + amulet.raise_status(amulet.FAIL, msg=msg) |
1531 | + |
1532 | + # Re-validate new volume |
1533 | + self.log.debug('Validating volume attributes...') |
1534 | + val_vol_name = cinder.volumes.get(vol_id).display_name |
1535 | + val_vol_boot = cinder.volumes.get(vol_id).bootable |
1536 | + val_vol_stat = cinder.volumes.get(vol_id).status |
1537 | + val_vol_size = cinder.volumes.get(vol_id).size |
1538 | + msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' |
1539 | + '{} size:{}'.format(val_vol_name, vol_id, |
1540 | + val_vol_stat, val_vol_boot, |
1541 | + val_vol_size)) |
1542 | + |
1543 | + if val_vol_boot == bootable and val_vol_stat == 'available' \ |
1544 | + and val_vol_name == vol_name and val_vol_size == vol_size: |
1545 | + self.log.debug(msg_attr) |
1546 | + else: |
1547 | + msg = ('Volume validation failed, {}'.format(msg_attr)) |
1548 | + amulet.raise_status(amulet.FAIL, msg=msg) |
1549 | + |
1550 | + return vol_new |
1551 | + |
1552 | def delete_resource(self, resource, resource_id, |
1553 | msg="resource", max_wait=120): |
1554 | """Delete one openstack resource, such as one instance, keypair, |
1555 | @@ -350,6 +455,8 @@ |
1556 | :param max_wait: maximum wait time in seconds |
1557 | :returns: True if successful, otherwise False |
1558 | """ |
1559 | + self.log.debug('Deleting OpenStack resource ' |
1560 | + '{} ({})'.format(resource_id, msg)) |
1561 | num_before = len(list(resource.list())) |
1562 | resource.delete(resource_id) |
1563 | |
1564 | @@ -411,3 +518,87 @@ |
1565 | self.log.debug('{} never reached expected status: ' |
1566 | '{}'.format(resource_id, expected_stat)) |
1567 | return False |
1568 | + |
1569 | + def get_ceph_osd_id_cmd(self, index): |
1570 | + """Produce a shell command that will return a ceph-osd id.""" |
1571 | + return ("`initctl list | grep 'ceph-osd ' | " |
1572 | + "awk 'NR=={} {{ print $2 }}' | " |
1573 | + "grep -o '[0-9]*'`".format(index + 1)) |
1574 | + |
1575 | + def get_ceph_pools(self, sentry_unit): |
1576 | + """Return a dict of ceph pools from a single ceph unit, with |
1577 | + pool name as keys, pool id as vals.""" |
1578 | + pools = {} |
1579 | + cmd = 'sudo ceph osd lspools' |
1580 | + output, code = sentry_unit.run(cmd) |
1581 | + if code != 0: |
1582 | + msg = ('{} `{}` returned {} ' |
1583 | + '{}'.format(sentry_unit.info['unit_name'], |
1584 | + cmd, code, output)) |
1585 | + amulet.raise_status(amulet.FAIL, msg=msg) |
1586 | + |
1587 | + # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, |
1588 | + for pool in str(output).split(','): |
1589 | + pool_id_name = pool.split(' ') |
1590 | + if len(pool_id_name) == 2: |
1591 | + pool_id = pool_id_name[0] |
1592 | + pool_name = pool_id_name[1] |
1593 | + pools[pool_name] = int(pool_id) |
1594 | + |
1595 | + self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], |
1596 | + pools)) |
1597 | + return pools |
1598 | + |
1599 | + def get_ceph_df(self, sentry_unit): |
1600 | + """Return dict of ceph df json output, including ceph pool state. |
1601 | + |
1602 | + :param sentry_unit: Pointer to amulet sentry instance (juju unit) |
1603 | + :returns: Dict of ceph df output |
1604 | + """ |
1605 | + cmd = 'sudo ceph df --format=json' |
1606 | + output, code = sentry_unit.run(cmd) |
1607 | + if code != 0: |
1608 | + msg = ('{} `{}` returned {} ' |
1609 | + '{}'.format(sentry_unit.info['unit_name'], |
1610 | + cmd, code, output)) |
1611 | + amulet.raise_status(amulet.FAIL, msg=msg) |
1612 | + return json.loads(output) |
1613 | + |
1614 | + def get_ceph_pool_sample(self, sentry_unit, pool_id=0): |
1615 | + """Take a sample of attributes of a ceph pool, returning ceph |
1616 | + pool name, object count and disk space used for the specified |
1617 | + pool ID number. |
1618 | + |
1619 | + :param sentry_unit: Pointer to amulet sentry instance (juju unit) |
1620 | + :param pool_id: Ceph pool ID |
1621 | + :returns: List of pool name, object count, kb disk space used |
1622 | + """ |
1623 | + df = self.get_ceph_df(sentry_unit) |
1624 | + pool_name = df['pools'][pool_id]['name'] |
1625 | + obj_count = df['pools'][pool_id]['stats']['objects'] |
1626 | + kb_used = df['pools'][pool_id]['stats']['kb_used'] |
1627 | + self.log.debug('Ceph {} pool (ID {}): {} objects, ' |
1628 | + '{} kb used'.format(pool_name, pool_id, |
1629 | + obj_count, kb_used)) |
1630 | + return pool_name, obj_count, kb_used |
1631 | + |
1632 | + def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): |
1633 | + """Validate ceph pool samples taken over time, such as pool |
1634 | + object counts or pool kb used, before adding, after adding, and |
1635 | + after deleting items which affect those pool attributes. The |
1636 | + 2nd element is expected to be greater than the 1st; 3rd is expected |
1637 | + to be less than the 2nd. |
1638 | + |
1639 | + :param samples: List containing 3 data samples |
1640 | + :param sample_type: String for logging and usage context |
1641 | + :returns: None if successful, Failure message otherwise |
1642 | + """ |
1643 | + original, created, deleted = range(3) |
1644 | + if samples[created] <= samples[original] or \ |
1645 | + samples[deleted] >= samples[created]: |
1646 | + return ('Ceph {} samples ({}) ' |
1647 | + 'unexpected.'.format(sample_type, samples)) |
1648 | + else: |
1649 | + self.log.debug('Ceph {} samples (OK): ' |
1650 | + '{}'.format(sample_type, samples)) |
1651 | + return None |
charm_lint_check #6287 neutron- gateway- next for corey.bryant mp265049
LINT OK: passed
Build: http:// 10.245. 162.77: 8080/job/ charm_lint_ check/6287/