Merge lp:~corey.bryant/charms/trusty/neutron-api/sync-ch into lp:~openstack-charmers-archive/charms/trusty/neutron-api/next
Proposed by: Corey Bryant
Status: Merged
Merged at revision: 127
Proposed branch: lp:~corey.bryant/charms/trusty/neutron-api/sync-ch
Merge into: lp:~openstack-charmers-archive/charms/trusty/neutron-api/next
Diff against target: 1562 lines (+878/-199), 16 files modified
  hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+36/-3)
  hooks/charmhelpers/contrib/openstack/amulet/utils.py (+240/-49)
  hooks/charmhelpers/contrib/openstack/context.py (+8/-7)
  hooks/charmhelpers/contrib/openstack/templates/ceph.conf (+6/-6)
  hooks/charmhelpers/contrib/openstack/utils.py (+9/-5)
  hooks/charmhelpers/contrib/storage/linux/ceph.py (+6/-6)
  hooks/charmhelpers/core/hookenv.py (+93/-36)
  hooks/charmhelpers/core/host.py (+31/-5)
  hooks/charmhelpers/core/services/base.py (+12/-9)
  hooks/charmhelpers/core/services/helpers.py (+2/-2)
  hooks/charmhelpers/fetch/__init__.py (+23/-14)
  hooks/charmhelpers/fetch/archiveurl.py (+7/-1)
  hooks/charmhelpers/fetch/giturl.py (+1/-1)
  tests/charmhelpers/contrib/amulet/utils.py (+128/-3)
  tests/charmhelpers/contrib/openstack/amulet/deployment.py (+36/-3)
  tests/charmhelpers/contrib/openstack/amulet/utils.py (+240/-49)
To merge this branch: bzr merge lp:~corey.bryant/charms/trusty/neutron-api/sync-ch
Related bugs: none
Reviewer: OpenStack Charmers (status: Pending)
Review via email: mp+265051@code.launchpad.net
Commit message
Description of the change
uosci-testing-bot (uosci-testing-bot) wrote:
charm_unit_test #5920 neutron-api-next for corey.bryant mp265051
UNIT OK: passed
uosci-testing-bot (uosci-testing-bot) wrote:
charm_amulet_test #5143 neutron-api-next for corey.bryant mp265051
AMULET FAIL: amulet-test failed
AMULET Results (max last 2 lines):
make: *** [test] Error 1
ERROR:root:Make target returned non-zero.
Full amulet test output: http://
Build: http://
uosci-testing-bot (uosci-testing-bot) wrote:
charm_lint_check #6343 neutron-api-next for corey.bryant mp265051
LINT OK: passed
uosci-testing-bot (uosci-testing-bot) wrote:
charm_unit_test #5975 neutron-api-next for corey.bryant mp265051
UNIT OK: passed
uosci-testing-bot (uosci-testing-bot) wrote:
charm_amulet_test #5153 neutron-api-next for corey.bryant mp265051
AMULET OK: passed
Build: http://
Preview Diff
1 | === modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py' |
2 | --- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-06-19 15:09:05 +0000 |
3 | +++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-07-16 20:32:27 +0000 |
4 | @@ -79,9 +79,9 @@ |
5 | services.append(this_service) |
6 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', |
7 | 'ceph-osd', 'ceph-radosgw'] |
8 | - # Openstack subordinate charms do not expose an origin option as that |
9 | - # is controlled by the principle |
10 | - ignore = ['neutron-openvswitch'] |
11 | + # Most OpenStack subordinate charms do not expose an origin option |
12 | + # as that is controlled by the principle. |
13 | + ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch'] |
14 | |
15 | if self.openstack: |
16 | for svc in services: |
17 | @@ -148,3 +148,36 @@ |
18 | return os_origin.split('%s-' % self.series)[1].split('/')[0] |
19 | else: |
20 | return releases[self.series] |
21 | + |
22 | + def get_ceph_expected_pools(self, radosgw=False): |
23 | + """Return a list of expected ceph pools in a ceph + cinder + glance |
24 | + test scenario, based on OpenStack release and whether ceph radosgw |
25 | + is flagged as present or not.""" |
26 | + |
27 | + if self._get_openstack_release() >= self.trusty_kilo: |
28 | + # Kilo or later |
29 | + pools = [ |
30 | + 'rbd', |
31 | + 'cinder', |
32 | + 'glance' |
33 | + ] |
34 | + else: |
35 | + # Juno or earlier |
36 | + pools = [ |
37 | + 'data', |
38 | + 'metadata', |
39 | + 'rbd', |
40 | + 'cinder', |
41 | + 'glance' |
42 | + ] |
43 | + |
44 | + if radosgw: |
45 | + pools.extend([ |
46 | + '.rgw.root', |
47 | + '.rgw.control', |
48 | + '.rgw', |
49 | + '.rgw.gc', |
50 | + '.users.uid' |
51 | + ]) |
52 | + |
53 | + return pools |
54 | |
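
For orientation while reviewing: a minimal sketch of how an amulet test could combine the new get_ceph_expected_pools() helper above with the get_ceph_pools() utility added later in this sync. The deployment/sentry wiring (how the ceph sentry unit is obtained) is assumed here and is not part of this branch.

```python
# Hedged usage sketch; not part of this merge. Assumes a test class derived
# from OpenStackAmuletDeployment and an already-deployed ceph sentry unit.
import logging

from charmhelpers.contrib.openstack.amulet.utils import OpenStackAmuletUtils

u = OpenStackAmuletUtils(logging.DEBUG)


def check_expected_pools(deployment, ceph_sentry, radosgw=False):
    """Compare the pools the test expects with the pools ceph reports."""
    expected = deployment.get_ceph_expected_pools(radosgw=radosgw)
    actual = u.get_ceph_pools(ceph_sentry)  # dict of {pool_name: pool_id}
    missing = [p for p in expected if p not in actual]
    if missing:
        raise AssertionError('missing ceph pools: {}'.format(missing))
```
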
55 | === modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py' |
56 | --- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-06-19 15:09:05 +0000 |
57 | +++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-07-16 20:32:27 +0000 |
58 | @@ -14,16 +14,20 @@ |
59 | # You should have received a copy of the GNU Lesser General Public License |
60 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
61 | |
62 | +import amulet |
63 | +import json |
64 | import logging |
65 | import os |
66 | import six |
67 | import time |
68 | import urllib |
69 | |
70 | +import cinderclient.v1.client as cinder_client |
71 | import glanceclient.v1.client as glance_client |
72 | import heatclient.v1.client as heat_client |
73 | import keystoneclient.v2_0 as keystone_client |
74 | import novaclient.v1_1.client as nova_client |
75 | +import swiftclient |
76 | |
77 | from charmhelpers.contrib.amulet.utils import ( |
78 | AmuletUtils |
79 | @@ -171,6 +175,16 @@ |
80 | self.log.debug('Checking if tenant exists ({})...'.format(tenant)) |
81 | return tenant in [t.name for t in keystone.tenants.list()] |
82 | |
83 | + def authenticate_cinder_admin(self, keystone_sentry, username, |
84 | + password, tenant): |
85 | + """Authenticates admin user with cinder.""" |
86 | + # NOTE(beisner): cinder python client doesn't accept tokens. |
87 | + service_ip = \ |
88 | + keystone_sentry.relation('shared-db', |
89 | + 'mysql:shared-db')['private-address'] |
90 | + ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8')) |
91 | + return cinder_client.Client(username, password, tenant, ept) |
92 | + |
93 | def authenticate_keystone_admin(self, keystone_sentry, user, password, |
94 | tenant): |
95 | """Authenticates admin user with the keystone admin endpoint.""" |
96 | @@ -212,9 +226,29 @@ |
97 | return nova_client.Client(username=user, api_key=password, |
98 | project_id=tenant, auth_url=ep) |
99 | |
100 | + def authenticate_swift_user(self, keystone, user, password, tenant): |
101 | + """Authenticates a regular user with swift api.""" |
102 | + self.log.debug('Authenticating swift user ({})...'.format(user)) |
103 | + ep = keystone.service_catalog.url_for(service_type='identity', |
104 | + endpoint_type='publicURL') |
105 | + return swiftclient.Connection(authurl=ep, |
106 | + user=user, |
107 | + key=password, |
108 | + tenant_name=tenant, |
109 | + auth_version='2.0') |
110 | + |
111 | def create_cirros_image(self, glance, image_name): |
112 | - """Download the latest cirros image and upload it to glance.""" |
113 | - self.log.debug('Creating glance image ({})...'.format(image_name)) |
114 | + """Download the latest cirros image and upload it to glance, |
115 | + validate and return a resource pointer. |
116 | + |
117 | + :param glance: pointer to authenticated glance connection |
118 | + :param image_name: display name for new image |
119 | + :returns: glance image pointer |
120 | + """ |
121 | + self.log.debug('Creating glance cirros image ' |
122 | + '({})...'.format(image_name)) |
123 | + |
124 | + # Download cirros image |
125 | http_proxy = os.getenv('AMULET_HTTP_PROXY') |
126 | self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) |
127 | if http_proxy: |
128 | @@ -223,33 +257,51 @@ |
129 | else: |
130 | opener = urllib.FancyURLopener() |
131 | |
132 | - f = opener.open("http://download.cirros-cloud.net/version/released") |
133 | + f = opener.open('http://download.cirros-cloud.net/version/released') |
134 | version = f.read().strip() |
135 | - cirros_img = "cirros-{}-x86_64-disk.img".format(version) |
136 | + cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) |
137 | local_path = os.path.join('tests', cirros_img) |
138 | |
139 | if not os.path.exists(local_path): |
140 | - cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", |
141 | + cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', |
142 | version, cirros_img) |
143 | opener.retrieve(cirros_url, local_path) |
144 | f.close() |
145 | |
146 | + # Create glance image |
147 | with open(local_path) as f: |
148 | image = glance.images.create(name=image_name, is_public=True, |
149 | disk_format='qcow2', |
150 | container_format='bare', data=f) |
151 | - count = 1 |
152 | - status = image.status |
153 | - while status != 'active' and count < 10: |
154 | - time.sleep(3) |
155 | - image = glance.images.get(image.id) |
156 | - status = image.status |
157 | - self.log.debug('image status: {}'.format(status)) |
158 | - count += 1 |
159 | - |
160 | - if status != 'active': |
161 | - self.log.error('image creation timed out') |
162 | - return None |
163 | + |
164 | + # Wait for image to reach active status |
165 | + img_id = image.id |
166 | + ret = self.resource_reaches_status(glance.images, img_id, |
167 | + expected_stat='active', |
168 | + msg='Image status wait') |
169 | + if not ret: |
170 | + msg = 'Glance image failed to reach expected state.' |
171 | + amulet.raise_status(amulet.FAIL, msg=msg) |
172 | + |
173 | + # Re-validate new image |
174 | + self.log.debug('Validating image attributes...') |
175 | + val_img_name = glance.images.get(img_id).name |
176 | + val_img_stat = glance.images.get(img_id).status |
177 | + val_img_pub = glance.images.get(img_id).is_public |
178 | + val_img_cfmt = glance.images.get(img_id).container_format |
179 | + val_img_dfmt = glance.images.get(img_id).disk_format |
180 | + msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' |
181 | + 'container fmt:{} disk fmt:{}'.format( |
182 | + val_img_name, val_img_pub, img_id, |
183 | + val_img_stat, val_img_cfmt, val_img_dfmt)) |
184 | + |
185 | + if val_img_name == image_name and val_img_stat == 'active' \ |
186 | + and val_img_pub is True and val_img_cfmt == 'bare' \ |
187 | + and val_img_dfmt == 'qcow2': |
188 | + self.log.debug(msg_attr) |
189 | + else: |
190 | + msg = ('Volume validation failed, {}'.format(msg_attr)) |
191 | + amulet.raise_status(amulet.FAIL, msg=msg) |
192 | |
193 | return image |
194 | |
195 | @@ -260,22 +312,7 @@ |
196 | self.log.warn('/!\\ DEPRECATION WARNING: use ' |
197 | 'delete_resource instead of delete_image.') |
198 | self.log.debug('Deleting glance image ({})...'.format(image)) |
199 | - num_before = len(list(glance.images.list())) |
200 | - glance.images.delete(image) |
201 | - |
202 | - count = 1 |
203 | - num_after = len(list(glance.images.list())) |
204 | - while num_after != (num_before - 1) and count < 10: |
205 | - time.sleep(3) |
206 | - num_after = len(list(glance.images.list())) |
207 | - self.log.debug('number of images: {}'.format(num_after)) |
208 | - count += 1 |
209 | - |
210 | - if num_after != (num_before - 1): |
211 | - self.log.error('image deletion timed out') |
212 | - return False |
213 | - |
214 | - return True |
215 | + return self.delete_resource(glance.images, image, msg='glance image') |
216 | |
217 | def create_instance(self, nova, image_name, instance_name, flavor): |
218 | """Create the specified instance.""" |
219 | @@ -308,22 +345,8 @@ |
220 | self.log.warn('/!\\ DEPRECATION WARNING: use ' |
221 | 'delete_resource instead of delete_instance.') |
222 | self.log.debug('Deleting instance ({})...'.format(instance)) |
223 | - num_before = len(list(nova.servers.list())) |
224 | - nova.servers.delete(instance) |
225 | - |
226 | - count = 1 |
227 | - num_after = len(list(nova.servers.list())) |
228 | - while num_after != (num_before - 1) and count < 10: |
229 | - time.sleep(3) |
230 | - num_after = len(list(nova.servers.list())) |
231 | - self.log.debug('number of instances: {}'.format(num_after)) |
232 | - count += 1 |
233 | - |
234 | - if num_after != (num_before - 1): |
235 | - self.log.error('instance deletion timed out') |
236 | - return False |
237 | - |
238 | - return True |
239 | + return self.delete_resource(nova.servers, instance, |
240 | + msg='nova instance') |
241 | |
242 | def create_or_get_keypair(self, nova, keypair_name="testkey"): |
243 | """Create a new keypair, or return pointer if it already exists.""" |
244 | @@ -339,6 +362,88 @@ |
245 | _keypair = nova.keypairs.create(name=keypair_name) |
246 | return _keypair |
247 | |
248 | + def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, |
249 | + img_id=None, src_vol_id=None, snap_id=None): |
250 | + """Create cinder volume, optionally from a glance image, OR |
251 | + optionally as a clone of an existing volume, OR optionally |
252 | + from a snapshot. Wait for the new volume status to reach |
253 | + the expected status, validate and return a resource pointer. |
254 | + |
255 | + :param vol_name: cinder volume display name |
256 | + :param vol_size: size in gigabytes |
257 | + :param img_id: optional glance image id |
258 | + :param src_vol_id: optional source volume id to clone |
259 | + :param snap_id: optional snapshot id to use |
260 | + :returns: cinder volume pointer |
261 | + """ |
262 | + # Handle parameter input and avoid impossible combinations |
263 | + if img_id and not src_vol_id and not snap_id: |
264 | + # Create volume from image |
265 | + self.log.debug('Creating cinder volume from glance image...') |
266 | + bootable = 'true' |
267 | + elif src_vol_id and not img_id and not snap_id: |
268 | + # Clone an existing volume |
269 | + self.log.debug('Cloning cinder volume...') |
270 | + bootable = cinder.volumes.get(src_vol_id).bootable |
271 | + elif snap_id and not src_vol_id and not img_id: |
272 | + # Create volume from snapshot |
273 | + self.log.debug('Creating cinder volume from snapshot...') |
274 | + snap = cinder.volume_snapshots.find(id=snap_id) |
275 | + vol_size = snap.size |
276 | + snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id |
277 | + bootable = cinder.volumes.get(snap_vol_id).bootable |
278 | + elif not img_id and not src_vol_id and not snap_id: |
279 | + # Create volume |
280 | + self.log.debug('Creating cinder volume...') |
281 | + bootable = 'false' |
282 | + else: |
283 | + # Impossible combination of parameters |
284 | + msg = ('Invalid method use - name:{} size:{} img_id:{} ' |
285 | + 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, |
286 | + img_id, src_vol_id, |
287 | + snap_id)) |
288 | + amulet.raise_status(amulet.FAIL, msg=msg) |
289 | + |
290 | + # Create new volume |
291 | + try: |
292 | + vol_new = cinder.volumes.create(display_name=vol_name, |
293 | + imageRef=img_id, |
294 | + size=vol_size, |
295 | + source_volid=src_vol_id, |
296 | + snapshot_id=snap_id) |
297 | + vol_id = vol_new.id |
298 | + except Exception as e: |
299 | + msg = 'Failed to create volume: {}'.format(e) |
300 | + amulet.raise_status(amulet.FAIL, msg=msg) |
301 | + |
302 | + # Wait for volume to reach available status |
303 | + ret = self.resource_reaches_status(cinder.volumes, vol_id, |
304 | + expected_stat="available", |
305 | + msg="Volume status wait") |
306 | + if not ret: |
307 | + msg = 'Cinder volume failed to reach expected state.' |
308 | + amulet.raise_status(amulet.FAIL, msg=msg) |
309 | + |
310 | + # Re-validate new volume |
311 | + self.log.debug('Validating volume attributes...') |
312 | + val_vol_name = cinder.volumes.get(vol_id).display_name |
313 | + val_vol_boot = cinder.volumes.get(vol_id).bootable |
314 | + val_vol_stat = cinder.volumes.get(vol_id).status |
315 | + val_vol_size = cinder.volumes.get(vol_id).size |
316 | + msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' |
317 | + '{} size:{}'.format(val_vol_name, vol_id, |
318 | + val_vol_stat, val_vol_boot, |
319 | + val_vol_size)) |
320 | + |
321 | + if val_vol_boot == bootable and val_vol_stat == 'available' \ |
322 | + and val_vol_name == vol_name and val_vol_size == vol_size: |
323 | + self.log.debug(msg_attr) |
324 | + else: |
325 | + msg = ('Volume validation failed, {}'.format(msg_attr)) |
326 | + amulet.raise_status(amulet.FAIL, msg=msg) |
327 | + |
328 | + return vol_new |
329 | + |
330 | def delete_resource(self, resource, resource_id, |
331 | msg="resource", max_wait=120): |
332 | """Delete one openstack resource, such as one instance, keypair, |
333 | @@ -350,6 +455,8 @@ |
334 | :param max_wait: maximum wait time in seconds |
335 | :returns: True if successful, otherwise False |
336 | """ |
337 | + self.log.debug('Deleting OpenStack resource ' |
338 | + '{} ({})'.format(resource_id, msg)) |
339 | num_before = len(list(resource.list())) |
340 | resource.delete(resource_id) |
341 | |
342 | @@ -411,3 +518,87 @@ |
343 | self.log.debug('{} never reached expected status: ' |
344 | '{}'.format(resource_id, expected_stat)) |
345 | return False |
346 | + |
347 | + def get_ceph_osd_id_cmd(self, index): |
348 | + """Produce a shell command that will return a ceph-osd id.""" |
349 | + return ("`initctl list | grep 'ceph-osd ' | " |
350 | + "awk 'NR=={} {{ print $2 }}' | " |
351 | + "grep -o '[0-9]*'`".format(index + 1)) |
352 | + |
353 | + def get_ceph_pools(self, sentry_unit): |
354 | + """Return a dict of ceph pools from a single ceph unit, with |
355 | + pool name as keys, pool id as vals.""" |
356 | + pools = {} |
357 | + cmd = 'sudo ceph osd lspools' |
358 | + output, code = sentry_unit.run(cmd) |
359 | + if code != 0: |
360 | + msg = ('{} `{}` returned {} ' |
361 | + '{}'.format(sentry_unit.info['unit_name'], |
362 | + cmd, code, output)) |
363 | + amulet.raise_status(amulet.FAIL, msg=msg) |
364 | + |
365 | + # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, |
366 | + for pool in str(output).split(','): |
367 | + pool_id_name = pool.split(' ') |
368 | + if len(pool_id_name) == 2: |
369 | + pool_id = pool_id_name[0] |
370 | + pool_name = pool_id_name[1] |
371 | + pools[pool_name] = int(pool_id) |
372 | + |
373 | + self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], |
374 | + pools)) |
375 | + return pools |
376 | + |
377 | + def get_ceph_df(self, sentry_unit): |
378 | + """Return dict of ceph df json output, including ceph pool state. |
379 | + |
380 | + :param sentry_unit: Pointer to amulet sentry instance (juju unit) |
381 | + :returns: Dict of ceph df output |
382 | + """ |
383 | + cmd = 'sudo ceph df --format=json' |
384 | + output, code = sentry_unit.run(cmd) |
385 | + if code != 0: |
386 | + msg = ('{} `{}` returned {} ' |
387 | + '{}'.format(sentry_unit.info['unit_name'], |
388 | + cmd, code, output)) |
389 | + amulet.raise_status(amulet.FAIL, msg=msg) |
390 | + return json.loads(output) |
391 | + |
392 | + def get_ceph_pool_sample(self, sentry_unit, pool_id=0): |
393 | + """Take a sample of attributes of a ceph pool, returning ceph |
394 | + pool name, object count and disk space used for the specified |
395 | + pool ID number. |
396 | + |
397 | + :param sentry_unit: Pointer to amulet sentry instance (juju unit) |
398 | + :param pool_id: Ceph pool ID |
399 | + :returns: List of pool name, object count, kb disk space used |
400 | + """ |
401 | + df = self.get_ceph_df(sentry_unit) |
402 | + pool_name = df['pools'][pool_id]['name'] |
403 | + obj_count = df['pools'][pool_id]['stats']['objects'] |
404 | + kb_used = df['pools'][pool_id]['stats']['kb_used'] |
405 | + self.log.debug('Ceph {} pool (ID {}): {} objects, ' |
406 | + '{} kb used'.format(pool_name, pool_id, |
407 | + obj_count, kb_used)) |
408 | + return pool_name, obj_count, kb_used |
409 | + |
410 | + def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): |
411 | + """Validate ceph pool samples taken over time, such as pool |
412 | + object counts or pool kb used, before adding, after adding, and |
413 | + after deleting items which affect those pool attributes. The |
414 | + 2nd element is expected to be greater than the 1st; 3rd is expected |
415 | + to be less than the 2nd. |
416 | + |
417 | + :param samples: List containing 3 data samples |
418 | + :param sample_type: String for logging and usage context |
419 | + :returns: None if successful, Failure message otherwise |
420 | + """ |
421 | + original, created, deleted = range(3) |
422 | + if samples[created] <= samples[original] or \ |
423 | + samples[deleted] >= samples[created]: |
424 | + return ('Ceph {} samples ({}) ' |
425 | + 'unexpected.'.format(sample_type, samples)) |
426 | + else: |
427 | + self.log.debug('Ceph {} samples (OK): ' |
428 | + '{}'.format(sample_type, samples)) |
429 | + return None |
430 | |
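
The new cinder and ceph helpers above are intended to be used together; a hedged sketch follows (the authenticated cinder client and ceph sentry unit are assumed to come from the existing authenticate_* and deployment plumbing, and a 'cinder' pool is assumed to exist):

```python
# Hedged usage sketch; not part of this merge.
import logging

from charmhelpers.contrib.openstack.amulet.utils import OpenStackAmuletUtils

u = OpenStackAmuletUtils(logging.DEBUG)


def exercise_cinder_ceph(cinder, ceph_sentry):
    """Create and delete a volume, sampling the cinder pool at each step."""
    pools = u.get_ceph_pools(ceph_sentry)
    cinder_pool = pools['cinder']  # assumes cinder-ceph created this pool

    # get_ceph_pool_sample() returns (name, object count, kb used);
    # sample object counts before create, after create, and after delete.
    samples = [u.get_ceph_pool_sample(ceph_sentry, cinder_pool)[1]]
    vol = u.create_cinder_volume(cinder, vol_name='demo-vol', vol_size=1)
    samples.append(u.get_ceph_pool_sample(ceph_sentry, cinder_pool)[1])
    u.delete_resource(cinder.volumes, vol.id, msg='cinder volume')
    samples.append(u.get_ceph_pool_sample(ceph_sentry, cinder_pool)[1])

    result = u.validate_ceph_pool_samples(samples, 'cinder pool object count')
    if result:
        raise AssertionError(result)
```
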
431 | === modified file 'hooks/charmhelpers/contrib/openstack/context.py' |
432 | --- hooks/charmhelpers/contrib/openstack/context.py 2015-06-19 15:09:05 +0000 |
433 | +++ hooks/charmhelpers/contrib/openstack/context.py 2015-07-16 20:32:27 +0000 |
434 | @@ -122,21 +122,24 @@ |
435 | of specifying multiple key value pairs within the same string. For |
436 | example, a string in the format of 'key1=value1, key2=value2' will |
437 | return a dict of: |
438 | - {'key1': 'value1', |
439 | - 'key2': 'value2'}. |
440 | + |
441 | + {'key1': 'value1', |
442 | + 'key2': 'value2'}. |
443 | |
444 | 2. A string in the above format, but supporting a comma-delimited list |
445 | of values for the same key. For example, a string in the format of |
446 | 'key1=value1, key2=value3,value4,value5' will return a dict of: |
447 | - {'key1', 'value1', |
448 | - 'key2', 'value2,value3,value4'} |
449 | + |
450 | + {'key1', 'value1', |
451 | + 'key2', 'value2,value3,value4'} |
452 | |
453 | 3. A string containing a colon character (:) prior to an equal |
454 | character (=) will be treated as yaml and parsed as such. This can be |
455 | used to specify more complex key value pairs. For example, |
456 | a string in the format of 'key1: subkey1=value1, subkey2=value2' will |
457 | return a dict of: |
458 | - {'key1', 'subkey1=value1, subkey2=value2'} |
459 | + |
460 | + {'key1', 'subkey1=value1, subkey2=value2'} |
461 | |
462 | The provided config_flags string may be a list of comma-separated values |
463 | which themselves may be comma-separated list of values. |
464 | @@ -891,8 +894,6 @@ |
465 | return ctxt |
466 | |
467 | def __call__(self): |
468 | - self._ensure_packages() |
469 | - |
470 | if self.network_manager not in ['quantum', 'neutron']: |
471 | return {} |
472 | |
473 | |
474 | === modified file 'hooks/charmhelpers/contrib/openstack/templates/ceph.conf' |
475 | --- hooks/charmhelpers/contrib/openstack/templates/ceph.conf 2014-06-05 10:59:00 +0000 |
476 | +++ hooks/charmhelpers/contrib/openstack/templates/ceph.conf 2015-07-16 20:32:27 +0000 |
477 | @@ -5,11 +5,11 @@ |
478 | ############################################################################### |
479 | [global] |
480 | {% if auth -%} |
481 | - auth_supported = {{ auth }} |
482 | - keyring = /etc/ceph/$cluster.$name.keyring |
483 | - mon host = {{ mon_hosts }} |
484 | +auth_supported = {{ auth }} |
485 | +keyring = /etc/ceph/$cluster.$name.keyring |
486 | +mon host = {{ mon_hosts }} |
487 | {% endif -%} |
488 | - log to syslog = {{ use_syslog }} |
489 | - err to syslog = {{ use_syslog }} |
490 | - clog to syslog = {{ use_syslog }} |
491 | +log to syslog = {{ use_syslog }} |
492 | +err to syslog = {{ use_syslog }} |
493 | +clog to syslog = {{ use_syslog }} |
494 | |
495 | |
496 | === modified file 'hooks/charmhelpers/contrib/openstack/utils.py' |
497 | --- hooks/charmhelpers/contrib/openstack/utils.py 2015-06-19 15:09:05 +0000 |
498 | +++ hooks/charmhelpers/contrib/openstack/utils.py 2015-07-16 20:32:27 +0000 |
499 | @@ -522,6 +522,7 @@ |
500 | Clone/install all specified OpenStack repositories. |
501 | |
502 | The expected format of projects_yaml is: |
503 | + |
504 | repositories: |
505 | - {name: keystone, |
506 | repository: 'git://git.openstack.org/openstack/keystone.git', |
507 | @@ -529,11 +530,13 @@ |
508 | - {name: requirements, |
509 | repository: 'git://git.openstack.org/openstack/requirements.git', |
510 | branch: 'stable/icehouse'} |
511 | + |
512 | directory: /mnt/openstack-git |
513 | http_proxy: squid-proxy-url |
514 | https_proxy: squid-proxy-url |
515 | |
516 | - The directory, http_proxy, and https_proxy keys are optional. |
517 | + The directory, http_proxy, and https_proxy keys are optional. |
518 | + |
519 | """ |
520 | global requirements_dir |
521 | parent_dir = '/mnt/openstack-git' |
522 | @@ -555,10 +558,11 @@ |
523 | |
524 | pip_create_virtualenv(os.path.join(parent_dir, 'venv')) |
525 | |
526 | - # Upgrade setuptools from default virtualenv version. The default version |
527 | - # in trusty breaks update.py in global requirements master branch. |
528 | - pip_install('setuptools', upgrade=True, proxy=http_proxy, |
529 | - venv=os.path.join(parent_dir, 'venv')) |
530 | + # Upgrade setuptools and pip from default virtualenv versions. The default |
531 | + # versions in trusty break master OpenStack branch deployments. |
532 | + for p in ['pip', 'setuptools']: |
533 | + pip_install(p, upgrade=True, proxy=http_proxy, |
534 | + venv=os.path.join(parent_dir, 'venv')) |
535 | |
536 | for p in projects['repositories']: |
537 | repo = p['repository'] |
538 | |
539 | === modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py' |
540 | --- hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-02-24 11:40:25 +0000 |
541 | +++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-07-16 20:32:27 +0000 |
542 | @@ -60,12 +60,12 @@ |
543 | KEYFILE = '/etc/ceph/ceph.client.{}.key' |
544 | |
545 | CEPH_CONF = """[global] |
546 | - auth supported = {auth} |
547 | - keyring = {keyring} |
548 | - mon host = {mon_hosts} |
549 | - log to syslog = {use_syslog} |
550 | - err to syslog = {use_syslog} |
551 | - clog to syslog = {use_syslog} |
552 | +auth supported = {auth} |
553 | +keyring = {keyring} |
554 | +mon host = {mon_hosts} |
555 | +log to syslog = {use_syslog} |
556 | +err to syslog = {use_syslog} |
557 | +clog to syslog = {use_syslog} |
558 | """ |
559 | |
560 | |
561 | |
562 | === modified file 'hooks/charmhelpers/core/hookenv.py' |
563 | --- hooks/charmhelpers/core/hookenv.py 2015-06-09 09:58:16 +0000 |
564 | +++ hooks/charmhelpers/core/hookenv.py 2015-07-16 20:32:27 +0000 |
565 | @@ -21,7 +21,9 @@ |
566 | # Charm Helpers Developers <juju@lists.ubuntu.com> |
567 | |
568 | from __future__ import print_function |
569 | +from distutils.version import LooseVersion |
570 | from functools import wraps |
571 | +import glob |
572 | import os |
573 | import json |
574 | import yaml |
575 | @@ -242,29 +244,7 @@ |
576 | self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) |
577 | if os.path.exists(self.path): |
578 | self.load_previous() |
579 | - |
580 | - def __getitem__(self, key): |
581 | - """For regular dict lookups, check the current juju config first, |
582 | - then the previous (saved) copy. This ensures that user-saved values |
583 | - will be returned by a dict lookup. |
584 | - |
585 | - """ |
586 | - try: |
587 | - return dict.__getitem__(self, key) |
588 | - except KeyError: |
589 | - return (self._prev_dict or {})[key] |
590 | - |
591 | - def get(self, key, default=None): |
592 | - try: |
593 | - return self[key] |
594 | - except KeyError: |
595 | - return default |
596 | - |
597 | - def keys(self): |
598 | - prev_keys = [] |
599 | - if self._prev_dict is not None: |
600 | - prev_keys = self._prev_dict.keys() |
601 | - return list(set(prev_keys + list(dict.keys(self)))) |
602 | + atexit(self._implicit_save) |
603 | |
604 | def load_previous(self, path=None): |
605 | """Load previous copy of config from disk. |
606 | @@ -283,6 +263,9 @@ |
607 | self.path = path or self.path |
608 | with open(self.path) as f: |
609 | self._prev_dict = json.load(f) |
610 | + for k, v in self._prev_dict.items(): |
611 | + if k not in self: |
612 | + self[k] = v |
613 | |
614 | def changed(self, key): |
615 | """Return True if the current value for this key is different from |
616 | @@ -314,13 +297,13 @@ |
617 | instance. |
618 | |
619 | """ |
620 | - if self._prev_dict: |
621 | - for k, v in six.iteritems(self._prev_dict): |
622 | - if k not in self: |
623 | - self[k] = v |
624 | with open(self.path, 'w') as f: |
625 | json.dump(self, f) |
626 | |
627 | + def _implicit_save(self): |
628 | + if self.implicit_save: |
629 | + self.save() |
630 | + |
631 | |
632 | @cached |
633 | def config(scope=None): |
634 | @@ -587,10 +570,14 @@ |
635 | hooks.execute(sys.argv) |
636 | """ |
637 | |
638 | - def __init__(self, config_save=True): |
639 | + def __init__(self, config_save=None): |
640 | super(Hooks, self).__init__() |
641 | self._hooks = {} |
642 | - self._config_save = config_save |
643 | + |
644 | + # For unknown reasons, we allow the Hooks constructor to override |
645 | + # config().implicit_save. |
646 | + if config_save is not None: |
647 | + config().implicit_save = config_save |
648 | |
649 | def register(self, name, function): |
650 | """Register a hook""" |
651 | @@ -598,13 +585,16 @@ |
652 | |
653 | def execute(self, args): |
654 | """Execute a registered hook based on args[0]""" |
655 | + _run_atstart() |
656 | hook_name = os.path.basename(args[0]) |
657 | if hook_name in self._hooks: |
658 | - self._hooks[hook_name]() |
659 | - if self._config_save: |
660 | - cfg = config() |
661 | - if cfg.implicit_save: |
662 | - cfg.save() |
663 | + try: |
664 | + self._hooks[hook_name]() |
665 | + except SystemExit as x: |
666 | + if x.code is None or x.code == 0: |
667 | + _run_atexit() |
668 | + raise |
669 | + _run_atexit() |
670 | else: |
671 | raise UnregisteredHookError(hook_name) |
672 | |
673 | @@ -732,13 +722,80 @@ |
674 | @translate_exc(from_exc=OSError, to_exc=NotImplementedError) |
675 | def leader_set(settings=None, **kwargs): |
676 | """Juju leader set value(s)""" |
677 | - log("Juju leader-set '%s'" % (settings), level=DEBUG) |
678 | + # Don't log secrets. |
679 | + # log("Juju leader-set '%s'" % (settings), level=DEBUG) |
680 | cmd = ['leader-set'] |
681 | settings = settings or {} |
682 | settings.update(kwargs) |
683 | - for k, v in settings.iteritems(): |
684 | + for k, v in settings.items(): |
685 | if v is None: |
686 | cmd.append('{}='.format(k)) |
687 | else: |
688 | cmd.append('{}={}'.format(k, v)) |
689 | subprocess.check_call(cmd) |
690 | + |
691 | + |
692 | +@cached |
693 | +def juju_version(): |
694 | + """Full version string (eg. '1.23.3.1-trusty-amd64')""" |
695 | + # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1 |
696 | + jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0] |
697 | + return subprocess.check_output([jujud, 'version'], |
698 | + universal_newlines=True).strip() |
699 | + |
700 | + |
701 | +@cached |
702 | +def has_juju_version(minimum_version): |
703 | + """Return True if the Juju version is at least the provided version""" |
704 | + return LooseVersion(juju_version()) >= LooseVersion(minimum_version) |
705 | + |
706 | + |
707 | +_atexit = [] |
708 | +_atstart = [] |
709 | + |
710 | + |
711 | +def atstart(callback, *args, **kwargs): |
712 | + '''Schedule a callback to run before the main hook. |
713 | + |
714 | + Callbacks are run in the order they were added. |
715 | + |
716 | + This is useful for modules and classes to perform initialization |
717 | + and inject behavior. In particular: |
718 | + |
719 | + - Run common code before all of your hooks, such as logging |
720 | + the hook name or interesting relation data. |
721 | + - Defer object or module initialization that requires a hook |
722 | + context until we know there actually is a hook context, |
723 | + making testing easier. |
724 | + - Rather than requiring charm authors to include boilerplate to |
725 | + invoke your helper's behavior, have it run automatically if |
726 | + your object is instantiated or module imported. |
727 | + |
728 | + This is not at all useful after your hook framework as been launched. |
729 | + ''' |
730 | + global _atstart |
731 | + _atstart.append((callback, args, kwargs)) |
732 | + |
733 | + |
734 | +def atexit(callback, *args, **kwargs): |
735 | + '''Schedule a callback to run on successful hook completion. |
736 | + |
737 | + Callbacks are run in the reverse order that they were added.''' |
738 | + _atexit.append((callback, args, kwargs)) |
739 | + |
740 | + |
741 | +def _run_atstart(): |
742 | + '''Hook frameworks must invoke this before running the main hook body.''' |
743 | + global _atstart |
744 | + for callback, args, kwargs in _atstart: |
745 | + callback(*args, **kwargs) |
746 | + del _atstart[:] |
747 | + |
748 | + |
749 | +def _run_atexit(): |
750 | + '''Hook frameworks must invoke this after the main hook body has |
751 | + successfully completed. Do not invoke it if the hook fails.''' |
752 | + global _atexit |
753 | + for callback, args, kwargs in reversed(_atexit): |
754 | + callback(*args, **kwargs) |
755 | + del _atexit[:] |
756 | |
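
A short sketch of how a charm's hook script might use the new atstart()/atexit() callbacks; the hook name and log messages are illustrative only. Note that config().save() is now registered through atexit() implicitly by Config itself, so explicit save calls are no longer needed in the common case.

```python
# Hedged usage sketch; not part of this merge.
import sys

from charmhelpers.core import hookenv

hooks = hookenv.Hooks()

# Runs before the registered hook body (new atstart() helper in this sync).
hookenv.atstart(hookenv.log, 'Entering hook: {}'.format(hookenv.hook_name()))

# Runs after the hook body completes successfully, in reverse registration
# order; skipped if the hook raises or exits non-zero.
hookenv.atexit(hookenv.log, 'Hook finished cleanly')


@hooks.hook('config-changed')
def config_changed():
    hookenv.log('config-changed body')


if __name__ == '__main__':
    hooks.execute(sys.argv)
```
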
757 | === modified file 'hooks/charmhelpers/core/host.py' |
758 | --- hooks/charmhelpers/core/host.py 2015-06-19 15:09:05 +0000 |
759 | +++ hooks/charmhelpers/core/host.py 2015-07-16 20:32:27 +0000 |
760 | @@ -63,6 +63,36 @@ |
761 | return service_result |
762 | |
763 | |
764 | +def service_pause(service_name, init_dir=None): |
765 | + """Pause a system service. |
766 | + |
767 | + Stop it, and prevent it from starting again at boot.""" |
768 | + if init_dir is None: |
769 | + init_dir = "/etc/init" |
770 | + stopped = service_stop(service_name) |
771 | + # XXX: Support systemd too |
772 | + override_path = os.path.join( |
773 | + init_dir, '{}.conf.override'.format(service_name)) |
774 | + with open(override_path, 'w') as fh: |
775 | + fh.write("manual\n") |
776 | + return stopped |
777 | + |
778 | + |
779 | +def service_resume(service_name, init_dir=None): |
780 | + """Resume a system service. |
781 | + |
782 | + Reenable starting again at boot. Start the service""" |
783 | + # XXX: Support systemd too |
784 | + if init_dir is None: |
785 | + init_dir = "/etc/init" |
786 | + override_path = os.path.join( |
787 | + init_dir, '{}.conf.override'.format(service_name)) |
788 | + if os.path.exists(override_path): |
789 | + os.unlink(override_path) |
790 | + started = service_start(service_name) |
791 | + return started |
792 | + |
793 | + |
794 | def service(action, service_name): |
795 | """Control a system service""" |
796 | cmd = ['service', service_name, action] |
797 | @@ -140,11 +170,7 @@ |
798 | |
799 | def add_user_to_group(username, group): |
800 | """Add a user to a group""" |
801 | - cmd = [ |
802 | - 'gpasswd', '-a', |
803 | - username, |
804 | - group |
805 | - ] |
806 | + cmd = ['gpasswd', '-a', username, group] |
807 | log("Adding user {} to group {}".format(username, group)) |
808 | subprocess.check_call(cmd) |
809 | |
810 | |
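
A minimal sketch of the new service_pause()/service_resume() helpers, assuming an upstart-managed service named 'neutron-server' (as the XXX comments above note, systemd is not handled yet; the helpers only write /etc/init overrides):

```python
# Hedged usage sketch; not part of this merge.
from charmhelpers.core.host import service_pause, service_resume


def set_maintenance(paused):
    """Stop a service and keep it from starting at boot, or undo that."""
    if paused:
        # Stops the service and drops a '<name>.conf.override' with "manual"
        return service_pause('neutron-server')
    # Removes the override file (if present) and starts the service again
    return service_resume('neutron-server')
```
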
811 | === modified file 'hooks/charmhelpers/core/services/base.py' |
812 | --- hooks/charmhelpers/core/services/base.py 2015-06-09 09:58:16 +0000 |
813 | +++ hooks/charmhelpers/core/services/base.py 2015-07-16 20:32:27 +0000 |
814 | @@ -128,15 +128,18 @@ |
815 | """ |
816 | Handle the current hook by doing The Right Thing with the registered services. |
817 | """ |
818 | - hook_name = hookenv.hook_name() |
819 | - if hook_name == 'stop': |
820 | - self.stop_services() |
821 | - else: |
822 | - self.reconfigure_services() |
823 | - self.provide_data() |
824 | - cfg = hookenv.config() |
825 | - if cfg.implicit_save: |
826 | - cfg.save() |
827 | + hookenv._run_atstart() |
828 | + try: |
829 | + hook_name = hookenv.hook_name() |
830 | + if hook_name == 'stop': |
831 | + self.stop_services() |
832 | + else: |
833 | + self.reconfigure_services() |
834 | + self.provide_data() |
835 | + except SystemExit as x: |
836 | + if x.code is None or x.code == 0: |
837 | + hookenv._run_atexit() |
838 | + hookenv._run_atexit() |
839 | |
840 | def provide_data(self): |
841 | """ |
842 | |
843 | === modified file 'hooks/charmhelpers/core/services/helpers.py' |
844 | --- hooks/charmhelpers/core/services/helpers.py 2015-05-11 07:38:23 +0000 |
845 | +++ hooks/charmhelpers/core/services/helpers.py 2015-07-16 20:32:27 +0000 |
846 | @@ -239,12 +239,12 @@ |
847 | action. |
848 | |
849 | :param str source: The template source file, relative to |
850 | - `$CHARM_DIR/templates` |
851 | - |
852 | + `$CHARM_DIR/templates` |
853 | :param str target: The target to write the rendered template to |
854 | :param str owner: The owner of the rendered file |
855 | :param str group: The group of the rendered file |
856 | :param int perms: The permissions of the rendered file |
857 | + |
858 | """ |
859 | def __init__(self, source, target, |
860 | owner='root', group='root', perms=0o444): |
861 | |
862 | === modified file 'hooks/charmhelpers/fetch/__init__.py' |
863 | --- hooks/charmhelpers/fetch/__init__.py 2015-06-10 14:01:05 +0000 |
864 | +++ hooks/charmhelpers/fetch/__init__.py 2015-07-16 20:32:27 +0000 |
865 | @@ -215,19 +215,27 @@ |
866 | _run_apt_command(cmd, fatal) |
867 | |
868 | |
869 | +def apt_mark(packages, mark, fatal=False): |
870 | + """Flag one or more packages using apt-mark""" |
871 | + cmd = ['apt-mark', mark] |
872 | + if isinstance(packages, six.string_types): |
873 | + cmd.append(packages) |
874 | + else: |
875 | + cmd.extend(packages) |
876 | + log("Holding {}".format(packages)) |
877 | + |
878 | + if fatal: |
879 | + subprocess.check_call(cmd, universal_newlines=True) |
880 | + else: |
881 | + subprocess.call(cmd, universal_newlines=True) |
882 | + |
883 | + |
884 | def apt_hold(packages, fatal=False): |
885 | - """Hold one or more packages""" |
886 | - cmd = ['apt-mark', 'hold'] |
887 | - if isinstance(packages, six.string_types): |
888 | - cmd.append(packages) |
889 | - else: |
890 | - cmd.extend(packages) |
891 | - log("Holding {}".format(packages)) |
892 | - |
893 | - if fatal: |
894 | - subprocess.check_call(cmd) |
895 | - else: |
896 | - subprocess.call(cmd) |
897 | + return apt_mark(packages, 'hold', fatal=fatal) |
898 | + |
899 | + |
900 | +def apt_unhold(packages, fatal=False): |
901 | + return apt_mark(packages, 'unhold', fatal=fatal) |
902 | |
903 | |
904 | def add_source(source, key=None): |
905 | @@ -370,8 +378,9 @@ |
906 | for handler in handlers: |
907 | try: |
908 | installed_to = handler.install(source, *args, **kwargs) |
909 | - except UnhandledSource: |
910 | - pass |
911 | + except UnhandledSource as e: |
912 | + log('Install source attempt unsuccessful: {}'.format(e), |
913 | + level='WARNING') |
914 | if not installed_to: |
915 | raise UnhandledSource("No handler found for source {}".format(source)) |
916 | return installed_to |
917 | |
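
A hedged sketch of the apt_mark()-based hold/unhold helpers around a package upgrade; the package names are arbitrary examples and the surrounding upgrade flow is assumed, not prescribed by this sync:

```python
# Hedged usage sketch; not part of this merge.
from charmhelpers.fetch import apt_hold, apt_unhold, apt_upgrade

PKGS = ['neutron-server', 'neutron-plugin-ml2']

apt_hold(PKGS, fatal=True)        # runs: apt-mark hold <packages>
try:
    apt_upgrade(fatal=True, dist=True)
finally:
    apt_unhold(PKGS, fatal=True)  # runs: apt-mark unhold <packages>
```
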
918 | === modified file 'hooks/charmhelpers/fetch/archiveurl.py' |
919 | --- hooks/charmhelpers/fetch/archiveurl.py 2015-03-16 14:16:02 +0000 |
920 | +++ hooks/charmhelpers/fetch/archiveurl.py 2015-07-16 20:32:27 +0000 |
921 | @@ -77,6 +77,8 @@ |
922 | def can_handle(self, source): |
923 | url_parts = self.parse_url(source) |
924 | if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): |
925 | + # XXX: Why is this returning a boolean and a string? It's |
926 | + # doomed to fail since "bool(can_handle('foo://'))" will be True. |
927 | return "Wrong source type" |
928 | if get_archive_handler(self.base_url(source)): |
929 | return True |
930 | @@ -155,7 +157,11 @@ |
931 | else: |
932 | algorithms = hashlib.algorithms_available |
933 | if key in algorithms: |
934 | - check_hash(dld_file, value, key) |
935 | + if len(value) != 1: |
936 | + raise TypeError( |
937 | + "Expected 1 hash value, not %d" % len(value)) |
938 | + expected = value[0] |
939 | + check_hash(dld_file, expected, key) |
940 | if checksum: |
941 | check_hash(dld_file, checksum, hash_type) |
942 | return extract(dld_file, dest) |
943 | |
944 | === modified file 'hooks/charmhelpers/fetch/giturl.py' |
945 | --- hooks/charmhelpers/fetch/giturl.py 2015-06-10 14:01:05 +0000 |
946 | +++ hooks/charmhelpers/fetch/giturl.py 2015-07-16 20:32:27 +0000 |
947 | @@ -67,7 +67,7 @@ |
948 | try: |
949 | self.clone(source, dest_dir, branch, depth) |
950 | except GitCommandError as e: |
951 | - raise UnhandledSource(e.message) |
952 | + raise UnhandledSource(e) |
953 | except OSError as e: |
954 | raise UnhandledSource(e.strerror) |
955 | return dest_dir |
956 | |
957 | === modified file 'tests/charmhelpers/contrib/amulet/utils.py' |
958 | --- tests/charmhelpers/contrib/amulet/utils.py 2015-06-19 15:09:05 +0000 |
959 | +++ tests/charmhelpers/contrib/amulet/utils.py 2015-07-16 20:32:27 +0000 |
960 | @@ -14,6 +14,7 @@ |
961 | # You should have received a copy of the GNU Lesser General Public License |
962 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
963 | |
964 | +import amulet |
965 | import ConfigParser |
966 | import distro_info |
967 | import io |
968 | @@ -173,6 +174,11 @@ |
969 | |
970 | Verify that the specified section of the config file contains |
971 | the expected option key:value pairs. |
972 | + |
973 | + Compare expected dictionary data vs actual dictionary data. |
974 | + The values in the 'expected' dictionary can be strings, bools, ints, |
975 | + longs, or can be a function that evaluates a variable and returns a |
976 | + bool. |
977 | """ |
978 | self.log.debug('Validating config file data ({} in {} on {})' |
979 | '...'.format(section, config_file, |
980 | @@ -185,9 +191,20 @@ |
981 | for k in expected.keys(): |
982 | if not config.has_option(section, k): |
983 | return "section [{}] is missing option {}".format(section, k) |
984 | - if config.get(section, k) != expected[k]: |
985 | + |
986 | + actual = config.get(section, k) |
987 | + v = expected[k] |
988 | + if (isinstance(v, six.string_types) or |
989 | + isinstance(v, bool) or |
990 | + isinstance(v, six.integer_types)): |
991 | + # handle explicit values |
992 | + if actual != v: |
993 | + return "section [{}] {}:{} != expected {}:{}".format( |
994 | + section, k, actual, k, expected[k]) |
995 | + # handle function pointers, such as not_null or valid_ip |
996 | + elif not v(actual): |
997 | return "section [{}] {}:{} != expected {}:{}".format( |
998 | - section, k, config.get(section, k), k, expected[k]) |
999 | + section, k, actual, k, expected[k]) |
1000 | return None |
1001 | |
1002 | def _validate_dict_data(self, expected, actual): |
1003 | @@ -195,7 +212,7 @@ |
1004 | |
1005 | Compare expected dictionary data vs actual dictionary data. |
1006 | The values in the 'expected' dictionary can be strings, bools, ints, |
1007 | - longs, or can be a function that evaluate a variable and returns a |
1008 | + longs, or can be a function that evaluates a variable and returns a |
1009 | bool. |
1010 | """ |
1011 | self.log.debug('actual: {}'.format(repr(actual))) |
1012 | @@ -206,8 +223,10 @@ |
1013 | if (isinstance(v, six.string_types) or |
1014 | isinstance(v, bool) or |
1015 | isinstance(v, six.integer_types)): |
1016 | + # handle explicit values |
1017 | if v != actual[k]: |
1018 | return "{}:{}".format(k, actual[k]) |
1019 | + # handle function pointers, such as not_null or valid_ip |
1020 | elif not v(actual[k]): |
1021 | return "{}:{}".format(k, actual[k]) |
1022 | else: |
1023 | @@ -406,3 +425,109 @@ |
1024 | """Convert a relative file path to a file URL.""" |
1025 | _abs_path = os.path.abspath(file_rel_path) |
1026 | return urlparse.urlparse(_abs_path, scheme='file').geturl() |
1027 | + |
1028 | + def check_commands_on_units(self, commands, sentry_units): |
1029 | + """Check that all commands in a list exit zero on all |
1030 | + sentry units in a list. |
1031 | + |
1032 | + :param commands: list of bash commands |
1033 | + :param sentry_units: list of sentry unit pointers |
1034 | + :returns: None if successful; Failure message otherwise |
1035 | + """ |
1036 | + self.log.debug('Checking exit codes for {} commands on {} ' |
1037 | + 'sentry units...'.format(len(commands), |
1038 | + len(sentry_units))) |
1039 | + for sentry_unit in sentry_units: |
1040 | + for cmd in commands: |
1041 | + output, code = sentry_unit.run(cmd) |
1042 | + if code == 0: |
1043 | + self.log.debug('{} `{}` returned {} ' |
1044 | + '(OK)'.format(sentry_unit.info['unit_name'], |
1045 | + cmd, code)) |
1046 | + else: |
1047 | + return ('{} `{}` returned {} ' |
1048 | + '{}'.format(sentry_unit.info['unit_name'], |
1049 | + cmd, code, output)) |
1050 | + return None |
1051 | + |
1052 | + def get_process_id_list(self, sentry_unit, process_name): |
1053 | + """Get a list of process ID(s) from a single sentry juju unit |
1054 | + for a single process name. |
1055 | + |
1056 | + :param sentry_unit: Pointer to amulet sentry instance (juju unit) |
1057 | + :param process_name: Process name |
1058 | + :returns: List of process IDs |
1059 | + """ |
1060 | + cmd = 'pidof {}'.format(process_name) |
1061 | + output, code = sentry_unit.run(cmd) |
1062 | + if code != 0: |
1063 | + msg = ('{} `{}` returned {} ' |
1064 | + '{}'.format(sentry_unit.info['unit_name'], |
1065 | + cmd, code, output)) |
1066 | + amulet.raise_status(amulet.FAIL, msg=msg) |
1067 | + return str(output).split() |
1068 | + |
1069 | + def get_unit_process_ids(self, unit_processes): |
1070 | + """Construct a dict containing unit sentries, process names, and |
1071 | + process IDs.""" |
1072 | + pid_dict = {} |
1073 | + for sentry_unit, process_list in unit_processes.iteritems(): |
1074 | + pid_dict[sentry_unit] = {} |
1075 | + for process in process_list: |
1076 | + pids = self.get_process_id_list(sentry_unit, process) |
1077 | + pid_dict[sentry_unit].update({process: pids}) |
1078 | + return pid_dict |
1079 | + |
1080 | + def validate_unit_process_ids(self, expected, actual): |
1081 | + """Validate process id quantities for services on units.""" |
1082 | + self.log.debug('Checking units for running processes...') |
1083 | + self.log.debug('Expected PIDs: {}'.format(expected)) |
1084 | + self.log.debug('Actual PIDs: {}'.format(actual)) |
1085 | + |
1086 | + if len(actual) != len(expected): |
1087 | + return ('Unit count mismatch. expected, actual: {}, ' |
1088 | + '{} '.format(len(expected), len(actual))) |
1089 | + |
1090 | + for (e_sentry, e_proc_names) in expected.iteritems(): |
1091 | + e_sentry_name = e_sentry.info['unit_name'] |
1092 | + if e_sentry in actual.keys(): |
1093 | + a_proc_names = actual[e_sentry] |
1094 | + else: |
1095 | + return ('Expected sentry ({}) not found in actual dict data.' |
1096 | + '{}'.format(e_sentry_name, e_sentry)) |
1097 | + |
1098 | + if len(e_proc_names.keys()) != len(a_proc_names.keys()): |
1099 | + return ('Process name count mismatch. expected, actual: {}, ' |
1100 | + '{}'.format(len(expected), len(actual))) |
1101 | + |
1102 | + for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \ |
1103 | + zip(e_proc_names.items(), a_proc_names.items()): |
1104 | + if e_proc_name != a_proc_name: |
1105 | + return ('Process name mismatch. expected, actual: {}, ' |
1106 | + '{}'.format(e_proc_name, a_proc_name)) |
1107 | + |
1108 | + a_pids_length = len(a_pids) |
1109 | + if e_pids_length != a_pids_length: |
1110 | + return ('PID count mismatch. {} ({}) expected, actual: ' |
1111 | + '{}, {} ({})'.format(e_sentry_name, e_proc_name, |
1112 | + e_pids_length, a_pids_length, |
1113 | + a_pids)) |
1114 | + else: |
1115 | + self.log.debug('PID check OK: {} {} {}: ' |
1116 | + '{}'.format(e_sentry_name, e_proc_name, |
1117 | + e_pids_length, a_pids)) |
1118 | + return None |
1119 | + |
1120 | + def validate_list_of_identical_dicts(self, list_of_dicts): |
1121 | + """Check that all dicts within a list are identical.""" |
1122 | + hashes = [] |
1123 | + for _dict in list_of_dicts: |
1124 | + hashes.append(hash(frozenset(_dict.items()))) |
1125 | + |
1126 | + self.log.debug('Hashes: {}'.format(hashes)) |
1127 | + if len(set(hashes)) == 1: |
1128 | + self.log.debug('Dicts within list are identical') |
1129 | + else: |
1130 | + return 'Dicts within list are not identical' |
1131 | + |
1132 | + return None |
1133 | |
1134 | === modified file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py' |
1135 | --- tests/charmhelpers/contrib/openstack/amulet/deployment.py 2015-06-19 15:09:05 +0000 |
1136 | +++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2015-07-16 20:32:27 +0000 |
1137 | @@ -79,9 +79,9 @@ |
1138 | services.append(this_service) |
1139 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', |
1140 | 'ceph-osd', 'ceph-radosgw'] |
1141 | - # Openstack subordinate charms do not expose an origin option as that |
1142 | - # is controlled by the principle |
1143 | - ignore = ['neutron-openvswitch'] |
1144 | + # Most OpenStack subordinate charms do not expose an origin option |
1145 | + # as that is controlled by the principle. |
1146 | + ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch'] |
1147 | |
1148 | if self.openstack: |
1149 | for svc in services: |
1150 | @@ -148,3 +148,36 @@ |
1151 | return os_origin.split('%s-' % self.series)[1].split('/')[0] |
1152 | else: |
1153 | return releases[self.series] |
1154 | + |
1155 | + def get_ceph_expected_pools(self, radosgw=False): |
1156 | + """Return a list of expected ceph pools in a ceph + cinder + glance |
1157 | + test scenario, based on OpenStack release and whether ceph radosgw |
1158 | + is flagged as present or not.""" |
1159 | + |
1160 | + if self._get_openstack_release() >= self.trusty_kilo: |
1161 | + # Kilo or later |
1162 | + pools = [ |
1163 | + 'rbd', |
1164 | + 'cinder', |
1165 | + 'glance' |
1166 | + ] |
1167 | + else: |
1168 | + # Juno or earlier |
1169 | + pools = [ |
1170 | + 'data', |
1171 | + 'metadata', |
1172 | + 'rbd', |
1173 | + 'cinder', |
1174 | + 'glance' |
1175 | + ] |
1176 | + |
1177 | + if radosgw: |
1178 | + pools.extend([ |
1179 | + '.rgw.root', |
1180 | + '.rgw.control', |
1181 | + '.rgw', |
1182 | + '.rgw.gc', |
1183 | + '.users.uid' |
1184 | + ]) |
1185 | + |
1186 | + return pools |
1187 | |
1188 | === modified file 'tests/charmhelpers/contrib/openstack/amulet/utils.py' |
1189 | --- tests/charmhelpers/contrib/openstack/amulet/utils.py 2015-06-19 15:09:05 +0000 |
1190 | +++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2015-07-16 20:32:27 +0000 |
1191 | @@ -14,16 +14,20 @@ |
1192 | # You should have received a copy of the GNU Lesser General Public License |
1193 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
1194 | |
1195 | +import amulet |
1196 | +import json |
1197 | import logging |
1198 | import os |
1199 | import six |
1200 | import time |
1201 | import urllib |
1202 | |
1203 | +import cinderclient.v1.client as cinder_client |
1204 | import glanceclient.v1.client as glance_client |
1205 | import heatclient.v1.client as heat_client |
1206 | import keystoneclient.v2_0 as keystone_client |
1207 | import novaclient.v1_1.client as nova_client |
1208 | +import swiftclient |
1209 | |
1210 | from charmhelpers.contrib.amulet.utils import ( |
1211 | AmuletUtils |
1212 | @@ -171,6 +175,16 @@ |
1213 | self.log.debug('Checking if tenant exists ({})...'.format(tenant)) |
1214 | return tenant in [t.name for t in keystone.tenants.list()] |
1215 | |
1216 | + def authenticate_cinder_admin(self, keystone_sentry, username, |
1217 | + password, tenant): |
1218 | + """Authenticates admin user with cinder.""" |
1219 | + # NOTE(beisner): cinder python client doesn't accept tokens. |
1220 | + service_ip = \ |
1221 | + keystone_sentry.relation('shared-db', |
1222 | + 'mysql:shared-db')['private-address'] |
1223 | + ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8')) |
1224 | + return cinder_client.Client(username, password, tenant, ept) |
1225 | + |
1226 | def authenticate_keystone_admin(self, keystone_sentry, user, password, |
1227 | tenant): |
1228 | """Authenticates admin user with the keystone admin endpoint.""" |
1229 | @@ -212,9 +226,29 @@ |
1230 | return nova_client.Client(username=user, api_key=password, |
1231 | project_id=tenant, auth_url=ep) |
1232 | |
1233 | + def authenticate_swift_user(self, keystone, user, password, tenant): |
1234 | + """Authenticates a regular user with swift api.""" |
1235 | + self.log.debug('Authenticating swift user ({})...'.format(user)) |
1236 | + ep = keystone.service_catalog.url_for(service_type='identity', |
1237 | + endpoint_type='publicURL') |
1238 | + return swiftclient.Connection(authurl=ep, |
1239 | + user=user, |
1240 | + key=password, |
1241 | + tenant_name=tenant, |
1242 | + auth_version='2.0') |
1243 | + |
1244 | def create_cirros_image(self, glance, image_name): |
1245 | - """Download the latest cirros image and upload it to glance.""" |
1246 | - self.log.debug('Creating glance image ({})...'.format(image_name)) |
1247 | + """Download the latest cirros image and upload it to glance, |
1248 | + validate and return a resource pointer. |
1249 | + |
1250 | + :param glance: pointer to authenticated glance connection |
1251 | + :param image_name: display name for new image |
1252 | + :returns: glance image pointer |
1253 | + """ |
1254 | + self.log.debug('Creating glance cirros image ' |
1255 | + '({})...'.format(image_name)) |
1256 | + |
1257 | + # Download cirros image |
1258 | http_proxy = os.getenv('AMULET_HTTP_PROXY') |
1259 | self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) |
1260 | if http_proxy: |
1261 | @@ -223,33 +257,51 @@ |
1262 | else: |
1263 | opener = urllib.FancyURLopener() |
1264 | |
1265 | - f = opener.open("http://download.cirros-cloud.net/version/released") |
1266 | + f = opener.open('http://download.cirros-cloud.net/version/released') |
1267 | version = f.read().strip() |
1268 | - cirros_img = "cirros-{}-x86_64-disk.img".format(version) |
1269 | + cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) |
1270 | local_path = os.path.join('tests', cirros_img) |
1271 | |
1272 | if not os.path.exists(local_path): |
1273 | - cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", |
1274 | + cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', |
1275 | version, cirros_img) |
1276 | opener.retrieve(cirros_url, local_path) |
1277 | f.close() |
1278 | |
1279 | + # Create glance image |
1280 | with open(local_path) as f: |
1281 | image = glance.images.create(name=image_name, is_public=True, |
1282 | disk_format='qcow2', |
1283 | container_format='bare', data=f) |
1284 | - count = 1 |
1285 | - status = image.status |
1286 | - while status != 'active' and count < 10: |
1287 | - time.sleep(3) |
1288 | - image = glance.images.get(image.id) |
1289 | - status = image.status |
1290 | - self.log.debug('image status: {}'.format(status)) |
1291 | - count += 1 |
1292 | - |
1293 | - if status != 'active': |
1294 | - self.log.error('image creation timed out') |
1295 | - return None |
1296 | + |
1297 | + # Wait for image to reach active status |
1298 | + img_id = image.id |
1299 | + ret = self.resource_reaches_status(glance.images, img_id, |
1300 | + expected_stat='active', |
1301 | + msg='Image status wait') |
1302 | + if not ret: |
1303 | + msg = 'Glance image failed to reach expected state.' |
1304 | + amulet.raise_status(amulet.FAIL, msg=msg) |
1305 | + |
1306 | + # Re-validate new image |
1307 | + self.log.debug('Validating image attributes...') |
1308 | + val_img_name = glance.images.get(img_id).name |
1309 | + val_img_stat = glance.images.get(img_id).status |
1310 | + val_img_pub = glance.images.get(img_id).is_public |
1311 | + val_img_cfmt = glance.images.get(img_id).container_format |
1312 | + val_img_dfmt = glance.images.get(img_id).disk_format |
1313 | + msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' |
1314 | + 'container fmt:{} disk fmt:{}'.format( |
1315 | + val_img_name, val_img_pub, img_id, |
1316 | + val_img_stat, val_img_cfmt, val_img_dfmt)) |
1317 | + |
1318 | + if val_img_name == image_name and val_img_stat == 'active' \ |
1319 | + and val_img_pub is True and val_img_cfmt == 'bare' \ |
1320 | + and val_img_dfmt == 'qcow2': |
1321 | + self.log.debug(msg_attr) |
1322 | + else: |
1323 | + msg = ('Volume validation failed, {}'.format(msg_attr)) |
1324 | + amulet.raise_status(amulet.FAIL, msg=msg) |
1325 | |
1326 | return image |
1327 | |
1328 | @@ -260,22 +312,7 @@ |
1329 | self.log.warn('/!\\ DEPRECATION WARNING: use ' |
1330 | 'delete_resource instead of delete_image.') |
1331 | self.log.debug('Deleting glance image ({})...'.format(image)) |
1332 | - num_before = len(list(glance.images.list())) |
1333 | - glance.images.delete(image) |
1334 | - |
1335 | - count = 1 |
1336 | - num_after = len(list(glance.images.list())) |
1337 | - while num_after != (num_before - 1) and count < 10: |
1338 | - time.sleep(3) |
1339 | - num_after = len(list(glance.images.list())) |
1340 | - self.log.debug('number of images: {}'.format(num_after)) |
1341 | - count += 1 |
1342 | - |
1343 | - if num_after != (num_before - 1): |
1344 | - self.log.error('image deletion timed out') |
1345 | - return False |
1346 | - |
1347 | - return True |
1348 | + return self.delete_resource(glance.images, image, msg='glance image') |
1349 | |
1350 | def create_instance(self, nova, image_name, instance_name, flavor): |
1351 | """Create the specified instance.""" |
1352 | @@ -308,22 +345,8 @@ |
1353 | self.log.warn('/!\\ DEPRECATION WARNING: use ' |
1354 | 'delete_resource instead of delete_instance.') |
1355 | self.log.debug('Deleting instance ({})...'.format(instance)) |
1356 | - num_before = len(list(nova.servers.list())) |
1357 | - nova.servers.delete(instance) |
1358 | - |
1359 | - count = 1 |
1360 | - num_after = len(list(nova.servers.list())) |
1361 | - while num_after != (num_before - 1) and count < 10: |
1362 | - time.sleep(3) |
1363 | - num_after = len(list(nova.servers.list())) |
1364 | - self.log.debug('number of instances: {}'.format(num_after)) |
1365 | - count += 1 |
1366 | - |
1367 | - if num_after != (num_before - 1): |
1368 | - self.log.error('instance deletion timed out') |
1369 | - return False |
1370 | - |
1371 | - return True |
1372 | + return self.delete_resource(nova.servers, instance, |
1373 | + msg='nova instance') |
1374 | |
1375 | def create_or_get_keypair(self, nova, keypair_name="testkey"): |
1376 | """Create a new keypair, or return pointer if it already exists.""" |
1377 | @@ -339,6 +362,88 @@ |
1378 | _keypair = nova.keypairs.create(name=keypair_name) |
1379 | return _keypair |
1380 | |
1381 | + def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, |
1382 | + img_id=None, src_vol_id=None, snap_id=None): |
1383 | + """Create cinder volume, optionally from a glance image, OR |
1384 | + optionally as a clone of an existing volume, OR optionally |
1385 | + from a snapshot. Wait for the new volume status to reach |
1386 | + the expected status, validate and return a resource pointer. |
1387 | + |
1388 | + :param vol_name: cinder volume display name |
1389 | + :param vol_size: size in gigabytes |
1390 | + :param img_id: optional glance image id |
1391 | + :param src_vol_id: optional source volume id to clone |
1392 | + :param snap_id: optional snapshot id to use |
1393 | + :returns: cinder volume pointer |
1394 | + """ |
1395 | + # Handle parameter input and avoid impossible combinations |
1396 | + if img_id and not src_vol_id and not snap_id: |
1397 | + # Create volume from image |
1398 | + self.log.debug('Creating cinder volume from glance image...') |
1399 | + bootable = 'true' |
1400 | + elif src_vol_id and not img_id and not snap_id: |
1401 | + # Clone an existing volume |
1402 | + self.log.debug('Cloning cinder volume...') |
1403 | + bootable = cinder.volumes.get(src_vol_id).bootable |
1404 | + elif snap_id and not src_vol_id and not img_id: |
1405 | + # Create volume from snapshot |
1406 | + self.log.debug('Creating cinder volume from snapshot...') |
1407 | + snap = cinder.volume_snapshots.find(id=snap_id) |
1408 | + vol_size = snap.size |
1409 | + snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id |
1410 | + bootable = cinder.volumes.get(snap_vol_id).bootable |
1411 | + elif not img_id and not src_vol_id and not snap_id: |
1412 | + # Create volume |
1413 | + self.log.debug('Creating cinder volume...') |
1414 | + bootable = 'false' |
1415 | + else: |
1416 | + # Impossible combination of parameters |
1417 | + msg = ('Invalid method use - name:{} size:{} img_id:{} ' |
1418 | + 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, |
1419 | + img_id, src_vol_id, |
1420 | + snap_id)) |
1421 | + amulet.raise_status(amulet.FAIL, msg=msg) |
1422 | + |
1423 | + # Create new volume |
1424 | + try: |
1425 | + vol_new = cinder.volumes.create(display_name=vol_name, |
1426 | + imageRef=img_id, |
1427 | + size=vol_size, |
1428 | + source_volid=src_vol_id, |
1429 | + snapshot_id=snap_id) |
1430 | + vol_id = vol_new.id |
1431 | + except Exception as e: |
1432 | + msg = 'Failed to create volume: {}'.format(e) |
1433 | + amulet.raise_status(amulet.FAIL, msg=msg) |
1434 | + |
1435 | + # Wait for volume to reach available status |
1436 | + ret = self.resource_reaches_status(cinder.volumes, vol_id, |
1437 | + expected_stat="available", |
1438 | + msg="Volume status wait") |
1439 | + if not ret: |
1440 | + msg = 'Cinder volume failed to reach expected state.' |
1441 | + amulet.raise_status(amulet.FAIL, msg=msg) |
1442 | + |
1443 | + # Re-validate new volume |
1444 | + self.log.debug('Validating volume attributes...') |
1445 | + val_vol_name = cinder.volumes.get(vol_id).display_name |
1446 | + val_vol_boot = cinder.volumes.get(vol_id).bootable |
1447 | + val_vol_stat = cinder.volumes.get(vol_id).status |
1448 | + val_vol_size = cinder.volumes.get(vol_id).size |
1449 | + msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' |
1450 | + '{} size:{}'.format(val_vol_name, vol_id, |
1451 | + val_vol_stat, val_vol_boot, |
1452 | + val_vol_size)) |
1453 | + |
1454 | + if val_vol_boot == bootable and val_vol_stat == 'available' \ |
1455 | + and val_vol_name == vol_name and val_vol_size == vol_size: |
1456 | + self.log.debug(msg_attr) |
1457 | + else: |
1458 | + msg = ('Volume validation failed, {}'.format(msg_attr)) |
1459 | + amulet.raise_status(amulet.FAIL, msg=msg) |
1460 | + |
1461 | + return vol_new |
1462 | + |
1463 | def delete_resource(self, resource, resource_id, |
1464 | msg="resource", max_wait=120): |
1465 | """Delete one openstack resource, such as one instance, keypair, |
1466 | @@ -350,6 +455,8 @@ |
1467 | :param max_wait: maximum wait time in seconds |
1468 | :returns: True if successful, otherwise False |
1469 | """ |
1470 | + self.log.debug('Deleting OpenStack resource ' |
1471 | + '{} ({})'.format(resource_id, msg)) |
1472 | num_before = len(list(resource.list())) |
1473 | resource.delete(resource_id) |
1474 | |
1475 | @@ -411,3 +518,87 @@ |
1476 | self.log.debug('{} never reached expected status: ' |
1477 | '{}'.format(resource_id, expected_stat)) |
1478 | return False |
1479 | + |
1480 | + def get_ceph_osd_id_cmd(self, index): |
1481 | + """Produce a shell command that will return a ceph-osd id.""" |
1482 | + return ("`initctl list | grep 'ceph-osd ' | " |
1483 | + "awk 'NR=={} {{ print $2 }}' | " |
1484 | + "grep -o '[0-9]*'`".format(index + 1)) |
1485 | + |
1486 | + def get_ceph_pools(self, sentry_unit): |
1487 | + """Return a dict of ceph pools from a single ceph unit, with |
1488 | + pool name as keys, pool id as vals.""" |
1489 | + pools = {} |
1490 | + cmd = 'sudo ceph osd lspools' |
1491 | + output, code = sentry_unit.run(cmd) |
1492 | + if code != 0: |
1493 | + msg = ('{} `{}` returned {} ' |
1494 | + '{}'.format(sentry_unit.info['unit_name'], |
1495 | + cmd, code, output)) |
1496 | + amulet.raise_status(amulet.FAIL, msg=msg) |
1497 | + |
1498 | + # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, |
1499 | + for pool in str(output).split(','): |
1500 | + pool_id_name = pool.split(' ') |
1501 | + if len(pool_id_name) == 2: |
1502 | + pool_id = pool_id_name[0] |
1503 | + pool_name = pool_id_name[1] |
1504 | + pools[pool_name] = int(pool_id) |
1505 | + |
1506 | + self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], |
1507 | + pools)) |
1508 | + return pools |
1509 | + |
1510 | + def get_ceph_df(self, sentry_unit): |
1511 | + """Return dict of ceph df json output, including ceph pool state. |
1512 | + |
1513 | + :param sentry_unit: Pointer to amulet sentry instance (juju unit) |
1514 | + :returns: Dict of ceph df output |
1515 | + """ |
1516 | + cmd = 'sudo ceph df --format=json' |
1517 | + output, code = sentry_unit.run(cmd) |
1518 | + if code != 0: |
1519 | + msg = ('{} `{}` returned {} ' |
1520 | + '{}'.format(sentry_unit.info['unit_name'], |
1521 | + cmd, code, output)) |
1522 | + amulet.raise_status(amulet.FAIL, msg=msg) |
1523 | + return json.loads(output) |
1524 | + |
1525 | + def get_ceph_pool_sample(self, sentry_unit, pool_id=0): |
1526 | + """Take a sample of attributes of a ceph pool, returning ceph |
1527 | + pool name, object count and disk space used for the specified |
1528 | + pool ID number. |
1529 | + |
1530 | + :param sentry_unit: Pointer to amulet sentry instance (juju unit) |
1531 | + :param pool_id: Ceph pool ID |
1532 | + :returns: List of pool name, object count, kb disk space used |
1533 | + """ |
1534 | + df = self.get_ceph_df(sentry_unit) |
1535 | + pool_name = df['pools'][pool_id]['name'] |
1536 | + obj_count = df['pools'][pool_id]['stats']['objects'] |
1537 | + kb_used = df['pools'][pool_id]['stats']['kb_used'] |
1538 | + self.log.debug('Ceph {} pool (ID {}): {} objects, ' |
1539 | + '{} kb used'.format(pool_name, pool_id, |
1540 | + obj_count, kb_used)) |
1541 | + return pool_name, obj_count, kb_used |
1542 | + |
1543 | + def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): |
1544 | + """Validate ceph pool samples taken over time, such as pool |
1545 | + object counts or pool kb used, before adding, after adding, and |
1546 | + after deleting items which affect those pool attributes. The |
1547 | + 2nd element is expected to be greater than the 1st; 3rd is expected |
1548 | + to be less than the 2nd. |
1549 | + |
1550 | + :param samples: List containing 3 data samples |
1551 | + :param sample_type: String for logging and usage context |
1552 | + :returns: None if successful, Failure message otherwise |
1553 | + """ |
1554 | + original, created, deleted = range(3) |
1555 | + if samples[created] <= samples[original] or \ |
1556 | + samples[deleted] >= samples[created]: |
1557 | + return ('Ceph {} samples ({}) ' |
1558 | + 'unexpected.'.format(sample_type, samples)) |
1559 | + else: |
1560 | + self.log.debug('Ceph {} samples (OK): ' |
1561 | + '{}'.format(sample_type, samples)) |
1562 | + return None |
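
For reviewers unfamiliar with the new helpers, here is a minimal usage sketch (not part of this diff) of how a charm's amulet tests might drive create_cinder_volume, delete_resource and the ceph pool sampling helpers together. The cinder client and ceph sentry handles passed in, and the presence of a 'cinder' pool, are assumptions for the sketch only:

    # Sketch only: exercising the new helpers from a charm amulet test.
    # The cinder client, the ceph sentry unit and the 'cinder' pool name
    # are assumed here; they are not defined by this diff.
    import logging

    from charmhelpers.contrib.openstack.amulet.utils import OpenStackAmuletUtils

    u = OpenStackAmuletUtils(logging.DEBUG)


    def check_cinder_volume_lifecycle(cinder, ceph_sentry):
        """Create and delete a cinder volume, checking that the backing
        ceph pool's object count rises after create and falls after delete."""
        pools = u.get_ceph_pools(ceph_sentry)
        pool_id = pools['cinder']  # assumed pool name for this sketch

        obj_samples = []
        obj_samples.append(u.get_ceph_pool_sample(ceph_sentry, pool_id)[1])

        # Blocks until the volume reaches 'available', or raises amulet.FAIL.
        vol = u.create_cinder_volume(cinder, vol_name='demo-vol', vol_size=1)
        obj_samples.append(u.get_ceph_pool_sample(ceph_sentry, pool_id)[1])

        # Polls the resource list until the volume is gone (max_wait=120s default).
        assert u.delete_resource(cinder.volumes, vol.id, msg='cinder volume')
        obj_samples.append(u.get_ceph_pool_sample(ceph_sentry, pool_id)[1])

        # Expects samples[1] > samples[0] and samples[2] < samples[1].
        failure = u.validate_ceph_pool_samples(obj_samples, 'cinder pool objects')
        assert failure is None, failure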
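
Note that get_ceph_osd_id_cmd only returns a backtick-quoted shell fragment; it is meant to be interpolated into a command run on a ceph-osd unit. A hedged example of that pattern, with u an OpenStackAmuletUtils instance as above (the upstart restart command and the ceph_osd_sentry handle are assumptions for trusty-era units, not something this diff adds):

    # Sketch only: interpolate the backtick fragment into a remote command.
    # ceph_osd_sentry is an amulet sentry for a ceph-osd machine (assumed).
    osd_id_fragment = u.get_ceph_osd_id_cmd(0)  # first 'ceph-osd' upstart job
    cmd = 'sudo restart ceph-osd id={}'.format(osd_id_fragment)
    output, code = ceph_osd_sentry.run(cmd)
    if code != 0:
        raise RuntimeError('ceph-osd restart failed: {}'.format(output))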
charm_lint_check #6288 neutron-api-next for corey.bryant mp265051
LINT OK: passed
Build: http://10.245.162.77:8080/job/charm_lint_check/6288/