Merge lp:~narindergupta/charms/trusty/neutron-api-odl/liberty into lp:~openstack-charmers/charms/trusty/neutron-api-odl/vpp
- Trusty Tahr (14.04)
- liberty
- Merge into vpp
Proposed by
Narinder Gupta
Status: | Needs review |
---|---|
Proposed branch: | lp:~narindergupta/charms/trusty/neutron-api-odl/liberty |
Merge into: | lp:~openstack-charmers/charms/trusty/neutron-api-odl/vpp |
Diff against target: |
3668 lines (+2724/-235) 18 files modified
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+38/-4) hooks/charmhelpers/contrib/openstack/amulet/utils.py (+361/-51) hooks/charmhelpers/contrib/openstack/context.py (+47/-34) hooks/charmhelpers/contrib/openstack/templates/ceph.conf (+6/-6) hooks/charmhelpers/contrib/openstack/templating.py (+2/-4) hooks/charmhelpers/contrib/openstack/utils.py (+79/-25) hooks/charmhelpers/contrib/python/packages.py (+2/-0) hooks/charmhelpers/contrib/storage/linux/ceph.py (+6/-6) hooks/charmhelpers/contrib/storage/linux/utils.py (+4/-3) hooks/charmhelpers/core/hookenv.py (+192/-40) hooks/charmhelpers/core/host.py (+32/-7) hooks/charmhelpers/core/hugepage.py (+11/-13) hooks/charmhelpers/core/services/base.py (+12/-9) hooks/charmhelpers/core/unitdata.py (+61/-17) hooks/charmhelpers/fetch/__init__.py (+31/-14) hooks/charmhelpers/fetch/archiveurl.py (+7/-1) hooks/charmhelpers/fetch/giturl.py (+1/-1) vpp.patch (+1832/-0) |
To merge this branch: | bzr merge lp:~narindergupta/charms/trusty/neutron-api-odl/liberty |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Liam Young (community) | Needs Information | ||
Review via email: mp+276491@code.launchpad.net |
Commit message
Description of the change
Updates with a charm-helpers sync, modified as per the Kilo changes.
To post a comment you must log in.
Revision history for this message
Narinder Gupta (narindergupta) wrote : | # |
> What is the vpp.patch included in this mp, was it included in error?
This patch only syncs the charm helpers so that Liberty can also be deployed.
Unmerged revisions
- 5. By Narinder Gupta
-
add support for liberty after charmer sync
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py' |
2 | --- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-06-24 12:22:08 +0000 |
3 | +++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-11-03 04:19:02 +0000 |
4 | @@ -44,7 +44,7 @@ |
5 | Determine if the local branch being tested is derived from its |
6 | stable or next (dev) branch, and based on this, use the corresonding |
7 | stable or next branches for the other_services.""" |
8 | - base_charms = ['mysql', 'mongodb'] |
9 | + base_charms = ['mysql', 'mongodb', 'nrpe'] |
10 | |
11 | if self.series in ['precise', 'trusty']: |
12 | base_series = self.series |
13 | @@ -83,9 +83,10 @@ |
14 | services.append(this_service) |
15 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', |
16 | 'ceph-osd', 'ceph-radosgw'] |
17 | - # Openstack subordinate charms do not expose an origin option as that |
18 | - # is controlled by the principle |
19 | - ignore = ['neutron-openvswitch', 'cisco-vpp'] |
20 | + # Most OpenStack subordinate charms do not expose an origin option |
21 | + # as that is controlled by the principle. |
22 | + ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', |
23 | + 'cisco-vpp', 'odl-controller'] |
24 | |
25 | if self.openstack: |
26 | for svc in services: |
27 | @@ -152,3 +153,36 @@ |
28 | return os_origin.split('%s-' % self.series)[1].split('/')[0] |
29 | else: |
30 | return releases[self.series] |
31 | + |
32 | + def get_ceph_expected_pools(self, radosgw=False): |
33 | + """Return a list of expected ceph pools in a ceph + cinder + glance |
34 | + test scenario, based on OpenStack release and whether ceph radosgw |
35 | + is flagged as present or not.""" |
36 | + |
37 | + if self._get_openstack_release() >= self.trusty_kilo: |
38 | + # Kilo or later |
39 | + pools = [ |
40 | + 'rbd', |
41 | + 'cinder', |
42 | + 'glance' |
43 | + ] |
44 | + else: |
45 | + # Juno or earlier |
46 | + pools = [ |
47 | + 'data', |
48 | + 'metadata', |
49 | + 'rbd', |
50 | + 'cinder', |
51 | + 'glance' |
52 | + ] |
53 | + |
54 | + if radosgw: |
55 | + pools.extend([ |
56 | + '.rgw.root', |
57 | + '.rgw.control', |
58 | + '.rgw', |
59 | + '.rgw.gc', |
60 | + '.users.uid' |
61 | + ]) |
62 | + |
63 | + return pools |
64 | |
65 | === modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py' |
66 | --- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-06-24 12:22:08 +0000 |
67 | +++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-11-03 04:19:02 +0000 |
68 | @@ -14,16 +14,20 @@ |
69 | # You should have received a copy of the GNU Lesser General Public License |
70 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
71 | |
72 | +import amulet |
73 | +import json |
74 | import logging |
75 | import os |
76 | +import six |
77 | import time |
78 | import urllib |
79 | |
80 | +import cinderclient.v1.client as cinder_client |
81 | import glanceclient.v1.client as glance_client |
82 | +import heatclient.v1.client as heat_client |
83 | import keystoneclient.v2_0 as keystone_client |
84 | import novaclient.v1_1.client as nova_client |
85 | - |
86 | -import six |
87 | +import swiftclient |
88 | |
89 | from charmhelpers.contrib.amulet.utils import ( |
90 | AmuletUtils |
91 | @@ -37,7 +41,7 @@ |
92 | """OpenStack amulet utilities. |
93 | |
94 | This class inherits from AmuletUtils and has additional support |
95 | - that is specifically for use by OpenStack charms. |
96 | + that is specifically for use by OpenStack charm tests. |
97 | """ |
98 | |
99 | def __init__(self, log_level=ERROR): |
100 | @@ -51,6 +55,8 @@ |
101 | Validate actual endpoint data vs expected endpoint data. The ports |
102 | are used to find the matching endpoint. |
103 | """ |
104 | + self.log.debug('Validating endpoint data...') |
105 | + self.log.debug('actual: {}'.format(repr(endpoints))) |
106 | found = False |
107 | for ep in endpoints: |
108 | self.log.debug('endpoint: {}'.format(repr(ep))) |
109 | @@ -77,6 +83,7 @@ |
110 | Validate a list of actual service catalog endpoints vs a list of |
111 | expected service catalog endpoints. |
112 | """ |
113 | + self.log.debug('Validating service catalog endpoint data...') |
114 | self.log.debug('actual: {}'.format(repr(actual))) |
115 | for k, v in six.iteritems(expected): |
116 | if k in actual: |
117 | @@ -93,6 +100,7 @@ |
118 | Validate a list of actual tenant data vs list of expected tenant |
119 | data. |
120 | """ |
121 | + self.log.debug('Validating tenant data...') |
122 | self.log.debug('actual: {}'.format(repr(actual))) |
123 | for e in expected: |
124 | found = False |
125 | @@ -114,6 +122,7 @@ |
126 | Validate a list of actual role data vs a list of expected role |
127 | data. |
128 | """ |
129 | + self.log.debug('Validating role data...') |
130 | self.log.debug('actual: {}'.format(repr(actual))) |
131 | for e in expected: |
132 | found = False |
133 | @@ -134,6 +143,7 @@ |
134 | Validate a list of actual user data vs a list of expected user |
135 | data. |
136 | """ |
137 | + self.log.debug('Validating user data...') |
138 | self.log.debug('actual: {}'.format(repr(actual))) |
139 | for e in expected: |
140 | found = False |
141 | @@ -155,17 +165,30 @@ |
142 | |
143 | Validate a list of actual flavors vs a list of expected flavors. |
144 | """ |
145 | + self.log.debug('Validating flavor data...') |
146 | self.log.debug('actual: {}'.format(repr(actual))) |
147 | act = [a.name for a in actual] |
148 | return self._validate_list_data(expected, act) |
149 | |
150 | def tenant_exists(self, keystone, tenant): |
151 | """Return True if tenant exists.""" |
152 | + self.log.debug('Checking if tenant exists ({})...'.format(tenant)) |
153 | return tenant in [t.name for t in keystone.tenants.list()] |
154 | |
155 | + def authenticate_cinder_admin(self, keystone_sentry, username, |
156 | + password, tenant): |
157 | + """Authenticates admin user with cinder.""" |
158 | + # NOTE(beisner): cinder python client doesn't accept tokens. |
159 | + service_ip = \ |
160 | + keystone_sentry.relation('shared-db', |
161 | + 'mysql:shared-db')['private-address'] |
162 | + ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8')) |
163 | + return cinder_client.Client(username, password, tenant, ept) |
164 | + |
165 | def authenticate_keystone_admin(self, keystone_sentry, user, password, |
166 | tenant): |
167 | """Authenticates admin user with the keystone admin endpoint.""" |
168 | + self.log.debug('Authenticating keystone admin...') |
169 | unit = keystone_sentry |
170 | service_ip = unit.relation('shared-db', |
171 | 'mysql:shared-db')['private-address'] |
172 | @@ -175,6 +198,7 @@ |
173 | |
174 | def authenticate_keystone_user(self, keystone, user, password, tenant): |
175 | """Authenticates a regular user with the keystone public endpoint.""" |
176 | + self.log.debug('Authenticating keystone user ({})...'.format(user)) |
177 | ep = keystone.service_catalog.url_for(service_type='identity', |
178 | endpoint_type='publicURL') |
179 | return keystone_client.Client(username=user, password=password, |
180 | @@ -182,19 +206,49 @@ |
181 | |
182 | def authenticate_glance_admin(self, keystone): |
183 | """Authenticates admin user with glance.""" |
184 | + self.log.debug('Authenticating glance admin...') |
185 | ep = keystone.service_catalog.url_for(service_type='image', |
186 | endpoint_type='adminURL') |
187 | return glance_client.Client(ep, token=keystone.auth_token) |
188 | |
189 | + def authenticate_heat_admin(self, keystone): |
190 | + """Authenticates the admin user with heat.""" |
191 | + self.log.debug('Authenticating heat admin...') |
192 | + ep = keystone.service_catalog.url_for(service_type='orchestration', |
193 | + endpoint_type='publicURL') |
194 | + return heat_client.Client(endpoint=ep, token=keystone.auth_token) |
195 | + |
196 | def authenticate_nova_user(self, keystone, user, password, tenant): |
197 | """Authenticates a regular user with nova-api.""" |
198 | + self.log.debug('Authenticating nova user ({})...'.format(user)) |
199 | ep = keystone.service_catalog.url_for(service_type='identity', |
200 | endpoint_type='publicURL') |
201 | return nova_client.Client(username=user, api_key=password, |
202 | project_id=tenant, auth_url=ep) |
203 | |
204 | + def authenticate_swift_user(self, keystone, user, password, tenant): |
205 | + """Authenticates a regular user with swift api.""" |
206 | + self.log.debug('Authenticating swift user ({})...'.format(user)) |
207 | + ep = keystone.service_catalog.url_for(service_type='identity', |
208 | + endpoint_type='publicURL') |
209 | + return swiftclient.Connection(authurl=ep, |
210 | + user=user, |
211 | + key=password, |
212 | + tenant_name=tenant, |
213 | + auth_version='2.0') |
214 | + |
215 | def create_cirros_image(self, glance, image_name): |
216 | - """Download the latest cirros image and upload it to glance.""" |
217 | + """Download the latest cirros image and upload it to glance, |
218 | + validate and return a resource pointer. |
219 | + |
220 | + :param glance: pointer to authenticated glance connection |
221 | + :param image_name: display name for new image |
222 | + :returns: glance image pointer |
223 | + """ |
224 | + self.log.debug('Creating glance cirros image ' |
225 | + '({})...'.format(image_name)) |
226 | + |
227 | + # Download cirros image |
228 | http_proxy = os.getenv('AMULET_HTTP_PROXY') |
229 | self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) |
230 | if http_proxy: |
231 | @@ -203,57 +257,67 @@ |
232 | else: |
233 | opener = urllib.FancyURLopener() |
234 | |
235 | - f = opener.open("http://download.cirros-cloud.net/version/released") |
236 | + f = opener.open('http://download.cirros-cloud.net/version/released') |
237 | version = f.read().strip() |
238 | - cirros_img = "cirros-{}-x86_64-disk.img".format(version) |
239 | + cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) |
240 | local_path = os.path.join('tests', cirros_img) |
241 | |
242 | if not os.path.exists(local_path): |
243 | - cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", |
244 | + cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', |
245 | version, cirros_img) |
246 | opener.retrieve(cirros_url, local_path) |
247 | f.close() |
248 | |
249 | + # Create glance image |
250 | with open(local_path) as f: |
251 | image = glance.images.create(name=image_name, is_public=True, |
252 | disk_format='qcow2', |
253 | container_format='bare', data=f) |
254 | - count = 1 |
255 | - status = image.status |
256 | - while status != 'active' and count < 10: |
257 | - time.sleep(3) |
258 | - image = glance.images.get(image.id) |
259 | - status = image.status |
260 | - self.log.debug('image status: {}'.format(status)) |
261 | - count += 1 |
262 | - |
263 | - if status != 'active': |
264 | - self.log.error('image creation timed out') |
265 | - return None |
266 | + |
267 | + # Wait for image to reach active status |
268 | + img_id = image.id |
269 | + ret = self.resource_reaches_status(glance.images, img_id, |
270 | + expected_stat='active', |
271 | + msg='Image status wait') |
272 | + if not ret: |
273 | + msg = 'Glance image failed to reach expected state.' |
274 | + amulet.raise_status(amulet.FAIL, msg=msg) |
275 | + |
276 | + # Re-validate new image |
277 | + self.log.debug('Validating image attributes...') |
278 | + val_img_name = glance.images.get(img_id).name |
279 | + val_img_stat = glance.images.get(img_id).status |
280 | + val_img_pub = glance.images.get(img_id).is_public |
281 | + val_img_cfmt = glance.images.get(img_id).container_format |
282 | + val_img_dfmt = glance.images.get(img_id).disk_format |
283 | + msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' |
284 | + 'container fmt:{} disk fmt:{}'.format( |
285 | + val_img_name, val_img_pub, img_id, |
286 | + val_img_stat, val_img_cfmt, val_img_dfmt)) |
287 | + |
288 | + if val_img_name == image_name and val_img_stat == 'active' \ |
289 | + and val_img_pub is True and val_img_cfmt == 'bare' \ |
290 | + and val_img_dfmt == 'qcow2': |
291 | + self.log.debug(msg_attr) |
292 | + else: |
293 | + msg = ('Volume validation failed, {}'.format(msg_attr)) |
294 | + amulet.raise_status(amulet.FAIL, msg=msg) |
295 | |
296 | return image |
297 | |
298 | def delete_image(self, glance, image): |
299 | """Delete the specified image.""" |
300 | - num_before = len(list(glance.images.list())) |
301 | - glance.images.delete(image) |
302 | - |
303 | - count = 1 |
304 | - num_after = len(list(glance.images.list())) |
305 | - while num_after != (num_before - 1) and count < 10: |
306 | - time.sleep(3) |
307 | - num_after = len(list(glance.images.list())) |
308 | - self.log.debug('number of images: {}'.format(num_after)) |
309 | - count += 1 |
310 | - |
311 | - if num_after != (num_before - 1): |
312 | - self.log.error('image deletion timed out') |
313 | - return False |
314 | - |
315 | - return True |
316 | + |
317 | + # /!\ DEPRECATION WARNING |
318 | + self.log.warn('/!\\ DEPRECATION WARNING: use ' |
319 | + 'delete_resource instead of delete_image.') |
320 | + self.log.debug('Deleting glance image ({})...'.format(image)) |
321 | + return self.delete_resource(glance.images, image, msg='glance image') |
322 | |
323 | def create_instance(self, nova, image_name, instance_name, flavor): |
324 | """Create the specified instance.""" |
325 | + self.log.debug('Creating instance ' |
326 | + '({}|{}|{})'.format(instance_name, image_name, flavor)) |
327 | image = nova.images.find(name=image_name) |
328 | flavor = nova.flavors.find(name=flavor) |
329 | instance = nova.servers.create(name=instance_name, image=image, |
330 | @@ -276,19 +340,265 @@ |
331 | |
332 | def delete_instance(self, nova, instance): |
333 | """Delete the specified instance.""" |
334 | - num_before = len(list(nova.servers.list())) |
335 | - nova.servers.delete(instance) |
336 | - |
337 | - count = 1 |
338 | - num_after = len(list(nova.servers.list())) |
339 | - while num_after != (num_before - 1) and count < 10: |
340 | - time.sleep(3) |
341 | - num_after = len(list(nova.servers.list())) |
342 | - self.log.debug('number of instances: {}'.format(num_after)) |
343 | - count += 1 |
344 | - |
345 | - if num_after != (num_before - 1): |
346 | - self.log.error('instance deletion timed out') |
347 | - return False |
348 | - |
349 | - return True |
350 | + |
351 | + # /!\ DEPRECATION WARNING |
352 | + self.log.warn('/!\\ DEPRECATION WARNING: use ' |
353 | + 'delete_resource instead of delete_instance.') |
354 | + self.log.debug('Deleting instance ({})...'.format(instance)) |
355 | + return self.delete_resource(nova.servers, instance, |
356 | + msg='nova instance') |
357 | + |
358 | + def create_or_get_keypair(self, nova, keypair_name="testkey"): |
359 | + """Create a new keypair, or return pointer if it already exists.""" |
360 | + try: |
361 | + _keypair = nova.keypairs.get(keypair_name) |
362 | + self.log.debug('Keypair ({}) already exists, ' |
363 | + 'using it.'.format(keypair_name)) |
364 | + return _keypair |
365 | + except: |
366 | + self.log.debug('Keypair ({}) does not exist, ' |
367 | + 'creating it.'.format(keypair_name)) |
368 | + |
369 | + _keypair = nova.keypairs.create(name=keypair_name) |
370 | + return _keypair |
371 | + |
372 | + def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, |
373 | + img_id=None, src_vol_id=None, snap_id=None): |
374 | + """Create cinder volume, optionally from a glance image, OR |
375 | + optionally as a clone of an existing volume, OR optionally |
376 | + from a snapshot. Wait for the new volume status to reach |
377 | + the expected status, validate and return a resource pointer. |
378 | + |
379 | + :param vol_name: cinder volume display name |
380 | + :param vol_size: size in gigabytes |
381 | + :param img_id: optional glance image id |
382 | + :param src_vol_id: optional source volume id to clone |
383 | + :param snap_id: optional snapshot id to use |
384 | + :returns: cinder volume pointer |
385 | + """ |
386 | + # Handle parameter input and avoid impossible combinations |
387 | + if img_id and not src_vol_id and not snap_id: |
388 | + # Create volume from image |
389 | + self.log.debug('Creating cinder volume from glance image...') |
390 | + bootable = 'true' |
391 | + elif src_vol_id and not img_id and not snap_id: |
392 | + # Clone an existing volume |
393 | + self.log.debug('Cloning cinder volume...') |
394 | + bootable = cinder.volumes.get(src_vol_id).bootable |
395 | + elif snap_id and not src_vol_id and not img_id: |
396 | + # Create volume from snapshot |
397 | + self.log.debug('Creating cinder volume from snapshot...') |
398 | + snap = cinder.volume_snapshots.find(id=snap_id) |
399 | + vol_size = snap.size |
400 | + snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id |
401 | + bootable = cinder.volumes.get(snap_vol_id).bootable |
402 | + elif not img_id and not src_vol_id and not snap_id: |
403 | + # Create volume |
404 | + self.log.debug('Creating cinder volume...') |
405 | + bootable = 'false' |
406 | + else: |
407 | + # Impossible combination of parameters |
408 | + msg = ('Invalid method use - name:{} size:{} img_id:{} ' |
409 | + 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, |
410 | + img_id, src_vol_id, |
411 | + snap_id)) |
412 | + amulet.raise_status(amulet.FAIL, msg=msg) |
413 | + |
414 | + # Create new volume |
415 | + try: |
416 | + vol_new = cinder.volumes.create(display_name=vol_name, |
417 | + imageRef=img_id, |
418 | + size=vol_size, |
419 | + source_volid=src_vol_id, |
420 | + snapshot_id=snap_id) |
421 | + vol_id = vol_new.id |
422 | + except Exception as e: |
423 | + msg = 'Failed to create volume: {}'.format(e) |
424 | + amulet.raise_status(amulet.FAIL, msg=msg) |
425 | + |
426 | + # Wait for volume to reach available status |
427 | + ret = self.resource_reaches_status(cinder.volumes, vol_id, |
428 | + expected_stat="available", |
429 | + msg="Volume status wait") |
430 | + if not ret: |
431 | + msg = 'Cinder volume failed to reach expected state.' |
432 | + amulet.raise_status(amulet.FAIL, msg=msg) |
433 | + |
434 | + # Re-validate new volume |
435 | + self.log.debug('Validating volume attributes...') |
436 | + val_vol_name = cinder.volumes.get(vol_id).display_name |
437 | + val_vol_boot = cinder.volumes.get(vol_id).bootable |
438 | + val_vol_stat = cinder.volumes.get(vol_id).status |
439 | + val_vol_size = cinder.volumes.get(vol_id).size |
440 | + msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' |
441 | + '{} size:{}'.format(val_vol_name, vol_id, |
442 | + val_vol_stat, val_vol_boot, |
443 | + val_vol_size)) |
444 | + |
445 | + if val_vol_boot == bootable and val_vol_stat == 'available' \ |
446 | + and val_vol_name == vol_name and val_vol_size == vol_size: |
447 | + self.log.debug(msg_attr) |
448 | + else: |
449 | + msg = ('Volume validation failed, {}'.format(msg_attr)) |
450 | + amulet.raise_status(amulet.FAIL, msg=msg) |
451 | + |
452 | + return vol_new |
453 | + |
454 | + def delete_resource(self, resource, resource_id, |
455 | + msg="resource", max_wait=120): |
456 | + """Delete one openstack resource, such as one instance, keypair, |
457 | + image, volume, stack, etc., and confirm deletion within max wait time. |
458 | + |
459 | + :param resource: pointer to os resource type, ex:glance_client.images |
460 | + :param resource_id: unique name or id for the openstack resource |
461 | + :param msg: text to identify purpose in logging |
462 | + :param max_wait: maximum wait time in seconds |
463 | + :returns: True if successful, otherwise False |
464 | + """ |
465 | + self.log.debug('Deleting OpenStack resource ' |
466 | + '{} ({})'.format(resource_id, msg)) |
467 | + num_before = len(list(resource.list())) |
468 | + resource.delete(resource_id) |
469 | + |
470 | + tries = 0 |
471 | + num_after = len(list(resource.list())) |
472 | + while num_after != (num_before - 1) and tries < (max_wait / 4): |
473 | + self.log.debug('{} delete check: ' |
474 | + '{} [{}:{}] {}'.format(msg, tries, |
475 | + num_before, |
476 | + num_after, |
477 | + resource_id)) |
478 | + time.sleep(4) |
479 | + num_after = len(list(resource.list())) |
480 | + tries += 1 |
481 | + |
482 | + self.log.debug('{}: expected, actual count = {}, ' |
483 | + '{}'.format(msg, num_before - 1, num_after)) |
484 | + |
485 | + if num_after == (num_before - 1): |
486 | + return True |
487 | + else: |
488 | + self.log.error('{} delete timed out'.format(msg)) |
489 | + return False |
490 | + |
491 | + def resource_reaches_status(self, resource, resource_id, |
492 | + expected_stat='available', |
493 | + msg='resource', max_wait=120): |
494 | + """Wait for an openstack resources status to reach an |
495 | + expected status within a specified time. Useful to confirm that |
496 | + nova instances, cinder vols, snapshots, glance images, heat stacks |
497 | + and other resources eventually reach the expected status. |
498 | + |
499 | + :param resource: pointer to os resource type, ex: heat_client.stacks |
500 | + :param resource_id: unique id for the openstack resource |
501 | + :param expected_stat: status to expect resource to reach |
502 | + :param msg: text to identify purpose in logging |
503 | + :param max_wait: maximum wait time in seconds |
504 | + :returns: True if successful, False if status is not reached |
505 | + """ |
506 | + |
507 | + tries = 0 |
508 | + resource_stat = resource.get(resource_id).status |
509 | + while resource_stat != expected_stat and tries < (max_wait / 4): |
510 | + self.log.debug('{} status check: ' |
511 | + '{} [{}:{}] {}'.format(msg, tries, |
512 | + resource_stat, |
513 | + expected_stat, |
514 | + resource_id)) |
515 | + time.sleep(4) |
516 | + resource_stat = resource.get(resource_id).status |
517 | + tries += 1 |
518 | + |
519 | + self.log.debug('{}: expected, actual status = {}, ' |
520 | + '{}'.format(msg, resource_stat, expected_stat)) |
521 | + |
522 | + if resource_stat == expected_stat: |
523 | + return True |
524 | + else: |
525 | + self.log.debug('{} never reached expected status: ' |
526 | + '{}'.format(resource_id, expected_stat)) |
527 | + return False |
528 | + |
529 | + def get_ceph_osd_id_cmd(self, index): |
530 | + """Produce a shell command that will return a ceph-osd id.""" |
531 | + return ("`initctl list | grep 'ceph-osd ' | " |
532 | + "awk 'NR=={} {{ print $2 }}' | " |
533 | + "grep -o '[0-9]*'`".format(index + 1)) |
534 | + |
535 | + def get_ceph_pools(self, sentry_unit): |
536 | + """Return a dict of ceph pools from a single ceph unit, with |
537 | + pool name as keys, pool id as vals.""" |
538 | + pools = {} |
539 | + cmd = 'sudo ceph osd lspools' |
540 | + output, code = sentry_unit.run(cmd) |
541 | + if code != 0: |
542 | + msg = ('{} `{}` returned {} ' |
543 | + '{}'.format(sentry_unit.info['unit_name'], |
544 | + cmd, code, output)) |
545 | + amulet.raise_status(amulet.FAIL, msg=msg) |
546 | + |
547 | + # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, |
548 | + for pool in str(output).split(','): |
549 | + pool_id_name = pool.split(' ') |
550 | + if len(pool_id_name) == 2: |
551 | + pool_id = pool_id_name[0] |
552 | + pool_name = pool_id_name[1] |
553 | + pools[pool_name] = int(pool_id) |
554 | + |
555 | + self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], |
556 | + pools)) |
557 | + return pools |
558 | + |
559 | + def get_ceph_df(self, sentry_unit): |
560 | + """Return dict of ceph df json output, including ceph pool state. |
561 | + |
562 | + :param sentry_unit: Pointer to amulet sentry instance (juju unit) |
563 | + :returns: Dict of ceph df output |
564 | + """ |
565 | + cmd = 'sudo ceph df --format=json' |
566 | + output, code = sentry_unit.run(cmd) |
567 | + if code != 0: |
568 | + msg = ('{} `{}` returned {} ' |
569 | + '{}'.format(sentry_unit.info['unit_name'], |
570 | + cmd, code, output)) |
571 | + amulet.raise_status(amulet.FAIL, msg=msg) |
572 | + return json.loads(output) |
573 | + |
574 | + def get_ceph_pool_sample(self, sentry_unit, pool_id=0): |
575 | + """Take a sample of attributes of a ceph pool, returning ceph |
576 | + pool name, object count and disk space used for the specified |
577 | + pool ID number. |
578 | + |
579 | + :param sentry_unit: Pointer to amulet sentry instance (juju unit) |
580 | + :param pool_id: Ceph pool ID |
581 | + :returns: List of pool name, object count, kb disk space used |
582 | + """ |
583 | + df = self.get_ceph_df(sentry_unit) |
584 | + pool_name = df['pools'][pool_id]['name'] |
585 | + obj_count = df['pools'][pool_id]['stats']['objects'] |
586 | + kb_used = df['pools'][pool_id]['stats']['kb_used'] |
587 | + self.log.debug('Ceph {} pool (ID {}): {} objects, ' |
588 | + '{} kb used'.format(pool_name, pool_id, |
589 | + obj_count, kb_used)) |
590 | + return pool_name, obj_count, kb_used |
591 | + |
592 | + def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): |
593 | + """Validate ceph pool samples taken over time, such as pool |
594 | + object counts or pool kb used, before adding, after adding, and |
595 | + after deleting items which affect those pool attributes. The |
596 | + 2nd element is expected to be greater than the 1st; 3rd is expected |
597 | + to be less than the 2nd. |
598 | + |
599 | + :param samples: List containing 3 data samples |
600 | + :param sample_type: String for logging and usage context |
601 | + :returns: None if successful, Failure message otherwise |
602 | + """ |
603 | + original, created, deleted = range(3) |
604 | + if samples[created] <= samples[original] or \ |
605 | + samples[deleted] >= samples[created]: |
606 | + return ('Ceph {} samples ({}) ' |
607 | + 'unexpected.'.format(sample_type, samples)) |
608 | + else: |
609 | + self.log.debug('Ceph {} samples (OK): ' |
610 | + '{}'.format(sample_type, samples)) |
611 | + return None |
612 | |
613 | === modified file 'hooks/charmhelpers/contrib/openstack/context.py' |
614 | --- hooks/charmhelpers/contrib/openstack/context.py 2015-06-24 12:22:08 +0000 |
615 | +++ hooks/charmhelpers/contrib/openstack/context.py 2015-11-03 04:19:02 +0000 |
616 | @@ -122,21 +122,24 @@ |
617 | of specifying multiple key value pairs within the same string. For |
618 | example, a string in the format of 'key1=value1, key2=value2' will |
619 | return a dict of: |
620 | - {'key1': 'value1', |
621 | - 'key2': 'value2'}. |
622 | + |
623 | + {'key1': 'value1', |
624 | + 'key2': 'value2'}. |
625 | |
626 | 2. A string in the above format, but supporting a comma-delimited list |
627 | of values for the same key. For example, a string in the format of |
628 | 'key1=value1, key2=value3,value4,value5' will return a dict of: |
629 | - {'key1', 'value1', |
630 | - 'key2', 'value2,value3,value4'} |
631 | + |
632 | + {'key1', 'value1', |
633 | + 'key2', 'value2,value3,value4'} |
634 | |
635 | 3. A string containing a colon character (:) prior to an equal |
636 | character (=) will be treated as yaml and parsed as such. This can be |
637 | used to specify more complex key value pairs. For example, |
638 | a string in the format of 'key1: subkey1=value1, subkey2=value2' will |
639 | return a dict of: |
640 | - {'key1', 'subkey1=value1, subkey2=value2'} |
641 | + |
642 | + {'key1', 'subkey1=value1, subkey2=value2'} |
643 | |
644 | The provided config_flags string may be a list of comma-separated values |
645 | which themselves may be comma-separated list of values. |
646 | @@ -240,7 +243,7 @@ |
647 | if self.relation_prefix: |
648 | password_setting = self.relation_prefix + '_password' |
649 | |
650 | - for rid in relation_ids('shared-db'): |
651 | + for rid in relation_ids(self.interfaces[0]): |
652 | for unit in related_units(rid): |
653 | rdata = relation_get(rid=rid, unit=unit) |
654 | host = rdata.get('db_host') |
655 | @@ -891,8 +894,6 @@ |
656 | return ctxt |
657 | |
658 | def __call__(self): |
659 | - self._ensure_packages() |
660 | - |
661 | if self.network_manager not in ['quantum', 'neutron']: |
662 | return {} |
663 | |
664 | @@ -1050,13 +1051,22 @@ |
665 | :param config_file : Service's config file to query sections |
666 | :param interface : Subordinate interface to inspect |
667 | """ |
668 | - self.service = service |
669 | self.config_file = config_file |
670 | - self.interface = interface |
671 | + if isinstance(service, list): |
672 | + self.services = service |
673 | + else: |
674 | + self.services = [service] |
675 | + if isinstance(interface, list): |
676 | + self.interfaces = interface |
677 | + else: |
678 | + self.interfaces = [interface] |
679 | |
680 | def __call__(self): |
681 | ctxt = {'sections': {}} |
682 | - for rid in relation_ids(self.interface): |
683 | + rids = [] |
684 | + for interface in self.interfaces: |
685 | + rids.extend(relation_ids(interface)) |
686 | + for rid in rids: |
687 | for unit in related_units(rid): |
688 | sub_config = relation_get('subordinate_configuration', |
689 | rid=rid, unit=unit) |
690 | @@ -1068,29 +1078,32 @@ |
691 | 'setting from %s' % rid, level=ERROR) |
692 | continue |
693 | |
694 | - if self.service not in sub_config: |
695 | - log('Found subordinate_config on %s but it contained' |
696 | - 'nothing for %s service' % (rid, self.service), |
697 | - level=INFO) |
698 | - continue |
699 | - |
700 | - sub_config = sub_config[self.service] |
701 | - if self.config_file not in sub_config: |
702 | - log('Found subordinate_config on %s but it contained' |
703 | - 'nothing for %s' % (rid, self.config_file), |
704 | - level=INFO) |
705 | - continue |
706 | - |
707 | - sub_config = sub_config[self.config_file] |
708 | - for k, v in six.iteritems(sub_config): |
709 | - if k == 'sections': |
710 | - for section, config_dict in six.iteritems(v): |
711 | - log("adding section '%s'" % (section), |
712 | - level=DEBUG) |
713 | - ctxt[k][section] = config_dict |
714 | - else: |
715 | - ctxt[k] = v |
716 | - |
717 | + for service in self.services: |
718 | + if service not in sub_config: |
719 | + log('Found subordinate_config on %s but it contained' |
720 | + 'nothing for %s service' % (rid, service), |
721 | + level=INFO) |
722 | + continue |
723 | + |
724 | + sub_config = sub_config[service] |
725 | + if self.config_file not in sub_config: |
726 | + log('Found subordinate_config on %s but it contained' |
727 | + 'nothing for %s' % (rid, self.config_file), |
728 | + level=INFO) |
729 | + continue |
730 | + |
731 | + sub_config = sub_config[self.config_file] |
732 | + for k, v in six.iteritems(sub_config): |
733 | + if k == 'sections': |
734 | + for section, config_list in six.iteritems(v): |
735 | + log("adding section '%s'" % (section), |
736 | + level=DEBUG) |
737 | + if ctxt[k].get(section): |
738 | + ctxt[k][section].extend(config_list) |
739 | + else: |
740 | + ctxt[k][section] = config_list |
741 | + else: |
742 | + ctxt[k] = v |
743 | log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) |
744 | return ctxt |
745 | |
746 | |
747 | === modified file 'hooks/charmhelpers/contrib/openstack/templates/ceph.conf' |
748 | --- hooks/charmhelpers/contrib/openstack/templates/ceph.conf 2015-06-24 12:22:08 +0000 |
749 | +++ hooks/charmhelpers/contrib/openstack/templates/ceph.conf 2015-11-03 04:19:02 +0000 |
750 | @@ -5,11 +5,11 @@ |
751 | ############################################################################### |
752 | [global] |
753 | {% if auth -%} |
754 | - auth_supported = {{ auth }} |
755 | - keyring = /etc/ceph/$cluster.$name.keyring |
756 | - mon host = {{ mon_hosts }} |
757 | +auth_supported = {{ auth }} |
758 | +keyring = /etc/ceph/$cluster.$name.keyring |
759 | +mon host = {{ mon_hosts }} |
760 | {% endif -%} |
761 | - log to syslog = {{ use_syslog }} |
762 | - err to syslog = {{ use_syslog }} |
763 | - clog to syslog = {{ use_syslog }} |
764 | +log to syslog = {{ use_syslog }} |
765 | +err to syslog = {{ use_syslog }} |
766 | +clog to syslog = {{ use_syslog }} |
767 | |
768 | |
769 | === modified file 'hooks/charmhelpers/contrib/openstack/templating.py' |
770 | --- hooks/charmhelpers/contrib/openstack/templating.py 2015-06-24 12:22:08 +0000 |
771 | +++ hooks/charmhelpers/contrib/openstack/templating.py 2015-11-03 04:19:02 +0000 |
772 | @@ -29,14 +29,13 @@ |
773 | try: |
774 | from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions |
775 | except ImportError: |
776 | - # python-jinja2 may not be installed yet, or we're running unittests. |
777 | - FileSystemLoader = ChoiceLoader = Environment = exceptions = None |
778 | + apt_install('python-jinja2', fatal=True) |
779 | + from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions |
780 | |
781 | |
782 | class OSConfigException(Exception): |
783 | pass |
784 | |
785 | - |
786 | def os_template_dirs(templates_dir, os_release): |
787 | tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) |
788 | for rel in six.itervalues(OPENSTACK_CODENAMES)] |
789 | @@ -61,7 +60,6 @@ |
790 | ' '.join(dirs), level=INFO) |
791 | return dirs |
792 | |
793 | - |
794 | def get_loader(templates_dir, os_release): |
795 | """ |
796 | Create a jinja2.ChoiceLoader containing template dirs up to |
797 | |
798 | === modified file 'hooks/charmhelpers/contrib/openstack/utils.py' |
799 | --- hooks/charmhelpers/contrib/openstack/utils.py 2015-06-24 12:22:08 +0000 |
800 | +++ hooks/charmhelpers/contrib/openstack/utils.py 2015-11-03 04:19:02 +0000 |
801 | @@ -25,6 +25,7 @@ |
802 | import os |
803 | import sys |
804 | import uuid |
805 | +import re |
806 | |
807 | import six |
808 | import yaml |
809 | @@ -42,7 +43,7 @@ |
810 | INFO, |
811 | relation_ids, |
812 | related_units, |
813 | - relation_set, |
814 | + relation_set |
815 | ) |
816 | |
817 | from charmhelpers.contrib.storage.linux.lvm import ( |
818 | @@ -71,7 +72,6 @@ |
819 | DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' |
820 | 'restricted main multiverse universe') |
821 | |
822 | - |
823 | UBUNTU_OPENSTACK_RELEASE = OrderedDict([ |
824 | ('oneiric', 'diablo'), |
825 | ('precise', 'essex'), |
826 | @@ -81,6 +81,7 @@ |
827 | ('trusty', 'icehouse'), |
828 | ('utopic', 'juno'), |
829 | ('vivid', 'kilo'), |
830 | + ('wily', 'liberty'), |
831 | ]) |
832 | |
833 | |
834 | @@ -93,6 +94,7 @@ |
835 | ('2014.1', 'icehouse'), |
836 | ('2014.2', 'juno'), |
837 | ('2015.1', 'kilo'), |
838 | + ('2015.2', 'liberty'), |
839 | ]) |
840 | |
841 | # The ugly duckling |
842 | @@ -115,8 +117,37 @@ |
843 | ('2.2.0', 'juno'), |
844 | ('2.2.1', 'kilo'), |
845 | ('2.2.2', 'kilo'), |
846 | + ('2.3.0', 'liberty'), |
847 | ]) |
848 | |
849 | +# >= Liberty version->codename mapping |
850 | +PACKAGE_CODENAMES = { |
851 | + 'nova-common': OrderedDict([ |
852 | + ('12.0.0', 'liberty'), |
853 | + ]), |
854 | + 'neutron-common': OrderedDict([ |
855 | + ('7.0.0', 'liberty'), |
856 | + ]), |
857 | + 'cinder-common': OrderedDict([ |
858 | + ('7.0.0', 'liberty'), |
859 | + ]), |
860 | + 'keystone': OrderedDict([ |
861 | + ('8.0.0', 'liberty'), |
862 | + ]), |
863 | + 'horizon-common': OrderedDict([ |
864 | + ('8.0.0', 'liberty'), |
865 | + ]), |
866 | + 'ceilometer-common': OrderedDict([ |
867 | + ('5.0.0', 'liberty'), |
868 | + ]), |
869 | + 'heat-common': OrderedDict([ |
870 | + ('5.0.0', 'liberty'), |
871 | + ]), |
872 | + 'glance-common': OrderedDict([ |
873 | + ('11.0.0', 'liberty'), |
874 | + ]), |
875 | +} |
876 | + |
877 | DEFAULT_LOOPBACK_SIZE = '5G' |
878 | |
879 | |
880 | @@ -200,20 +231,29 @@ |
881 | error_out(e) |
882 | |
883 | vers = apt.upstream_version(pkg.current_ver.ver_str) |
884 | + match = re.match('^(\d)\.(\d)\.(\d)', vers) |
885 | + if match: |
886 | + vers = match.group(0) |
887 | |
888 | - try: |
889 | - if 'swift' in pkg.name: |
890 | - swift_vers = vers[:5] |
891 | - if swift_vers not in SWIFT_CODENAMES: |
892 | - # Deal with 1.10.0 upward |
893 | - swift_vers = vers[:6] |
894 | - return SWIFT_CODENAMES[swift_vers] |
895 | - else: |
896 | - vers = vers[:6] |
897 | - return OPENSTACK_CODENAMES[vers] |
898 | - except KeyError: |
899 | - e = 'Could not determine OpenStack codename for version %s' % vers |
900 | - error_out(e) |
901 | + # >= Liberty independent project versions |
902 | + if (package in PACKAGE_CODENAMES and |
903 | + vers in PACKAGE_CODENAMES[package]): |
904 | + return PACKAGE_CODENAMES[package][vers] |
905 | + else: |
906 | + # < Liberty co-ordinated project versions |
907 | + try: |
908 | + if 'swift' in pkg.name: |
909 | + swift_vers = vers[:5] |
910 | + if swift_vers not in SWIFT_CODENAMES: |
911 | + # Deal with 1.10.0 upward |
912 | + swift_vers = vers[:6] |
913 | + return SWIFT_CODENAMES[swift_vers] |
914 | + else: |
915 | + vers = vers[:6] |
916 | + return OPENSTACK_CODENAMES[vers] |
917 | + except KeyError: |
918 | + e = 'Could not determine OpenStack codename for version %s' % vers |
919 | + error_out(e) |
920 | |
921 | |
922 | def get_os_version_package(pkg, fatal=True): |
923 | @@ -323,6 +363,9 @@ |
924 | 'kilo': 'trusty-updates/kilo', |
925 | 'kilo/updates': 'trusty-updates/kilo', |
926 | 'kilo/proposed': 'trusty-proposed/kilo', |
927 | + 'liberty': 'trusty-updates/liberty', |
928 | + 'liberty/updates': 'trusty-updates/liberty', |
929 | + 'liberty/proposed': 'trusty-proposed/liberty', |
930 | } |
931 | |
932 | try: |
933 | @@ -518,6 +561,7 @@ |
934 | Clone/install all specified OpenStack repositories. |
935 | |
936 | The expected format of projects_yaml is: |
937 | + |
938 | repositories: |
939 | - {name: keystone, |
940 | repository: 'git://git.openstack.org/openstack/keystone.git', |
941 | @@ -525,11 +569,13 @@ |
942 | - {name: requirements, |
943 | repository: 'git://git.openstack.org/openstack/requirements.git', |
944 | branch: 'stable/icehouse'} |
945 | + |
946 | directory: /mnt/openstack-git |
947 | http_proxy: squid-proxy-url |
948 | https_proxy: squid-proxy-url |
949 | |
950 | - The directory, http_proxy, and https_proxy keys are optional. |
951 | + The directory, http_proxy, and https_proxy keys are optional. |
952 | + |
953 | """ |
954 | global requirements_dir |
955 | parent_dir = '/mnt/openstack-git' |
956 | @@ -551,6 +597,12 @@ |
957 | |
958 | pip_create_virtualenv(os.path.join(parent_dir, 'venv')) |
959 | |
960 | + # Upgrade setuptools and pip from default virtualenv versions. The default |
961 | + # versions in trusty break master OpenStack branch deployments. |
962 | + for p in ['pip', 'setuptools']: |
963 | + pip_install(p, upgrade=True, proxy=http_proxy, |
964 | + venv=os.path.join(parent_dir, 'venv')) |
965 | + |
966 | for p in projects['repositories']: |
967 | repo = p['repository'] |
968 | branch = p['branch'] |
969 | @@ -612,24 +664,24 @@ |
970 | else: |
971 | repo_dir = dest_dir |
972 | |
973 | + venv = os.path.join(parent_dir, 'venv') |
974 | + |
975 | if update_requirements: |
976 | if not requirements_dir: |
977 | error_out('requirements repo must be cloned before ' |
978 | 'updating from global requirements.') |
979 | - _git_update_requirements(repo_dir, requirements_dir) |
980 | + _git_update_requirements(venv, repo_dir, requirements_dir) |
981 | |
982 | juju_log('Installing git repo from dir: {}'.format(repo_dir)) |
983 | if http_proxy: |
984 | - pip_install(repo_dir, proxy=http_proxy, |
985 | - venv=os.path.join(parent_dir, 'venv')) |
986 | + pip_install(repo_dir, proxy=http_proxy, venv=venv) |
987 | else: |
988 | - pip_install(repo_dir, |
989 | - venv=os.path.join(parent_dir, 'venv')) |
990 | + pip_install(repo_dir, venv=venv) |
991 | |
992 | return repo_dir |
993 | |
994 | |
995 | -def _git_update_requirements(package_dir, reqs_dir): |
996 | +def _git_update_requirements(venv, package_dir, reqs_dir): |
997 | """ |
998 | Update from global requirements. |
999 | |
1000 | @@ -638,12 +690,14 @@ |
1001 | """ |
1002 | orig_dir = os.getcwd() |
1003 | os.chdir(reqs_dir) |
1004 | - cmd = ['python', 'update.py', package_dir] |
1005 | + python = os.path.join(venv, 'bin/python') |
1006 | + cmd = [python, 'update.py', package_dir] |
1007 | try: |
1008 | subprocess.check_call(cmd) |
1009 | except subprocess.CalledProcessError: |
1010 | package = os.path.basename(package_dir) |
1011 | - error_out("Error updating {} from global-requirements.txt".format(package)) |
1012 | + error_out("Error updating {} from " |
1013 | + "global-requirements.txt".format(package)) |
1014 | os.chdir(orig_dir) |
1015 | |
1016 | |
1017 | @@ -690,7 +744,6 @@ |
1018 | |
1019 | return None |
1020 | |
1021 | - |
1022 | def remote_restart(rel_name, remote_service=None): |
1023 | trigger = { |
1024 | 'restart-trigger': str(uuid.uuid4()), |
1025 | @@ -705,3 +758,4 @@ |
1026 | relation_set(relation_id=rid, |
1027 | relation_settings=trigger, |
1028 | ) |
1029 | + |
1030 | |
1031 | === modified file 'hooks/charmhelpers/contrib/python/packages.py' |
1032 | --- hooks/charmhelpers/contrib/python/packages.py 2015-06-24 12:22:08 +0000 |
1033 | +++ hooks/charmhelpers/contrib/python/packages.py 2015-11-03 04:19:02 +0000 |
1034 | @@ -36,6 +36,8 @@ |
1035 | def parse_options(given, available): |
1036 | """Given a set of options, check if available""" |
1037 | for key, value in sorted(given.items()): |
1038 | + if not value: |
1039 | + continue |
1040 | if key in available: |
1041 | yield "--{0}={1}".format(key, value) |
1042 | |
1043 | |
1044 | === modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py' |
1045 | --- hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-06-24 12:22:08 +0000 |
1046 | +++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-11-03 04:19:02 +0000 |
1047 | @@ -60,12 +60,12 @@ |
1048 | KEYFILE = '/etc/ceph/ceph.client.{}.key' |
1049 | |
1050 | CEPH_CONF = """[global] |
1051 | - auth supported = {auth} |
1052 | - keyring = {keyring} |
1053 | - mon host = {mon_hosts} |
1054 | - log to syslog = {use_syslog} |
1055 | - err to syslog = {use_syslog} |
1056 | - clog to syslog = {use_syslog} |
1057 | +auth supported = {auth} |
1058 | +keyring = {keyring} |
1059 | +mon host = {mon_hosts} |
1060 | +log to syslog = {use_syslog} |
1061 | +err to syslog = {use_syslog} |
1062 | +clog to syslog = {use_syslog} |
1063 | """ |
1064 | |
1065 | |
1066 | |
1067 | === modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py' |
1068 | --- hooks/charmhelpers/contrib/storage/linux/utils.py 2015-06-24 12:22:08 +0000 |
1069 | +++ hooks/charmhelpers/contrib/storage/linux/utils.py 2015-11-03 04:19:02 +0000 |
1070 | @@ -43,9 +43,10 @@ |
1071 | |
1072 | :param block_device: str: Full path of block device to clean. |
1073 | ''' |
1074 | + # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b |
1075 | # sometimes sgdisk exits non-zero; this is OK, dd will clean up |
1076 | - call(['sgdisk', '--zap-all', '--mbrtogpt', |
1077 | - '--clear', block_device]) |
1078 | + call(['sgdisk', '--zap-all', '--', block_device]) |
1079 | + call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device]) |
1080 | dev_end = check_output(['blockdev', '--getsz', |
1081 | block_device]).decode('UTF-8') |
1082 | gpt_end = int(dev_end.split()[0]) - 100 |
1083 | @@ -67,4 +68,4 @@ |
1084 | out = check_output(['mount']).decode('UTF-8') |
1085 | if is_partition: |
1086 | return bool(re.search(device + r"\b", out)) |
1087 | - return bool(re.search(device + r"[0-9]+\b", out)) |
1088 | + return bool(re.search(device + r"[0-9]*\b", out)) |
1089 | |
1090 | === modified file 'hooks/charmhelpers/core/hookenv.py' |
1091 | --- hooks/charmhelpers/core/hookenv.py 2015-06-24 12:22:08 +0000 |
1092 | +++ hooks/charmhelpers/core/hookenv.py 2015-11-03 04:19:02 +0000 |
1093 | @@ -21,7 +21,10 @@ |
1094 | # Charm Helpers Developers <juju@lists.ubuntu.com> |
1095 | |
1096 | from __future__ import print_function |
1097 | +import copy |
1098 | +from distutils.version import LooseVersion |
1099 | from functools import wraps |
1100 | +import glob |
1101 | import os |
1102 | import json |
1103 | import yaml |
1104 | @@ -71,6 +74,7 @@ |
1105 | res = func(*args, **kwargs) |
1106 | cache[key] = res |
1107 | return res |
1108 | + wrapper._wrapped = func |
1109 | return wrapper |
1110 | |
1111 | |
1112 | @@ -170,9 +174,19 @@ |
1113 | return os.environ.get('JUJU_RELATION', None) |
1114 | |
1115 | |
1116 | -def relation_id(): |
1117 | - """The relation ID for the current relation hook""" |
1118 | - return os.environ.get('JUJU_RELATION_ID', None) |
1119 | +@cached |
1120 | +def relation_id(relation_name=None, service_or_unit=None): |
1121 | + """The relation ID for the current or a specified relation""" |
1122 | + if not relation_name and not service_or_unit: |
1123 | + return os.environ.get('JUJU_RELATION_ID', None) |
1124 | + elif relation_name and service_or_unit: |
1125 | + service_name = service_or_unit.split('/')[0] |
1126 | + for relid in relation_ids(relation_name): |
1127 | + remote_service = remote_service_name(relid) |
1128 | + if remote_service == service_name: |
1129 | + return relid |
1130 | + else: |
1131 | + raise ValueError('Must specify neither or both of relation_name and service_or_unit') |
1132 | |
1133 | |
1134 | def local_unit(): |
1135 | @@ -190,9 +204,20 @@ |
1136 | return local_unit().split('/')[0] |
1137 | |
1138 | |
1139 | +@cached |
1140 | +def remote_service_name(relid=None): |
1141 | + """The remote service name for a given relation-id (or the current relation)""" |
1142 | + if relid is None: |
1143 | + unit = remote_unit() |
1144 | + else: |
1145 | + units = related_units(relid) |
1146 | + unit = units[0] if units else None |
1147 | + return unit.split('/')[0] if unit else None |
1148 | + |
1149 | + |
1150 | def hook_name(): |
1151 | """The name of the currently executing hook""" |
1152 | - return os.path.basename(sys.argv[0]) |
1153 | + return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0])) |
1154 | |
1155 | |
1156 | class Config(dict): |
1157 | @@ -242,29 +267,7 @@ |
1158 | self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) |
1159 | if os.path.exists(self.path): |
1160 | self.load_previous() |
1161 | - |
1162 | - def __getitem__(self, key): |
1163 | - """For regular dict lookups, check the current juju config first, |
1164 | - then the previous (saved) copy. This ensures that user-saved values |
1165 | - will be returned by a dict lookup. |
1166 | - |
1167 | - """ |
1168 | - try: |
1169 | - return dict.__getitem__(self, key) |
1170 | - except KeyError: |
1171 | - return (self._prev_dict or {})[key] |
1172 | - |
1173 | - def get(self, key, default=None): |
1174 | - try: |
1175 | - return self[key] |
1176 | - except KeyError: |
1177 | - return default |
1178 | - |
1179 | - def keys(self): |
1180 | - prev_keys = [] |
1181 | - if self._prev_dict is not None: |
1182 | - prev_keys = self._prev_dict.keys() |
1183 | - return list(set(prev_keys + list(dict.keys(self)))) |
1184 | + atexit(self._implicit_save) |
1185 | |
1186 | def load_previous(self, path=None): |
1187 | """Load previous copy of config from disk. |
1188 | @@ -283,6 +286,9 @@ |
1189 | self.path = path or self.path |
1190 | with open(self.path) as f: |
1191 | self._prev_dict = json.load(f) |
1192 | + for k, v in copy.deepcopy(self._prev_dict).items(): |
1193 | + if k not in self: |
1194 | + self[k] = v |
1195 | |
1196 | def changed(self, key): |
1197 | """Return True if the current value for this key is different from |
1198 | @@ -314,13 +320,13 @@ |
1199 | instance. |
1200 | |
1201 | """ |
1202 | - if self._prev_dict: |
1203 | - for k, v in six.iteritems(self._prev_dict): |
1204 | - if k not in self: |
1205 | - self[k] = v |
1206 | with open(self.path, 'w') as f: |
1207 | json.dump(self, f) |
1208 | |
1209 | + def _implicit_save(self): |
1210 | + if self.implicit_save: |
1211 | + self.save() |
1212 | + |
1213 | |
1214 | @cached |
1215 | def config(scope=None): |
1216 | @@ -485,6 +491,63 @@ |
1217 | |
1218 | |
1219 | @cached |
1220 | +def relation_to_interface(relation_name): |
1221 | + """ |
1222 | + Given the name of a relation, return the interface that relation uses. |
1223 | + |
1224 | + :returns: The interface name, or ``None``. |
1225 | + """ |
1226 | + return relation_to_role_and_interface(relation_name)[1] |
1227 | + |
1228 | + |
1229 | +@cached |
1230 | +def relation_to_role_and_interface(relation_name): |
1231 | + """ |
1232 | + Given the name of a relation, return the role and the name of the interface |
1233 | + that relation uses (where role is one of ``provides``, ``requires``, or ``peer``). |
1234 | + |
1235 | + :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. |
1236 | + """ |
1237 | + _metadata = metadata() |
1238 | + for role in ('provides', 'requires', 'peer'): |
1239 | + interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') |
1240 | + if interface: |
1241 | + return role, interface |
1242 | + return None, None |
1243 | + |
1244 | + |
1245 | +@cached |
1246 | +def role_and_interface_to_relations(role, interface_name): |
1247 | + """ |
1248 | + Given a role and interface name, return a list of relation names for the |
1249 | + current charm that use that interface under that role (where role is one |
1250 | + of ``provides``, ``requires``, or ``peer``). |
1251 | + |
1252 | + :returns: A list of relation names. |
1253 | + """ |
1254 | + _metadata = metadata() |
1255 | + results = [] |
1256 | + for relation_name, relation in _metadata.get(role, {}).items(): |
1257 | + if relation['interface'] == interface_name: |
1258 | + results.append(relation_name) |
1259 | + return results |
1260 | + |
1261 | + |
1262 | +@cached |
1263 | +def interface_to_relations(interface_name): |
1264 | + """ |
1265 | + Given an interface, return a list of relation names for the current |
1266 | + charm that use that interface. |
1267 | + |
1268 | + :returns: A list of relation names. |
1269 | + """ |
1270 | + results = [] |
1271 | + for role in ('provides', 'requires', 'peer'): |
1272 | + results.extend(role_and_interface_to_relations(role, interface_name)) |
1273 | + return results |
1274 | + |
1275 | + |
1276 | +@cached |
1277 | def charm_name(): |
1278 | """Get the name of the current charm as is specified on metadata.yaml""" |
1279 | return metadata().get('name') |
1280 | @@ -587,10 +650,14 @@ |
1281 | hooks.execute(sys.argv) |
1282 | """ |
1283 | |
1284 | - def __init__(self, config_save=True): |
1285 | + def __init__(self, config_save=None): |
1286 | super(Hooks, self).__init__() |
1287 | self._hooks = {} |
1288 | - self._config_save = config_save |
1289 | + |
1290 | + # For unknown reasons, we allow the Hooks constructor to override |
1291 | + # config().implicit_save. |
1292 | + if config_save is not None: |
1293 | + config().implicit_save = config_save |
1294 | |
1295 | def register(self, name, function): |
1296 | """Register a hook""" |
1297 | @@ -598,13 +665,16 @@ |
1298 | |
1299 | def execute(self, args): |
1300 | """Execute a registered hook based on args[0]""" |
1301 | + _run_atstart() |
1302 | hook_name = os.path.basename(args[0]) |
1303 | if hook_name in self._hooks: |
1304 | - self._hooks[hook_name]() |
1305 | - if self._config_save: |
1306 | - cfg = config() |
1307 | - if cfg.implicit_save: |
1308 | - cfg.save() |
1309 | + try: |
1310 | + self._hooks[hook_name]() |
1311 | + except SystemExit as x: |
1312 | + if x.code is None or x.code == 0: |
1313 | + _run_atexit() |
1314 | + raise |
1315 | + _run_atexit() |
1316 | else: |
1317 | raise UnregisteredHookError(hook_name) |
1318 | |
1319 | @@ -653,6 +723,21 @@ |
1320 | subprocess.check_call(['action-fail', message]) |
1321 | |
1322 | |
1323 | +def action_name(): |
1324 | + """Get the name of the currently executing action.""" |
1325 | + return os.environ.get('JUJU_ACTION_NAME') |
1326 | + |
1327 | + |
1328 | +def action_uuid(): |
1329 | + """Get the UUID of the currently executing action.""" |
1330 | + return os.environ.get('JUJU_ACTION_UUID') |
1331 | + |
1332 | + |
1333 | +def action_tag(): |
1334 | + """Get the tag for the currently executing action.""" |
1335 | + return os.environ.get('JUJU_ACTION_TAG') |
1336 | + |
1337 | + |
1338 | def status_set(workload_state, message): |
1339 | """Set the workload state with a message |
1340 | |
1341 | @@ -732,13 +817,80 @@ |
1342 | @translate_exc(from_exc=OSError, to_exc=NotImplementedError) |
1343 | def leader_set(settings=None, **kwargs): |
1344 | """Juju leader set value(s)""" |
1345 | - log("Juju leader-set '%s'" % (settings), level=DEBUG) |
1346 | + # Don't log secrets. |
1347 | + # log("Juju leader-set '%s'" % (settings), level=DEBUG) |
1348 | cmd = ['leader-set'] |
1349 | settings = settings or {} |
1350 | settings.update(kwargs) |
1351 | - for k, v in settings.iteritems(): |
1352 | + for k, v in settings.items(): |
1353 | if v is None: |
1354 | cmd.append('{}='.format(k)) |
1355 | else: |
1356 | cmd.append('{}={}'.format(k, v)) |
1357 | subprocess.check_call(cmd) |
1358 | + |
1359 | + |
1360 | +@cached |
1361 | +def juju_version(): |
1362 | + """Full version string (eg. '1.23.3.1-trusty-amd64')""" |
1363 | + # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1 |
1364 | + jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0] |
1365 | + return subprocess.check_output([jujud, 'version'], |
1366 | + universal_newlines=True).strip() |
1367 | + |
1368 | + |
1369 | +@cached |
1370 | +def has_juju_version(minimum_version): |
1371 | + """Return True if the Juju version is at least the provided version""" |
1372 | + return LooseVersion(juju_version()) >= LooseVersion(minimum_version) |
1373 | + |
1374 | + |
1375 | +_atexit = [] |
1376 | +_atstart = [] |
1377 | + |
1378 | + |
1379 | +def atstart(callback, *args, **kwargs): |
1380 | + '''Schedule a callback to run before the main hook. |
1381 | + |
1382 | + Callbacks are run in the order they were added. |
1383 | + |
1384 | + This is useful for modules and classes to perform initialization |
1385 | + and inject behavior. In particular: |
1386 | + |
1387 | + - Run common code before all of your hooks, such as logging |
1388 | + the hook name or interesting relation data. |
1389 | + - Defer object or module initialization that requires a hook |
1390 | + context until we know there actually is a hook context, |
1391 | + making testing easier. |
1392 | + - Rather than requiring charm authors to include boilerplate to |
1393 | + invoke your helper's behavior, have it run automatically if |
1394 | + your object is instantiated or module imported. |
1395 | + |
1396 | + This is not at all useful after your hook framework as been launched. |
1397 | + ''' |
1398 | + global _atstart |
1399 | + _atstart.append((callback, args, kwargs)) |
1400 | + |
1401 | + |
1402 | +def atexit(callback, *args, **kwargs): |
1403 | + '''Schedule a callback to run on successful hook completion. |
1404 | + |
1405 | + Callbacks are run in the reverse order that they were added.''' |
1406 | + _atexit.append((callback, args, kwargs)) |
1407 | + |
1408 | + |
1409 | +def _run_atstart(): |
1410 | + '''Hook frameworks must invoke this before running the main hook body.''' |
1411 | + global _atstart |
1412 | + for callback, args, kwargs in _atstart: |
1413 | + callback(*args, **kwargs) |
1414 | + del _atstart[:] |
1415 | + |
1416 | + |
1417 | +def _run_atexit(): |
1418 | + '''Hook frameworks must invoke this after the main hook body has |
1419 | + successfully completed. Do not invoke it if the hook fails.''' |
1420 | + global _atexit |
1421 | + for callback, args, kwargs in reversed(_atexit): |
1422 | + callback(*args, **kwargs) |
1423 | + del _atexit[:] |
1424 | |
1425 | === modified file 'hooks/charmhelpers/core/host.py' |
1426 | --- hooks/charmhelpers/core/host.py 2015-06-24 12:22:08 +0000 |
1427 | +++ hooks/charmhelpers/core/host.py 2015-11-03 04:19:02 +0000 |
1428 | @@ -63,6 +63,36 @@ |
1429 | return service_result |
1430 | |
1431 | |
1432 | +def service_pause(service_name, init_dir=None): |
1433 | + """Pause a system service. |
1434 | + |
1435 | + Stop it, and prevent it from starting again at boot.""" |
1436 | + if init_dir is None: |
1437 | + init_dir = "/etc/init" |
1438 | + stopped = service_stop(service_name) |
1439 | + # XXX: Support systemd too |
1440 | + override_path = os.path.join( |
1441 | + init_dir, '{}.override'.format(service_name)) |
1442 | + with open(override_path, 'w') as fh: |
1443 | + fh.write("manual\n") |
1444 | + return stopped |
1445 | + |
1446 | + |
1447 | +def service_resume(service_name, init_dir=None): |
1448 | + """Resume a system service. |
1449 | + |
1450 | + Reenable starting again at boot. Start the service""" |
1451 | + # XXX: Support systemd too |
1452 | + if init_dir is None: |
1453 | + init_dir = "/etc/init" |
1454 | + override_path = os.path.join( |
1455 | + init_dir, '{}.override'.format(service_name)) |
1456 | + if os.path.exists(override_path): |
1457 | + os.unlink(override_path) |
1458 | + started = service_start(service_name) |
1459 | + return started |
1460 | + |
1461 | + |
1462 | def service(action, service_name): |
1463 | """Control a system service""" |
1464 | cmd = ['service', service_name, action] |
1465 | @@ -120,7 +150,7 @@ |
1466 | |
1467 | def user_exists(username): |
1468 | try: |
1469 | - user_info = pwd.getpwnam(username) |
1470 | + pwd.getpwnam(username) |
1471 | user_exists = True |
1472 | except KeyError: |
1473 | user_exists = False |
1474 | @@ -149,11 +179,7 @@ |
1475 | |
1476 | def add_user_to_group(username, group): |
1477 | """Add a user to a group""" |
1478 | - cmd = [ |
1479 | - 'gpasswd', '-a', |
1480 | - username, |
1481 | - group |
1482 | - ] |
1483 | + cmd = ['gpasswd', '-a', username, group] |
1484 | log("Adding user {} to group {}".format(username, group)) |
1485 | subprocess.check_call(cmd) |
1486 | |
1487 | @@ -263,7 +289,6 @@ |
1488 | return system_mounts |
1489 | |
1490 | |
1491 | - |
1492 | def fstab_mount(mountpoint): |
1493 | cmd_args = ['mount', mountpoint] |
1494 | try: |
1495 | |
1496 | === modified file 'hooks/charmhelpers/core/hugepage.py' |
1497 | --- hooks/charmhelpers/core/hugepage.py 2015-06-24 12:22:08 +0000 |
1498 | +++ hooks/charmhelpers/core/hugepage.py 2015-11-03 04:19:02 +0000 |
1499 | @@ -1,4 +1,3 @@ |
1500 | - |
1501 | #!/usr/bin/env python |
1502 | # -*- coding: utf-8 -*- |
1503 | |
1504 | @@ -19,10 +18,8 @@ |
1505 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
1506 | |
1507 | import yaml |
1508 | -from charmhelpers.core.fstab import Fstab |
1509 | -from charmhelpers.core.sysctl import ( |
1510 | - create, |
1511 | -) |
1512 | +from charmhelpers.core import fstab |
1513 | +from charmhelpers.core import sysctl |
1514 | from charmhelpers.core.host import ( |
1515 | add_group, |
1516 | add_user_to_group, |
1517 | @@ -30,8 +27,9 @@ |
1518 | mkdir, |
1519 | ) |
1520 | |
1521 | + |
1522 | def hugepage_support(user, group='hugetlb', nr_hugepages=256, |
1523 | - max_map_count=65536, mnt_point='/hugepages', |
1524 | + max_map_count=65536, mnt_point='/run/hugepages/kvm', |
1525 | pagesize='2MB', mount=True): |
1526 | group_info = add_group(group) |
1527 | gid = group_info.gr_gid |
1528 | @@ -41,14 +39,14 @@ |
1529 | 'vm.max_map_count': max_map_count, # 1GB |
1530 | 'vm.hugetlb_shm_group': gid, |
1531 | } |
1532 | - create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') |
1533 | + sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') |
1534 | mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) |
1535 | - fstab = Fstab() |
1536 | - fstab_entry = fstab.get_entry_by_attr('mountpoint', mnt_point) |
1537 | + lfstab = fstab.Fstab() |
1538 | + fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point) |
1539 | if fstab_entry: |
1540 | - fstab.remove_entry(fstab_entry) |
1541 | - entry = fstab.Entry('nodev', mnt_point, 'hugetlbfs', |
1542 | - 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0) |
1543 | - fstab.add_entry(entry) |
1544 | + lfstab.remove_entry(fstab_entry) |
1545 | + entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs', |
1546 | + 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0) |
1547 | + lfstab.add_entry(entry) |
1548 | if mount: |
1549 | fstab_mount(mnt_point) |
1550 | |
1551 | === modified file 'hooks/charmhelpers/core/services/base.py' |
1552 | --- hooks/charmhelpers/core/services/base.py 2015-06-24 12:22:08 +0000 |
1553 | +++ hooks/charmhelpers/core/services/base.py 2015-11-03 04:19:02 +0000 |
1554 | @@ -128,15 +128,18 @@ |
1555 | """ |
1556 | Handle the current hook by doing The Right Thing with the registered services. |
1557 | """ |
1558 | - hook_name = hookenv.hook_name() |
1559 | - if hook_name == 'stop': |
1560 | - self.stop_services() |
1561 | - else: |
1562 | - self.reconfigure_services() |
1563 | - self.provide_data() |
1564 | - cfg = hookenv.config() |
1565 | - if cfg.implicit_save: |
1566 | - cfg.save() |
1567 | + hookenv._run_atstart() |
1568 | + try: |
1569 | + hook_name = hookenv.hook_name() |
1570 | + if hook_name == 'stop': |
1571 | + self.stop_services() |
1572 | + else: |
1573 | + self.reconfigure_services() |
1574 | + self.provide_data() |
1575 | + except SystemExit as x: |
1576 | + if x.code is None or x.code == 0: |
1577 | + hookenv._run_atexit() |
1578 | + hookenv._run_atexit() |
1579 | |
1580 | def provide_data(self): |
1581 | """ |
1582 | |
1583 | === modified file 'hooks/charmhelpers/core/unitdata.py' |
1584 | --- hooks/charmhelpers/core/unitdata.py 2015-06-24 12:22:08 +0000 |
1585 | +++ hooks/charmhelpers/core/unitdata.py 2015-11-03 04:19:02 +0000 |
1586 | @@ -152,6 +152,7 @@ |
1587 | import collections |
1588 | import contextlib |
1589 | import datetime |
1590 | +import itertools |
1591 | import json |
1592 | import os |
1593 | import pprint |
1594 | @@ -164,8 +165,7 @@ |
1595 | class Storage(object): |
1596 | """Simple key value database for local unit state within charms. |
1597 | |
1598 | - Modifications are automatically committed at hook exit. That's |
1599 | - currently regardless of exit code. |
1600 | + Modifications are not persisted unless :meth:`flush` is called. |
1601 | |
1602 | To support dicts, lists, integer, floats, and booleans values |
1603 | are automatically json encoded/decoded. |
1604 | @@ -173,8 +173,11 @@ |
1605 | def __init__(self, path=None): |
1606 | self.db_path = path |
1607 | if path is None: |
1608 | - self.db_path = os.path.join( |
1609 | - os.environ.get('CHARM_DIR', ''), '.unit-state.db') |
1610 | + if 'UNIT_STATE_DB' in os.environ: |
1611 | + self.db_path = os.environ['UNIT_STATE_DB'] |
1612 | + else: |
1613 | + self.db_path = os.path.join( |
1614 | + os.environ.get('CHARM_DIR', ''), '.unit-state.db') |
1615 | self.conn = sqlite3.connect('%s' % self.db_path) |
1616 | self.cursor = self.conn.cursor() |
1617 | self.revision = None |
1618 | @@ -189,15 +192,8 @@ |
1619 | self.conn.close() |
1620 | self._closed = True |
1621 | |
1622 | - def _scoped_query(self, stmt, params=None): |
1623 | - if params is None: |
1624 | - params = [] |
1625 | - return stmt, params |
1626 | - |
1627 | def get(self, key, default=None, record=False): |
1628 | - self.cursor.execute( |
1629 | - *self._scoped_query( |
1630 | - 'select data from kv where key=?', [key])) |
1631 | + self.cursor.execute('select data from kv where key=?', [key]) |
1632 | result = self.cursor.fetchone() |
1633 | if not result: |
1634 | return default |
1635 | @@ -206,33 +202,81 @@ |
1636 | return json.loads(result[0]) |
1637 | |
1638 | def getrange(self, key_prefix, strip=False): |
1639 | - stmt = "select key, data from kv where key like '%s%%'" % key_prefix |
1640 | - self.cursor.execute(*self._scoped_query(stmt)) |
1641 | + """ |
1642 | + Get a range of keys starting with a common prefix as a mapping of |
1643 | + keys to values. |
1644 | + |
1645 | + :param str key_prefix: Common prefix among all keys |
1646 | + :param bool strip: Optionally strip the common prefix from the key |
1647 | + names in the returned dict |
1648 | + :return dict: A (possibly empty) dict of key-value mappings |
1649 | + """ |
1650 | + self.cursor.execute("select key, data from kv where key like ?", |
1651 | + ['%s%%' % key_prefix]) |
1652 | result = self.cursor.fetchall() |
1653 | |
1654 | if not result: |
1655 | - return None |
1656 | + return {} |
1657 | if not strip: |
1658 | key_prefix = '' |
1659 | return dict([ |
1660 | (k[len(key_prefix):], json.loads(v)) for k, v in result]) |
1661 | |
1662 | def update(self, mapping, prefix=""): |
1663 | + """ |
1664 | + Set the values of multiple keys at once. |
1665 | + |
1666 | + :param dict mapping: Mapping of keys to values |
1667 | + :param str prefix: Optional prefix to apply to all keys in `mapping` |
1668 | + before setting |
1669 | + """ |
1670 | for k, v in mapping.items(): |
1671 | self.set("%s%s" % (prefix, k), v) |
1672 | |
1673 | def unset(self, key): |
1674 | + """ |
1675 | + Remove a key from the database entirely. |
1676 | + """ |
1677 | self.cursor.execute('delete from kv where key=?', [key]) |
1678 | if self.revision and self.cursor.rowcount: |
1679 | self.cursor.execute( |
1680 | 'insert into kv_revisions values (?, ?, ?)', |
1681 | [key, self.revision, json.dumps('DELETED')]) |
1682 | |
1683 | + def unsetrange(self, keys=None, prefix=""): |
1684 | + """ |
1685 | + Remove a range of keys starting with a common prefix, from the database |
1686 | + entirely. |
1687 | + |
1688 | + :param list keys: List of keys to remove. |
1689 | + :param str prefix: Optional prefix to apply to all keys in ``keys`` |
1690 | + before removing. |
1691 | + """ |
1692 | + if keys is not None: |
1693 | + keys = ['%s%s' % (prefix, key) for key in keys] |
1694 | + self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) |
1695 | + if self.revision and self.cursor.rowcount: |
1696 | + self.cursor.execute( |
1697 | + 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), |
1698 | + list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) |
1699 | + else: |
1700 | + self.cursor.execute('delete from kv where key like ?', |
1701 | + ['%s%%' % prefix]) |
1702 | + if self.revision and self.cursor.rowcount: |
1703 | + self.cursor.execute( |
1704 | + 'insert into kv_revisions values (?, ?, ?)', |
1705 | + ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) |
1706 | + |
1707 | def set(self, key, value): |
1708 | + """ |
1709 | + Set a value in the database. |
1710 | + |
1711 | + :param str key: Key to set the value for |
1712 | + :param value: Any JSON-serializable value to be set |
1713 | + """ |
1714 | serialized = json.dumps(value) |
1715 | |
1716 | - self.cursor.execute( |
1717 | - 'select data from kv where key=?', [key]) |
1718 | + self.cursor.execute('select data from kv where key=?', [key]) |
1719 | exists = self.cursor.fetchone() |
1720 | |
1721 | # Skip mutations to the same value |
1722 | |
1723 | === modified file 'hooks/charmhelpers/fetch/__init__.py' |
1724 | --- hooks/charmhelpers/fetch/__init__.py 2015-06-24 12:22:08 +0000 |
1725 | +++ hooks/charmhelpers/fetch/__init__.py 2015-11-03 04:19:02 +0000 |
1726 | @@ -90,6 +90,14 @@ |
1727 | 'kilo/proposed': 'trusty-proposed/kilo', |
1728 | 'trusty-kilo/proposed': 'trusty-proposed/kilo', |
1729 | 'trusty-proposed/kilo': 'trusty-proposed/kilo', |
1730 | + # Liberty |
1731 | + 'liberty': 'trusty-updates/liberty', |
1732 | + 'trusty-liberty': 'trusty-updates/liberty', |
1733 | + 'trusty-liberty/updates': 'trusty-updates/liberty', |
1734 | + 'trusty-updates/liberty': 'trusty-updates/liberty', |
1735 | + 'liberty/proposed': 'trusty-proposed/liberty', |
1736 | + 'trusty-liberty/proposed': 'trusty-proposed/liberty', |
1737 | + 'trusty-proposed/liberty': 'trusty-proposed/liberty', |
1738 | } |
1739 | |
1740 | # The order of this list is very important. Handlers should be listed in from |
1741 | @@ -215,19 +223,27 @@ |
1742 | _run_apt_command(cmd, fatal) |
1743 | |
1744 | |
1745 | +def apt_mark(packages, mark, fatal=False): |
1746 | + """Flag one or more packages using apt-mark""" |
1747 | + cmd = ['apt-mark', mark] |
1748 | + if isinstance(packages, six.string_types): |
1749 | + cmd.append(packages) |
1750 | + else: |
1751 | + cmd.extend(packages) |
1752 | + log("Holding {}".format(packages)) |
1753 | + |
1754 | + if fatal: |
1755 | + subprocess.check_call(cmd, universal_newlines=True) |
1756 | + else: |
1757 | + subprocess.call(cmd, universal_newlines=True) |
1758 | + |
1759 | + |
1760 | def apt_hold(packages, fatal=False): |
1761 | - """Hold one or more packages""" |
1762 | - cmd = ['apt-mark', 'hold'] |
1763 | - if isinstance(packages, six.string_types): |
1764 | - cmd.append(packages) |
1765 | - else: |
1766 | - cmd.extend(packages) |
1767 | - log("Holding {}".format(packages)) |
1768 | - |
1769 | - if fatal: |
1770 | - subprocess.check_call(cmd) |
1771 | - else: |
1772 | - subprocess.call(cmd) |
1773 | + return apt_mark(packages, 'hold', fatal=fatal) |
1774 | + |
1775 | + |
1776 | +def apt_unhold(packages, fatal=False): |
1777 | + return apt_mark(packages, 'unhold', fatal=fatal) |
1778 | |
1779 | |
1780 | def add_source(source, key=None): |
1781 | @@ -370,8 +386,9 @@ |
1782 | for handler in handlers: |
1783 | try: |
1784 | installed_to = handler.install(source, *args, **kwargs) |
1785 | - except UnhandledSource: |
1786 | - pass |
1787 | + except UnhandledSource as e: |
1788 | + log('Install source attempt unsuccessful: {}'.format(e), |
1789 | + level='WARNING') |
1790 | if not installed_to: |
1791 | raise UnhandledSource("No handler found for source {}".format(source)) |
1792 | return installed_to |
1793 | |
1794 | === modified file 'hooks/charmhelpers/fetch/archiveurl.py' |
1795 | --- hooks/charmhelpers/fetch/archiveurl.py 2015-06-24 12:22:08 +0000 |
1796 | +++ hooks/charmhelpers/fetch/archiveurl.py 2015-11-03 04:19:02 +0000 |
1797 | @@ -77,6 +77,8 @@ |
1798 | def can_handle(self, source): |
1799 | url_parts = self.parse_url(source) |
1800 | if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): |
1801 | + # XXX: Why is this returning a boolean and a string? It's |
1802 | + # doomed to fail since "bool(can_handle('foo://'))" will be True. |
1803 | return "Wrong source type" |
1804 | if get_archive_handler(self.base_url(source)): |
1805 | return True |
1806 | @@ -155,7 +157,11 @@ |
1807 | else: |
1808 | algorithms = hashlib.algorithms_available |
1809 | if key in algorithms: |
1810 | - check_hash(dld_file, value, key) |
1811 | + if len(value) != 1: |
1812 | + raise TypeError( |
1813 | + "Expected 1 hash value, not %d" % len(value)) |
1814 | + expected = value[0] |
1815 | + check_hash(dld_file, expected, key) |
1816 | if checksum: |
1817 | check_hash(dld_file, checksum, hash_type) |
1818 | return extract(dld_file, dest) |
1819 | |
1820 | === modified file 'hooks/charmhelpers/fetch/giturl.py' |
1821 | --- hooks/charmhelpers/fetch/giturl.py 2015-06-24 12:22:08 +0000 |
1822 | +++ hooks/charmhelpers/fetch/giturl.py 2015-11-03 04:19:02 +0000 |
1823 | @@ -67,7 +67,7 @@ |
1824 | try: |
1825 | self.clone(source, dest_dir, branch, depth) |
1826 | except GitCommandError as e: |
1827 | - raise UnhandledSource(e.message) |
1828 | + raise UnhandledSource(e) |
1829 | except OSError as e: |
1830 | raise UnhandledSource(e.strerror) |
1831 | return dest_dir |
1832 | |
1833 | === added file 'vpp.patch' |
1834 | --- vpp.patch 1970-01-01 00:00:00 +0000 |
1835 | +++ vpp.patch 2015-11-03 04:19:02 +0000 |
1836 | @@ -0,0 +1,1832 @@ |
1837 | +=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py' |
1838 | +--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-06-24 12:22:08 +0000 |
1839 | ++++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-10-30 01:28:39 +0000 |
1840 | +@@ -44,7 +44,7 @@ |
1841 | + Determine if the local branch being tested is derived from its |
1842 | + stable or next (dev) branch, and based on this, use the corresonding |
1843 | + stable or next branches for the other_services.""" |
1844 | +- base_charms = ['mysql', 'mongodb'] |
1845 | ++ base_charms = ['mysql', 'mongodb', 'nrpe'] |
1846 | + |
1847 | + if self.series in ['precise', 'trusty']: |
1848 | + base_series = self.series |
1849 | +@@ -83,9 +83,10 @@ |
1850 | + services.append(this_service) |
1851 | + use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', |
1852 | + 'ceph-osd', 'ceph-radosgw'] |
1853 | +- # Openstack subordinate charms do not expose an origin option as that |
1854 | +- # is controlled by the principle |
1855 | +- ignore = ['neutron-openvswitch', 'cisco-vpp'] |
1856 | ++ # Most OpenStack subordinate charms do not expose an origin option |
1857 | ++ # as that is controlled by the principle. |
1858 | ++ ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', |
1859 | ++ 'cisco-vpp', 'odl-controller'] |
1860 | + |
1861 | + if self.openstack: |
1862 | + for svc in services: |
1863 | +@@ -152,3 +153,36 @@ |
1864 | + return os_origin.split('%s-' % self.series)[1].split('/')[0] |
1865 | + else: |
1866 | + return releases[self.series] |
1867 | ++ |
1868 | ++ def get_ceph_expected_pools(self, radosgw=False): |
1869 | ++ """Return a list of expected ceph pools in a ceph + cinder + glance |
1870 | ++ test scenario, based on OpenStack release and whether ceph radosgw |
1871 | ++ is flagged as present or not.""" |
1872 | ++ |
1873 | ++ if self._get_openstack_release() >= self.trusty_kilo: |
1874 | ++ # Kilo or later |
1875 | ++ pools = [ |
1876 | ++ 'rbd', |
1877 | ++ 'cinder', |
1878 | ++ 'glance' |
1879 | ++ ] |
1880 | ++ else: |
1881 | ++ # Juno or earlier |
1882 | ++ pools = [ |
1883 | ++ 'data', |
1884 | ++ 'metadata', |
1885 | ++ 'rbd', |
1886 | ++ 'cinder', |
1887 | ++ 'glance' |
1888 | ++ ] |
1889 | ++ |
1890 | ++ if radosgw: |
1891 | ++ pools.extend([ |
1892 | ++ '.rgw.root', |
1893 | ++ '.rgw.control', |
1894 | ++ '.rgw', |
1895 | ++ '.rgw.gc', |
1896 | ++ '.users.uid' |
1897 | ++ ]) |
1898 | ++ |
1899 | ++ return pools |
1900 | + |
1901 | +=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py' |
1902 | +--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-06-24 12:22:08 +0000 |
1903 | ++++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-10-30 01:28:39 +0000 |
1904 | +@@ -14,16 +14,20 @@ |
1905 | + # You should have received a copy of the GNU Lesser General Public License |
1906 | + # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
1907 | + |
1908 | ++import amulet |
1909 | ++import json |
1910 | + import logging |
1911 | + import os |
1912 | ++import six |
1913 | + import time |
1914 | + import urllib |
1915 | + |
1916 | ++import cinderclient.v1.client as cinder_client |
1917 | + import glanceclient.v1.client as glance_client |
1918 | ++import heatclient.v1.client as heat_client |
1919 | + import keystoneclient.v2_0 as keystone_client |
1920 | + import novaclient.v1_1.client as nova_client |
1921 | +- |
1922 | +-import six |
1923 | ++import swiftclient |
1924 | + |
1925 | + from charmhelpers.contrib.amulet.utils import ( |
1926 | + AmuletUtils |
1927 | +@@ -37,7 +41,7 @@ |
1928 | + """OpenStack amulet utilities. |
1929 | + |
1930 | + This class inherits from AmuletUtils and has additional support |
1931 | +- that is specifically for use by OpenStack charms. |
1932 | ++ that is specifically for use by OpenStack charm tests. |
1933 | + """ |
1934 | + |
1935 | + def __init__(self, log_level=ERROR): |
1936 | +@@ -51,6 +55,8 @@ |
1937 | + Validate actual endpoint data vs expected endpoint data. The ports |
1938 | + are used to find the matching endpoint. |
1939 | + """ |
1940 | ++ self.log.debug('Validating endpoint data...') |
1941 | ++ self.log.debug('actual: {}'.format(repr(endpoints))) |
1942 | + found = False |
1943 | + for ep in endpoints: |
1944 | + self.log.debug('endpoint: {}'.format(repr(ep))) |
1945 | +@@ -77,6 +83,7 @@ |
1946 | + Validate a list of actual service catalog endpoints vs a list of |
1947 | + expected service catalog endpoints. |
1948 | + """ |
1949 | ++ self.log.debug('Validating service catalog endpoint data...') |
1950 | + self.log.debug('actual: {}'.format(repr(actual))) |
1951 | + for k, v in six.iteritems(expected): |
1952 | + if k in actual: |
1953 | +@@ -93,6 +100,7 @@ |
1954 | + Validate a list of actual tenant data vs list of expected tenant |
1955 | + data. |
1956 | + """ |
1957 | ++ self.log.debug('Validating tenant data...') |
1958 | + self.log.debug('actual: {}'.format(repr(actual))) |
1959 | + for e in expected: |
1960 | + found = False |
1961 | +@@ -114,6 +122,7 @@ |
1962 | + Validate a list of actual role data vs a list of expected role |
1963 | + data. |
1964 | + """ |
1965 | ++ self.log.debug('Validating role data...') |
1966 | + self.log.debug('actual: {}'.format(repr(actual))) |
1967 | + for e in expected: |
1968 | + found = False |
1969 | +@@ -134,6 +143,7 @@ |
1970 | + Validate a list of actual user data vs a list of expected user |
1971 | + data. |
1972 | + """ |
1973 | ++ self.log.debug('Validating user data...') |
1974 | + self.log.debug('actual: {}'.format(repr(actual))) |
1975 | + for e in expected: |
1976 | + found = False |
1977 | +@@ -155,17 +165,30 @@ |
1978 | + |
1979 | + Validate a list of actual flavors vs a list of expected flavors. |
1980 | + """ |
1981 | ++ self.log.debug('Validating flavor data...') |
1982 | + self.log.debug('actual: {}'.format(repr(actual))) |
1983 | + act = [a.name for a in actual] |
1984 | + return self._validate_list_data(expected, act) |
1985 | + |
1986 | + def tenant_exists(self, keystone, tenant): |
1987 | + """Return True if tenant exists.""" |
1988 | ++ self.log.debug('Checking if tenant exists ({})...'.format(tenant)) |
1989 | + return tenant in [t.name for t in keystone.tenants.list()] |
1990 | + |
1991 | ++ def authenticate_cinder_admin(self, keystone_sentry, username, |
1992 | ++ password, tenant): |
1993 | ++ """Authenticates admin user with cinder.""" |
1994 | ++ # NOTE(beisner): cinder python client doesn't accept tokens. |
1995 | ++ service_ip = \ |
1996 | ++ keystone_sentry.relation('shared-db', |
1997 | ++ 'mysql:shared-db')['private-address'] |
1998 | ++ ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8')) |
1999 | ++ return cinder_client.Client(username, password, tenant, ept) |
2000 | ++ |
2001 | + def authenticate_keystone_admin(self, keystone_sentry, user, password, |
2002 | + tenant): |
2003 | + """Authenticates admin user with the keystone admin endpoint.""" |
2004 | ++ self.log.debug('Authenticating keystone admin...') |
2005 | + unit = keystone_sentry |
2006 | + service_ip = unit.relation('shared-db', |
2007 | + 'mysql:shared-db')['private-address'] |
2008 | +@@ -175,6 +198,7 @@ |
2009 | + |
2010 | + def authenticate_keystone_user(self, keystone, user, password, tenant): |
2011 | + """Authenticates a regular user with the keystone public endpoint.""" |
2012 | ++ self.log.debug('Authenticating keystone user ({})...'.format(user)) |
2013 | + ep = keystone.service_catalog.url_for(service_type='identity', |
2014 | + endpoint_type='publicURL') |
2015 | + return keystone_client.Client(username=user, password=password, |
2016 | +@@ -182,19 +206,49 @@ |
2017 | + |
2018 | + def authenticate_glance_admin(self, keystone): |
2019 | + """Authenticates admin user with glance.""" |
2020 | ++ self.log.debug('Authenticating glance admin...') |
2021 | + ep = keystone.service_catalog.url_for(service_type='image', |
2022 | + endpoint_type='adminURL') |
2023 | + return glance_client.Client(ep, token=keystone.auth_token) |
2024 | + |
2025 | ++ def authenticate_heat_admin(self, keystone): |
2026 | ++ """Authenticates the admin user with heat.""" |
2027 | ++ self.log.debug('Authenticating heat admin...') |
2028 | ++ ep = keystone.service_catalog.url_for(service_type='orchestration', |
2029 | ++ endpoint_type='publicURL') |
2030 | ++ return heat_client.Client(endpoint=ep, token=keystone.auth_token) |
2031 | ++ |
2032 | + def authenticate_nova_user(self, keystone, user, password, tenant): |
2033 | + """Authenticates a regular user with nova-api.""" |
2034 | ++ self.log.debug('Authenticating nova user ({})...'.format(user)) |
2035 | + ep = keystone.service_catalog.url_for(service_type='identity', |
2036 | + endpoint_type='publicURL') |
2037 | + return nova_client.Client(username=user, api_key=password, |
2038 | + project_id=tenant, auth_url=ep) |
2039 | + |
2040 | ++ def authenticate_swift_user(self, keystone, user, password, tenant): |
2041 | ++ """Authenticates a regular user with swift api.""" |
2042 | ++ self.log.debug('Authenticating swift user ({})...'.format(user)) |
2043 | ++ ep = keystone.service_catalog.url_for(service_type='identity', |
2044 | ++ endpoint_type='publicURL') |
2045 | ++ return swiftclient.Connection(authurl=ep, |
2046 | ++ user=user, |
2047 | ++ key=password, |
2048 | ++ tenant_name=tenant, |
2049 | ++ auth_version='2.0') |
2050 | ++ |
2051 | + def create_cirros_image(self, glance, image_name): |
2052 | +- """Download the latest cirros image and upload it to glance.""" |
2053 | ++ """Download the latest cirros image and upload it to glance, |
2054 | ++ validate and return a resource pointer. |
2055 | ++ |
2056 | ++ :param glance: pointer to authenticated glance connection |
2057 | ++ :param image_name: display name for new image |
2058 | ++ :returns: glance image pointer |
2059 | ++ """ |
2060 | ++ self.log.debug('Creating glance cirros image ' |
2061 | ++ '({})...'.format(image_name)) |
2062 | ++ |
2063 | ++ # Download cirros image |
2064 | + http_proxy = os.getenv('AMULET_HTTP_PROXY') |
2065 | + self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) |
2066 | + if http_proxy: |
2067 | +@@ -203,57 +257,67 @@ |
2068 | + else: |
2069 | + opener = urllib.FancyURLopener() |
2070 | + |
2071 | +- f = opener.open("http://download.cirros-cloud.net/version/released") |
2072 | ++ f = opener.open('http://download.cirros-cloud.net/version/released') |
2073 | + version = f.read().strip() |
2074 | +- cirros_img = "cirros-{}-x86_64-disk.img".format(version) |
2075 | ++ cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) |
2076 | + local_path = os.path.join('tests', cirros_img) |
2077 | + |
2078 | + if not os.path.exists(local_path): |
2079 | +- cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", |
2080 | ++ cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', |
2081 | + version, cirros_img) |
2082 | + opener.retrieve(cirros_url, local_path) |
2083 | + f.close() |
2084 | + |
2085 | ++ # Create glance image |
2086 | + with open(local_path) as f: |
2087 | + image = glance.images.create(name=image_name, is_public=True, |
2088 | + disk_format='qcow2', |
2089 | + container_format='bare', data=f) |
2090 | +- count = 1 |
2091 | +- status = image.status |
2092 | +- while status != 'active' and count < 10: |
2093 | +- time.sleep(3) |
2094 | +- image = glance.images.get(image.id) |
2095 | +- status = image.status |
2096 | +- self.log.debug('image status: {}'.format(status)) |
2097 | +- count += 1 |
2098 | +- |
2099 | +- if status != 'active': |
2100 | +- self.log.error('image creation timed out') |
2101 | +- return None |
2102 | ++ |
2103 | ++ # Wait for image to reach active status |
2104 | ++ img_id = image.id |
2105 | ++ ret = self.resource_reaches_status(glance.images, img_id, |
2106 | ++ expected_stat='active', |
2107 | ++ msg='Image status wait') |
2108 | ++ if not ret: |
2109 | ++ msg = 'Glance image failed to reach expected state.' |
2110 | ++ amulet.raise_status(amulet.FAIL, msg=msg) |
2111 | ++ |
2112 | ++ # Re-validate new image |
2113 | ++ self.log.debug('Validating image attributes...') |
2114 | ++ val_img_name = glance.images.get(img_id).name |
2115 | ++ val_img_stat = glance.images.get(img_id).status |
2116 | ++ val_img_pub = glance.images.get(img_id).is_public |
2117 | ++ val_img_cfmt = glance.images.get(img_id).container_format |
2118 | ++ val_img_dfmt = glance.images.get(img_id).disk_format |
2119 | ++ msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' |
2120 | ++ 'container fmt:{} disk fmt:{}'.format( |
2121 | ++ val_img_name, val_img_pub, img_id, |
2122 | ++ val_img_stat, val_img_cfmt, val_img_dfmt)) |
2123 | ++ |
2124 | ++ if val_img_name == image_name and val_img_stat == 'active' \ |
2125 | ++ and val_img_pub is True and val_img_cfmt == 'bare' \ |
2126 | ++ and val_img_dfmt == 'qcow2': |
2127 | ++ self.log.debug(msg_attr) |
2128 | ++ else: |
2129 | ++ msg = ('Volume validation failed, {}'.format(msg_attr)) |
2130 | ++ amulet.raise_status(amulet.FAIL, msg=msg) |
2131 | + |
2132 | + return image |
2133 | + |
2134 | + def delete_image(self, glance, image): |
2135 | + """Delete the specified image.""" |
2136 | +- num_before = len(list(glance.images.list())) |
2137 | +- glance.images.delete(image) |
2138 | +- |
2139 | +- count = 1 |
2140 | +- num_after = len(list(glance.images.list())) |
2141 | +- while num_after != (num_before - 1) and count < 10: |
2142 | +- time.sleep(3) |
2143 | +- num_after = len(list(glance.images.list())) |
2144 | +- self.log.debug('number of images: {}'.format(num_after)) |
2145 | +- count += 1 |
2146 | +- |
2147 | +- if num_after != (num_before - 1): |
2148 | +- self.log.error('image deletion timed out') |
2149 | +- return False |
2150 | +- |
2151 | +- return True |
2152 | ++ |
2153 | ++ # /!\ DEPRECATION WARNING |
2154 | ++ self.log.warn('/!\\ DEPRECATION WARNING: use ' |
2155 | ++ 'delete_resource instead of delete_image.') |
2156 | ++ self.log.debug('Deleting glance image ({})...'.format(image)) |
2157 | ++ return self.delete_resource(glance.images, image, msg='glance image') |
2158 | + |
2159 | + def create_instance(self, nova, image_name, instance_name, flavor): |
2160 | + """Create the specified instance.""" |
2161 | ++ self.log.debug('Creating instance ' |
2162 | ++ '({}|{}|{})'.format(instance_name, image_name, flavor)) |
2163 | + image = nova.images.find(name=image_name) |
2164 | + flavor = nova.flavors.find(name=flavor) |
2165 | + instance = nova.servers.create(name=instance_name, image=image, |
2166 | +@@ -276,19 +340,265 @@ |
2167 | + |
2168 | + def delete_instance(self, nova, instance): |
2169 | + """Delete the specified instance.""" |
2170 | +- num_before = len(list(nova.servers.list())) |
2171 | +- nova.servers.delete(instance) |
2172 | +- |
2173 | +- count = 1 |
2174 | +- num_after = len(list(nova.servers.list())) |
2175 | +- while num_after != (num_before - 1) and count < 10: |
2176 | +- time.sleep(3) |
2177 | +- num_after = len(list(nova.servers.list())) |
2178 | +- self.log.debug('number of instances: {}'.format(num_after)) |
2179 | +- count += 1 |
2180 | +- |
2181 | +- if num_after != (num_before - 1): |
2182 | +- self.log.error('instance deletion timed out') |
2183 | +- return False |
2184 | +- |
2185 | +- return True |
2186 | ++ |
2187 | ++ # /!\ DEPRECATION WARNING |
2188 | ++ self.log.warn('/!\\ DEPRECATION WARNING: use ' |
2189 | ++ 'delete_resource instead of delete_instance.') |
2190 | ++ self.log.debug('Deleting instance ({})...'.format(instance)) |
2191 | ++ return self.delete_resource(nova.servers, instance, |
2192 | ++ msg='nova instance') |
2193 | ++ |
2194 | ++ def create_or_get_keypair(self, nova, keypair_name="testkey"): |
2195 | ++ """Create a new keypair, or return pointer if it already exists.""" |
2196 | ++ try: |
2197 | ++ _keypair = nova.keypairs.get(keypair_name) |
2198 | ++ self.log.debug('Keypair ({}) already exists, ' |
2199 | ++ 'using it.'.format(keypair_name)) |
2200 | ++ return _keypair |
2201 | ++ except: |
2202 | ++ self.log.debug('Keypair ({}) does not exist, ' |
2203 | ++ 'creating it.'.format(keypair_name)) |
2204 | ++ |
2205 | ++ _keypair = nova.keypairs.create(name=keypair_name) |
2206 | ++ return _keypair |
2207 | ++ |
2208 | ++ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, |
2209 | ++ img_id=None, src_vol_id=None, snap_id=None): |
2210 | ++ """Create cinder volume, optionally from a glance image, OR |
2211 | ++ optionally as a clone of an existing volume, OR optionally |
2212 | ++ from a snapshot. Wait for the new volume status to reach |
2213 | ++ the expected status, validate and return a resource pointer. |
2214 | ++ |
2215 | ++ :param vol_name: cinder volume display name |
2216 | ++ :param vol_size: size in gigabytes |
2217 | ++ :param img_id: optional glance image id |
2218 | ++ :param src_vol_id: optional source volume id to clone |
2219 | ++ :param snap_id: optional snapshot id to use |
2220 | ++ :returns: cinder volume pointer |
2221 | ++ """ |
2222 | ++ # Handle parameter input and avoid impossible combinations |
2223 | ++ if img_id and not src_vol_id and not snap_id: |
2224 | ++ # Create volume from image |
2225 | ++ self.log.debug('Creating cinder volume from glance image...') |
2226 | ++ bootable = 'true' |
2227 | ++ elif src_vol_id and not img_id and not snap_id: |
2228 | ++ # Clone an existing volume |
2229 | ++ self.log.debug('Cloning cinder volume...') |
2230 | ++ bootable = cinder.volumes.get(src_vol_id).bootable |
2231 | ++ elif snap_id and not src_vol_id and not img_id: |
2232 | ++ # Create volume from snapshot |
2233 | ++ self.log.debug('Creating cinder volume from snapshot...') |
2234 | ++ snap = cinder.volume_snapshots.find(id=snap_id) |
2235 | ++ vol_size = snap.size |
2236 | ++ snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id |
2237 | ++ bootable = cinder.volumes.get(snap_vol_id).bootable |
2238 | ++ elif not img_id and not src_vol_id and not snap_id: |
2239 | ++ # Create volume |
2240 | ++ self.log.debug('Creating cinder volume...') |
2241 | ++ bootable = 'false' |
2242 | ++ else: |
2243 | ++ # Impossible combination of parameters |
2244 | ++ msg = ('Invalid method use - name:{} size:{} img_id:{} ' |
2245 | ++ 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, |
2246 | ++ img_id, src_vol_id, |
2247 | ++ snap_id)) |
2248 | ++ amulet.raise_status(amulet.FAIL, msg=msg) |
2249 | ++ |
2250 | ++ # Create new volume |
2251 | ++ try: |
2252 | ++ vol_new = cinder.volumes.create(display_name=vol_name, |
2253 | ++ imageRef=img_id, |
2254 | ++ size=vol_size, |
2255 | ++ source_volid=src_vol_id, |
2256 | ++ snapshot_id=snap_id) |
2257 | ++ vol_id = vol_new.id |
2258 | ++ except Exception as e: |
2259 | ++ msg = 'Failed to create volume: {}'.format(e) |
2260 | ++ amulet.raise_status(amulet.FAIL, msg=msg) |
2261 | ++ |
2262 | ++ # Wait for volume to reach available status |
2263 | ++ ret = self.resource_reaches_status(cinder.volumes, vol_id, |
2264 | ++ expected_stat="available", |
2265 | ++ msg="Volume status wait") |
2266 | ++ if not ret: |
2267 | ++ msg = 'Cinder volume failed to reach expected state.' |
2268 | ++ amulet.raise_status(amulet.FAIL, msg=msg) |
2269 | ++ |
2270 | ++ # Re-validate new volume |
2271 | ++ self.log.debug('Validating volume attributes...') |
2272 | ++ val_vol_name = cinder.volumes.get(vol_id).display_name |
2273 | ++ val_vol_boot = cinder.volumes.get(vol_id).bootable |
2274 | ++ val_vol_stat = cinder.volumes.get(vol_id).status |
2275 | ++ val_vol_size = cinder.volumes.get(vol_id).size |
2276 | ++ msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' |
2277 | ++ '{} size:{}'.format(val_vol_name, vol_id, |
2278 | ++ val_vol_stat, val_vol_boot, |
2279 | ++ val_vol_size)) |
2280 | ++ |
2281 | ++ if val_vol_boot == bootable and val_vol_stat == 'available' \ |
2282 | ++ and val_vol_name == vol_name and val_vol_size == vol_size: |
2283 | ++ self.log.debug(msg_attr) |
2284 | ++ else: |
2285 | ++ msg = ('Volume validation failed, {}'.format(msg_attr)) |
2286 | ++ amulet.raise_status(amulet.FAIL, msg=msg) |
2287 | ++ |
2288 | ++ return vol_new |
2289 | ++ |
2290 | ++ def delete_resource(self, resource, resource_id, |
2291 | ++ msg="resource", max_wait=120): |
2292 | ++ """Delete one openstack resource, such as one instance, keypair, |
2293 | ++ image, volume, stack, etc., and confirm deletion within max wait time. |
2294 | ++ |
2295 | ++ :param resource: pointer to os resource type, ex:glance_client.images |
2296 | ++ :param resource_id: unique name or id for the openstack resource |
2297 | ++ :param msg: text to identify purpose in logging |
2298 | ++ :param max_wait: maximum wait time in seconds |
2299 | ++ :returns: True if successful, otherwise False |
2300 | ++ """ |
2301 | ++ self.log.debug('Deleting OpenStack resource ' |
2302 | ++ '{} ({})'.format(resource_id, msg)) |
2303 | ++ num_before = len(list(resource.list())) |
2304 | ++ resource.delete(resource_id) |
2305 | ++ |
2306 | ++ tries = 0 |
2307 | ++ num_after = len(list(resource.list())) |
2308 | ++ while num_after != (num_before - 1) and tries < (max_wait / 4): |
2309 | ++ self.log.debug('{} delete check: ' |
2310 | ++ '{} [{}:{}] {}'.format(msg, tries, |
2311 | ++ num_before, |
2312 | ++ num_after, |
2313 | ++ resource_id)) |
2314 | ++ time.sleep(4) |
2315 | ++ num_after = len(list(resource.list())) |
2316 | ++ tries += 1 |
2317 | ++ |
2318 | ++ self.log.debug('{}: expected, actual count = {}, ' |
2319 | ++ '{}'.format(msg, num_before - 1, num_after)) |
2320 | ++ |
2321 | ++ if num_after == (num_before - 1): |
2322 | ++ return True |
2323 | ++ else: |
2324 | ++ self.log.error('{} delete timed out'.format(msg)) |
2325 | ++ return False |
2326 | ++ |
2327 | ++ def resource_reaches_status(self, resource, resource_id, |
2328 | ++ expected_stat='available', |
2329 | ++ msg='resource', max_wait=120): |
2330 | ++ """Wait for an openstack resources status to reach an |
2331 | ++ expected status within a specified time. Useful to confirm that |
2332 | ++ nova instances, cinder vols, snapshots, glance images, heat stacks |
2333 | ++ and other resources eventually reach the expected status. |
2334 | ++ |
2335 | ++ :param resource: pointer to os resource type, ex: heat_client.stacks |
2336 | ++ :param resource_id: unique id for the openstack resource |
2337 | ++ :param expected_stat: status to expect resource to reach |
2338 | ++ :param msg: text to identify purpose in logging |
2339 | ++ :param max_wait: maximum wait time in seconds |
2340 | ++ :returns: True if successful, False if status is not reached |
2341 | ++ """ |
2342 | ++ |
2343 | ++ tries = 0 |
2344 | ++ resource_stat = resource.get(resource_id).status |
2345 | ++ while resource_stat != expected_stat and tries < (max_wait / 4): |
2346 | ++ self.log.debug('{} status check: ' |
2347 | ++ '{} [{}:{}] {}'.format(msg, tries, |
2348 | ++ resource_stat, |
2349 | ++ expected_stat, |
2350 | ++ resource_id)) |
2351 | ++ time.sleep(4) |
2352 | ++ resource_stat = resource.get(resource_id).status |
2353 | ++ tries += 1 |
2354 | ++ |
2355 | ++ self.log.debug('{}: expected, actual status = {}, ' |
2356 | ++ '{}'.format(msg, resource_stat, expected_stat)) |
2357 | ++ |
2358 | ++ if resource_stat == expected_stat: |
2359 | ++ return True |
2360 | ++ else: |
2361 | ++ self.log.debug('{} never reached expected status: ' |
2362 | ++ '{}'.format(resource_id, expected_stat)) |
2363 | ++ return False |
2364 | ++ |
2365 | ++ def get_ceph_osd_id_cmd(self, index): |
2366 | ++ """Produce a shell command that will return a ceph-osd id.""" |
2367 | ++ return ("`initctl list | grep 'ceph-osd ' | " |
2368 | ++ "awk 'NR=={} {{ print $2 }}' | " |
2369 | ++ "grep -o '[0-9]*'`".format(index + 1)) |
2370 | ++ |
2371 | ++ def get_ceph_pools(self, sentry_unit): |
2372 | ++ """Return a dict of ceph pools from a single ceph unit, with |
2373 | ++ pool names as keys and pool IDs as values.""" |
2374 | ++ pools = {} |
2375 | ++ cmd = 'sudo ceph osd lspools' |
2376 | ++ output, code = sentry_unit.run(cmd) |
2377 | ++ if code != 0: |
2378 | ++ msg = ('{} `{}` returned {} ' |
2379 | ++ '{}'.format(sentry_unit.info['unit_name'], |
2380 | ++ cmd, code, output)) |
2381 | ++ amulet.raise_status(amulet.FAIL, msg=msg) |
2382 | ++ |
2383 | ++ # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, |
2384 | ++ for pool in str(output).split(','): |
2385 | ++ pool_id_name = pool.split(' ') |
2386 | ++ if len(pool_id_name) == 2: |
2387 | ++ pool_id = pool_id_name[0] |
2388 | ++ pool_name = pool_id_name[1] |
2389 | ++ pools[pool_name] = int(pool_id) |
2390 | ++ |
2391 | ++ self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], |
2392 | ++ pools)) |
2393 | ++ return pools |
2394 | ++ |
2395 | ++ def get_ceph_df(self, sentry_unit): |
2396 | ++ """Return dict of ceph df json output, including ceph pool state. |
2397 | ++ |
2398 | ++ :param sentry_unit: Pointer to amulet sentry instance (juju unit) |
2399 | ++ :returns: Dict of ceph df output |
2400 | ++ """ |
2401 | ++ cmd = 'sudo ceph df --format=json' |
2402 | ++ output, code = sentry_unit.run(cmd) |
2403 | ++ if code != 0: |
2404 | ++ msg = ('{} `{}` returned {} ' |
2405 | ++ '{}'.format(sentry_unit.info['unit_name'], |
2406 | ++ cmd, code, output)) |
2407 | ++ amulet.raise_status(amulet.FAIL, msg=msg) |
2408 | ++ return json.loads(output) |
2409 | ++ |
2410 | ++ def get_ceph_pool_sample(self, sentry_unit, pool_id=0): |
2411 | ++ """Take a sample of attributes of a ceph pool, returning ceph |
2412 | ++ pool name, object count and disk space used for the specified |
2413 | ++ pool ID number. |
2414 | ++ |
2415 | ++ :param sentry_unit: Pointer to amulet sentry instance (juju unit) |
2416 | ++ :param pool_id: Ceph pool ID |
2417 | ++ :returns: List of pool name, object count, kb disk space used |
2418 | ++ """ |
2419 | ++ df = self.get_ceph_df(sentry_unit) |
2420 | ++ pool_name = df['pools'][pool_id]['name'] |
2421 | ++ obj_count = df['pools'][pool_id]['stats']['objects'] |
2422 | ++ kb_used = df['pools'][pool_id]['stats']['kb_used'] |
2423 | ++ self.log.debug('Ceph {} pool (ID {}): {} objects, ' |
2424 | ++ '{} kb used'.format(pool_name, pool_id, |
2425 | ++ obj_count, kb_used)) |
2426 | ++ return pool_name, obj_count, kb_used |
2427 | ++ |
2428 | ++ def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): |
2429 | ++ """Validate ceph pool samples taken over time, such as pool |
2430 | ++ object counts or pool kb used, before adding, after adding, and |
2431 | ++ after deleting items which affect those pool attributes. The |
2432 | ++ 2nd element is expected to be greater than the 1st; 3rd is expected |
2433 | ++ to be less than the 2nd. |
2434 | ++ |
2435 | ++ :param samples: List containing 3 data samples |
2436 | ++ :param sample_type: String for logging and usage context |
2437 | ++ :returns: None if successful, Failure message otherwise |
2438 | ++ """ |
2439 | ++ original, created, deleted = range(3) |
2440 | ++ if samples[created] <= samples[original] or \ |
2441 | ++ samples[deleted] >= samples[created]: |
2442 | ++ return ('Ceph {} samples ({}) ' |
2443 | ++ 'unexpected.'.format(sample_type, samples)) |
2444 | ++ else: |
2445 | ++ self.log.debug('Ceph {} samples (OK): ' |
2446 | ++ '{}'.format(sample_type, samples)) |
2447 | ++ return None |
2448 | + |
2449 | +=== modified file 'hooks/charmhelpers/contrib/openstack/context.py' |
2450 | +--- hooks/charmhelpers/contrib/openstack/context.py 2015-06-24 12:22:08 +0000 |
2451 | ++++ hooks/charmhelpers/contrib/openstack/context.py 2015-10-30 01:28:39 +0000 |
2452 | +@@ -122,21 +122,24 @@ |
2453 | + of specifying multiple key value pairs within the same string. For |
2454 | + example, a string in the format of 'key1=value1, key2=value2' will |
2455 | + return a dict of: |
2456 | +- {'key1': 'value1', |
2457 | +- 'key2': 'value2'}. |
2458 | ++ |
2459 | ++ {'key1': 'value1', |
2460 | ++ 'key2': 'value2'}. |
2461 | + |
2462 | + 2. A string in the above format, but supporting a comma-delimited list |
2463 | + of values for the same key. For example, a string in the format of |
2464 | + 'key1=value1, key2=value3,value4,value5' will return a dict of: |
2465 | +- {'key1', 'value1', |
2466 | +- 'key2', 'value2,value3,value4'} |
2467 | ++ |
2468 | ++ {'key1', 'value1', |
2469 | ++ 'key2', 'value2,value3,value4'} |
2470 | + |
2471 | + 3. A string containing a colon character (:) prior to an equal |
2472 | + character (=) will be treated as yaml and parsed as such. This can be |
2473 | + used to specify more complex key value pairs. For example, |
2474 | + a string in the format of 'key1: subkey1=value1, subkey2=value2' will |
2475 | + return a dict of: |
2476 | +- {'key1', 'subkey1=value1, subkey2=value2'} |
2477 | ++ |
2478 | ++ {'key1', 'subkey1=value1, subkey2=value2'} |
2479 | + |
2480 | + The provided config_flags string may be a list of comma-separated values |
2481 | + which themselves may be comma-separated list of values. |
2482 | +@@ -240,7 +243,7 @@ |
2483 | + if self.relation_prefix: |
2484 | + password_setting = self.relation_prefix + '_password' |
2485 | + |
2486 | +- for rid in relation_ids('shared-db'): |
2487 | ++ for rid in relation_ids(self.interfaces[0]): |
2488 | + for unit in related_units(rid): |
2489 | + rdata = relation_get(rid=rid, unit=unit) |
2490 | + host = rdata.get('db_host') |
2491 | +@@ -891,8 +894,6 @@ |
2492 | + return ctxt |
2493 | + |
2494 | + def __call__(self): |
2495 | +- self._ensure_packages() |
2496 | +- |
2497 | + if self.network_manager not in ['quantum', 'neutron']: |
2498 | + return {} |
2499 | + |
2500 | +@@ -1050,13 +1051,22 @@ |
2501 | + :param config_file : Service's config file to query sections |
2502 | + :param interface : Subordinate interface to inspect |
2503 | + """ |
2504 | +- self.service = service |
2505 | + self.config_file = config_file |
2506 | +- self.interface = interface |
2507 | ++ if isinstance(service, list): |
2508 | ++ self.services = service |
2509 | ++ else: |
2510 | ++ self.services = [service] |
2511 | ++ if isinstance(interface, list): |
2512 | ++ self.interfaces = interface |
2513 | ++ else: |
2514 | ++ self.interfaces = [interface] |
2515 | + |
2516 | + def __call__(self): |
2517 | + ctxt = {'sections': {}} |
2518 | +- for rid in relation_ids(self.interface): |
2519 | ++ rids = [] |
2520 | ++ for interface in self.interfaces: |
2521 | ++ rids.extend(relation_ids(interface)) |
2522 | ++ for rid in rids: |
2523 | + for unit in related_units(rid): |
2524 | + sub_config = relation_get('subordinate_configuration', |
2525 | + rid=rid, unit=unit) |
2526 | +@@ -1068,29 +1078,32 @@ |
2527 | + 'setting from %s' % rid, level=ERROR) |
2528 | + continue |
2529 | + |
2530 | +- if self.service not in sub_config: |
2531 | +- log('Found subordinate_config on %s but it contained' |
2532 | +- 'nothing for %s service' % (rid, self.service), |
2533 | +- level=INFO) |
2534 | +- continue |
2535 | +- |
2536 | +- sub_config = sub_config[self.service] |
2537 | +- if self.config_file not in sub_config: |
2538 | +- log('Found subordinate_config on %s but it contained' |
2539 | +- 'nothing for %s' % (rid, self.config_file), |
2540 | +- level=INFO) |
2541 | +- continue |
2542 | +- |
2543 | +- sub_config = sub_config[self.config_file] |
2544 | +- for k, v in six.iteritems(sub_config): |
2545 | +- if k == 'sections': |
2546 | +- for section, config_dict in six.iteritems(v): |
2547 | +- log("adding section '%s'" % (section), |
2548 | +- level=DEBUG) |
2549 | +- ctxt[k][section] = config_dict |
2550 | +- else: |
2551 | +- ctxt[k] = v |
2552 | +- |
2553 | ++ for service in self.services: |
2554 | ++ if service not in sub_config: |
2555 | ++ log('Found subordinate_config on %s but it contained' |
2556 | ++ 'nothing for %s service' % (rid, service), |
2557 | ++ level=INFO) |
2558 | ++ continue |
2559 | ++ |
2560 | ++ sub_config = sub_config[service] |
2561 | ++ if self.config_file not in sub_config: |
2562 | ++ log('Found subordinate_config on %s but it contained' |
2563 | ++ 'nothing for %s' % (rid, self.config_file), |
2564 | ++ level=INFO) |
2565 | ++ continue |
2566 | ++ |
2567 | ++ sub_config = sub_config[self.config_file] |
2568 | ++ for k, v in six.iteritems(sub_config): |
2569 | ++ if k == 'sections': |
2570 | ++ for section, config_list in six.iteritems(v): |
2571 | ++ log("adding section '%s'" % (section), |
2572 | ++ level=DEBUG) |
2573 | ++ if ctxt[k].get(section): |
2574 | ++ ctxt[k][section].extend(config_list) |
2575 | ++ else: |
2576 | ++ ctxt[k][section] = config_list |
2577 | ++ else: |
2578 | ++ ctxt[k] = v |
2579 | + log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) |
2580 | + return ctxt |
2581 | + |
2582 | + |
2583 | +=== modified file 'hooks/charmhelpers/contrib/openstack/templates/ceph.conf' |
2584 | +--- hooks/charmhelpers/contrib/openstack/templates/ceph.conf 2015-06-24 12:22:08 +0000 |
2585 | ++++ hooks/charmhelpers/contrib/openstack/templates/ceph.conf 2015-10-30 01:28:39 +0000 |
2586 | +@@ -5,11 +5,11 @@ |
2587 | + ############################################################################### |
2588 | + [global] |
2589 | + {% if auth -%} |
2590 | +- auth_supported = {{ auth }} |
2591 | +- keyring = /etc/ceph/$cluster.$name.keyring |
2592 | +- mon host = {{ mon_hosts }} |
2593 | ++auth_supported = {{ auth }} |
2594 | ++keyring = /etc/ceph/$cluster.$name.keyring |
2595 | ++mon host = {{ mon_hosts }} |
2596 | + {% endif -%} |
2597 | +- log to syslog = {{ use_syslog }} |
2598 | +- err to syslog = {{ use_syslog }} |
2599 | +- clog to syslog = {{ use_syslog }} |
2600 | ++log to syslog = {{ use_syslog }} |
2601 | ++err to syslog = {{ use_syslog }} |
2602 | ++clog to syslog = {{ use_syslog }} |
2603 | + |
2604 | + |
2605 | +=== modified file 'hooks/charmhelpers/contrib/openstack/templating.py' |
2606 | +--- hooks/charmhelpers/contrib/openstack/templating.py 2015-06-24 12:22:08 +0000 |
2607 | ++++ hooks/charmhelpers/contrib/openstack/templating.py 2015-10-30 15:39:12 +0000 |
2608 | +@@ -29,14 +29,13 @@ |
2609 | + try: |
2610 | + from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions |
2611 | + except ImportError: |
2612 | +- # python-jinja2 may not be installed yet, or we're running unittests. |
2613 | +- FileSystemLoader = ChoiceLoader = Environment = exceptions = None |
2614 | ++ apt_install('python-jinja2', fatal=True) |
2615 | ++ from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions |
2616 | + |
2617 | + |
2618 | + class OSConfigException(Exception): |
2619 | + pass |
2620 | + |
2621 | +- |
2622 | + def os_template_dirs(templates_dir, os_release): |
2623 | + tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) |
2624 | + for rel in six.itervalues(OPENSTACK_CODENAMES)] |
2625 | +@@ -61,7 +60,6 @@ |
2626 | + ' '.join(dirs), level=INFO) |
2627 | + return dirs |
2628 | + |
2629 | +- |
2630 | + def get_loader(templates_dir, os_release): |
2631 | + """ |
2632 | + Create a jinja2.ChoiceLoader containing template dirs up to |
2633 | + |
2634 | +=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py' |
2635 | +--- hooks/charmhelpers/contrib/openstack/utils.py 2015-06-24 12:22:08 +0000 |
2636 | ++++ hooks/charmhelpers/contrib/openstack/utils.py 2015-10-31 07:55:21 +0000 |
2637 | +@@ -25,6 +25,7 @@ |
2638 | + import os |
2639 | + import sys |
2640 | + import uuid |
2641 | ++import re |
2642 | + |
2643 | + import six |
2644 | + import yaml |
2645 | +@@ -42,7 +43,7 @@ |
2646 | + INFO, |
2647 | + relation_ids, |
2648 | + related_units, |
2649 | +- relation_set, |
2650 | ++ relation_set |
2651 | + ) |
2652 | + |
2653 | + from charmhelpers.contrib.storage.linux.lvm import ( |
2654 | +@@ -71,7 +72,6 @@ |
2655 | + DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' |
2656 | + 'restricted main multiverse universe') |
2657 | + |
2658 | +- |
2659 | + UBUNTU_OPENSTACK_RELEASE = OrderedDict([ |
2660 | + ('oneiric', 'diablo'), |
2661 | + ('precise', 'essex'), |
2662 | +@@ -81,6 +81,7 @@ |
2663 | + ('trusty', 'icehouse'), |
2664 | + ('utopic', 'juno'), |
2665 | + ('vivid', 'kilo'), |
2666 | ++ ('wily', 'liberty'), |
2667 | + ]) |
2668 | + |
2669 | + |
2670 | +@@ -93,6 +94,7 @@ |
2671 | + ('2014.1', 'icehouse'), |
2672 | + ('2014.2', 'juno'), |
2673 | + ('2015.1', 'kilo'), |
2674 | ++ ('2015.2', 'liberty'), |
2675 | + ]) |
2676 | + |
2677 | + # The ugly duckling |
2678 | +@@ -115,8 +117,37 @@ |
2679 | + ('2.2.0', 'juno'), |
2680 | + ('2.2.1', 'kilo'), |
2681 | + ('2.2.2', 'kilo'), |
2682 | ++ ('2.3.0', 'liberty'), |
2683 | + ]) |
2684 | + |
2685 | ++# >= Liberty version->codename mapping |
2686 | ++PACKAGE_CODENAMES = { |
2687 | ++ 'nova-common': OrderedDict([ |
2688 | ++ ('12.0.0', 'liberty'), |
2689 | ++ ]), |
2690 | ++ 'neutron-common': OrderedDict([ |
2691 | ++ ('7.0.0', 'liberty'), |
2692 | ++ ]), |
2693 | ++ 'cinder-common': OrderedDict([ |
2694 | ++ ('7.0.0', 'liberty'), |
2695 | ++ ]), |
2696 | ++ 'keystone': OrderedDict([ |
2697 | ++ ('8.0.0', 'liberty'), |
2698 | ++ ]), |
2699 | ++ 'horizon-common': OrderedDict([ |
2700 | ++ ('8.0.0', 'liberty'), |
2701 | ++ ]), |
2702 | ++ 'ceilometer-common': OrderedDict([ |
2703 | ++ ('5.0.0', 'liberty'), |
2704 | ++ ]), |
2705 | ++ 'heat-common': OrderedDict([ |
2706 | ++ ('5.0.0', 'liberty'), |
2707 | ++ ]), |
2708 | ++ 'glance-common': OrderedDict([ |
2709 | ++ ('11.0.0', 'liberty'), |
2710 | ++ ]), |
2711 | ++} |
2712 | ++ |
2713 | + DEFAULT_LOOPBACK_SIZE = '5G' |
2714 | + |
2715 | + |
2716 | +@@ -200,20 +231,29 @@ |
2717 | + error_out(e) |
2718 | + |
2719 | + vers = apt.upstream_version(pkg.current_ver.ver_str) |
2720 | ++ match = re.match('^(\d)\.(\d)\.(\d)', vers) |
2721 | ++ if match: |
2722 | ++ vers = match.group(0) |
2723 | + |
2724 | +- try: |
2725 | +- if 'swift' in pkg.name: |
2726 | +- swift_vers = vers[:5] |
2727 | +- if swift_vers not in SWIFT_CODENAMES: |
2728 | +- # Deal with 1.10.0 upward |
2729 | +- swift_vers = vers[:6] |
2730 | +- return SWIFT_CODENAMES[swift_vers] |
2731 | +- else: |
2732 | +- vers = vers[:6] |
2733 | +- return OPENSTACK_CODENAMES[vers] |
2734 | +- except KeyError: |
2735 | +- e = 'Could not determine OpenStack codename for version %s' % vers |
2736 | +- error_out(e) |
2737 | ++ # >= Liberty independent project versions |
2738 | ++ if (package in PACKAGE_CODENAMES and |
2739 | ++ vers in PACKAGE_CODENAMES[package]): |
2740 | ++ return PACKAGE_CODENAMES[package][vers] |
2741 | ++ else: |
2742 | ++ # < Liberty co-ordinated project versions |
2743 | ++ try: |
2744 | ++ if 'swift' in pkg.name: |
2745 | ++ swift_vers = vers[:5] |
2746 | ++ if swift_vers not in SWIFT_CODENAMES: |
2747 | ++ # Deal with 1.10.0 upward |
2748 | ++ swift_vers = vers[:6] |
2749 | ++ return SWIFT_CODENAMES[swift_vers] |
2750 | ++ else: |
2751 | ++ vers = vers[:6] |
2752 | ++ return OPENSTACK_CODENAMES[vers] |
2753 | ++ except KeyError: |
2754 | ++ e = 'Could not determine OpenStack codename for version %s' % vers |
2755 | ++ error_out(e) |
2756 | + |
2757 | + |
2758 | + def get_os_version_package(pkg, fatal=True): |
2759 | +@@ -323,6 +363,9 @@ |
2760 | + 'kilo': 'trusty-updates/kilo', |
2761 | + 'kilo/updates': 'trusty-updates/kilo', |
2762 | + 'kilo/proposed': 'trusty-proposed/kilo', |
2763 | ++ 'liberty': 'trusty-updates/liberty', |
2764 | ++ 'liberty/updates': 'trusty-updates/liberty', |
2765 | ++ 'liberty/proposed': 'trusty-proposed/liberty', |
2766 | + } |
2767 | + |
2768 | + try: |
2769 | +@@ -518,6 +561,7 @@ |
2770 | + Clone/install all specified OpenStack repositories. |
2771 | + |
2772 | + The expected format of projects_yaml is: |
2773 | ++ |
2774 | + repositories: |
2775 | + - {name: keystone, |
2776 | + repository: 'git://git.openstack.org/openstack/keystone.git', |
2777 | +@@ -525,11 +569,13 @@ |
2778 | + - {name: requirements, |
2779 | + repository: 'git://git.openstack.org/openstack/requirements.git', |
2780 | + branch: 'stable/icehouse'} |
2781 | ++ |
2782 | + directory: /mnt/openstack-git |
2783 | + http_proxy: squid-proxy-url |
2784 | + https_proxy: squid-proxy-url |
2785 | + |
2786 | +- The directory, http_proxy, and https_proxy keys are optional. |
2787 | ++ The directory, http_proxy, and https_proxy keys are optional. |
2788 | ++ |
2789 | + """ |
2790 | + global requirements_dir |
2791 | + parent_dir = '/mnt/openstack-git' |
2792 | +@@ -551,6 +597,12 @@ |
2793 | + |
2794 | + pip_create_virtualenv(os.path.join(parent_dir, 'venv')) |
2795 | + |
2796 | ++ # Upgrade setuptools and pip from default virtualenv versions. The default |
2797 | ++ # versions in trusty break master OpenStack branch deployments. |
2798 | ++ for p in ['pip', 'setuptools']: |
2799 | ++ pip_install(p, upgrade=True, proxy=http_proxy, |
2800 | ++ venv=os.path.join(parent_dir, 'venv')) |
2801 | ++ |
2802 | + for p in projects['repositories']: |
2803 | + repo = p['repository'] |
2804 | + branch = p['branch'] |
2805 | +@@ -612,24 +664,24 @@ |
2806 | + else: |
2807 | + repo_dir = dest_dir |
2808 | + |
2809 | ++ venv = os.path.join(parent_dir, 'venv') |
2810 | ++ |
2811 | + if update_requirements: |
2812 | + if not requirements_dir: |
2813 | + error_out('requirements repo must be cloned before ' |
2814 | + 'updating from global requirements.') |
2815 | +- _git_update_requirements(repo_dir, requirements_dir) |
2816 | ++ _git_update_requirements(venv, repo_dir, requirements_dir) |
2817 | + |
2818 | + juju_log('Installing git repo from dir: {}'.format(repo_dir)) |
2819 | + if http_proxy: |
2820 | +- pip_install(repo_dir, proxy=http_proxy, |
2821 | +- venv=os.path.join(parent_dir, 'venv')) |
2822 | ++ pip_install(repo_dir, proxy=http_proxy, venv=venv) |
2823 | + else: |
2824 | +- pip_install(repo_dir, |
2825 | +- venv=os.path.join(parent_dir, 'venv')) |
2826 | ++ pip_install(repo_dir, venv=venv) |
2827 | + |
2828 | + return repo_dir |
2829 | + |
2830 | + |
2831 | +-def _git_update_requirements(package_dir, reqs_dir): |
2832 | ++def _git_update_requirements(venv, package_dir, reqs_dir): |
2833 | + """ |
2834 | + Update from global requirements. |
2835 | + |
2836 | +@@ -638,12 +690,14 @@ |
2837 | + """ |
2838 | + orig_dir = os.getcwd() |
2839 | + os.chdir(reqs_dir) |
2840 | +- cmd = ['python', 'update.py', package_dir] |
2841 | ++ python = os.path.join(venv, 'bin/python') |
2842 | ++ cmd = [python, 'update.py', package_dir] |
2843 | + try: |
2844 | + subprocess.check_call(cmd) |
2845 | + except subprocess.CalledProcessError: |
2846 | + package = os.path.basename(package_dir) |
2847 | +- error_out("Error updating {} from global-requirements.txt".format(package)) |
2848 | ++ error_out("Error updating {} from " |
2849 | ++ "global-requirements.txt".format(package)) |
2850 | + os.chdir(orig_dir) |
2851 | + |
2852 | + |
2853 | +@@ -690,7 +744,6 @@ |
2854 | + |
2855 | + return None |
2856 | + |
2857 | +- |
2858 | + def remote_restart(rel_name, remote_service=None): |
2859 | + trigger = { |
2860 | + 'restart-trigger': str(uuid.uuid4()), |
2861 | +@@ -705,3 +758,4 @@ |
2862 | + relation_set(relation_id=rid, |
2863 | + relation_settings=trigger, |
2864 | + ) |
2865 | ++ |
2866 | + |
2867 | +=== modified file 'hooks/charmhelpers/contrib/python/packages.py' |
2868 | +--- hooks/charmhelpers/contrib/python/packages.py 2015-06-24 12:22:08 +0000 |
2869 | ++++ hooks/charmhelpers/contrib/python/packages.py 2015-10-30 01:28:43 +0000 |
2870 | +@@ -36,6 +36,8 @@ |
2871 | + def parse_options(given, available): |
2872 | + """Given a set of options, check if available""" |
2873 | + for key, value in sorted(given.items()): |
2874 | ++ if not value: |
2875 | ++ continue |
2876 | + if key in available: |
2877 | + yield "--{0}={1}".format(key, value) |
2878 | + |
2879 | + |
2880 | +=== modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py' |
2881 | +--- hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-06-24 12:22:08 +0000 |
2882 | ++++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-10-30 01:28:39 +0000 |
2883 | +@@ -60,12 +60,12 @@ |
2884 | + KEYFILE = '/etc/ceph/ceph.client.{}.key' |
2885 | + |
2886 | + CEPH_CONF = """[global] |
2887 | +- auth supported = {auth} |
2888 | +- keyring = {keyring} |
2889 | +- mon host = {mon_hosts} |
2890 | +- log to syslog = {use_syslog} |
2891 | +- err to syslog = {use_syslog} |
2892 | +- clog to syslog = {use_syslog} |
2893 | ++auth supported = {auth} |
2894 | ++keyring = {keyring} |
2895 | ++mon host = {mon_hosts} |
2896 | ++log to syslog = {use_syslog} |
2897 | ++err to syslog = {use_syslog} |
2898 | ++clog to syslog = {use_syslog} |
2899 | + """ |
2900 | + |
2901 | + |
2902 | + |
2903 | +=== modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py' |
2904 | +--- hooks/charmhelpers/contrib/storage/linux/utils.py 2015-06-24 12:22:08 +0000 |
2905 | ++++ hooks/charmhelpers/contrib/storage/linux/utils.py 2015-10-30 01:28:39 +0000 |
2906 | +@@ -43,9 +43,10 @@ |
2907 | + |
2908 | + :param block_device: str: Full path of block device to clean. |
2909 | + ''' |
2910 | ++ # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b |
2911 | + # sometimes sgdisk exits non-zero; this is OK, dd will clean up |
2912 | +- call(['sgdisk', '--zap-all', '--mbrtogpt', |
2913 | +- '--clear', block_device]) |
2914 | ++ call(['sgdisk', '--zap-all', '--', block_device]) |
2915 | ++ call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device]) |
2916 | + dev_end = check_output(['blockdev', '--getsz', |
2917 | + block_device]).decode('UTF-8') |
2918 | + gpt_end = int(dev_end.split()[0]) - 100 |
2919 | +@@ -67,4 +68,4 @@ |
2920 | + out = check_output(['mount']).decode('UTF-8') |
2921 | + if is_partition: |
2922 | + return bool(re.search(device + r"\b", out)) |
2923 | +- return bool(re.search(device + r"[0-9]+\b", out)) |
2924 | ++ return bool(re.search(device + r"[0-9]*\b", out)) |
2925 | + |
2926 | +=== modified file 'hooks/charmhelpers/core/hookenv.py' |
2927 | +--- hooks/charmhelpers/core/hookenv.py 2015-06-24 12:22:08 +0000 |
2928 | ++++ hooks/charmhelpers/core/hookenv.py 2015-10-30 01:28:39 +0000 |
2929 | +@@ -21,7 +21,10 @@ |
2930 | + # Charm Helpers Developers <juju@lists.ubuntu.com> |
2931 | + |
2932 | + from __future__ import print_function |
2933 | ++import copy |
2934 | ++from distutils.version import LooseVersion |
2935 | + from functools import wraps |
2936 | ++import glob |
2937 | + import os |
2938 | + import json |
2939 | + import yaml |
2940 | +@@ -71,6 +74,7 @@ |
2941 | + res = func(*args, **kwargs) |
2942 | + cache[key] = res |
2943 | + return res |
2944 | ++ wrapper._wrapped = func |
2945 | + return wrapper |
2946 | + |
2947 | + |
2948 | +@@ -170,9 +174,19 @@ |
2949 | + return os.environ.get('JUJU_RELATION', None) |
2950 | + |
2951 | + |
2952 | +-def relation_id(): |
2953 | +- """The relation ID for the current relation hook""" |
2954 | +- return os.environ.get('JUJU_RELATION_ID', None) |
2955 | ++@cached |
2956 | ++def relation_id(relation_name=None, service_or_unit=None): |
2957 | ++ """The relation ID for the current or a specified relation""" |
2958 | ++ if not relation_name and not service_or_unit: |
2959 | ++ return os.environ.get('JUJU_RELATION_ID', None) |
2960 | ++ elif relation_name and service_or_unit: |
2961 | ++ service_name = service_or_unit.split('/')[0] |
2962 | ++ for relid in relation_ids(relation_name): |
2963 | ++ remote_service = remote_service_name(relid) |
2964 | ++ if remote_service == service_name: |
2965 | ++ return relid |
2966 | ++ else: |
2967 | ++ raise ValueError('Must specify neither or both of relation_name and service_or_unit') |
2968 | + |
2969 | + |
2970 | + def local_unit(): |
2971 | +@@ -190,9 +204,20 @@ |
2972 | + return local_unit().split('/')[0] |
2973 | + |
2974 | + |
2975 | ++@cached |
2976 | ++def remote_service_name(relid=None): |
2977 | ++ """The remote service name for a given relation-id (or the current relation)""" |
2978 | ++ if relid is None: |
2979 | ++ unit = remote_unit() |
2980 | ++ else: |
2981 | ++ units = related_units(relid) |
2982 | ++ unit = units[0] if units else None |
2983 | ++ return unit.split('/')[0] if unit else None |
2984 | ++ |
2985 | ++ |
2986 | + def hook_name(): |
2987 | + """The name of the currently executing hook""" |
2988 | +- return os.path.basename(sys.argv[0]) |
2989 | ++ return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0])) |
2990 | + |
2991 | + |
2992 | + class Config(dict): |
2993 | +@@ -242,29 +267,7 @@ |
2994 | + self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) |
2995 | + if os.path.exists(self.path): |
2996 | + self.load_previous() |
2997 | +- |
2998 | +- def __getitem__(self, key): |
2999 | +- """For regular dict lookups, check the current juju config first, |
3000 | +- then the previous (saved) copy. This ensures that user-saved values |
3001 | +- will be returned by a dict lookup. |
3002 | +- |
3003 | +- """ |
3004 | +- try: |
3005 | +- return dict.__getitem__(self, key) |
3006 | +- except KeyError: |
3007 | +- return (self._prev_dict or {})[key] |
3008 | +- |
3009 | +- def get(self, key, default=None): |
3010 | +- try: |
3011 | +- return self[key] |
3012 | +- except KeyError: |
3013 | +- return default |
3014 | +- |
3015 | +- def keys(self): |
3016 | +- prev_keys = [] |
3017 | +- if self._prev_dict is not None: |
3018 | +- prev_keys = self._prev_dict.keys() |
3019 | +- return list(set(prev_keys + list(dict.keys(self)))) |
3020 | ++ atexit(self._implicit_save) |
3021 | + |
3022 | + def load_previous(self, path=None): |
3023 | + """Load previous copy of config from disk. |
3024 | +@@ -283,6 +286,9 @@ |
3025 | + self.path = path or self.path |
3026 | + with open(self.path) as f: |
3027 | + self._prev_dict = json.load(f) |
3028 | ++ for k, v in copy.deepcopy(self._prev_dict).items(): |
3029 | ++ if k not in self: |
3030 | ++ self[k] = v |
3031 | + |
3032 | + def changed(self, key): |
3033 | + """Return True if the current value for this key is different from |
3034 | +@@ -314,13 +320,13 @@ |
3035 | + instance. |
3036 | + |
3037 | + """ |
3038 | +- if self._prev_dict: |
3039 | +- for k, v in six.iteritems(self._prev_dict): |
3040 | +- if k not in self: |
3041 | +- self[k] = v |
3042 | + with open(self.path, 'w') as f: |
3043 | + json.dump(self, f) |
3044 | + |
3045 | ++ def _implicit_save(self): |
3046 | ++ if self.implicit_save: |
3047 | ++ self.save() |
3048 | ++ |
3049 | + |
3050 | + @cached |
3051 | + def config(scope=None): |
3052 | +@@ -485,6 +491,63 @@ |
3053 | + |
3054 | + |
3055 | + @cached |
3056 | ++def relation_to_interface(relation_name): |
3057 | ++ """ |
3058 | ++ Given the name of a relation, return the interface that relation uses. |
3059 | ++ |
3060 | ++ :returns: The interface name, or ``None``. |
3061 | ++ """ |
3062 | ++ return relation_to_role_and_interface(relation_name)[1] |
3063 | ++ |
3064 | ++ |
3065 | ++@cached |
3066 | ++def relation_to_role_and_interface(relation_name): |
3067 | ++ """ |
3068 | ++ Given the name of a relation, return the role and the name of the interface |
3069 | ++ that relation uses (where role is one of ``provides``, ``requires``, or ``peer``). |
3070 | ++ |
3071 | ++ :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. |
3072 | ++ """ |
3073 | ++ _metadata = metadata() |
3074 | ++ for role in ('provides', 'requires', 'peer'): |
3075 | ++ interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') |
3076 | ++ if interface: |
3077 | ++ return role, interface |
3078 | ++ return None, None |
3079 | ++ |
3080 | ++ |
3081 | ++@cached |
3082 | ++def role_and_interface_to_relations(role, interface_name): |
3083 | ++ """ |
3084 | ++ Given a role and interface name, return a list of relation names for the |
3085 | ++ current charm that use that interface under that role (where role is one |
3086 | ++ of ``provides``, ``requires``, or ``peer``). |
3087 | ++ |
3088 | ++ :returns: A list of relation names. |
3089 | ++ """ |
3090 | ++ _metadata = metadata() |
3091 | ++ results = [] |
3092 | ++ for relation_name, relation in _metadata.get(role, {}).items(): |
3093 | ++ if relation['interface'] == interface_name: |
3094 | ++ results.append(relation_name) |
3095 | ++ return results |
3096 | ++ |
3097 | ++ |
3098 | ++@cached |
3099 | ++def interface_to_relations(interface_name): |
3100 | ++ """ |
3101 | ++ Given an interface, return a list of relation names for the current |
3102 | ++ charm that use that interface. |
3103 | ++ |
3104 | ++ :returns: A list of relation names. |
3105 | ++ """ |
3106 | ++ results = [] |
3107 | ++ for role in ('provides', 'requires', 'peer'): |
3108 | ++ results.extend(role_and_interface_to_relations(role, interface_name)) |
3109 | ++ return results |
3110 | ++ |
3111 | ++ |
3112 | ++@cached |
3113 | + def charm_name(): |
3114 | + """Get the name of the current charm as is specified on metadata.yaml""" |
3115 | + return metadata().get('name') |
3116 | +@@ -587,10 +650,14 @@ |
3117 | + hooks.execute(sys.argv) |
3118 | + """ |
3119 | + |
3120 | +- def __init__(self, config_save=True): |
3121 | ++ def __init__(self, config_save=None): |
3122 | + super(Hooks, self).__init__() |
3123 | + self._hooks = {} |
3124 | +- self._config_save = config_save |
3125 | ++ |
3126 | ++ # For unknown reasons, we allow the Hooks constructor to override |
3127 | ++ # config().implicit_save. |
3128 | ++ if config_save is not None: |
3129 | ++ config().implicit_save = config_save |
3130 | + |
3131 | + def register(self, name, function): |
3132 | + """Register a hook""" |
3133 | +@@ -598,13 +665,16 @@ |
3134 | + |
3135 | + def execute(self, args): |
3136 | + """Execute a registered hook based on args[0]""" |
3137 | ++ _run_atstart() |
3138 | + hook_name = os.path.basename(args[0]) |
3139 | + if hook_name in self._hooks: |
3140 | +- self._hooks[hook_name]() |
3141 | +- if self._config_save: |
3142 | +- cfg = config() |
3143 | +- if cfg.implicit_save: |
3144 | +- cfg.save() |
3145 | ++ try: |
3146 | ++ self._hooks[hook_name]() |
3147 | ++ except SystemExit as x: |
3148 | ++ if x.code is None or x.code == 0: |
3149 | ++ _run_atexit() |
3150 | ++ raise |
3151 | ++ _run_atexit() |
3152 | + else: |
3153 | + raise UnregisteredHookError(hook_name) |
3154 | + |
3155 | +@@ -653,6 +723,21 @@ |
3156 | + subprocess.check_call(['action-fail', message]) |
3157 | + |
3158 | + |
3159 | ++def action_name(): |
3160 | ++ """Get the name of the currently executing action.""" |
3161 | ++ return os.environ.get('JUJU_ACTION_NAME') |
3162 | ++ |
3163 | ++ |
3164 | ++def action_uuid(): |
3165 | ++ """Get the UUID of the currently executing action.""" |
3166 | ++ return os.environ.get('JUJU_ACTION_UUID') |
3167 | ++ |
3168 | ++ |
3169 | ++def action_tag(): |
3170 | ++ """Get the tag for the currently executing action.""" |
3171 | ++ return os.environ.get('JUJU_ACTION_TAG') |
3172 | ++ |
3173 | ++ |
3174 | + def status_set(workload_state, message): |
3175 | + """Set the workload state with a message |
3176 | + |
3177 | +@@ -732,13 +817,80 @@ |
3178 | + @translate_exc(from_exc=OSError, to_exc=NotImplementedError) |
3179 | + def leader_set(settings=None, **kwargs): |
3180 | + """Juju leader set value(s)""" |
3181 | +- log("Juju leader-set '%s'" % (settings), level=DEBUG) |
3182 | ++ # Don't log secrets. |
3183 | ++ # log("Juju leader-set '%s'" % (settings), level=DEBUG) |
3184 | + cmd = ['leader-set'] |
3185 | + settings = settings or {} |
3186 | + settings.update(kwargs) |
3187 | +- for k, v in settings.iteritems(): |
3188 | ++ for k, v in settings.items(): |
3189 | + if v is None: |
3190 | + cmd.append('{}='.format(k)) |
3191 | + else: |
3192 | + cmd.append('{}={}'.format(k, v)) |
3193 | + subprocess.check_call(cmd) |
3194 | ++ |
3195 | ++ |
3196 | ++@cached |
3197 | ++def juju_version(): |
3198 | ++ """Full version string (eg. '1.23.3.1-trusty-amd64')""" |
3199 | ++ # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1 |
3200 | ++ jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0] |
3201 | ++ return subprocess.check_output([jujud, 'version'], |
3202 | ++ universal_newlines=True).strip() |
3203 | ++ |
3204 | ++ |
3205 | ++@cached |
3206 | ++def has_juju_version(minimum_version): |
3207 | ++ """Return True if the Juju version is at least the provided version""" |
3208 | ++ return LooseVersion(juju_version()) >= LooseVersion(minimum_version) |
3209 | ++ |
3210 | ++ |
3211 | ++_atexit = [] |
3212 | ++_atstart = [] |
3213 | ++ |
3214 | ++ |
3215 | ++def atstart(callback, *args, **kwargs): |
3216 | ++ '''Schedule a callback to run before the main hook. |
3217 | ++ |
3218 | ++ Callbacks are run in the order they were added. |
3219 | ++ |
3220 | ++ This is useful for modules and classes to perform initialization |
3221 | ++ and inject behavior. In particular: |
3222 | ++ |
3223 | ++ - Run common code before all of your hooks, such as logging |
3224 | ++ the hook name or interesting relation data. |
3225 | ++ - Defer object or module initialization that requires a hook |
3226 | ++ context until we know there actually is a hook context, |
3227 | ++ making testing easier. |
3228 | ++ - Rather than requiring charm authors to include boilerplate to |
3229 | ++ invoke your helper's behavior, have it run automatically if |
3230 | ++ your object is instantiated or module imported. |
3231 | ++ |
3232 | ++ This is not at all useful after your hook framework has been launched.
3233 | ++ ''' |
3234 | ++ global _atstart |
3235 | ++ _atstart.append((callback, args, kwargs)) |
3236 | ++ |
3237 | ++ |
3238 | ++def atexit(callback, *args, **kwargs): |
3239 | ++ '''Schedule a callback to run on successful hook completion. |
3240 | ++ |
3241 | ++ Callbacks are run in the reverse order that they were added.''' |
3242 | ++ _atexit.append((callback, args, kwargs)) |
3243 | ++ |
3244 | ++ |
3245 | ++def _run_atstart(): |
3246 | ++ '''Hook frameworks must invoke this before running the main hook body.''' |
3247 | ++ global _atstart |
3248 | ++ for callback, args, kwargs in _atstart: |
3249 | ++ callback(*args, **kwargs) |
3250 | ++ del _atstart[:] |
3251 | ++ |
3252 | ++ |
3253 | ++def _run_atexit(): |
3254 | ++ '''Hook frameworks must invoke this after the main hook body has |
3255 | ++ successfully completed. Do not invoke it if the hook fails.''' |
3256 | ++ global _atexit |
3257 | ++ for callback, args, kwargs in reversed(_atexit): |
3258 | ++ callback(*args, **kwargs) |
3259 | ++ del _atexit[:] |
3260 | + |
3261 | +=== modified file 'hooks/charmhelpers/core/host.py' |
3262 | +--- hooks/charmhelpers/core/host.py 2015-06-24 12:22:08 +0000 |
3263 | ++++ hooks/charmhelpers/core/host.py 2015-10-30 01:28:39 +0000 |
3264 | +@@ -63,6 +63,36 @@ |
3265 | + return service_result |
3266 | + |
3267 | + |
3268 | ++def service_pause(service_name, init_dir=None): |
3269 | ++ """Pause a system service. |
3270 | ++ |
3271 | ++ Stop it, and prevent it from starting again at boot.""" |
3272 | ++ if init_dir is None: |
3273 | ++ init_dir = "/etc/init" |
3274 | ++ stopped = service_stop(service_name) |
3275 | ++ # XXX: Support systemd too |
3276 | ++ override_path = os.path.join( |
3277 | ++ init_dir, '{}.override'.format(service_name)) |
3278 | ++ with open(override_path, 'w') as fh: |
3279 | ++ fh.write("manual\n") |
3280 | ++ return stopped |
3281 | ++ |
3282 | ++ |
3283 | ++def service_resume(service_name, init_dir=None): |
3284 | ++ """Resume a system service. |
3285 | ++ |
3286 | ++ Re-enable starting again at boot. Start the service."""
3287 | ++ # XXX: Support systemd too |
3288 | ++ if init_dir is None: |
3289 | ++ init_dir = "/etc/init" |
3290 | ++ override_path = os.path.join( |
3291 | ++ init_dir, '{}.override'.format(service_name)) |
3292 | ++ if os.path.exists(override_path): |
3293 | ++ os.unlink(override_path) |
3294 | ++ started = service_start(service_name) |
3295 | ++ return started |
3296 | ++ |
3297 | ++ |
3298 | + def service(action, service_name): |
3299 | + """Control a system service""" |
3300 | + cmd = ['service', service_name, action] |
3301 | +@@ -120,7 +150,7 @@ |
3302 | + |
3303 | + def user_exists(username): |
3304 | + try: |
3305 | +- user_info = pwd.getpwnam(username) |
3306 | ++ pwd.getpwnam(username) |
3307 | + user_exists = True |
3308 | + except KeyError: |
3309 | + user_exists = False |
3310 | +@@ -149,11 +179,7 @@ |
3311 | + |
3312 | + def add_user_to_group(username, group): |
3313 | + """Add a user to a group""" |
3314 | +- cmd = [ |
3315 | +- 'gpasswd', '-a', |
3316 | +- username, |
3317 | +- group |
3318 | +- ] |
3319 | ++ cmd = ['gpasswd', '-a', username, group] |
3320 | + log("Adding user {} to group {}".format(username, group)) |
3321 | + subprocess.check_call(cmd) |
3322 | + |
3323 | +@@ -263,7 +289,6 @@ |
3324 | + return system_mounts |
3325 | + |
3326 | + |
3327 | +- |
3328 | + def fstab_mount(mountpoint): |
3329 | + cmd_args = ['mount', mountpoint] |
3330 | + try: |
3331 | + |
3332 | +=== modified file 'hooks/charmhelpers/core/hugepage.py' |
3333 | +--- hooks/charmhelpers/core/hugepage.py 2015-06-24 12:22:08 +0000 |
3334 | ++++ hooks/charmhelpers/core/hugepage.py 2015-10-30 01:28:39 +0000 |
3335 | +@@ -1,4 +1,3 @@ |
3336 | +- |
3337 | + #!/usr/bin/env python |
3338 | + # -*- coding: utf-8 -*- |
3339 | + |
3340 | +@@ -19,10 +18,8 @@ |
3341 | + # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
3342 | + |
3343 | + import yaml |
3344 | +-from charmhelpers.core.fstab import Fstab |
3345 | +-from charmhelpers.core.sysctl import ( |
3346 | +- create, |
3347 | +-) |
3348 | ++from charmhelpers.core import fstab |
3349 | ++from charmhelpers.core import sysctl |
3350 | + from charmhelpers.core.host import ( |
3351 | + add_group, |
3352 | + add_user_to_group, |
3353 | +@@ -30,8 +27,9 @@ |
3354 | + mkdir, |
3355 | + ) |
3356 | + |
3357 | ++ |
3358 | + def hugepage_support(user, group='hugetlb', nr_hugepages=256, |
3359 | +- max_map_count=65536, mnt_point='/hugepages', |
3360 | ++ max_map_count=65536, mnt_point='/run/hugepages/kvm', |
3361 | + pagesize='2MB', mount=True): |
3362 | + group_info = add_group(group) |
3363 | + gid = group_info.gr_gid |
3364 | +@@ -41,14 +39,14 @@ |
3365 | + 'vm.max_map_count': max_map_count, # 1GB |
3366 | + 'vm.hugetlb_shm_group': gid, |
3367 | + } |
3368 | +- create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') |
3369 | ++ sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') |
3370 | + mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) |
3371 | +- fstab = Fstab() |
3372 | +- fstab_entry = fstab.get_entry_by_attr('mountpoint', mnt_point) |
3373 | ++ lfstab = fstab.Fstab() |
3374 | ++ fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point) |
3375 | + if fstab_entry: |
3376 | +- fstab.remove_entry(fstab_entry) |
3377 | +- entry = fstab.Entry('nodev', mnt_point, 'hugetlbfs', |
3378 | +- 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0) |
3379 | +- fstab.add_entry(entry) |
3380 | ++ lfstab.remove_entry(fstab_entry) |
3381 | ++ entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs', |
3382 | ++ 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0) |
3383 | ++ lfstab.add_entry(entry) |
3384 | + if mount: |
3385 | + fstab_mount(mnt_point) |
3386 | + |
3387 | +=== modified file 'hooks/charmhelpers/core/services/base.py' |
3388 | +--- hooks/charmhelpers/core/services/base.py 2015-06-24 12:22:08 +0000 |
3389 | ++++ hooks/charmhelpers/core/services/base.py 2015-10-30 01:28:39 +0000 |
3390 | +@@ -128,15 +128,18 @@ |
3391 | + """ |
3392 | + Handle the current hook by doing The Right Thing with the registered services. |
3393 | + """ |
3394 | +- hook_name = hookenv.hook_name() |
3395 | +- if hook_name == 'stop': |
3396 | +- self.stop_services() |
3397 | +- else: |
3398 | +- self.reconfigure_services() |
3399 | +- self.provide_data() |
3400 | +- cfg = hookenv.config() |
3401 | +- if cfg.implicit_save: |
3402 | +- cfg.save() |
3403 | ++ hookenv._run_atstart() |
3404 | ++ try: |
3405 | ++ hook_name = hookenv.hook_name() |
3406 | ++ if hook_name == 'stop': |
3407 | ++ self.stop_services() |
3408 | ++ else: |
3409 | ++ self.reconfigure_services() |
3410 | ++ self.provide_data() |
3411 | ++ except SystemExit as x: |
3412 | ++ if x.code is None or x.code == 0: |
3413 | ++ hookenv._run_atexit() |
3414 | ++ hookenv._run_atexit() |
3415 | + |
3416 | + def provide_data(self): |
3417 | + """ |
3418 | + |
3419 | +=== modified file 'hooks/charmhelpers/core/unitdata.py' |
3420 | +--- hooks/charmhelpers/core/unitdata.py 2015-06-24 12:22:08 +0000 |
3421 | ++++ hooks/charmhelpers/core/unitdata.py 2015-10-30 01:28:39 +0000 |
3422 | +@@ -152,6 +152,7 @@ |
3423 | + import collections |
3424 | + import contextlib |
3425 | + import datetime |
3426 | ++import itertools |
3427 | + import json |
3428 | + import os |
3429 | + import pprint |
3430 | +@@ -164,8 +165,7 @@ |
3431 | + class Storage(object): |
3432 | + """Simple key value database for local unit state within charms. |
3433 | + |
3434 | +- Modifications are automatically committed at hook exit. That's |
3435 | +- currently regardless of exit code. |
3436 | ++ Modifications are not persisted unless :meth:`flush` is called. |
3437 | + |
3438 | + To support dicts, lists, integer, floats, and booleans values |
3439 | + are automatically json encoded/decoded. |
3440 | +@@ -173,8 +173,11 @@ |
3441 | + def __init__(self, path=None): |
3442 | + self.db_path = path |
3443 | + if path is None: |
3444 | +- self.db_path = os.path.join( |
3445 | +- os.environ.get('CHARM_DIR', ''), '.unit-state.db') |
3446 | ++ if 'UNIT_STATE_DB' in os.environ: |
3447 | ++ self.db_path = os.environ['UNIT_STATE_DB'] |
3448 | ++ else: |
3449 | ++ self.db_path = os.path.join( |
3450 | ++ os.environ.get('CHARM_DIR', ''), '.unit-state.db') |
3451 | + self.conn = sqlite3.connect('%s' % self.db_path) |
3452 | + self.cursor = self.conn.cursor() |
3453 | + self.revision = None |
3454 | +@@ -189,15 +192,8 @@ |
3455 | + self.conn.close() |
3456 | + self._closed = True |
3457 | + |
3458 | +- def _scoped_query(self, stmt, params=None): |
3459 | +- if params is None: |
3460 | +- params = [] |
3461 | +- return stmt, params |
3462 | +- |
3463 | + def get(self, key, default=None, record=False): |
3464 | +- self.cursor.execute( |
3465 | +- *self._scoped_query( |
3466 | +- 'select data from kv where key=?', [key])) |
3467 | ++ self.cursor.execute('select data from kv where key=?', [key]) |
3468 | + result = self.cursor.fetchone() |
3469 | + if not result: |
3470 | + return default |
3471 | +@@ -206,33 +202,81 @@ |
3472 | + return json.loads(result[0]) |
3473 | + |
3474 | + def getrange(self, key_prefix, strip=False): |
3475 | +- stmt = "select key, data from kv where key like '%s%%'" % key_prefix |
3476 | +- self.cursor.execute(*self._scoped_query(stmt)) |
3477 | ++ """ |
3478 | ++ Get a range of keys starting with a common prefix as a mapping of |
3479 | ++ keys to values. |
3480 | ++ |
3481 | ++ :param str key_prefix: Common prefix among all keys |
3482 | ++ :param bool strip: Optionally strip the common prefix from the key |
3483 | ++ names in the returned dict |
3484 | ++ :return dict: A (possibly empty) dict of key-value mappings |
3485 | ++ """ |
3486 | ++ self.cursor.execute("select key, data from kv where key like ?", |
3487 | ++ ['%s%%' % key_prefix]) |
3488 | + result = self.cursor.fetchall() |
3489 | + |
3490 | + if not result: |
3491 | +- return None |
3492 | ++ return {} |
3493 | + if not strip: |
3494 | + key_prefix = '' |
3495 | + return dict([ |
3496 | + (k[len(key_prefix):], json.loads(v)) for k, v in result]) |
3497 | + |
3498 | + def update(self, mapping, prefix=""): |
3499 | ++ """ |
3500 | ++ Set the values of multiple keys at once. |
3501 | ++ |
3502 | ++ :param dict mapping: Mapping of keys to values |
3503 | ++ :param str prefix: Optional prefix to apply to all keys in `mapping` |
3504 | ++ before setting |
3505 | ++ """ |
3506 | + for k, v in mapping.items(): |
3507 | + self.set("%s%s" % (prefix, k), v) |
3508 | + |
3509 | + def unset(self, key): |
3510 | ++ """ |
3511 | ++ Remove a key from the database entirely. |
3512 | ++ """ |
3513 | + self.cursor.execute('delete from kv where key=?', [key]) |
3514 | + if self.revision and self.cursor.rowcount: |
3515 | + self.cursor.execute( |
3516 | + 'insert into kv_revisions values (?, ?, ?)', |
3517 | + [key, self.revision, json.dumps('DELETED')]) |
3518 | + |
3519 | ++ def unsetrange(self, keys=None, prefix=""): |
3520 | ++ """ |
3521 | ++ Remove a range of keys starting with a common prefix, from the database |
3522 | ++ entirely. |
3523 | ++ |
3524 | ++ :param list keys: List of keys to remove. |
3525 | ++ :param str prefix: Optional prefix to apply to all keys in ``keys`` |
3526 | ++ before removing. |
3527 | ++ """ |
3528 | ++ if keys is not None: |
3529 | ++ keys = ['%s%s' % (prefix, key) for key in keys] |
3530 | ++ self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) |
3531 | ++ if self.revision and self.cursor.rowcount: |
3532 | ++ self.cursor.execute( |
3533 | ++ 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), |
3534 | ++ list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) |
3535 | ++ else: |
3536 | ++ self.cursor.execute('delete from kv where key like ?', |
3537 | ++ ['%s%%' % prefix]) |
3538 | ++ if self.revision and self.cursor.rowcount: |
3539 | ++ self.cursor.execute( |
3540 | ++ 'insert into kv_revisions values (?, ?, ?)', |
3541 | ++ ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) |
3542 | ++ |
3543 | + def set(self, key, value): |
3544 | ++ """ |
3545 | ++ Set a value in the database. |
3546 | ++ |
3547 | ++ :param str key: Key to set the value for |
3548 | ++ :param value: Any JSON-serializable value to be set |
3549 | ++ """ |
3550 | + serialized = json.dumps(value) |
3551 | + |
3552 | +- self.cursor.execute( |
3553 | +- 'select data from kv where key=?', [key]) |
3554 | ++ self.cursor.execute('select data from kv where key=?', [key]) |
3555 | + exists = self.cursor.fetchone() |
3556 | + |
3557 | + # Skip mutations to the same value |
3558 | + |
3559 | +=== modified file 'hooks/charmhelpers/fetch/__init__.py' |
3560 | +--- hooks/charmhelpers/fetch/__init__.py 2015-06-24 12:22:08 +0000 |
3561 | ++++ hooks/charmhelpers/fetch/__init__.py 2015-10-30 01:28:39 +0000 |
3562 | +@@ -90,6 +90,14 @@ |
3563 | + 'kilo/proposed': 'trusty-proposed/kilo', |
3564 | + 'trusty-kilo/proposed': 'trusty-proposed/kilo', |
3565 | + 'trusty-proposed/kilo': 'trusty-proposed/kilo', |
3566 | ++ # Liberty |
3567 | ++ 'liberty': 'trusty-updates/liberty', |
3568 | ++ 'trusty-liberty': 'trusty-updates/liberty', |
3569 | ++ 'trusty-liberty/updates': 'trusty-updates/liberty', |
3570 | ++ 'trusty-updates/liberty': 'trusty-updates/liberty', |
3571 | ++ 'liberty/proposed': 'trusty-proposed/liberty', |
3572 | ++ 'trusty-liberty/proposed': 'trusty-proposed/liberty', |
3573 | ++ 'trusty-proposed/liberty': 'trusty-proposed/liberty', |
3574 | + } |
3575 | + |
3576 | + # The order of this list is very important. Handlers should be listed in from |
3577 | +@@ -215,19 +223,27 @@ |
3578 | + _run_apt_command(cmd, fatal) |
3579 | + |
3580 | + |
3581 | ++def apt_mark(packages, mark, fatal=False): |
3582 | ++ """Flag one or more packages using apt-mark""" |
3583 | ++ cmd = ['apt-mark', mark] |
3584 | ++ if isinstance(packages, six.string_types): |
3585 | ++ cmd.append(packages) |
3586 | ++ else: |
3587 | ++ cmd.extend(packages) |
3588 | ++ log("Holding {}".format(packages)) |
3589 | ++ |
3590 | ++ if fatal: |
3591 | ++ subprocess.check_call(cmd, universal_newlines=True) |
3592 | ++ else: |
3593 | ++ subprocess.call(cmd, universal_newlines=True) |
3594 | ++ |
3595 | ++ |
3596 | + def apt_hold(packages, fatal=False): |
3597 | +- """Hold one or more packages""" |
3598 | +- cmd = ['apt-mark', 'hold'] |
3599 | +- if isinstance(packages, six.string_types): |
3600 | +- cmd.append(packages) |
3601 | +- else: |
3602 | +- cmd.extend(packages) |
3603 | +- log("Holding {}".format(packages)) |
3604 | +- |
3605 | +- if fatal: |
3606 | +- subprocess.check_call(cmd) |
3607 | +- else: |
3608 | +- subprocess.call(cmd) |
3609 | ++ return apt_mark(packages, 'hold', fatal=fatal) |
3610 | ++ |
3611 | ++ |
3612 | ++def apt_unhold(packages, fatal=False): |
3613 | ++ return apt_mark(packages, 'unhold', fatal=fatal) |
3614 | + |
3615 | + |
3616 | + def add_source(source, key=None): |
3617 | +@@ -370,8 +386,9 @@ |
3618 | + for handler in handlers: |
3619 | + try: |
3620 | + installed_to = handler.install(source, *args, **kwargs) |
3621 | +- except UnhandledSource: |
3622 | +- pass |
3623 | ++ except UnhandledSource as e: |
3624 | ++ log('Install source attempt unsuccessful: {}'.format(e), |
3625 | ++ level='WARNING') |
3626 | + if not installed_to: |
3627 | + raise UnhandledSource("No handler found for source {}".format(source)) |
3628 | + return installed_to |
3629 | + |
3630 | +=== modified file 'hooks/charmhelpers/fetch/archiveurl.py' |
3631 | +--- hooks/charmhelpers/fetch/archiveurl.py 2015-06-24 12:22:08 +0000 |
3632 | ++++ hooks/charmhelpers/fetch/archiveurl.py 2015-10-30 01:28:39 +0000 |
3633 | +@@ -77,6 +77,8 @@ |
3634 | + def can_handle(self, source): |
3635 | + url_parts = self.parse_url(source) |
3636 | + if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): |
3637 | ++ # XXX: Why is this returning a boolean and a string? It's |
3638 | ++ # doomed to fail since "bool(can_handle('foo://'))" will be True. |
3639 | + return "Wrong source type" |
3640 | + if get_archive_handler(self.base_url(source)): |
3641 | + return True |
3642 | +@@ -155,7 +157,11 @@ |
3643 | + else: |
3644 | + algorithms = hashlib.algorithms_available |
3645 | + if key in algorithms: |
3646 | +- check_hash(dld_file, value, key) |
3647 | ++ if len(value) != 1: |
3648 | ++ raise TypeError( |
3649 | ++ "Expected 1 hash value, not %d" % len(value)) |
3650 | ++ expected = value[0] |
3651 | ++ check_hash(dld_file, expected, key) |
3652 | + if checksum: |
3653 | + check_hash(dld_file, checksum, hash_type) |
3654 | + return extract(dld_file, dest) |
3655 | + |
3656 | +=== modified file 'hooks/charmhelpers/fetch/giturl.py' |
3657 | +--- hooks/charmhelpers/fetch/giturl.py 2015-06-24 12:22:08 +0000 |
3658 | ++++ hooks/charmhelpers/fetch/giturl.py 2015-10-30 01:28:39 +0000 |
3659 | +@@ -67,7 +67,7 @@ |
3660 | + try: |
3661 | + self.clone(source, dest_dir, branch, depth) |
3662 | + except GitCommandError as e: |
3663 | +- raise UnhandledSource(e.message) |
3664 | ++ raise UnhandledSource(e) |
3665 | + except OSError as e: |
3666 | + raise UnhandledSource(e.strerror) |
3667 | + return dest_dir |
3668 | + |
What is the vpp.patch included in this mp, was it included in error?