Merge lp:~corey.bryant/charms/trusty/keystone/sync-ch into lp:~openstack-charmers-archive/charms/trusty/keystone/next

Proposed by Corey Bryant
Status: Merged
Merged at revision: 163
Proposed branch: lp:~corey.bryant/charms/trusty/keystone/sync-ch
Merge into: lp:~openstack-charmers-archive/charms/trusty/keystone/next
Diff against target: 968 lines (+475/-145)
14 files modified
config.yaml (+1/-1)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+36/-3)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+240/-49)
hooks/charmhelpers/contrib/openstack/context.py (+8/-7)
hooks/charmhelpers/contrib/openstack/templates/ceph.conf (+6/-6)
hooks/charmhelpers/contrib/openstack/utils.py (+9/-5)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+6/-6)
hooks/charmhelpers/core/hookenv.py (+93/-36)
hooks/charmhelpers/core/host.py (+31/-5)
hooks/charmhelpers/core/services/base.py (+12/-9)
hooks/charmhelpers/core/services/helpers.py (+2/-2)
hooks/charmhelpers/fetch/__init__.py (+23/-14)
hooks/charmhelpers/fetch/archiveurl.py (+7/-1)
hooks/charmhelpers/fetch/giturl.py (+1/-1)
To merge this branch: bzr merge lp:~corey.bryant/charms/trusty/keystone/sync-ch
Reviewer Review Type Date Requested Status
OpenStack Charmers Pending
Review via email: mp+265048@code.launchpad.net
To post a comment you must log in.
Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote :

charm_lint_check #6286 keystone-next for corey.bryant mp265048
    LINT FAIL: lint-test failed
    LINT FAIL: charm-proof failed

LINT Results (max last 2 lines):
make: *** [lint] Error 100
ERROR:root:Make target returned non-zero.

Full lint test output: http://paste.ubuntu.com/11889711/
Build: http://10.245.162.77:8080/job/charm_lint_check/6286/

Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote :

charm_unit_test #5918 keystone-next for corey.bryant mp265048
    UNIT OK: passed

Build: http://10.245.162.77:8080/job/charm_unit_test/5918/

Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote :

charm_amulet_test #5141 keystone-next for corey.bryant mp265048
    AMULET OK: passed

Build: http://10.245.162.77:8080/job/charm_amulet_test/5141/

164. By Corey Bryant

Fix lint error in config.yaml

Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote :

charm_lint_check #6341 keystone-next for corey.bryant mp265048
    LINT OK: passed

Build: http://10.245.162.77:8080/job/charm_lint_check/6341/

Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote :

charm_unit_test #5973 keystone-next for corey.bryant mp265048
    UNIT OK: passed

Build: http://10.245.162.77:8080/job/charm_unit_test/5973/

Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote :

charm_amulet_test #5151 keystone-next for corey.bryant mp265048
    AMULET OK: passed

Build: http://10.245.162.77:8080/job/charm_amulet_test/5151/

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1=== modified file 'config.yaml'
2--- config.yaml 2015-07-10 14:14:30 +0000
3+++ config.yaml 2015-07-17 01:29:50 +0000
4@@ -130,7 +130,7 @@
5 identity-backend:
6 type: string
7 default: "sql"
8- description:
9+ description: |
10 Keystone identity backend, valid options are: sql, ldap, kvs, pam.
11 assignment-backend:
12 type: string
13
14=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
15--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-06-19 14:56:49 +0000
16+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-07-17 01:29:50 +0000
17@@ -79,9 +79,9 @@
18 services.append(this_service)
19 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
20 'ceph-osd', 'ceph-radosgw']
21- # Openstack subordinate charms do not expose an origin option as that
22- # is controlled by the principle
23- ignore = ['neutron-openvswitch']
24+ # Most OpenStack subordinate charms do not expose an origin option
25+ # as that is controlled by the principal.
26+ ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch']
27
28 if self.openstack:
29 for svc in services:
30@@ -148,3 +148,36 @@
31 return os_origin.split('%s-' % self.series)[1].split('/')[0]
32 else:
33 return releases[self.series]
34+
35+ def get_ceph_expected_pools(self, radosgw=False):
36+ """Return a list of expected ceph pools in a ceph + cinder + glance
37+ test scenario, based on OpenStack release and whether ceph radosgw
38+ is flagged as present or not."""
39+
40+ if self._get_openstack_release() >= self.trusty_kilo:
41+ # Kilo or later
42+ pools = [
43+ 'rbd',
44+ 'cinder',
45+ 'glance'
46+ ]
47+ else:
48+ # Juno or earlier
49+ pools = [
50+ 'data',
51+ 'metadata',
52+ 'rbd',
53+ 'cinder',
54+ 'glance'
55+ ]
56+
57+ if radosgw:
58+ pools.extend([
59+ '.rgw.root',
60+ '.rgw.control',
61+ '.rgw',
62+ '.rgw.gc',
63+ '.users.uid'
64+ ])
65+
66+ return pools
67
68=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
69--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-06-19 14:56:49 +0000
70+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-07-17 01:29:50 +0000
71@@ -14,16 +14,20 @@
72 # You should have received a copy of the GNU Lesser General Public License
73 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
74
75+import amulet
76+import json
77 import logging
78 import os
79 import six
80 import time
81 import urllib
82
83+import cinderclient.v1.client as cinder_client
84 import glanceclient.v1.client as glance_client
85 import heatclient.v1.client as heat_client
86 import keystoneclient.v2_0 as keystone_client
87 import novaclient.v1_1.client as nova_client
88+import swiftclient
89
90 from charmhelpers.contrib.amulet.utils import (
91 AmuletUtils
92@@ -171,6 +175,16 @@
93 self.log.debug('Checking if tenant exists ({})...'.format(tenant))
94 return tenant in [t.name for t in keystone.tenants.list()]
95
96+ def authenticate_cinder_admin(self, keystone_sentry, username,
97+ password, tenant):
98+ """Authenticates admin user with cinder."""
99+ # NOTE(beisner): cinder python client doesn't accept tokens.
100+ service_ip = \
101+ keystone_sentry.relation('shared-db',
102+ 'mysql:shared-db')['private-address']
103+ ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
104+ return cinder_client.Client(username, password, tenant, ept)
105+
106 def authenticate_keystone_admin(self, keystone_sentry, user, password,
107 tenant):
108 """Authenticates admin user with the keystone admin endpoint."""
109@@ -212,9 +226,29 @@
110 return nova_client.Client(username=user, api_key=password,
111 project_id=tenant, auth_url=ep)
112
113+ def authenticate_swift_user(self, keystone, user, password, tenant):
114+ """Authenticates a regular user with swift api."""
115+ self.log.debug('Authenticating swift user ({})...'.format(user))
116+ ep = keystone.service_catalog.url_for(service_type='identity',
117+ endpoint_type='publicURL')
118+ return swiftclient.Connection(authurl=ep,
119+ user=user,
120+ key=password,
121+ tenant_name=tenant,
122+ auth_version='2.0')
123+
124 def create_cirros_image(self, glance, image_name):
125- """Download the latest cirros image and upload it to glance."""
126- self.log.debug('Creating glance image ({})...'.format(image_name))
127+ """Download the latest cirros image and upload it to glance,
128+ validate and return a resource pointer.
129+
130+ :param glance: pointer to authenticated glance connection
131+ :param image_name: display name for new image
132+ :returns: glance image pointer
133+ """
134+ self.log.debug('Creating glance cirros image '
135+ '({})...'.format(image_name))
136+
137+ # Download cirros image
138 http_proxy = os.getenv('AMULET_HTTP_PROXY')
139 self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
140 if http_proxy:
141@@ -223,33 +257,51 @@
142 else:
143 opener = urllib.FancyURLopener()
144
145- f = opener.open("http://download.cirros-cloud.net/version/released")
146+ f = opener.open('http://download.cirros-cloud.net/version/released')
147 version = f.read().strip()
148- cirros_img = "cirros-{}-x86_64-disk.img".format(version)
149+ cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
150 local_path = os.path.join('tests', cirros_img)
151
152 if not os.path.exists(local_path):
153- cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
154+ cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
155 version, cirros_img)
156 opener.retrieve(cirros_url, local_path)
157 f.close()
158
159+ # Create glance image
160 with open(local_path) as f:
161 image = glance.images.create(name=image_name, is_public=True,
162 disk_format='qcow2',
163 container_format='bare', data=f)
164- count = 1
165- status = image.status
166- while status != 'active' and count < 10:
167- time.sleep(3)
168- image = glance.images.get(image.id)
169- status = image.status
170- self.log.debug('image status: {}'.format(status))
171- count += 1
172-
173- if status != 'active':
174- self.log.error('image creation timed out')
175- return None
176+
177+ # Wait for image to reach active status
178+ img_id = image.id
179+ ret = self.resource_reaches_status(glance.images, img_id,
180+ expected_stat='active',
181+ msg='Image status wait')
182+ if not ret:
183+ msg = 'Glance image failed to reach expected state.'
184+ amulet.raise_status(amulet.FAIL, msg=msg)
185+
186+ # Re-validate new image
187+ self.log.debug('Validating image attributes...')
188+ val_img_name = glance.images.get(img_id).name
189+ val_img_stat = glance.images.get(img_id).status
190+ val_img_pub = glance.images.get(img_id).is_public
191+ val_img_cfmt = glance.images.get(img_id).container_format
192+ val_img_dfmt = glance.images.get(img_id).disk_format
193+ msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
194+ 'container fmt:{} disk fmt:{}'.format(
195+ val_img_name, val_img_pub, img_id,
196+ val_img_stat, val_img_cfmt, val_img_dfmt))
197+
198+ if val_img_name == image_name and val_img_stat == 'active' \
199+ and val_img_pub is True and val_img_cfmt == 'bare' \
200+ and val_img_dfmt == 'qcow2':
201+ self.log.debug(msg_attr)
202+ else:
203+ msg = ('Volume validation failed, {}'.format(msg_attr))
204+ amulet.raise_status(amulet.FAIL, msg=msg)
205
206 return image
207
208@@ -260,22 +312,7 @@
209 self.log.warn('/!\\ DEPRECATION WARNING: use '
210 'delete_resource instead of delete_image.')
211 self.log.debug('Deleting glance image ({})...'.format(image))
212- num_before = len(list(glance.images.list()))
213- glance.images.delete(image)
214-
215- count = 1
216- num_after = len(list(glance.images.list()))
217- while num_after != (num_before - 1) and count < 10:
218- time.sleep(3)
219- num_after = len(list(glance.images.list()))
220- self.log.debug('number of images: {}'.format(num_after))
221- count += 1
222-
223- if num_after != (num_before - 1):
224- self.log.error('image deletion timed out')
225- return False
226-
227- return True
228+ return self.delete_resource(glance.images, image, msg='glance image')
229
230 def create_instance(self, nova, image_name, instance_name, flavor):
231 """Create the specified instance."""
232@@ -308,22 +345,8 @@
233 self.log.warn('/!\\ DEPRECATION WARNING: use '
234 'delete_resource instead of delete_instance.')
235 self.log.debug('Deleting instance ({})...'.format(instance))
236- num_before = len(list(nova.servers.list()))
237- nova.servers.delete(instance)
238-
239- count = 1
240- num_after = len(list(nova.servers.list()))
241- while num_after != (num_before - 1) and count < 10:
242- time.sleep(3)
243- num_after = len(list(nova.servers.list()))
244- self.log.debug('number of instances: {}'.format(num_after))
245- count += 1
246-
247- if num_after != (num_before - 1):
248- self.log.error('instance deletion timed out')
249- return False
250-
251- return True
252+ return self.delete_resource(nova.servers, instance,
253+ msg='nova instance')
254
255 def create_or_get_keypair(self, nova, keypair_name="testkey"):
256 """Create a new keypair, or return pointer if it already exists."""
257@@ -339,6 +362,88 @@
258 _keypair = nova.keypairs.create(name=keypair_name)
259 return _keypair
260
261+ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
262+ img_id=None, src_vol_id=None, snap_id=None):
263+ """Create cinder volume, optionally from a glance image, OR
264+ optionally as a clone of an existing volume, OR optionally
265+ from a snapshot. Wait for the new volume status to reach
266+ the expected status, validate and return a resource pointer.
267+
268+ :param vol_name: cinder volume display name
269+ :param vol_size: size in gigabytes
270+ :param img_id: optional glance image id
271+ :param src_vol_id: optional source volume id to clone
272+ :param snap_id: optional snapshot id to use
273+ :returns: cinder volume pointer
274+ """
275+ # Handle parameter input and avoid impossible combinations
276+ if img_id and not src_vol_id and not snap_id:
277+ # Create volume from image
278+ self.log.debug('Creating cinder volume from glance image...')
279+ bootable = 'true'
280+ elif src_vol_id and not img_id and not snap_id:
281+ # Clone an existing volume
282+ self.log.debug('Cloning cinder volume...')
283+ bootable = cinder.volumes.get(src_vol_id).bootable
284+ elif snap_id and not src_vol_id and not img_id:
285+ # Create volume from snapshot
286+ self.log.debug('Creating cinder volume from snapshot...')
287+ snap = cinder.volume_snapshots.find(id=snap_id)
288+ vol_size = snap.size
289+ snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
290+ bootable = cinder.volumes.get(snap_vol_id).bootable
291+ elif not img_id and not src_vol_id and not snap_id:
292+ # Create volume
293+ self.log.debug('Creating cinder volume...')
294+ bootable = 'false'
295+ else:
296+ # Impossible combination of parameters
297+ msg = ('Invalid method use - name:{} size:{} img_id:{} '
298+ 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
299+ img_id, src_vol_id,
300+ snap_id))
301+ amulet.raise_status(amulet.FAIL, msg=msg)
302+
303+ # Create new volume
304+ try:
305+ vol_new = cinder.volumes.create(display_name=vol_name,
306+ imageRef=img_id,
307+ size=vol_size,
308+ source_volid=src_vol_id,
309+ snapshot_id=snap_id)
310+ vol_id = vol_new.id
311+ except Exception as e:
312+ msg = 'Failed to create volume: {}'.format(e)
313+ amulet.raise_status(amulet.FAIL, msg=msg)
314+
315+ # Wait for volume to reach available status
316+ ret = self.resource_reaches_status(cinder.volumes, vol_id,
317+ expected_stat="available",
318+ msg="Volume status wait")
319+ if not ret:
320+ msg = 'Cinder volume failed to reach expected state.'
321+ amulet.raise_status(amulet.FAIL, msg=msg)
322+
323+ # Re-validate new volume
324+ self.log.debug('Validating volume attributes...')
325+ val_vol_name = cinder.volumes.get(vol_id).display_name
326+ val_vol_boot = cinder.volumes.get(vol_id).bootable
327+ val_vol_stat = cinder.volumes.get(vol_id).status
328+ val_vol_size = cinder.volumes.get(vol_id).size
329+ msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
330+ '{} size:{}'.format(val_vol_name, vol_id,
331+ val_vol_stat, val_vol_boot,
332+ val_vol_size))
333+
334+ if val_vol_boot == bootable and val_vol_stat == 'available' \
335+ and val_vol_name == vol_name and val_vol_size == vol_size:
336+ self.log.debug(msg_attr)
337+ else:
338+ msg = ('Volume validation failed, {}'.format(msg_attr))
339+ amulet.raise_status(amulet.FAIL, msg=msg)
340+
341+ return vol_new
342+
343 def delete_resource(self, resource, resource_id,
344 msg="resource", max_wait=120):
345 """Delete one openstack resource, such as one instance, keypair,
346@@ -350,6 +455,8 @@
347 :param max_wait: maximum wait time in seconds
348 :returns: True if successful, otherwise False
349 """
350+ self.log.debug('Deleting OpenStack resource '
351+ '{} ({})'.format(resource_id, msg))
352 num_before = len(list(resource.list()))
353 resource.delete(resource_id)
354
355@@ -411,3 +518,87 @@
356 self.log.debug('{} never reached expected status: '
357 '{}'.format(resource_id, expected_stat))
358 return False
359+
360+ def get_ceph_osd_id_cmd(self, index):
361+ """Produce a shell command that will return a ceph-osd id."""
362+ return ("`initctl list | grep 'ceph-osd ' | "
363+ "awk 'NR=={} {{ print $2 }}' | "
364+ "grep -o '[0-9]*'`".format(index + 1))
365+
366+ def get_ceph_pools(self, sentry_unit):
367+ """Return a dict of ceph pools from a single ceph unit, with
368+ pool name as keys, pool id as vals."""
369+ pools = {}
370+ cmd = 'sudo ceph osd lspools'
371+ output, code = sentry_unit.run(cmd)
372+ if code != 0:
373+ msg = ('{} `{}` returned {} '
374+ '{}'.format(sentry_unit.info['unit_name'],
375+ cmd, code, output))
376+ amulet.raise_status(amulet.FAIL, msg=msg)
377+
378+ # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
379+ for pool in str(output).split(','):
380+ pool_id_name = pool.split(' ')
381+ if len(pool_id_name) == 2:
382+ pool_id = pool_id_name[0]
383+ pool_name = pool_id_name[1]
384+ pools[pool_name] = int(pool_id)
385+
386+ self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
387+ pools))
388+ return pools
389+
390+ def get_ceph_df(self, sentry_unit):
391+ """Return dict of ceph df json output, including ceph pool state.
392+
393+ :param sentry_unit: Pointer to amulet sentry instance (juju unit)
394+ :returns: Dict of ceph df output
395+ """
396+ cmd = 'sudo ceph df --format=json'
397+ output, code = sentry_unit.run(cmd)
398+ if code != 0:
399+ msg = ('{} `{}` returned {} '
400+ '{}'.format(sentry_unit.info['unit_name'],
401+ cmd, code, output))
402+ amulet.raise_status(amulet.FAIL, msg=msg)
403+ return json.loads(output)
404+
405+ def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
406+ """Take a sample of attributes of a ceph pool, returning ceph
407+ pool name, object count and disk space used for the specified
408+ pool ID number.
409+
410+ :param sentry_unit: Pointer to amulet sentry instance (juju unit)
411+ :param pool_id: Ceph pool ID
412+ :returns: List of pool name, object count, kb disk space used
413+ """
414+ df = self.get_ceph_df(sentry_unit)
415+ pool_name = df['pools'][pool_id]['name']
416+ obj_count = df['pools'][pool_id]['stats']['objects']
417+ kb_used = df['pools'][pool_id]['stats']['kb_used']
418+ self.log.debug('Ceph {} pool (ID {}): {} objects, '
419+ '{} kb used'.format(pool_name, pool_id,
420+ obj_count, kb_used))
421+ return pool_name, obj_count, kb_used
422+
423+ def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
424+ """Validate ceph pool samples taken over time, such as pool
425+ object counts or pool kb used, before adding, after adding, and
426+ after deleting items which affect those pool attributes. The
427+ 2nd element is expected to be greater than the 1st; 3rd is expected
428+ to be less than the 2nd.
429+
430+ :param samples: List containing 3 data samples
431+ :param sample_type: String for logging and usage context
432+ :returns: None if successful, Failure message otherwise
433+ """
434+ original, created, deleted = range(3)
435+ if samples[created] <= samples[original] or \
436+ samples[deleted] >= samples[created]:
437+ return ('Ceph {} samples ({}) '
438+ 'unexpected.'.format(sample_type, samples))
439+ else:
440+ self.log.debug('Ceph {} samples (OK): '
441+ '{}'.format(sample_type, samples))
442+ return None
443
444=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
445--- hooks/charmhelpers/contrib/openstack/context.py 2015-06-19 14:56:49 +0000
446+++ hooks/charmhelpers/contrib/openstack/context.py 2015-07-17 01:29:50 +0000
447@@ -122,21 +122,24 @@
448 of specifying multiple key value pairs within the same string. For
449 example, a string in the format of 'key1=value1, key2=value2' will
450 return a dict of:
451- {'key1': 'value1',
452- 'key2': 'value2'}.
453+
454+ {'key1': 'value1',
455+ 'key2': 'value2'}.
456
457 2. A string in the above format, but supporting a comma-delimited list
458 of values for the same key. For example, a string in the format of
459 'key1=value1, key2=value3,value4,value5' will return a dict of:
460- {'key1', 'value1',
461- 'key2', 'value2,value3,value4'}
462+
463+ {'key1', 'value1',
464+ 'key2', 'value2,value3,value4'}
465
466 3. A string containing a colon character (:) prior to an equal
467 character (=) will be treated as yaml and parsed as such. This can be
468 used to specify more complex key value pairs. For example,
469 a string in the format of 'key1: subkey1=value1, subkey2=value2' will
470 return a dict of:
471- {'key1', 'subkey1=value1, subkey2=value2'}
472+
473+ {'key1', 'subkey1=value1, subkey2=value2'}
474
475 The provided config_flags string may be a list of comma-separated values
476 which themselves may be comma-separated list of values.
477@@ -891,8 +894,6 @@
478 return ctxt
479
480 def __call__(self):
481- self._ensure_packages()
482-
483 if self.network_manager not in ['quantum', 'neutron']:
484 return {}
485
486
487=== modified file 'hooks/charmhelpers/contrib/openstack/templates/ceph.conf'
488--- hooks/charmhelpers/contrib/openstack/templates/ceph.conf 2014-03-27 10:54:38 +0000
489+++ hooks/charmhelpers/contrib/openstack/templates/ceph.conf 2015-07-17 01:29:50 +0000
490@@ -5,11 +5,11 @@
491 ###############################################################################
492 [global]
493 {% if auth -%}
494- auth_supported = {{ auth }}
495- keyring = /etc/ceph/$cluster.$name.keyring
496- mon host = {{ mon_hosts }}
497+auth_supported = {{ auth }}
498+keyring = /etc/ceph/$cluster.$name.keyring
499+mon host = {{ mon_hosts }}
500 {% endif -%}
501- log to syslog = {{ use_syslog }}
502- err to syslog = {{ use_syslog }}
503- clog to syslog = {{ use_syslog }}
504+log to syslog = {{ use_syslog }}
505+err to syslog = {{ use_syslog }}
506+clog to syslog = {{ use_syslog }}
507
508
509=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py'
510--- hooks/charmhelpers/contrib/openstack/utils.py 2015-06-19 14:56:49 +0000
511+++ hooks/charmhelpers/contrib/openstack/utils.py 2015-07-17 01:29:50 +0000
512@@ -522,6 +522,7 @@
513 Clone/install all specified OpenStack repositories.
514
515 The expected format of projects_yaml is:
516+
517 repositories:
518 - {name: keystone,
519 repository: 'git://git.openstack.org/openstack/keystone.git',
520@@ -529,11 +530,13 @@
521 - {name: requirements,
522 repository: 'git://git.openstack.org/openstack/requirements.git',
523 branch: 'stable/icehouse'}
524+
525 directory: /mnt/openstack-git
526 http_proxy: squid-proxy-url
527 https_proxy: squid-proxy-url
528
529- The directory, http_proxy, and https_proxy keys are optional.
530+ The directory, http_proxy, and https_proxy keys are optional.
531+
532 """
533 global requirements_dir
534 parent_dir = '/mnt/openstack-git'
535@@ -555,10 +558,11 @@
536
537 pip_create_virtualenv(os.path.join(parent_dir, 'venv'))
538
539- # Upgrade setuptools from default virtualenv version. The default version
540- # in trusty breaks update.py in global requirements master branch.
541- pip_install('setuptools', upgrade=True, proxy=http_proxy,
542- venv=os.path.join(parent_dir, 'venv'))
543+ # Upgrade setuptools and pip from default virtualenv versions. The default
544+ # versions in trusty break master OpenStack branch deployments.
545+ for p in ['pip', 'setuptools']:
546+ pip_install(p, upgrade=True, proxy=http_proxy,
547+ venv=os.path.join(parent_dir, 'venv'))
548
549 for p in projects['repositories']:
550 repo = p['repository']
551
552=== modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
553--- hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-03-11 11:45:09 +0000
554+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-07-17 01:29:50 +0000
555@@ -60,12 +60,12 @@
556 KEYFILE = '/etc/ceph/ceph.client.{}.key'
557
558 CEPH_CONF = """[global]
559- auth supported = {auth}
560- keyring = {keyring}
561- mon host = {mon_hosts}
562- log to syslog = {use_syslog}
563- err to syslog = {use_syslog}
564- clog to syslog = {use_syslog}
565+auth supported = {auth}
566+keyring = {keyring}
567+mon host = {mon_hosts}
568+log to syslog = {use_syslog}
569+err to syslog = {use_syslog}
570+clog to syslog = {use_syslog}
571 """
572
573
574
575=== modified file 'hooks/charmhelpers/core/hookenv.py'
576--- hooks/charmhelpers/core/hookenv.py 2015-06-10 20:44:02 +0000
577+++ hooks/charmhelpers/core/hookenv.py 2015-07-17 01:29:50 +0000
578@@ -21,7 +21,9 @@
579 # Charm Helpers Developers <juju@lists.ubuntu.com>
580
581 from __future__ import print_function
582+from distutils.version import LooseVersion
583 from functools import wraps
584+import glob
585 import os
586 import json
587 import yaml
588@@ -242,29 +244,7 @@
589 self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
590 if os.path.exists(self.path):
591 self.load_previous()
592-
593- def __getitem__(self, key):
594- """For regular dict lookups, check the current juju config first,
595- then the previous (saved) copy. This ensures that user-saved values
596- will be returned by a dict lookup.
597-
598- """
599- try:
600- return dict.__getitem__(self, key)
601- except KeyError:
602- return (self._prev_dict or {})[key]
603-
604- def get(self, key, default=None):
605- try:
606- return self[key]
607- except KeyError:
608- return default
609-
610- def keys(self):
611- prev_keys = []
612- if self._prev_dict is not None:
613- prev_keys = self._prev_dict.keys()
614- return list(set(prev_keys + list(dict.keys(self))))
615+ atexit(self._implicit_save)
616
617 def load_previous(self, path=None):
618 """Load previous copy of config from disk.
619@@ -283,6 +263,9 @@
620 self.path = path or self.path
621 with open(self.path) as f:
622 self._prev_dict = json.load(f)
623+ for k, v in self._prev_dict.items():
624+ if k not in self:
625+ self[k] = v
626
627 def changed(self, key):
628 """Return True if the current value for this key is different from
629@@ -314,13 +297,13 @@
630 instance.
631
632 """
633- if self._prev_dict:
634- for k, v in six.iteritems(self._prev_dict):
635- if k not in self:
636- self[k] = v
637 with open(self.path, 'w') as f:
638 json.dump(self, f)
639
640+ def _implicit_save(self):
641+ if self.implicit_save:
642+ self.save()
643+
644
645 @cached
646 def config(scope=None):
647@@ -587,10 +570,14 @@
648 hooks.execute(sys.argv)
649 """
650
651- def __init__(self, config_save=True):
652+ def __init__(self, config_save=None):
653 super(Hooks, self).__init__()
654 self._hooks = {}
655- self._config_save = config_save
656+
657+ # For unknown reasons, we allow the Hooks constructor to override
658+ # config().implicit_save.
659+ if config_save is not None:
660+ config().implicit_save = config_save
661
662 def register(self, name, function):
663 """Register a hook"""
664@@ -598,13 +585,16 @@
665
666 def execute(self, args):
667 """Execute a registered hook based on args[0]"""
668+ _run_atstart()
669 hook_name = os.path.basename(args[0])
670 if hook_name in self._hooks:
671- self._hooks[hook_name]()
672- if self._config_save:
673- cfg = config()
674- if cfg.implicit_save:
675- cfg.save()
676+ try:
677+ self._hooks[hook_name]()
678+ except SystemExit as x:
679+ if x.code is None or x.code == 0:
680+ _run_atexit()
681+ raise
682+ _run_atexit()
683 else:
684 raise UnregisteredHookError(hook_name)
685
686@@ -732,13 +722,80 @@
687 @translate_exc(from_exc=OSError, to_exc=NotImplementedError)
688 def leader_set(settings=None, **kwargs):
689 """Juju leader set value(s)"""
690- log("Juju leader-set '%s'" % (settings), level=DEBUG)
691+ # Don't log secrets.
692+ # log("Juju leader-set '%s'" % (settings), level=DEBUG)
693 cmd = ['leader-set']
694 settings = settings or {}
695 settings.update(kwargs)
696- for k, v in settings.iteritems():
697+ for k, v in settings.items():
698 if v is None:
699 cmd.append('{}='.format(k))
700 else:
701 cmd.append('{}={}'.format(k, v))
702 subprocess.check_call(cmd)
703+
704+
705+@cached
706+def juju_version():
707+ """Full version string (eg. '1.23.3.1-trusty-amd64')"""
708+ # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
709+ jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
710+ return subprocess.check_output([jujud, 'version'],
711+ universal_newlines=True).strip()
712+
713+
714+@cached
715+def has_juju_version(minimum_version):
716+ """Return True if the Juju version is at least the provided version"""
717+ return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
718+
719+
720+_atexit = []
721+_atstart = []
722+
723+
724+def atstart(callback, *args, **kwargs):
725+ '''Schedule a callback to run before the main hook.
726+
727+ Callbacks are run in the order they were added.
728+
729+ This is useful for modules and classes to perform initialization
730+ and inject behavior. In particular:
731+
732+ - Run common code before all of your hooks, such as logging
733+ the hook name or interesting relation data.
734+ - Defer object or module initialization that requires a hook
735+ context until we know there actually is a hook context,
736+ making testing easier.
737+ - Rather than requiring charm authors to include boilerplate to
738+ invoke your helper's behavior, have it run automatically if
739+ your object is instantiated or module imported.
740+
741+ This is not at all useful after your hook framework has been launched.
742+ '''
743+ global _atstart
744+ _atstart.append((callback, args, kwargs))
745+
746+
747+def atexit(callback, *args, **kwargs):
748+ '''Schedule a callback to run on successful hook completion.
749+
750+ Callbacks are run in the reverse order that they were added.'''
751+ _atexit.append((callback, args, kwargs))
752+
753+
754+def _run_atstart():
755+ '''Hook frameworks must invoke this before running the main hook body.'''
756+ global _atstart
757+ for callback, args, kwargs in _atstart:
758+ callback(*args, **kwargs)
759+ del _atstart[:]
760+
761+
762+def _run_atexit():
763+ '''Hook frameworks must invoke this after the main hook body has
764+ successfully completed. Do not invoke it if the hook fails.'''
765+ global _atexit
766+ for callback, args, kwargs in reversed(_atexit):
767+ callback(*args, **kwargs)
768+ del _atexit[:]
769
770=== modified file 'hooks/charmhelpers/core/host.py'
771--- hooks/charmhelpers/core/host.py 2015-06-19 14:56:49 +0000
772+++ hooks/charmhelpers/core/host.py 2015-07-17 01:29:50 +0000
773@@ -63,6 +63,36 @@
774 return service_result
775
776
777+def service_pause(service_name, init_dir=None):
778+ """Pause a system service.
779+
780+ Stop it, and prevent it from starting again at boot."""
781+ if init_dir is None:
782+ init_dir = "/etc/init"
783+ stopped = service_stop(service_name)
784+ # XXX: Support systemd too
785+ override_path = os.path.join(
786+ init_dir, '{}.conf.override'.format(service_name))
787+ with open(override_path, 'w') as fh:
788+ fh.write("manual\n")
789+ return stopped
790+
791+
792+def service_resume(service_name, init_dir=None):
793+ """Resume a system service.
794+
795+ Reenable starting again at boot. Start the service"""
796+ # XXX: Support systemd too
797+ if init_dir is None:
798+ init_dir = "/etc/init"
799+ override_path = os.path.join(
800+ init_dir, '{}.conf.override'.format(service_name))
801+ if os.path.exists(override_path):
802+ os.unlink(override_path)
803+ started = service_start(service_name)
804+ return started
805+
806+
807 def service(action, service_name):
808 """Control a system service"""
809 cmd = ['service', service_name, action]
810@@ -140,11 +170,7 @@
811
812 def add_user_to_group(username, group):
813 """Add a user to a group"""
814- cmd = [
815- 'gpasswd', '-a',
816- username,
817- group
818- ]
819+ cmd = ['gpasswd', '-a', username, group]
820 log("Adding user {} to group {}".format(username, group))
821 subprocess.check_call(cmd)
822
823
824=== modified file 'hooks/charmhelpers/core/services/base.py'
825--- hooks/charmhelpers/core/services/base.py 2015-06-10 20:44:02 +0000
826+++ hooks/charmhelpers/core/services/base.py 2015-07-17 01:29:50 +0000
827@@ -128,15 +128,18 @@
828 """
829 Handle the current hook by doing The Right Thing with the registered services.
830 """
831- hook_name = hookenv.hook_name()
832- if hook_name == 'stop':
833- self.stop_services()
834- else:
835- self.reconfigure_services()
836- self.provide_data()
837- cfg = hookenv.config()
838- if cfg.implicit_save:
839- cfg.save()
840+ hookenv._run_atstart()
841+ try:
842+ hook_name = hookenv.hook_name()
843+ if hook_name == 'stop':
844+ self.stop_services()
845+ else:
846+ self.reconfigure_services()
847+ self.provide_data()
848+ except SystemExit as x:
849+ if x.code is None or x.code == 0:
850+ hookenv._run_atexit()
851+ hookenv._run_atexit()
852
853 def provide_data(self):
854 """
855
856=== modified file 'hooks/charmhelpers/core/services/helpers.py'
857--- hooks/charmhelpers/core/services/helpers.py 2015-05-11 07:38:06 +0000
858+++ hooks/charmhelpers/core/services/helpers.py 2015-07-17 01:29:50 +0000
859@@ -239,12 +239,12 @@
860 action.
861
862 :param str source: The template source file, relative to
863- `$CHARM_DIR/templates`
864-
865+ `$CHARM_DIR/templates`
866 :param str target: The target to write the rendered template to
867 :param str owner: The owner of the rendered file
868 :param str group: The group of the rendered file
869 :param int perms: The permissions of the rendered file
870+
871 """
872 def __init__(self, source, target,
873 owner='root', group='root', perms=0o444):
874
875=== modified file 'hooks/charmhelpers/fetch/__init__.py'
876--- hooks/charmhelpers/fetch/__init__.py 2015-06-10 20:44:02 +0000
877+++ hooks/charmhelpers/fetch/__init__.py 2015-07-17 01:29:50 +0000
878@@ -215,19 +215,27 @@
879 _run_apt_command(cmd, fatal)
880
881
882+def apt_mark(packages, mark, fatal=False):
883+ """Flag one or more packages using apt-mark"""
884+ cmd = ['apt-mark', mark]
885+ if isinstance(packages, six.string_types):
886+ cmd.append(packages)
887+ else:
888+ cmd.extend(packages)
889+ log("Holding {}".format(packages))
890+
891+ if fatal:
892+ subprocess.check_call(cmd, universal_newlines=True)
893+ else:
894+ subprocess.call(cmd, universal_newlines=True)
895+
896+
897 def apt_hold(packages, fatal=False):
898- """Hold one or more packages"""
899- cmd = ['apt-mark', 'hold']
900- if isinstance(packages, six.string_types):
901- cmd.append(packages)
902- else:
903- cmd.extend(packages)
904- log("Holding {}".format(packages))
905-
906- if fatal:
907- subprocess.check_call(cmd)
908- else:
909- subprocess.call(cmd)
910+ return apt_mark(packages, 'hold', fatal=fatal)
911+
912+
913+def apt_unhold(packages, fatal=False):
914+ return apt_mark(packages, 'unhold', fatal=fatal)
915
916
917 def add_source(source, key=None):
918@@ -370,8 +378,9 @@
919 for handler in handlers:
920 try:
921 installed_to = handler.install(source, *args, **kwargs)
922- except UnhandledSource:
923- pass
924+ except UnhandledSource as e:
925+ log('Install source attempt unsuccessful: {}'.format(e),
926+ level='WARNING')
927 if not installed_to:
928 raise UnhandledSource("No handler found for source {}".format(source))
929 return installed_to
930
931=== modified file 'hooks/charmhelpers/fetch/archiveurl.py'
932--- hooks/charmhelpers/fetch/archiveurl.py 2015-03-11 11:45:09 +0000
933+++ hooks/charmhelpers/fetch/archiveurl.py 2015-07-17 01:29:50 +0000
934@@ -77,6 +77,8 @@
935 def can_handle(self, source):
936 url_parts = self.parse_url(source)
937 if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
938+ # XXX: Why is this returning a boolean and a string? It's
939+ # doomed to fail since "bool(can_handle('foo://'))" will be True.
940 return "Wrong source type"
941 if get_archive_handler(self.base_url(source)):
942 return True
943@@ -155,7 +157,11 @@
944 else:
945 algorithms = hashlib.algorithms_available
946 if key in algorithms:
947- check_hash(dld_file, value, key)
948+ if len(value) != 1:
949+ raise TypeError(
950+ "Expected 1 hash value, not %d" % len(value))
951+ expected = value[0]
952+ check_hash(dld_file, expected, key)
953 if checksum:
954 check_hash(dld_file, checksum, hash_type)
955 return extract(dld_file, dest)
956
957=== modified file 'hooks/charmhelpers/fetch/giturl.py'
958--- hooks/charmhelpers/fetch/giturl.py 2015-06-10 20:44:02 +0000
959+++ hooks/charmhelpers/fetch/giturl.py 2015-07-17 01:29:50 +0000
960@@ -67,7 +67,7 @@
961 try:
962 self.clone(source, dest_dir, branch, depth)
963 except GitCommandError as e:
964- raise UnhandledSource(e.message)
965+ raise UnhandledSource(e)
966 except OSError as e:
967 raise UnhandledSource(e.strerror)
968 return dest_dir

Subscribers

People subscribed via source and target branches