Merge lp:~corey.bryant/charms/trusty/glance/sync-ch into lp:~openstack-charmers-archive/charms/trusty/glance/next

Proposed by Corey Bryant
Status: Merged
Merged at revision: 128
Proposed branch: lp:~corey.bryant/charms/trusty/glance/sync-ch
Merge into: lp:~openstack-charmers-archive/charms/trusty/glance/next
Diff against target: 1095 lines (+437/-170)
15 files modified
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+36/-3)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+240/-49)
hooks/charmhelpers/contrib/openstack/context.py (+8/-7)
hooks/charmhelpers/contrib/openstack/templates/ceph.conf (+6/-6)
hooks/charmhelpers/contrib/openstack/utils.py (+9/-5)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+6/-6)
hooks/charmhelpers/core/hookenv.py (+1/-0)
hooks/charmhelpers/core/host.py (+31/-5)
hooks/charmhelpers/core/services/helpers.py (+2/-2)
hooks/charmhelpers/fetch/__init__.py (+23/-14)
hooks/charmhelpers/fetch/archiveurl.py (+7/-1)
hooks/charmhelpers/fetch/giturl.py (+1/-1)
tests/charmhelpers/contrib/amulet/utils.py (+38/-46)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+3/-2)
tests/charmhelpers/contrib/openstack/amulet/utils.py (+26/-23)
To merge this branch: bzr merge lp:~corey.bryant/charms/trusty/glance/sync-ch
Reviewer: OpenStack Charmers
Review status: Pending
Review via email: mp+265047@code.launchpad.net
Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote :

charm_lint_check #6285 glance-next for corey.bryant mp265047
    LINT OK: passed

Build: http://10.245.162.77:8080/job/charm_lint_check/6285/

Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote :

charm_unit_test #5917 glance-next for corey.bryant mp265047
    UNIT OK: passed

Build: http://10.245.162.77:8080/job/charm_unit_test/5917/

Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote :

charm_amulet_test #5140 glance-next for corey.bryant mp265047
    AMULET OK: passed

Build: http://10.245.162.77:8080/job/charm_amulet_test/5140/

Preview Diff

1=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
2--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-06-19 15:08:48 +0000
3+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-07-16 20:31:40 +0000
4@@ -79,9 +79,9 @@
5 services.append(this_service)
6 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
7 'ceph-osd', 'ceph-radosgw']
8- # Openstack subordinate charms do not expose an origin option as that
9- # is controlled by the principle
10- ignore = ['neutron-openvswitch']
11+ # Most OpenStack subordinate charms do not expose an origin option
12+ # as that is controlled by the principle.
13+ ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch']
14
15 if self.openstack:
16 for svc in services:
17@@ -148,3 +148,36 @@
18 return os_origin.split('%s-' % self.series)[1].split('/')[0]
19 else:
20 return releases[self.series]
21+
22+ def get_ceph_expected_pools(self, radosgw=False):
23+ """Return a list of expected ceph pools in a ceph + cinder + glance
24+ test scenario, based on OpenStack release and whether ceph radosgw
25+ is flagged as present or not."""
26+
27+ if self._get_openstack_release() >= self.trusty_kilo:
28+ # Kilo or later
29+ pools = [
30+ 'rbd',
31+ 'cinder',
32+ 'glance'
33+ ]
34+ else:
35+ # Juno or earlier
36+ pools = [
37+ 'data',
38+ 'metadata',
39+ 'rbd',
40+ 'cinder',
41+ 'glance'
42+ ]
43+
44+ if radosgw:
45+ pools.extend([
46+ '.rgw.root',
47+ '.rgw.control',
48+ '.rgw',
49+ '.rgw.gc',
50+ '.users.uid'
51+ ])
52+
53+ return pools
54
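
For context, a minimal usage sketch of the new get_ceph_expected_pools() helper (illustrative only, not part of this proposal); the attribute names self.d, self.u and self.ceph0_sentry follow common OpenStack charm amulet test conventions and are assumptions here, and get_ceph_pools() comes from the utils sync further down:

    import amulet

    def check_expected_ceph_pools(self):
        """Verify that every expected ceph pool exists on a ceph unit."""
        expected = self.d.get_ceph_expected_pools(radosgw=False)
        actual = self.u.get_ceph_pools(self.ceph0_sentry)  # {pool_name: id}
        missing = [pool for pool in expected if pool not in actual]
        if missing:
            amulet.raise_status(amulet.FAIL,
                                msg='Missing ceph pools: {}'.format(missing))
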
55=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
56--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-06-19 15:08:48 +0000
57+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-07-16 20:31:40 +0000
58@@ -14,16 +14,20 @@
59 # You should have received a copy of the GNU Lesser General Public License
60 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
61
62+import amulet
63+import json
64 import logging
65 import os
66 import six
67 import time
68 import urllib
69
70+import cinderclient.v1.client as cinder_client
71 import glanceclient.v1.client as glance_client
72 import heatclient.v1.client as heat_client
73 import keystoneclient.v2_0 as keystone_client
74 import novaclient.v1_1.client as nova_client
75+import swiftclient
76
77 from charmhelpers.contrib.amulet.utils import (
78 AmuletUtils
79@@ -171,6 +175,16 @@
80 self.log.debug('Checking if tenant exists ({})...'.format(tenant))
81 return tenant in [t.name for t in keystone.tenants.list()]
82
83+ def authenticate_cinder_admin(self, keystone_sentry, username,
84+ password, tenant):
85+ """Authenticates admin user with cinder."""
86+ # NOTE(beisner): cinder python client doesn't accept tokens.
87+ service_ip = \
88+ keystone_sentry.relation('shared-db',
89+ 'mysql:shared-db')['private-address']
90+ ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
91+ return cinder_client.Client(username, password, tenant, ept)
92+
93 def authenticate_keystone_admin(self, keystone_sentry, user, password,
94 tenant):
95 """Authenticates admin user with the keystone admin endpoint."""
96@@ -212,9 +226,29 @@
97 return nova_client.Client(username=user, api_key=password,
98 project_id=tenant, auth_url=ep)
99
100+ def authenticate_swift_user(self, keystone, user, password, tenant):
101+ """Authenticates a regular user with swift api."""
102+ self.log.debug('Authenticating swift user ({})...'.format(user))
103+ ep = keystone.service_catalog.url_for(service_type='identity',
104+ endpoint_type='publicURL')
105+ return swiftclient.Connection(authurl=ep,
106+ user=user,
107+ key=password,
108+ tenant_name=tenant,
109+ auth_version='2.0')
110+
111 def create_cirros_image(self, glance, image_name):
112- """Download the latest cirros image and upload it to glance."""
113- self.log.debug('Creating glance image ({})...'.format(image_name))
114+ """Download the latest cirros image and upload it to glance,
115+ validate and return a resource pointer.
116+
117+ :param glance: pointer to authenticated glance connection
118+ :param image_name: display name for new image
119+ :returns: glance image pointer
120+ """
121+ self.log.debug('Creating glance cirros image '
122+ '({})...'.format(image_name))
123+
124+ # Download cirros image
125 http_proxy = os.getenv('AMULET_HTTP_PROXY')
126 self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
127 if http_proxy:
128@@ -223,33 +257,51 @@
129 else:
130 opener = urllib.FancyURLopener()
131
132- f = opener.open("http://download.cirros-cloud.net/version/released")
133+ f = opener.open('http://download.cirros-cloud.net/version/released')
134 version = f.read().strip()
135- cirros_img = "cirros-{}-x86_64-disk.img".format(version)
136+ cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
137 local_path = os.path.join('tests', cirros_img)
138
139 if not os.path.exists(local_path):
140- cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
141+ cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
142 version, cirros_img)
143 opener.retrieve(cirros_url, local_path)
144 f.close()
145
146+ # Create glance image
147 with open(local_path) as f:
148 image = glance.images.create(name=image_name, is_public=True,
149 disk_format='qcow2',
150 container_format='bare', data=f)
151- count = 1
152- status = image.status
153- while status != 'active' and count < 10:
154- time.sleep(3)
155- image = glance.images.get(image.id)
156- status = image.status
157- self.log.debug('image status: {}'.format(status))
158- count += 1
159-
160- if status != 'active':
161- self.log.error('image creation timed out')
162- return None
163+
164+ # Wait for image to reach active status
165+ img_id = image.id
166+ ret = self.resource_reaches_status(glance.images, img_id,
167+ expected_stat='active',
168+ msg='Image status wait')
169+ if not ret:
170+ msg = 'Glance image failed to reach expected state.'
171+ amulet.raise_status(amulet.FAIL, msg=msg)
172+
173+ # Re-validate new image
174+ self.log.debug('Validating image attributes...')
175+ val_img_name = glance.images.get(img_id).name
176+ val_img_stat = glance.images.get(img_id).status
177+ val_img_pub = glance.images.get(img_id).is_public
178+ val_img_cfmt = glance.images.get(img_id).container_format
179+ val_img_dfmt = glance.images.get(img_id).disk_format
180+ msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
181+ 'container fmt:{} disk fmt:{}'.format(
182+ val_img_name, val_img_pub, img_id,
183+ val_img_stat, val_img_cfmt, val_img_dfmt))
184+
185+ if val_img_name == image_name and val_img_stat == 'active' \
186+ and val_img_pub is True and val_img_cfmt == 'bare' \
187+ and val_img_dfmt == 'qcow2':
188+ self.log.debug(msg_attr)
189+ else:
190+ msg = ('Volume validation failed, {}'.format(msg_attr))
191+ amulet.raise_status(amulet.FAIL, msg=msg)
192
193 return image
194
195@@ -260,22 +312,7 @@
196 self.log.warn('/!\\ DEPRECATION WARNING: use '
197 'delete_resource instead of delete_image.')
198 self.log.debug('Deleting glance image ({})...'.format(image))
199- num_before = len(list(glance.images.list()))
200- glance.images.delete(image)
201-
202- count = 1
203- num_after = len(list(glance.images.list()))
204- while num_after != (num_before - 1) and count < 10:
205- time.sleep(3)
206- num_after = len(list(glance.images.list()))
207- self.log.debug('number of images: {}'.format(num_after))
208- count += 1
209-
210- if num_after != (num_before - 1):
211- self.log.error('image deletion timed out')
212- return False
213-
214- return True
215+ return self.delete_resource(glance.images, image, msg='glance image')
216
217 def create_instance(self, nova, image_name, instance_name, flavor):
218 """Create the specified instance."""
219@@ -308,22 +345,8 @@
220 self.log.warn('/!\\ DEPRECATION WARNING: use '
221 'delete_resource instead of delete_instance.')
222 self.log.debug('Deleting instance ({})...'.format(instance))
223- num_before = len(list(nova.servers.list()))
224- nova.servers.delete(instance)
225-
226- count = 1
227- num_after = len(list(nova.servers.list()))
228- while num_after != (num_before - 1) and count < 10:
229- time.sleep(3)
230- num_after = len(list(nova.servers.list()))
231- self.log.debug('number of instances: {}'.format(num_after))
232- count += 1
233-
234- if num_after != (num_before - 1):
235- self.log.error('instance deletion timed out')
236- return False
237-
238- return True
239+ return self.delete_resource(nova.servers, instance,
240+ msg='nova instance')
241
242 def create_or_get_keypair(self, nova, keypair_name="testkey"):
243 """Create a new keypair, or return pointer if it already exists."""
244@@ -339,6 +362,88 @@
245 _keypair = nova.keypairs.create(name=keypair_name)
246 return _keypair
247
248+ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
249+ img_id=None, src_vol_id=None, snap_id=None):
250+ """Create cinder volume, optionally from a glance image, OR
251+ optionally as a clone of an existing volume, OR optionally
252+ from a snapshot. Wait for the new volume status to reach
253+ the expected status, validate and return a resource pointer.
254+
255+ :param vol_name: cinder volume display name
256+ :param vol_size: size in gigabytes
257+ :param img_id: optional glance image id
258+ :param src_vol_id: optional source volume id to clone
259+ :param snap_id: optional snapshot id to use
260+ :returns: cinder volume pointer
261+ """
262+ # Handle parameter input and avoid impossible combinations
263+ if img_id and not src_vol_id and not snap_id:
264+ # Create volume from image
265+ self.log.debug('Creating cinder volume from glance image...')
266+ bootable = 'true'
267+ elif src_vol_id and not img_id and not snap_id:
268+ # Clone an existing volume
269+ self.log.debug('Cloning cinder volume...')
270+ bootable = cinder.volumes.get(src_vol_id).bootable
271+ elif snap_id and not src_vol_id and not img_id:
272+ # Create volume from snapshot
273+ self.log.debug('Creating cinder volume from snapshot...')
274+ snap = cinder.volume_snapshots.find(id=snap_id)
275+ vol_size = snap.size
276+ snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
277+ bootable = cinder.volumes.get(snap_vol_id).bootable
278+ elif not img_id and not src_vol_id and not snap_id:
279+ # Create volume
280+ self.log.debug('Creating cinder volume...')
281+ bootable = 'false'
282+ else:
283+ # Impossible combination of parameters
284+ msg = ('Invalid method use - name:{} size:{} img_id:{} '
285+ 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
286+ img_id, src_vol_id,
287+ snap_id))
288+ amulet.raise_status(amulet.FAIL, msg=msg)
289+
290+ # Create new volume
291+ try:
292+ vol_new = cinder.volumes.create(display_name=vol_name,
293+ imageRef=img_id,
294+ size=vol_size,
295+ source_volid=src_vol_id,
296+ snapshot_id=snap_id)
297+ vol_id = vol_new.id
298+ except Exception as e:
299+ msg = 'Failed to create volume: {}'.format(e)
300+ amulet.raise_status(amulet.FAIL, msg=msg)
301+
302+ # Wait for volume to reach available status
303+ ret = self.resource_reaches_status(cinder.volumes, vol_id,
304+ expected_stat="available",
305+ msg="Volume status wait")
306+ if not ret:
307+ msg = 'Cinder volume failed to reach expected state.'
308+ amulet.raise_status(amulet.FAIL, msg=msg)
309+
310+ # Re-validate new volume
311+ self.log.debug('Validating volume attributes...')
312+ val_vol_name = cinder.volumes.get(vol_id).display_name
313+ val_vol_boot = cinder.volumes.get(vol_id).bootable
314+ val_vol_stat = cinder.volumes.get(vol_id).status
315+ val_vol_size = cinder.volumes.get(vol_id).size
316+ msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
317+ '{} size:{}'.format(val_vol_name, vol_id,
318+ val_vol_stat, val_vol_boot,
319+ val_vol_size))
320+
321+ if val_vol_boot == bootable and val_vol_stat == 'available' \
322+ and val_vol_name == vol_name and val_vol_size == vol_size:
323+ self.log.debug(msg_attr)
324+ else:
325+ msg = ('Volume validation failed, {}'.format(msg_attr))
326+ amulet.raise_status(amulet.FAIL, msg=msg)
327+
328+ return vol_new
329+
330 def delete_resource(self, resource, resource_id,
331 msg="resource", max_wait=120):
332 """Delete one openstack resource, such as one instance, keypair,
333@@ -350,6 +455,8 @@
334 :param max_wait: maximum wait time in seconds
335 :returns: True if successful, otherwise False
336 """
337+ self.log.debug('Deleting OpenStack resource '
338+ '{} ({})'.format(resource_id, msg))
339 num_before = len(list(resource.list()))
340 resource.delete(resource_id)
341
342@@ -411,3 +518,87 @@
343 self.log.debug('{} never reached expected status: '
344 '{}'.format(resource_id, expected_stat))
345 return False
346+
347+ def get_ceph_osd_id_cmd(self, index):
348+ """Produce a shell command that will return a ceph-osd id."""
349+ return ("`initctl list | grep 'ceph-osd ' | "
350+ "awk 'NR=={} {{ print $2 }}' | "
351+ "grep -o '[0-9]*'`".format(index + 1))
352+
353+ def get_ceph_pools(self, sentry_unit):
354+ """Return a dict of ceph pools from a single ceph unit, with
355+ pool name as keys, pool id as vals."""
356+ pools = {}
357+ cmd = 'sudo ceph osd lspools'
358+ output, code = sentry_unit.run(cmd)
359+ if code != 0:
360+ msg = ('{} `{}` returned {} '
361+ '{}'.format(sentry_unit.info['unit_name'],
362+ cmd, code, output))
363+ amulet.raise_status(amulet.FAIL, msg=msg)
364+
365+ # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
366+ for pool in str(output).split(','):
367+ pool_id_name = pool.split(' ')
368+ if len(pool_id_name) == 2:
369+ pool_id = pool_id_name[0]
370+ pool_name = pool_id_name[1]
371+ pools[pool_name] = int(pool_id)
372+
373+ self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
374+ pools))
375+ return pools
376+
377+ def get_ceph_df(self, sentry_unit):
378+ """Return dict of ceph df json output, including ceph pool state.
379+
380+ :param sentry_unit: Pointer to amulet sentry instance (juju unit)
381+ :returns: Dict of ceph df output
382+ """
383+ cmd = 'sudo ceph df --format=json'
384+ output, code = sentry_unit.run(cmd)
385+ if code != 0:
386+ msg = ('{} `{}` returned {} '
387+ '{}'.format(sentry_unit.info['unit_name'],
388+ cmd, code, output))
389+ amulet.raise_status(amulet.FAIL, msg=msg)
390+ return json.loads(output)
391+
392+ def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
393+ """Take a sample of attributes of a ceph pool, returning ceph
394+ pool name, object count and disk space used for the specified
395+ pool ID number.
396+
397+ :param sentry_unit: Pointer to amulet sentry instance (juju unit)
398+ :param pool_id: Ceph pool ID
399+ :returns: List of pool name, object count, kb disk space used
400+ """
401+ df = self.get_ceph_df(sentry_unit)
402+ pool_name = df['pools'][pool_id]['name']
403+ obj_count = df['pools'][pool_id]['stats']['objects']
404+ kb_used = df['pools'][pool_id]['stats']['kb_used']
405+ self.log.debug('Ceph {} pool (ID {}): {} objects, '
406+ '{} kb used'.format(pool_name, pool_id,
407+ obj_count, kb_used))
408+ return pool_name, obj_count, kb_used
409+
410+ def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
411+ """Validate ceph pool samples taken over time, such as pool
412+ object counts or pool kb used, before adding, after adding, and
413+ after deleting items which affect those pool attributes. The
414+ 2nd element is expected to be greater than the 1st; 3rd is expected
415+ to be less than the 2nd.
416+
417+ :param samples: List containing 3 data samples
418+ :param sample_type: String for logging and usage context
419+ :returns: None if successful, Failure message otherwise
420+ """
421+ original, created, deleted = range(3)
422+ if samples[created] <= samples[original] or \
423+ samples[deleted] >= samples[created]:
424+ return ('Ceph {} samples ({}) '
425+ 'unexpected.'.format(sample_type, samples))
426+ else:
427+ self.log.debug('Ceph {} samples (OK): '
428+ '{}'.format(sample_type, samples))
429+ return None
430
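
Taken together, the new ceph helpers above support a storage-side check along these lines. A hedged sketch only: u, glance and ceph0_sentry are assumed, already-authenticated test fixtures rather than names from this MP.

    import amulet

    # Sample the glance pool object count before image creation, after
    # creation, and after deletion, then validate the rise and fall.
    glance_pool = u.get_ceph_pools(ceph0_sentry)['glance']
    samples = [u.get_ceph_pool_sample(ceph0_sentry, glance_pool)[1]]

    image = u.create_cirros_image(glance, 'cirros-test-image')
    samples.append(u.get_ceph_pool_sample(ceph0_sentry, glance_pool)[1])

    u.delete_resource(glance.images, image.id, msg='glance image')
    samples.append(u.get_ceph_pool_sample(ceph0_sentry, glance_pool)[1])

    ret = u.validate_ceph_pool_samples(samples, 'glance pool object count')
    if ret:
        amulet.raise_status(amulet.FAIL, msg=ret)
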
431=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
432--- hooks/charmhelpers/contrib/openstack/context.py 2015-06-19 15:08:48 +0000
433+++ hooks/charmhelpers/contrib/openstack/context.py 2015-07-16 20:31:40 +0000
434@@ -122,21 +122,24 @@
435 of specifying multiple key value pairs within the same string. For
436 example, a string in the format of 'key1=value1, key2=value2' will
437 return a dict of:
438- {'key1': 'value1',
439- 'key2': 'value2'}.
440+
441+ {'key1': 'value1',
442+ 'key2': 'value2'}.
443
444 2. A string in the above format, but supporting a comma-delimited list
445 of values for the same key. For example, a string in the format of
446 'key1=value1, key2=value3,value4,value5' will return a dict of:
447- {'key1', 'value1',
448- 'key2', 'value2,value3,value4'}
449+
450+ {'key1', 'value1',
451+ 'key2', 'value2,value3,value4'}
452
453 3. A string containing a colon character (:) prior to an equal
454 character (=) will be treated as yaml and parsed as such. This can be
455 used to specify more complex key value pairs. For example,
456 a string in the format of 'key1: subkey1=value1, subkey2=value2' will
457 return a dict of:
458- {'key1', 'subkey1=value1, subkey2=value2'}
459+
460+ {'key1', 'subkey1=value1, subkey2=value2'}
461
462 The provided config_flags string may be a list of comma-separated values
463 which themselves may be comma-separated list of values.
464@@ -891,8 +894,6 @@
465 return ctxt
466
467 def __call__(self):
468- self._ensure_packages()
469-
470 if self.network_manager not in ['quantum', 'neutron']:
471 return {}
472
473
474=== modified file 'hooks/charmhelpers/contrib/openstack/templates/ceph.conf'
475--- hooks/charmhelpers/contrib/openstack/templates/ceph.conf 2014-06-25 11:34:08 +0000
476+++ hooks/charmhelpers/contrib/openstack/templates/ceph.conf 2015-07-16 20:31:40 +0000
477@@ -5,11 +5,11 @@
478 ###############################################################################
479 [global]
480 {% if auth -%}
481- auth_supported = {{ auth }}
482- keyring = /etc/ceph/$cluster.$name.keyring
483- mon host = {{ mon_hosts }}
484+auth_supported = {{ auth }}
485+keyring = /etc/ceph/$cluster.$name.keyring
486+mon host = {{ mon_hosts }}
487 {% endif -%}
488- log to syslog = {{ use_syslog }}
489- err to syslog = {{ use_syslog }}
490- clog to syslog = {{ use_syslog }}
491+log to syslog = {{ use_syslog }}
492+err to syslog = {{ use_syslog }}
493+clog to syslog = {{ use_syslog }}
494
495
496=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py'
497--- hooks/charmhelpers/contrib/openstack/utils.py 2015-06-19 15:08:48 +0000
498+++ hooks/charmhelpers/contrib/openstack/utils.py 2015-07-16 20:31:40 +0000
499@@ -522,6 +522,7 @@
500 Clone/install all specified OpenStack repositories.
501
502 The expected format of projects_yaml is:
503+
504 repositories:
505 - {name: keystone,
506 repository: 'git://git.openstack.org/openstack/keystone.git',
507@@ -529,11 +530,13 @@
508 - {name: requirements,
509 repository: 'git://git.openstack.org/openstack/requirements.git',
510 branch: 'stable/icehouse'}
511+
512 directory: /mnt/openstack-git
513 http_proxy: squid-proxy-url
514 https_proxy: squid-proxy-url
515
516- The directory, http_proxy, and https_proxy keys are optional.
517+ The directory, http_proxy, and https_proxy keys are optional.
518+
519 """
520 global requirements_dir
521 parent_dir = '/mnt/openstack-git'
522@@ -555,10 +558,11 @@
523
524 pip_create_virtualenv(os.path.join(parent_dir, 'venv'))
525
526- # Upgrade setuptools from default virtualenv version. The default version
527- # in trusty breaks update.py in global requirements master branch.
528- pip_install('setuptools', upgrade=True, proxy=http_proxy,
529- venv=os.path.join(parent_dir, 'venv'))
530+ # Upgrade setuptools and pip from default virtualenv versions. The default
531+ # versions in trusty break master OpenStack branch deployments.
532+ for p in ['pip', 'setuptools']:
533+ pip_install(p, upgrade=True, proxy=http_proxy,
534+ venv=os.path.join(parent_dir, 'venv'))
535
536 for p in projects['repositories']:
537 repo = p['repository']
538
539=== modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
540--- hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-03-20 17:15:02 +0000
541+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-07-16 20:31:40 +0000
542@@ -60,12 +60,12 @@
543 KEYFILE = '/etc/ceph/ceph.client.{}.key'
544
545 CEPH_CONF = """[global]
546- auth supported = {auth}
547- keyring = {keyring}
548- mon host = {mon_hosts}
549- log to syslog = {use_syslog}
550- err to syslog = {use_syslog}
551- clog to syslog = {use_syslog}
552+auth supported = {auth}
553+keyring = {keyring}
554+mon host = {mon_hosts}
555+log to syslog = {use_syslog}
556+err to syslog = {use_syslog}
557+clog to syslog = {use_syslog}
558 """
559
560
561
562=== modified file 'hooks/charmhelpers/core/hookenv.py'
563--- hooks/charmhelpers/core/hookenv.py 2015-06-26 18:56:53 +0000
564+++ hooks/charmhelpers/core/hookenv.py 2015-07-16 20:31:40 +0000
565@@ -761,6 +761,7 @@
566
567 This is useful for modules and classes to perform initialization
568 and inject behavior. In particular:
569+
570 - Run common code before all of your hooks, such as logging
571 the hook name or interesting relation data.
572 - Defer object or module initialization that requires a hook
573
574=== modified file 'hooks/charmhelpers/core/host.py'
575--- hooks/charmhelpers/core/host.py 2015-06-19 15:08:48 +0000
576+++ hooks/charmhelpers/core/host.py 2015-07-16 20:31:40 +0000
577@@ -63,6 +63,36 @@
578 return service_result
579
580
581+def service_pause(service_name, init_dir=None):
582+ """Pause a system service.
583+
584+ Stop it, and prevent it from starting again at boot."""
585+ if init_dir is None:
586+ init_dir = "/etc/init"
587+ stopped = service_stop(service_name)
588+ # XXX: Support systemd too
589+ override_path = os.path.join(
590+ init_dir, '{}.conf.override'.format(service_name))
591+ with open(override_path, 'w') as fh:
592+ fh.write("manual\n")
593+ return stopped
594+
595+
596+def service_resume(service_name, init_dir=None):
597+ """Resume a system service.
598+
599+ Reenable starting again at boot. Start the service"""
600+ # XXX: Support systemd too
601+ if init_dir is None:
602+ init_dir = "/etc/init"
603+ override_path = os.path.join(
604+ init_dir, '{}.conf.override'.format(service_name))
605+ if os.path.exists(override_path):
606+ os.unlink(override_path)
607+ started = service_start(service_name)
608+ return started
609+
610+
611 def service(action, service_name):
612 """Control a system service"""
613 cmd = ['service', service_name, action]
614@@ -140,11 +170,7 @@
615
616 def add_user_to_group(username, group):
617 """Add a user to a group"""
618- cmd = [
619- 'gpasswd', '-a',
620- username,
621- group
622- ]
623+ cmd = ['gpasswd', '-a', username, group]
624 log("Adding user {} to group {}".format(username, group))
625 subprocess.check_call(cmd)
626
627
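
A brief sketch of the new service_pause()/service_resume() helpers (illustrative only): pause stops an upstart job and writes a manual override so it stays down across reboots, while resume removes the override and starts it again. The service name is just an example.

    from charmhelpers.core.host import service_pause, service_resume

    # Stop the service and keep it from starting at boot while work happens.
    if service_pause('glance-api'):
        # ... maintenance work goes here ...
        # Remove the override and start the service again.
        service_resume('glance-api')
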
628=== modified file 'hooks/charmhelpers/core/services/helpers.py'
629--- hooks/charmhelpers/core/services/helpers.py 2015-05-11 07:28:22 +0000
630+++ hooks/charmhelpers/core/services/helpers.py 2015-07-16 20:31:40 +0000
631@@ -239,12 +239,12 @@
632 action.
633
634 :param str source: The template source file, relative to
635- `$CHARM_DIR/templates`
636-
637+ `$CHARM_DIR/templates`
638 :param str target: The target to write the rendered template to
639 :param str owner: The owner of the rendered file
640 :param str group: The group of the rendered file
641 :param int perms: The permissions of the rendered file
642+
643 """
644 def __init__(self, source, target,
645 owner='root', group='root', perms=0o444):
646
647=== modified file 'hooks/charmhelpers/fetch/__init__.py'
648--- hooks/charmhelpers/fetch/__init__.py 2015-06-10 20:31:46 +0000
649+++ hooks/charmhelpers/fetch/__init__.py 2015-07-16 20:31:40 +0000
650@@ -215,19 +215,27 @@
651 _run_apt_command(cmd, fatal)
652
653
654+def apt_mark(packages, mark, fatal=False):
655+ """Flag one or more packages using apt-mark"""
656+ cmd = ['apt-mark', mark]
657+ if isinstance(packages, six.string_types):
658+ cmd.append(packages)
659+ else:
660+ cmd.extend(packages)
661+ log("Holding {}".format(packages))
662+
663+ if fatal:
664+ subprocess.check_call(cmd, universal_newlines=True)
665+ else:
666+ subprocess.call(cmd, universal_newlines=True)
667+
668+
669 def apt_hold(packages, fatal=False):
670- """Hold one or more packages"""
671- cmd = ['apt-mark', 'hold']
672- if isinstance(packages, six.string_types):
673- cmd.append(packages)
674- else:
675- cmd.extend(packages)
676- log("Holding {}".format(packages))
677-
678- if fatal:
679- subprocess.check_call(cmd)
680- else:
681- subprocess.call(cmd)
682+ return apt_mark(packages, 'hold', fatal=fatal)
683+
684+
685+def apt_unhold(packages, fatal=False):
686+ return apt_mark(packages, 'unhold', fatal=fatal)
687
688
689 def add_source(source, key=None):
690@@ -370,8 +378,9 @@
691 for handler in handlers:
692 try:
693 installed_to = handler.install(source, *args, **kwargs)
694- except UnhandledSource:
695- pass
696+ except UnhandledSource as e:
697+ log('Install source attempt unsuccessful: {}'.format(e),
698+ level='WARNING')
699 if not installed_to:
700 raise UnhandledSource("No handler found for source {}".format(source))
701 return installed_to
702
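
The apt_hold()/apt_unhold() refactor above turns both into thin wrappers over the new apt_mark(packages, mark, fatal=False), which accepts either a single package name or a list. A short hedged sketch with illustrative package names:

    from charmhelpers.fetch import apt_hold, apt_mark, apt_unhold

    pkgs = ['glance-api', 'glance-registry']
    apt_hold(pkgs)                  # apt-mark hold, non-fatal by default
    # ... perform work that must not upgrade these packages ...
    apt_unhold(pkgs)                # apt-mark unhold
    apt_mark('glance-common', 'hold', fatal=True)  # direct call, raise on error
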
703=== modified file 'hooks/charmhelpers/fetch/archiveurl.py'
704--- hooks/charmhelpers/fetch/archiveurl.py 2015-03-20 17:15:02 +0000
705+++ hooks/charmhelpers/fetch/archiveurl.py 2015-07-16 20:31:40 +0000
706@@ -77,6 +77,8 @@
707 def can_handle(self, source):
708 url_parts = self.parse_url(source)
709 if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
710+ # XXX: Why is this returning a boolean and a string? It's
711+ # doomed to fail since "bool(can_handle('foo://'))" will be True.
712 return "Wrong source type"
713 if get_archive_handler(self.base_url(source)):
714 return True
715@@ -155,7 +157,11 @@
716 else:
717 algorithms = hashlib.algorithms_available
718 if key in algorithms:
719- check_hash(dld_file, value, key)
720+ if len(value) != 1:
721+ raise TypeError(
722+ "Expected 1 hash value, not %d" % len(value))
723+ expected = value[0]
724+ check_hash(dld_file, expected, key)
725 if checksum:
726 check_hash(dld_file, checksum, hash_type)
727 return extract(dld_file, dest)
728
729=== modified file 'hooks/charmhelpers/fetch/giturl.py'
730--- hooks/charmhelpers/fetch/giturl.py 2015-05-27 13:01:32 +0000
731+++ hooks/charmhelpers/fetch/giturl.py 2015-07-16 20:31:40 +0000
732@@ -67,7 +67,7 @@
733 try:
734 self.clone(source, dest_dir, branch, depth)
735 except GitCommandError as e:
736- raise UnhandledSource(e.message)
737+ raise UnhandledSource(e)
738 except OSError as e:
739 raise UnhandledSource(e.strerror)
740 return dest_dir
741
742=== modified file 'tests/charmhelpers/contrib/amulet/utils.py'
743--- tests/charmhelpers/contrib/amulet/utils.py 2015-06-26 19:03:04 +0000
744+++ tests/charmhelpers/contrib/amulet/utils.py 2015-07-16 20:31:40 +0000
745@@ -14,6 +14,7 @@
746 # You should have received a copy of the GNU Lesser General Public License
747 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
748
749+import amulet
750 import ConfigParser
751 import distro_info
752 import io
753@@ -173,6 +174,11 @@
754
755 Verify that the specified section of the config file contains
756 the expected option key:value pairs.
757+
758+ Compare expected dictionary data vs actual dictionary data.
759+ The values in the 'expected' dictionary can be strings, bools, ints,
760+ longs, or can be a function that evaluates a variable and returns a
761+ bool.
762 """
763 self.log.debug('Validating config file data ({} in {} on {})'
764 '...'.format(section, config_file,
765@@ -195,20 +201,18 @@
766 if actual != v:
767 return "section [{}] {}:{} != expected {}:{}".format(
768 section, k, actual, k, expected[k])
769- else:
770- # handle not_null, valid_ip boolean comparison methods, etc.
771- if v(actual):
772- return None
773- else:
774- return "section [{}] {}:{} != expected {}:{}".format(
775- section, k, actual, k, expected[k])
776+ # handle function pointers, such as not_null or valid_ip
777+ elif not v(actual):
778+ return "section [{}] {}:{} != expected {}:{}".format(
779+ section, k, actual, k, expected[k])
780+ return None
781
782 def _validate_dict_data(self, expected, actual):
783 """Validate dictionary data.
784
785 Compare expected dictionary data vs actual dictionary data.
786 The values in the 'expected' dictionary can be strings, bools, ints,
787- longs, or can be a function that evaluate a variable and returns a
788+ longs, or can be a function that evaluates a variable and returns a
789 bool.
790 """
791 self.log.debug('actual: {}'.format(repr(actual)))
792@@ -219,8 +223,10 @@
793 if (isinstance(v, six.string_types) or
794 isinstance(v, bool) or
795 isinstance(v, six.integer_types)):
796+ # handle explicit values
797 if v != actual[k]:
798 return "{}:{}".format(k, actual[k])
799+ # handle function pointers, such as not_null or valid_ip
800 elif not v(actual[k]):
801 return "{}:{}".format(k, actual[k])
802 else:
803@@ -435,15 +441,13 @@
804 for cmd in commands:
805 output, code = sentry_unit.run(cmd)
806 if code == 0:
807- msg = ('{} `{}` returned {} '
808- '(OK)'.format(sentry_unit.info['unit_name'],
809- cmd, code))
810- self.log.debug(msg)
811+ self.log.debug('{} `{}` returned {} '
812+ '(OK)'.format(sentry_unit.info['unit_name'],
813+ cmd, code))
814 else:
815- msg = ('{} `{}` returned {} '
816- '{}'.format(sentry_unit.info['unit_name'],
817- cmd, code, output))
818- return msg
819+ return ('{} `{}` returned {} '
820+ '{}'.format(sentry_unit.info['unit_name'],
821+ cmd, code, output))
822 return None
823
824 def get_process_id_list(self, sentry_unit, process_name):
825@@ -460,7 +464,7 @@
826 msg = ('{} `{}` returned {} '
827 '{}'.format(sentry_unit.info['unit_name'],
828 cmd, code, output))
829- raise RuntimeError(msg)
830+ amulet.raise_status(amulet.FAIL, msg=msg)
831 return str(output).split()
832
833 def get_unit_process_ids(self, unit_processes):
834@@ -481,47 +485,37 @@
835 self.log.debug('Actual PIDs: {}'.format(actual))
836
837 if len(actual) != len(expected):
838- msg = ('Unit count mismatch. expected, actual: {}, '
839- '{} '.format(len(expected), len(actual)))
840- return msg
841+ return ('Unit count mismatch. expected, actual: {}, '
842+ '{} '.format(len(expected), len(actual)))
843
844 for (e_sentry, e_proc_names) in expected.iteritems():
845 e_sentry_name = e_sentry.info['unit_name']
846 if e_sentry in actual.keys():
847 a_proc_names = actual[e_sentry]
848 else:
849- msg = ('Expected sentry ({}) not found in actual dict data.'
850- '{}'.format(e_sentry_name, e_sentry))
851- return msg
852+ return ('Expected sentry ({}) not found in actual dict data.'
853+ '{}'.format(e_sentry_name, e_sentry))
854
855 if len(e_proc_names.keys()) != len(a_proc_names.keys()):
856- msg = ('Process name count mismatch. expected, actual: {}, '
857- '{}'.format(len(expected), len(actual)))
858- return msg
859+ return ('Process name count mismatch. expected, actual: {}, '
860+ '{}'.format(len(expected), len(actual)))
861
862 for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \
863 zip(e_proc_names.items(), a_proc_names.items()):
864 if e_proc_name != a_proc_name:
865- msg = ('Process name mismatch. expected, actual: {}, '
866- '{}'.format(e_proc_name, a_proc_name))
867- return msg
868+ return ('Process name mismatch. expected, actual: {}, '
869+ '{}'.format(e_proc_name, a_proc_name))
870
871 a_pids_length = len(a_pids)
872 if e_pids_length != a_pids_length:
873- msg = ('PID count mismatch. {} ({}) expected, actual: {}, '
874- '{} ({})'.format(e_sentry_name,
875- e_proc_name,
876- e_pids_length,
877- a_pids_length,
878- a_pids))
879- return msg
880+ return ('PID count mismatch. {} ({}) expected, actual: '
881+ '{}, {} ({})'.format(e_sentry_name, e_proc_name,
882+ e_pids_length, a_pids_length,
883+ a_pids))
884 else:
885- msg = ('PID check OK: {} {} {}: '
886- '{}'.format(e_sentry_name,
887- e_proc_name,
888- e_pids_length,
889- a_pids))
890- self.log.debug(msg)
891+ self.log.debug('PID check OK: {} {} {}: '
892+ '{}'.format(e_sentry_name, e_proc_name,
893+ e_pids_length, a_pids))
894 return None
895
896 def validate_list_of_identical_dicts(self, list_of_dicts):
897@@ -532,10 +526,8 @@
898
899 self.log.debug('Hashes: {}'.format(hashes))
900 if len(set(hashes)) == 1:
901- msg = 'Dicts within list are identical'
902- self.log.debug(msg)
903+ self.log.debug('Dicts within list are identical')
904 else:
905- msg = 'Dicts within list are not identical'
906- return msg
907+ return 'Dicts within list are not identical'
908
909 return None
910
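
The clarified docstrings above note that expected values may be literals or callables that return a bool. A hedged sketch of that usage; the config path, section, option names and the u/glance_sentry fixtures are assumptions, not from this MP:

    import amulet

    expected = {
        'debug': 'False',              # literal comparison
        'bind_host': u.valid_ip,       # callable comparison
        'admin_password': u.not_null,  # callable comparison
    }
    ret = u.validate_config_data(glance_sentry, '/etc/glance/glance-api.conf',
                                 'DEFAULT', expected)
    if ret:
        amulet.raise_status(amulet.FAIL, msg=ret)
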
911=== modified file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py'
912--- tests/charmhelpers/contrib/openstack/amulet/deployment.py 2015-06-26 19:03:04 +0000
913+++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2015-07-16 20:31:40 +0000
914@@ -150,8 +150,9 @@
915 return releases[self.series]
916
917 def get_ceph_expected_pools(self, radosgw=False):
918- """Return a list of expected ceph pools based on Ubuntu-OpenStack
919- release and whether ceph radosgw is flagged as present or not."""
920+ """Return a list of expected ceph pools in a ceph + cinder + glance
921+ test scenario, based on OpenStack release and whether ceph radosgw
922+ is flagged as present or not."""
923
924 if self._get_openstack_release() >= self.trusty_kilo:
925 # Kilo or later
926
927=== modified file 'tests/charmhelpers/contrib/openstack/amulet/utils.py'
928--- tests/charmhelpers/contrib/openstack/amulet/utils.py 2015-06-26 19:03:04 +0000
929+++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2015-07-16 20:31:40 +0000
930@@ -14,6 +14,7 @@
931 # You should have received a copy of the GNU Lesser General Public License
932 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
933
934+import amulet
935 import json
936 import logging
937 import os
938@@ -177,6 +178,7 @@
939 def authenticate_cinder_admin(self, keystone_sentry, username,
940 password, tenant):
941 """Authenticates admin user with cinder."""
942+ # NOTE(beisner): cinder python client doesn't accept tokens.
943 service_ip = \
944 keystone_sentry.relation('shared-db',
945 'mysql:shared-db')['private-address']
946@@ -279,7 +281,7 @@
947 msg='Image status wait')
948 if not ret:
949 msg = 'Glance image failed to reach expected state.'
950- raise RuntimeError(msg)
951+ amulet.raise_status(amulet.FAIL, msg=msg)
952
953 # Re-validate new image
954 self.log.debug('Validating image attributes...')
955@@ -299,7 +301,7 @@
956 self.log.debug(msg_attr)
957 else:
958 msg = ('Volume validation failed, {}'.format(msg_attr))
959- raise RuntimeError(msg)
960+ amulet.raise_status(amulet.FAIL, msg=msg)
961
962 return image
963
964@@ -362,8 +364,8 @@
965
966 def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
967 img_id=None, src_vol_id=None, snap_id=None):
968- """Create cinder volume, optionally from a glance image, or
969- optionally as a clone of an existing volume, or optionally
970+ """Create cinder volume, optionally from a glance image, OR
971+ optionally as a clone of an existing volume, OR optionally
972 from a snapshot. Wait for the new volume status to reach
973 the expected status, validate and return a resource pointer.
974
975@@ -374,29 +376,33 @@
976 :param snap_id: optional snapshot id to use
977 :returns: cinder volume pointer
978 """
979- # Handle parameter input
980+ # Handle parameter input and avoid impossible combinations
981 if img_id and not src_vol_id and not snap_id:
982- self.log.debug('Creating cinder volume from glance image '
983- '({})...'.format(img_id))
984+ # Create volume from image
985+ self.log.debug('Creating cinder volume from glance image...')
986 bootable = 'true'
987 elif src_vol_id and not img_id and not snap_id:
988+ # Clone an existing volume
989 self.log.debug('Cloning cinder volume...')
990 bootable = cinder.volumes.get(src_vol_id).bootable
991 elif snap_id and not src_vol_id and not img_id:
992+ # Create volume from snapshot
993 self.log.debug('Creating cinder volume from snapshot...')
994 snap = cinder.volume_snapshots.find(id=snap_id)
995 vol_size = snap.size
996 snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
997 bootable = cinder.volumes.get(snap_vol_id).bootable
998 elif not img_id and not src_vol_id and not snap_id:
999+ # Create volume
1000 self.log.debug('Creating cinder volume...')
1001 bootable = 'false'
1002 else:
1003+ # Impossible combination of parameters
1004 msg = ('Invalid method use - name:{} size:{} img_id:{} '
1005 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
1006 img_id, src_vol_id,
1007 snap_id))
1008- raise RuntimeError(msg)
1009+ amulet.raise_status(amulet.FAIL, msg=msg)
1010
1011 # Create new volume
1012 try:
1013@@ -408,7 +414,7 @@
1014 vol_id = vol_new.id
1015 except Exception as e:
1016 msg = 'Failed to create volume: {}'.format(e)
1017- raise RuntimeError(msg)
1018+ amulet.raise_status(amulet.FAIL, msg=msg)
1019
1020 # Wait for volume to reach available status
1021 ret = self.resource_reaches_status(cinder.volumes, vol_id,
1022@@ -416,7 +422,7 @@
1023 msg="Volume status wait")
1024 if not ret:
1025 msg = 'Cinder volume failed to reach expected state.'
1026- raise RuntimeError(msg)
1027+ amulet.raise_status(amulet.FAIL, msg=msg)
1028
1029 # Re-validate new volume
1030 self.log.debug('Validating volume attributes...')
1031@@ -434,7 +440,7 @@
1032 self.log.debug(msg_attr)
1033 else:
1034 msg = ('Volume validation failed, {}'.format(msg_attr))
1035- raise RuntimeError(msg)
1036+ amulet.raise_status(amulet.FAIL, msg=msg)
1037
1038 return vol_new
1039
1040@@ -515,9 +521,9 @@
1041
1042 def get_ceph_osd_id_cmd(self, index):
1043 """Produce a shell command that will return a ceph-osd id."""
1044- cmd = ("`initctl list | grep 'ceph-osd ' | awk 'NR=={} {{ print $2 }}'"
1045- " | grep -o '[0-9]*'`".format(index + 1))
1046- return cmd
1047+ return ("`initctl list | grep 'ceph-osd ' | "
1048+ "awk 'NR=={} {{ print $2 }}' | "
1049+ "grep -o '[0-9]*'`".format(index + 1))
1050
1051 def get_ceph_pools(self, sentry_unit):
1052 """Return a dict of ceph pools from a single ceph unit, with
1053@@ -529,7 +535,7 @@
1054 msg = ('{} `{}` returned {} '
1055 '{}'.format(sentry_unit.info['unit_name'],
1056 cmd, code, output))
1057- raise RuntimeError(msg)
1058+ amulet.raise_status(amulet.FAIL, msg=msg)
1059
1060 # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
1061 for pool in str(output).split(','):
1062@@ -555,7 +561,7 @@
1063 msg = ('{} `{}` returned {} '
1064 '{}'.format(sentry_unit.info['unit_name'],
1065 cmd, code, output))
1066- raise RuntimeError(msg)
1067+ amulet.raise_status(amulet.FAIL, msg=msg)
1068 return json.loads(output)
1069
1070 def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
1071@@ -572,10 +578,8 @@
1072 obj_count = df['pools'][pool_id]['stats']['objects']
1073 kb_used = df['pools'][pool_id]['stats']['kb_used']
1074 self.log.debug('Ceph {} pool (ID {}): {} objects, '
1075- '{} kb used'.format(pool_name,
1076- pool_id,
1077- obj_count,
1078- kb_used))
1079+ '{} kb used'.format(pool_name, pool_id,
1080+ obj_count, kb_used))
1081 return pool_name, obj_count, kb_used
1082
1083 def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
1084@@ -592,9 +596,8 @@
1085 original, created, deleted = range(3)
1086 if samples[created] <= samples[original] or \
1087 samples[deleted] >= samples[created]:
1088- msg = ('Ceph {} samples ({}) '
1089- 'unexpected.'.format(sample_type, samples))
1090- return msg
1091+ return ('Ceph {} samples ({}) '
1092+ 'unexpected.'.format(sample_type, samples))
1093 else:
1094 self.log.debug('Ceph {} samples (OK): '
1095 '{}'.format(sample_type, samples))
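
Finally, a hedged sketch of the create_cinder_volume() helper synced into both the hooks/ and tests/ trees above; cinder and image are assumed, already-authenticated fixtures rather than names from this MP:

    # Create a 1 GB bootable volume from a glance image, then clean it up.
    vol = u.create_cinder_volume(cinder, vol_name='demo-vol', vol_size=1,
                                 img_id=image.id)
    u.delete_resource(cinder.volumes, vol.id, msg='cinder volume')
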
