Merge lp:~corey.bryant/charms/trusty/ceph-osd/amulet-basics into lp:~openstack-charmers-archive/charms/trusty/ceph-osd/next

Proposed by Corey Bryant
Status: Merged
Merged at revision: 29
Proposed branch: lp:~corey.bryant/charms/trusty/ceph-osd/amulet-basics
Merge into: lp:~openstack-charmers-archive/charms/trusty/ceph-osd/next
Diff against target: 1165 lines (+1026/-28)
14 files modified
Makefile (+11/-2)
charm-helpers-hooks.yaml (+9/-0)
charm-helpers-sync.yaml (+0/-9)
charm-helpers-tests.yaml (+5/-0)
templates/ceph.conf (+17/-17)
tests/00-setup (+10/-0)
tests/14-basic-precise-icehouse (+11/-0)
tests/15-basic-trusty-icehouse (+9/-0)
tests/README (+53/-0)
tests/basic_deployment.py (+281/-0)
tests/charmhelpers/contrib/amulet/deployment.py (+77/-0)
tests/charmhelpers/contrib/amulet/utils.py (+176/-0)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+91/-0)
tests/charmhelpers/contrib/openstack/amulet/utils.py (+276/-0)
To merge this branch: bzr merge lp:~corey.bryant/charms/trusty/ceph-osd/amulet-basics
Reviewer Review Type Date Requested Status
OpenStack Charmers Pending
Review via email: mp+236273@code.launchpad.net
32. By Corey Bryant

Sync charm-helpers to pick up lint fix.

Preview Diff

1=== modified file 'Makefile'
2--- Makefile 2014-09-26 08:25:57 +0000
3+++ Makefile 2014-09-29 20:46:36 +0000
4@@ -2,16 +2,25 @@
5 PYTHON := /usr/bin/env python
6
7 lint:
8- @flake8 --exclude hooks/charmhelpers hooks
9+ @flake8 --exclude hooks/charmhelpers hooks tests
10 @charm proof || true
11
12+test:
13+ @echo Starting Amulet tests...
14+ # coreycb note: The -v should only be temporary until Amulet sends
15+ # raise_status() messages to stderr:
16+ # https://bugs.launchpad.net/amulet/+bug/1320357
17+ @juju test -v -p AMULET_HTTP_PROXY --timeout 900 \
18+ 00-setup 14-basic-precise-icehouse 15-basic-trusty-icehouse
19+
20 bin/charm_helpers_sync.py:
21 @mkdir -p bin
22 @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
23 > bin/charm_helpers_sync.py
24
25 sync: bin/charm_helpers_sync.py
26- $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-sync.yaml
27+ $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
28+ $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
29
30 publish: lint
31 bzr push lp:charms/ceph-osd
32
33=== added file 'charm-helpers-hooks.yaml'
34--- charm-helpers-hooks.yaml 1970-01-01 00:00:00 +0000
35+++ charm-helpers-hooks.yaml 2014-09-29 20:46:36 +0000
36@@ -0,0 +1,9 @@
37+branch: lp:charm-helpers
38+destination: hooks/charmhelpers
39+include:
40+ - core
41+ - fetch
42+ - contrib.storage.linux:
43+ - utils
44+ - contrib.openstack.alternatives
45+ - contrib.network.ip
46
47=== removed file 'charm-helpers-sync.yaml'
48--- charm-helpers-sync.yaml 2014-07-25 08:07:41 +0000
49+++ charm-helpers-sync.yaml 1970-01-01 00:00:00 +0000
50@@ -1,9 +0,0 @@
51-branch: lp:charm-helpers
52-destination: hooks/charmhelpers
53-include:
54- - core
55- - fetch
56- - contrib.storage.linux:
57- - utils
58- - contrib.openstack.alternatives
59- - contrib.network.ip
60
61=== added file 'charm-helpers-tests.yaml'
62--- charm-helpers-tests.yaml 1970-01-01 00:00:00 +0000
63+++ charm-helpers-tests.yaml 2014-09-29 20:46:36 +0000
64@@ -0,0 +1,5 @@
65+branch: lp:charm-helpers
66+destination: tests/charmhelpers
67+include:
68+ - contrib.amulet
69+ - contrib.openstack.amulet
70
71=== modified file 'templates/ceph.conf'
72--- templates/ceph.conf 2014-07-25 08:07:41 +0000
73+++ templates/ceph.conf 2014-09-29 20:46:36 +0000
74@@ -1,34 +1,34 @@
75 [global]
76 {% if old_auth %}
77- auth supported = {{ auth_supported }}
78+auth supported = {{ auth_supported }}
79 {% else %}
80- auth cluster required = {{ auth_supported }}
81- auth service required = {{ auth_supported }}
82- auth client required = {{ auth_supported }}
83+auth cluster required = {{ auth_supported }}
84+auth service required = {{ auth_supported }}
85+auth client required = {{ auth_supported }}
86 {% endif %}
87- keyring = /etc/ceph/$cluster.$name.keyring
88- mon host = {{ mon_hosts }}
89- fsid = {{ fsid }}
90+keyring = /etc/ceph/$cluster.$name.keyring
91+mon host = {{ mon_hosts }}
92+fsid = {{ fsid }}
93
94- log to syslog = {{ use_syslog }}
95- err to syslog = {{ use_syslog }}
96- clog to syslog = {{ use_syslog }}
97+log to syslog = {{ use_syslog }}
98+err to syslog = {{ use_syslog }}
99+clog to syslog = {{ use_syslog }}
100
101 {%- if ceph_public_network is string %}
102- public network = {{ ceph_public_network }}
103+public network = {{ ceph_public_network }}
104 {%- endif %}
105 {%- if ceph_cluster_network is string %}
106- cluster network = {{ ceph_cluster_network }}
107+cluster network = {{ ceph_cluster_network }}
108 {%- endif %}
109
110 [mon]
111- keyring = /var/lib/ceph/mon/$cluster-$id/keyring
112+keyring = /var/lib/ceph/mon/$cluster-$id/keyring
113
114 [mds]
115- keyring = /var/lib/ceph/mds/$cluster-$id/keyring
116+keyring = /var/lib/ceph/mds/$cluster-$id/keyring
117
118 [osd]
119- keyring = /var/lib/ceph/osd/$cluster-$id/keyring
120- osd journal size = {{ osd_journal_size }}
121- filestore xattr use omap = true
122+keyring = /var/lib/ceph/osd/$cluster-$id/keyring
123+osd journal size = {{ osd_journal_size }}
124+filestore xattr use omap = true
125
126
127=== added directory 'tests'
128=== added file 'tests/00-setup'
129--- tests/00-setup 1970-01-01 00:00:00 +0000
130+++ tests/00-setup 2014-09-29 20:46:36 +0000
131@@ -0,0 +1,10 @@
132+#!/bin/bash
133+
134+set -ex
135+
136+sudo add-apt-repository --yes ppa:juju/stable
137+sudo apt-get update --yes
138+sudo apt-get install --yes python-amulet \
139+ python-keystoneclient \
140+ python-glanceclient \
141+ python-novaclient
142
143=== added file 'tests/14-basic-precise-icehouse'
144--- tests/14-basic-precise-icehouse 1970-01-01 00:00:00 +0000
145+++ tests/14-basic-precise-icehouse 2014-09-29 20:46:36 +0000
146@@ -0,0 +1,11 @@
147+#!/usr/bin/python
148+
149+"""Amulet tests on a basic ceph-osd deployment on precise-icehouse."""
150+
151+from basic_deployment import CephOsdBasicDeployment
152+
153+if __name__ == '__main__':
154+ deployment = CephOsdBasicDeployment(series='precise',
155+ openstack='cloud:precise-icehouse',
156+ source='cloud:precise-updates/icehouse')
157+ deployment.run_tests()
158
159=== added file 'tests/15-basic-trusty-icehouse'
160--- tests/15-basic-trusty-icehouse 1970-01-01 00:00:00 +0000
161+++ tests/15-basic-trusty-icehouse 2014-09-29 20:46:36 +0000
162@@ -0,0 +1,9 @@
163+#!/usr/bin/python
164+
165+"""Amulet tests on a basic ceph-osd deployment on trusty-icehouse."""
166+
167+from basic_deployment import CephOsdBasicDeployment
168+
169+if __name__ == '__main__':
170+ deployment = CephOsdBasicDeployment(series='trusty')
171+ deployment.run_tests()
172
173=== added file 'tests/README'
174--- tests/README 1970-01-01 00:00:00 +0000
175+++ tests/README 2014-09-29 20:46:36 +0000
176@@ -0,0 +1,53 @@
177+This directory provides Amulet tests that focus on verification of ceph-osd
178+deployments.
179+
180+In order to run tests, you'll need charm-tools installed (in addition to
181+juju, of course):
182+ sudo add-apt-repository ppa:juju/stable
183+ sudo apt-get update
184+ sudo apt-get install charm-tools
185+
186+If you use a web proxy server to access the web, you'll need to set the
187+AMULET_HTTP_PROXY environment variable to the http URL of the proxy server.
188+
189+The following examples demonstrate different ways that tests can be executed.
190+All examples are run from the charm's root directory.
191+
192+ * To run all tests (starting with 00-setup):
193+
194+ make test
195+
196+ * To run a specific test module (or modules):
197+
198+ juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
199+
200+ * To run a specific test module (or modules), and keep the environment
201+ deployed after a failure:
202+
203+ juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
204+
205+ * To re-run a test module against an already deployed environment (one
206+ that was deployed by a previous call to 'juju test --set-e'):
207+
208+ ./tests/15-basic-trusty-icehouse
209+
210+For debugging and test development purposes, all code should be idempotent.
211+In other words, the code should have the ability to be re-run without changing
212+the results beyond the initial run. This enables editing and re-running of a
213+test module against an already deployed environment, as described above.
214+
215+Manual debugging tips:
216+
217+ * Set the following env vars before using the OpenStack CLI as admin:
218+ export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
219+ export OS_TENANT_NAME=admin
220+ export OS_USERNAME=admin
221+ export OS_PASSWORD=openstack
222+ export OS_REGION_NAME=RegionOne
223+
224+ * Set the following env vars before using the OpenStack CLI as demoUser:
225+ export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
226+ export OS_TENANT_NAME=demoTenant
227+ export OS_USERNAME=demoUser
228+ export OS_PASSWORD=password
229+ export OS_REGION_NAME=RegionOne
230
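
Note on the manual debugging tips above: the same admin credentials the
README exports for the OpenStack CLI can also be used programmatically.
A minimal sketch, assuming python-keystoneclient is installed and with
'<keystone-ip>' standing in for the deployed keystone/0 unit's address:

    # Hedged sketch only; the auth URL placeholder and credentials mirror
    # the admin values exported in the README above.
    import keystoneclient.v2_0.client as keystone_client

    keystone = keystone_client.Client(username='admin',
                                      password='openstack',
                                      tenant_name='admin',
                                      auth_url='http://<keystone-ip>:5000/v2.0')
    print([t.name for t in keystone.tenants.list()])
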
231=== added file 'tests/basic_deployment.py'
232--- tests/basic_deployment.py 1970-01-01 00:00:00 +0000
233+++ tests/basic_deployment.py 2014-09-29 20:46:36 +0000
234@@ -0,0 +1,281 @@
235+#!/usr/bin/python
236+
237+import amulet
238+from charmhelpers.contrib.openstack.amulet.deployment import (
239+ OpenStackAmuletDeployment
240+)
241+from charmhelpers.contrib.openstack.amulet.utils import ( # noqa
242+ OpenStackAmuletUtils,
243+ DEBUG,
244+ ERROR
245+)
246+
247+# Use DEBUG to turn on debug logging
248+u = OpenStackAmuletUtils(ERROR)
249+
250+
251+class CephOsdBasicDeployment(OpenStackAmuletDeployment):
252+ """Amulet tests on a basic ceph-osd deployment."""
253+
254+ def __init__(self, series=None, openstack=None, source=None,
255+ stable=False):
256+ """Deploy the entire test environment."""
257+ super(CephOsdBasicDeployment, self).__init__(series, openstack,
258+ source, stable)
259+ self._add_services()
260+ self._add_relations()
261+ self._configure_services()
262+ self._deploy()
263+ self._initialize_tests()
264+
265+ def _add_services(self):
266+ """Add services
267+
268+ Add the services that we're testing, where ceph-osd is local,
269+ and the rest of the services are from lp branches that are
270+ compatible with the local charm (e.g. stable or next).
271+ """
272+ this_service = {'name': 'ceph-osd'}
273+ other_services = [{'name': 'ceph', 'units': 3}, {'name': 'mysql'},
274+ {'name': 'keystone'}, {'name': 'rabbitmq-server'},
275+ {'name': 'nova-compute'}, {'name': 'glance'},
276+ {'name': 'cinder'}]
277+ super(CephOsdBasicDeployment, self)._add_services(this_service,
278+ other_services)
279+
280+ def _add_relations(self):
281+ """Add all of the relations for the services."""
282+ relations = {
283+ 'nova-compute:shared-db': 'mysql:shared-db',
284+ 'nova-compute:amqp': 'rabbitmq-server:amqp',
285+ 'nova-compute:image-service': 'glance:image-service',
286+ 'nova-compute:ceph': 'ceph:client',
287+ 'keystone:shared-db': 'mysql:shared-db',
288+ 'glance:shared-db': 'mysql:shared-db',
289+ 'glance:identity-service': 'keystone:identity-service',
290+ 'glance:amqp': 'rabbitmq-server:amqp',
291+ 'glance:ceph': 'ceph:client',
292+ 'cinder:shared-db': 'mysql:shared-db',
293+ 'cinder:identity-service': 'keystone:identity-service',
294+ 'cinder:amqp': 'rabbitmq-server:amqp',
295+ 'cinder:image-service': 'glance:image-service',
296+ 'cinder:ceph': 'ceph:client',
297+ 'ceph-osd:mon': 'ceph:osd'
298+ }
299+ super(CephOsdBasicDeployment, self)._add_relations(relations)
300+
301+ def _configure_services(self):
302+ """Configure all of the services."""
303+ keystone_config = {'admin-password': 'openstack',
304+ 'admin-token': 'ubuntutesting'}
305+ mysql_config = {'dataset-size': '50%'}
306+ cinder_config = {'block-device': 'None', 'glance-api-version': '2'}
307+ ceph_config = {
308+ 'monitor-count': '3',
309+ 'auth-supported': 'none',
310+ 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc',
311+ 'monitor-secret': 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==',
312+ 'osd-reformat': 'yes',
313+ 'ephemeral-unmount': '/mnt',
314+ 'osd-devices': '/dev/vdb /srv/ceph'
315+ }
316+ ceph_osd_config = {
317+ 'osd-reformat': 'yes',
318+ 'ephemeral-unmount': '/mnt',
319+ 'osd-devices': '/dev/vdb /srv/ceph'
320+ }
321+
322+ configs = {'keystone': keystone_config,
323+ 'mysql': mysql_config,
324+ 'cinder': cinder_config,
325+ 'ceph': ceph_config,
326+ 'ceph-osd': ceph_osd_config}
327+ super(CephOsdBasicDeployment, self)._configure_services(configs)
328+
329+ def _initialize_tests(self):
330+ """Perform final initialization before tests get run."""
331+ # Access the sentries for inspecting service units
332+ self.mysql_sentry = self.d.sentry.unit['mysql/0']
333+ self.keystone_sentry = self.d.sentry.unit['keystone/0']
334+ self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0']
335+ self.nova_compute_sentry = self.d.sentry.unit['nova-compute/0']
336+ self.glance_sentry = self.d.sentry.unit['glance/0']
337+ self.cinder_sentry = self.d.sentry.unit['cinder/0']
338+ self.ceph0_sentry = self.d.sentry.unit['ceph/0']
339+ self.ceph1_sentry = self.d.sentry.unit['ceph/1']
340+ self.ceph2_sentry = self.d.sentry.unit['ceph/2']
341+ self.ceph_osd_sentry = self.d.sentry.unit['ceph-osd/0']
342+
343+ # Authenticate admin with keystone
344+ self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
345+ user='admin',
346+ password='openstack',
347+ tenant='admin')
348+
349+ # Authenticate admin with glance endpoint
350+ self.glance = u.authenticate_glance_admin(self.keystone)
351+
352+ # Create a demo tenant/role/user
353+ self.demo_tenant = 'demoTenant'
354+ self.demo_role = 'demoRole'
355+ self.demo_user = 'demoUser'
356+ if not u.tenant_exists(self.keystone, self.demo_tenant):
357+ tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant,
358+ description='demo tenant',
359+ enabled=True)
360+ self.keystone.roles.create(name=self.demo_role)
361+ self.keystone.users.create(name=self.demo_user,
362+ password='password',
363+ tenant_id=tenant.id,
364+ email='demo@demo.com')
365+
366+ # Authenticate demo user with keystone
367+ self.keystone_demo = u.authenticate_keystone_user(self.keystone,
368+ self.demo_user,
369+ 'password',
370+ self.demo_tenant)
371+
372+ # Authenticate demo user with nova-api
373+ self.nova_demo = u.authenticate_nova_user(self.keystone,
374+ self.demo_user,
375+ 'password',
376+ self.demo_tenant)
377+
378+ def _ceph_osd_id(self, index):
379+ """Produce a shell command that will return a ceph-osd id."""
380+ return "`initctl list | grep 'ceph-osd ' | awk 'NR=={} {{ print $2 }}' | grep -o '[0-9]*'`".format(index + 1) # noqa
381+
382+ def test_services(self):
383+ """Verify the expected services are running on the service units."""
384+ commands = {
385+ self.mysql_sentry: ['status mysql'],
386+ self.rabbitmq_sentry: ['sudo service rabbitmq-server status'],
387+ self.nova_compute_sentry: ['status nova-compute'],
388+ self.keystone_sentry: ['status keystone'],
389+ self.glance_sentry: ['status glance-registry',
390+ 'status glance-api'],
391+ self.cinder_sentry: ['status cinder-api',
392+ 'status cinder-scheduler',
393+ 'status cinder-volume']
394+ }
395+ ceph_services = ['status ceph-mon-all',
396+ 'status ceph-mon id=`hostname`']
397+ ceph_osd0 = 'status ceph-osd id={}'.format(self._ceph_osd_id(0))
398+ ceph_osd1 = 'status ceph-osd id={}'.format(self._ceph_osd_id(1))
399+ ceph_osd_services = [ceph_osd0, ceph_osd1, 'status ceph-osd-all']
400+ ceph_services.extend(ceph_osd_services)
401+ commands[self.ceph0_sentry] = ceph_services
402+ commands[self.ceph1_sentry] = ceph_services
403+ commands[self.ceph2_sentry] = ceph_services
404+ commands[self.ceph_osd_sentry] = ceph_osd_services
405+
406+ ret = u.validate_services(commands)
407+ if ret:
408+ amulet.raise_status(amulet.FAIL, msg=ret)
409+
410+ def test_ceph_osd_ceph_relation(self):
411+ """Verify the ceph-osd to ceph relation data."""
412+ unit = self.ceph_osd_sentry
413+ relation = ['mon', 'ceph:osd']
414+ expected = {
415+ 'private-address': u.valid_ip
416+ }
417+
418+ ret = u.validate_relation_data(unit, relation, expected)
419+ if ret:
420+ message = u.relation_error('ceph-osd to ceph', ret)
421+ amulet.raise_status(amulet.FAIL, msg=message)
422+
423+ def test_ceph0_to_ceph_osd_relation(self):
424+ """Verify the ceph0 to ceph-osd relation data."""
425+ unit = self.ceph0_sentry
426+ relation = ['osd', 'ceph-osd:mon']
427+ expected = {
428+ 'osd_bootstrap_key': u.not_null,
429+ 'private-address': u.valid_ip,
430+ 'auth': u'none',
431+ 'ceph-public-address': u.valid_ip,
432+ 'fsid': u'6547bd3e-1397-11e2-82e5-53567c8d32dc'
433+ }
434+
435+ ret = u.validate_relation_data(unit, relation, expected)
436+ if ret:
437+ message = u.relation_error('ceph0 to ceph-osd', ret)
438+ amulet.raise_status(amulet.FAIL, msg=message)
439+
440+ def test_ceph1_to_ceph_osd_relation(self):
441+ """Verify the ceph1 to ceph-osd relation data."""
442+ unit = self.ceph1_sentry
443+ relation = ['osd', 'ceph-osd:mon']
444+ expected = {
445+ 'osd_bootstrap_key': u.not_null,
446+ 'private-address': u.valid_ip,
447+ 'auth': u'none',
448+ 'ceph-public-address': u.valid_ip,
449+ 'fsid': u'6547bd3e-1397-11e2-82e5-53567c8d32dc'
450+ }
451+
452+ ret = u.validate_relation_data(unit, relation, expected)
453+ if ret:
454+ message = u.relation_error('ceph1 to ceph-osd', ret)
455+ amulet.raise_status(amulet.FAIL, msg=message)
456+
457+ def test_ceph2_to_ceph_osd_relation(self):
458+ """Verify the ceph2 to ceph-osd relation data."""
459+ unit = self.ceph2_sentry
460+ relation = ['osd', 'ceph-osd:mon']
461+ expected = {
462+ 'osd_bootstrap_key': u.not_null,
463+ 'private-address': u.valid_ip,
464+ 'auth': u'none',
465+ 'ceph-public-address': u.valid_ip,
466+ 'fsid': u'6547bd3e-1397-11e2-82e5-53567c8d32dc'
467+ }
468+
469+ ret = u.validate_relation_data(unit, relation, expected)
470+ if ret:
471+ message = u.relation_error('ceph2 to ceph-osd', ret)
472+ amulet.raise_status(amulet.FAIL, msg=message)
473+
474+ def test_ceph_config(self):
475+ """Verify the data in the ceph config file."""
476+ unit = self.ceph_osd_sentry
477+ conf = '/etc/ceph/ceph.conf'
478+ expected = {
479+ 'global': {
480+ 'auth cluster required': 'none',
481+ 'auth service required': 'none',
482+ 'auth client required': 'none',
483+ 'keyring': '/etc/ceph/$cluster.$name.keyring',
484+ 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc',
485+ 'log to syslog': 'false',
486+ 'err to syslog': 'false',
487+ 'clog to syslog': 'false'
488+ },
489+ 'mon': {
490+ 'keyring': '/var/lib/ceph/mon/$cluster-$id/keyring'
491+ },
492+ 'mds': {
493+ 'keyring': '/var/lib/ceph/mds/$cluster-$id/keyring'
494+ },
495+ 'osd': {
496+ 'keyring': '/var/lib/ceph/osd/$cluster-$id/keyring',
497+ 'osd journal size': '1024',
498+ 'filestore xattr use omap': 'true'
499+ },
500+ }
501+
502+ for section, pairs in expected.iteritems():
503+ ret = u.validate_config_data(unit, conf, section, pairs)
504+ if ret:
505+ message = "ceph config error: {}".format(ret)
506+ amulet.raise_status(amulet.FAIL, msg=message)
507+
508+ def test_restart_on_config_change(self):
509+ """Verify the specified services are restarted on config change."""
510+ # NOTE(coreycb): Test not implemented but should it be? ceph-osd svcs
511+ # aren't restarted by charm after config change. Should
512+ # they be restarted?
513+ if self._get_openstack_release() >= self.precise_essex:
514+ u.log.error("Test not implemented")
515+ return
516
517=== added directory 'tests/charmhelpers'
518=== added file 'tests/charmhelpers/__init__.py'
519=== added directory 'tests/charmhelpers/contrib'
520=== added file 'tests/charmhelpers/contrib/__init__.py'
521=== added directory 'tests/charmhelpers/contrib/amulet'
522=== added file 'tests/charmhelpers/contrib/amulet/__init__.py'
523=== added file 'tests/charmhelpers/contrib/amulet/deployment.py'
524--- tests/charmhelpers/contrib/amulet/deployment.py 1970-01-01 00:00:00 +0000
525+++ tests/charmhelpers/contrib/amulet/deployment.py 2014-09-29 20:46:36 +0000
526@@ -0,0 +1,77 @@
527+import amulet
528+
529+import os
530+
531+
532+class AmuletDeployment(object):
533+ """Amulet deployment.
534+
535+ This class provides generic Amulet deployment and test runner
536+ methods.
537+ """
538+
539+ def __init__(self, series=None):
540+ """Initialize the deployment environment."""
541+ self.series = None
542+
543+ if series:
544+ self.series = series
545+ self.d = amulet.Deployment(series=self.series)
546+ else:
547+ self.d = amulet.Deployment()
548+
549+ def _add_services(self, this_service, other_services):
550+ """Add services.
551+
552+ Add services to the deployment where this_service is the local charm
553+ that we're testing and other_services are the other services that
554+ are being used in the local amulet tests.
555+ """
556+ if this_service['name'] != os.path.basename(os.getcwd()):
557+ s = this_service['name']
558+ msg = "The charm's root directory name needs to be {}".format(s)
559+ amulet.raise_status(amulet.FAIL, msg=msg)
560+
561+ if 'units' not in this_service:
562+ this_service['units'] = 1
563+
564+ self.d.add(this_service['name'], units=this_service['units'])
565+
566+ for svc in other_services:
567+ if 'location' in svc:
568+ branch_location = svc['location']
569+ elif self.series:
570+ branch_location = 'cs:{}/{}'.format(self.series, svc['name'])
571+ else:
572+ branch_location = None
573+
574+ if 'units' not in svc:
575+ svc['units'] = 1
576+
577+ self.d.add(svc['name'], charm=branch_location, units=svc['units'])
578+
579+ def _add_relations(self, relations):
580+ """Add all of the relations for the services."""
581+ for k, v in relations.iteritems():
582+ self.d.relate(k, v)
583+
584+ def _configure_services(self, configs):
585+ """Configure all of the services."""
586+ for service, config in configs.iteritems():
587+ self.d.configure(service, config)
588+
589+ def _deploy(self):
590+ """Deploy environment and wait for all hooks to finish executing."""
591+ try:
592+ self.d.setup(timeout=900)
593+ self.d.sentry.wait(timeout=900)
594+ except amulet.helpers.TimeoutError:
595+ amulet.raise_status(amulet.FAIL, msg="Deployment timed out")
596+ except Exception:
597+ raise
598+
599+ def run_tests(self):
600+ """Run all of the methods that are prefixed with 'test_'."""
601+ for test in dir(self):
602+ if test.startswith('test_'):
603+ getattr(self, test)()
604
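
For reference, run_tests() above discovers tests by scanning dir(self)
for attribute names that start with 'test_' and calling each with no
arguments. A minimal sketch of a subclass, using a hypothetical
ExampleDeployment name and assuming amulet is installed locally:

    # Hypothetical subclass, written only to illustrate run_tests();
    # it adds no services and its single test is a no-op.
    from charmhelpers.contrib.amulet.deployment import AmuletDeployment

    class ExampleDeployment(AmuletDeployment):
        def test_example(self):
            # Picked up by run_tests() because of the 'test_' prefix.
            pass

    if __name__ == '__main__':
        ExampleDeployment(series='trusty').run_tests()
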
605=== added file 'tests/charmhelpers/contrib/amulet/utils.py'
606--- tests/charmhelpers/contrib/amulet/utils.py 1970-01-01 00:00:00 +0000
607+++ tests/charmhelpers/contrib/amulet/utils.py 2014-09-29 20:46:36 +0000
608@@ -0,0 +1,176 @@
609+import ConfigParser
610+import io
611+import logging
612+import re
613+import sys
614+import time
615+
616+
617+class AmuletUtils(object):
618+ """Amulet utilities.
619+
620+ This class provides common utility functions that are used by Amulet
621+ tests.
622+ """
623+
624+ def __init__(self, log_level=logging.ERROR):
625+ self.log = self.get_logger(level=log_level)
626+
627+ def get_logger(self, name="amulet-logger", level=logging.DEBUG):
628+ """Get a logger object that will log to stdout."""
629+ log = logging
630+ logger = log.getLogger(name)
631+ fmt = log.Formatter("%(asctime)s %(funcName)s "
632+ "%(levelname)s: %(message)s")
633+
634+ handler = log.StreamHandler(stream=sys.stdout)
635+ handler.setLevel(level)
636+ handler.setFormatter(fmt)
637+
638+ logger.addHandler(handler)
639+ logger.setLevel(level)
640+
641+ return logger
642+
643+ def valid_ip(self, ip):
644+ if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
645+ return True
646+ else:
647+ return False
648+
649+ def valid_url(self, url):
650+ p = re.compile(
651+ r'^(?:http|ftp)s?://'
652+ r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa
653+ r'localhost|'
654+ r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
655+ r'(?::\d+)?'
656+ r'(?:/?|[/?]\S+)$',
657+ re.IGNORECASE)
658+ if p.match(url):
659+ return True
660+ else:
661+ return False
662+
663+ def validate_services(self, commands):
664+ """Validate services.
665+
666+ Verify the specified services are running on the corresponding
667+ service units.
668+ """
669+ for k, v in commands.iteritems():
670+ for cmd in v:
671+ output, code = k.run(cmd)
672+ if code != 0:
673+ return "command `{}` returned {}".format(cmd, str(code))
674+ return None
675+
676+ def _get_config(self, unit, filename):
677+ """Get a ConfigParser object for parsing a unit's config file."""
678+ file_contents = unit.file_contents(filename)
679+ config = ConfigParser.ConfigParser()
680+ config.readfp(io.StringIO(file_contents))
681+ return config
682+
683+ def validate_config_data(self, sentry_unit, config_file, section,
684+ expected):
685+ """Validate config file data.
686+
687+ Verify that the specified section of the config file contains
688+ the expected option key:value pairs.
689+ """
690+ config = self._get_config(sentry_unit, config_file)
691+
692+ if section != 'DEFAULT' and not config.has_section(section):
693+ return "section [{}] does not exist".format(section)
694+
695+ for k in expected.keys():
696+ if not config.has_option(section, k):
697+ return "section [{}] is missing option {}".format(section, k)
698+ if config.get(section, k) != expected[k]:
699+ return "section [{}] {}:{} != expected {}:{}".format(
700+ section, k, config.get(section, k), k, expected[k])
701+ return None
702+
703+ def _validate_dict_data(self, expected, actual):
704+ """Validate dictionary data.
705+
706+ Compare expected dictionary data vs actual dictionary data.
707+ The values in the 'expected' dictionary can be strings, bools, ints,
708+ longs, or can be a function that evaluates a variable and returns a
709+ bool.
710+ """
711+ for k, v in expected.iteritems():
712+ if k in actual:
713+ if (isinstance(v, basestring) or
714+ isinstance(v, bool) or
715+ isinstance(v, (int, long))):
716+ if v != actual[k]:
717+ return "{}:{}".format(k, actual[k])
718+ elif not v(actual[k]):
719+ return "{}:{}".format(k, actual[k])
720+ else:
721+ return "key '{}' does not exist".format(k)
722+ return None
723+
724+ def validate_relation_data(self, sentry_unit, relation, expected):
725+ """Validate actual relation data based on expected relation data."""
726+ actual = sentry_unit.relation(relation[0], relation[1])
727+ self.log.debug('actual: {}'.format(repr(actual)))
728+ return self._validate_dict_data(expected, actual)
729+
730+ def _validate_list_data(self, expected, actual):
731+ """Compare expected list vs actual list data."""
732+ for e in expected:
733+ if e not in actual:
734+ return "expected item {} not found in actual list".format(e)
735+ return None
736+
737+ def not_null(self, string):
738+ if string is not None:
739+ return True
740+ else:
741+ return False
742+
743+ def _get_file_mtime(self, sentry_unit, filename):
744+ """Get last modification time of file."""
745+ return sentry_unit.file_stat(filename)['mtime']
746+
747+ def _get_dir_mtime(self, sentry_unit, directory):
748+ """Get last modification time of directory."""
749+ return sentry_unit.directory_stat(directory)['mtime']
750+
751+ def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
752+ """Get process' start time.
753+
754+ Determine start time of the process based on the last modification
755+ time of the /proc/pid directory. If pgrep_full is True, the process
756+ name is matched against the full command line.
757+ """
758+ if pgrep_full:
759+ cmd = 'pgrep -o -f {}'.format(service)
760+ else:
761+ cmd = 'pgrep -o {}'.format(service)
762+ proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip())
763+ return self._get_dir_mtime(sentry_unit, proc_dir)
764+
765+ def service_restarted(self, sentry_unit, service, filename,
766+ pgrep_full=False, sleep_time=20):
767+ """Check if service was restarted.
768+
769+ Compare a service's start time vs a file's last modification time
770+ (such as a config file for that service) to determine if the service
771+ has been restarted.
772+ """
773+ time.sleep(sleep_time)
774+ if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
775+ self._get_file_mtime(sentry_unit, filename)):
776+ return True
777+ else:
778+ return False
779+
780+ def relation_error(self, name, data):
781+ return 'unexpected relation data in {} - {}'.format(name, data)
782+
783+ def endpoint_error(self, name, data):
784+ return 'unexpected endpoint data in {} - {}'.format(name, data)
785
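
As the _validate_dict_data() docstring above notes, values in the
expected dictionary may be plain literals (compared with ==) or callables
that must return True for the corresponding actual value. A small
illustration, with relation data invented for this sketch, mirroring how
basic_deployment.py uses valid_ip and not_null as matchers:

    # Illustration only; the expected/actual dictionaries are made up.
    from charmhelpers.contrib.amulet.utils import AmuletUtils

    u = AmuletUtils()
    expected = {'auth': 'none',                  # literal, compared with ==
                'private-address': u.valid_ip,   # callable, must return True
                'osd_bootstrap_key': u.not_null}
    actual = {'auth': 'none',
              'private-address': '10.5.0.12',
              'osd_bootstrap_key': 'AQCXrnZQexample=='}
    assert u._validate_dict_data(expected, actual) is None
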
786=== added directory 'tests/charmhelpers/contrib/openstack'
787=== added file 'tests/charmhelpers/contrib/openstack/__init__.py'
788=== added directory 'tests/charmhelpers/contrib/openstack/amulet'
789=== added file 'tests/charmhelpers/contrib/openstack/amulet/__init__.py'
790=== added file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py'
791--- tests/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
792+++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2014-09-29 20:46:36 +0000
793@@ -0,0 +1,91 @@
794+from charmhelpers.contrib.amulet.deployment import (
795+ AmuletDeployment
796+)
797+
798+
799+class OpenStackAmuletDeployment(AmuletDeployment):
800+ """OpenStack amulet deployment.
801+
802+ This class inherits from AmuletDeployment and has additional support
803+ that is specifically for use by OpenStack charms.
804+ """
805+
806+ def __init__(self, series=None, openstack=None, source=None, stable=True):
807+ """Initialize the deployment environment."""
808+ super(OpenStackAmuletDeployment, self).__init__(series)
809+ self.openstack = openstack
810+ self.source = source
811+ self.stable = stable
812+ # Note(coreycb): this needs to be changed when new next branches come
813+ # out.
814+ self.current_next = "trusty"
815+
816+ def _determine_branch_locations(self, other_services):
817+ """Determine the branch locations for the other services.
818+
819+ Determine if the local branch being tested is derived from its
820+ stable or next (dev) branch, and based on this, use the corresponding
821+ stable or next branches for the other_services."""
822+ base_charms = ['mysql', 'mongodb', 'rabbitmq-server']
823+
824+ if self.stable:
825+ for svc in other_services:
826+ temp = 'lp:charms/{}'
827+ svc['location'] = temp.format(svc['name'])
828+ else:
829+ for svc in other_services:
830+ if svc['name'] in base_charms:
831+ temp = 'lp:charms/{}'
832+ svc['location'] = temp.format(svc['name'])
833+ else:
834+ temp = 'lp:~openstack-charmers/charms/{}/{}/next'
835+ svc['location'] = temp.format(self.current_next,
836+ svc['name'])
837+ return other_services
838+
839+ def _add_services(self, this_service, other_services):
840+ """Add services to the deployment and set openstack-origin/source."""
841+ other_services = self._determine_branch_locations(other_services)
842+
843+ super(OpenStackAmuletDeployment, self)._add_services(this_service,
844+ other_services)
845+
846+ services = other_services
847+ services.append(this_service)
848+ use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
849+ 'ceph-osd', 'ceph-radosgw']
850+
851+ if self.openstack:
852+ for svc in services:
853+ if svc['name'] not in use_source:
854+ config = {'openstack-origin': self.openstack}
855+ self.d.configure(svc['name'], config)
856+
857+ if self.source:
858+ for svc in services:
859+ if svc['name'] in use_source:
860+ config = {'source': self.source}
861+ self.d.configure(svc['name'], config)
862+
863+ def _configure_services(self, configs):
864+ """Configure all of the services."""
865+ for service, config in configs.iteritems():
866+ self.d.configure(service, config)
867+
868+ def _get_openstack_release(self):
869+ """Get openstack release.
870+
871+ Return an integer representing the enum value of the openstack
872+ release.
873+ """
874+ (self.precise_essex, self.precise_folsom, self.precise_grizzly,
875+ self.precise_havana, self.precise_icehouse,
876+ self.trusty_icehouse) = range(6)
877+ releases = {
878+ ('precise', None): self.precise_essex,
879+ ('precise', 'cloud:precise-folsom'): self.precise_folsom,
880+ ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
881+ ('precise', 'cloud:precise-havana'): self.precise_havana,
882+ ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
883+ ('trusty', None): self.trusty_icehouse}
884+ return releases[(self.series, self.openstack)]
885
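
_get_openstack_release() above flattens (series, openstack-origin) pairs
into an ordered enum so tests can compare releases with simple operators,
as test_restart_on_config_change does. A standalone sketch of the same
idea, trimmed to three releases for brevity:

    # Standalone sketch, not charm code; mirrors the mapping above.
    (precise_essex, precise_icehouse, trusty_icehouse) = range(3)
    releases = {('precise', None): precise_essex,
                ('precise', 'cloud:precise-icehouse'): precise_icehouse,
                ('trusty', None): trusty_icehouse}
    assert releases[('trusty', None)] >= precise_icehouse
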
886=== added file 'tests/charmhelpers/contrib/openstack/amulet/utils.py'
887--- tests/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
888+++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2014-09-29 20:46:36 +0000
889@@ -0,0 +1,276 @@
890+import logging
891+import os
892+import time
893+import urllib
894+
895+import glanceclient.v1.client as glance_client
896+import keystoneclient.v2_0 as keystone_client
897+import novaclient.v1_1.client as nova_client
898+
899+from charmhelpers.contrib.amulet.utils import (
900+ AmuletUtils
901+)
902+
903+DEBUG = logging.DEBUG
904+ERROR = logging.ERROR
905+
906+
907+class OpenStackAmuletUtils(AmuletUtils):
908+ """OpenStack amulet utilities.
909+
910+ This class inherits from AmuletUtils and has additional support
911+ that is specifically for use by OpenStack charms.
912+ """
913+
914+ def __init__(self, log_level=ERROR):
915+ """Initialize the deployment environment."""
916+ super(OpenStackAmuletUtils, self).__init__(log_level)
917+
918+ def validate_endpoint_data(self, endpoints, admin_port, internal_port,
919+ public_port, expected):
920+ """Validate endpoint data.
921+
922+ Validate actual endpoint data vs expected endpoint data. The ports
923+ are used to find the matching endpoint.
924+ """
925+ found = False
926+ for ep in endpoints:
927+ self.log.debug('endpoint: {}'.format(repr(ep)))
928+ if (admin_port in ep.adminurl and
929+ internal_port in ep.internalurl and
930+ public_port in ep.publicurl):
931+ found = True
932+ actual = {'id': ep.id,
933+ 'region': ep.region,
934+ 'adminurl': ep.adminurl,
935+ 'internalurl': ep.internalurl,
936+ 'publicurl': ep.publicurl,
937+ 'service_id': ep.service_id}
938+ ret = self._validate_dict_data(expected, actual)
939+ if ret:
940+ return 'unexpected endpoint data - {}'.format(ret)
941+
942+ if not found:
943+ return 'endpoint not found'
944+
945+ def validate_svc_catalog_endpoint_data(self, expected, actual):
946+ """Validate service catalog endpoint data.
947+
948+ Validate a list of actual service catalog endpoints vs a list of
949+ expected service catalog endpoints.
950+ """
951+ self.log.debug('actual: {}'.format(repr(actual)))
952+ for k, v in expected.iteritems():
953+ if k in actual:
954+ ret = self._validate_dict_data(expected[k][0], actual[k][0])
955+ if ret:
956+ return self.endpoint_error(k, ret)
957+ else:
958+ return "endpoint {} does not exist".format(k)
959+ return ret
960+
961+ def validate_tenant_data(self, expected, actual):
962+ """Validate tenant data.
963+
964+ Validate a list of actual tenant data vs list of expected tenant
965+ data.
966+ """
967+ self.log.debug('actual: {}'.format(repr(actual)))
968+ for e in expected:
969+ found = False
970+ for act in actual:
971+ a = {'enabled': act.enabled, 'description': act.description,
972+ 'name': act.name, 'id': act.id}
973+ if e['name'] == a['name']:
974+ found = True
975+ ret = self._validate_dict_data(e, a)
976+ if ret:
977+ return "unexpected tenant data - {}".format(ret)
978+ if not found:
979+ return "tenant {} does not exist".format(e['name'])
980+ return ret
981+
982+ def validate_role_data(self, expected, actual):
983+ """Validate role data.
984+
985+ Validate a list of actual role data vs a list of expected role
986+ data.
987+ """
988+ self.log.debug('actual: {}'.format(repr(actual)))
989+ for e in expected:
990+ found = False
991+ for act in actual:
992+ a = {'name': act.name, 'id': act.id}
993+ if e['name'] == a['name']:
994+ found = True
995+ ret = self._validate_dict_data(e, a)
996+ if ret:
997+ return "unexpected role data - {}".format(ret)
998+ if not found:
999+ return "role {} does not exist".format(e['name'])
1000+ return ret
1001+
1002+ def validate_user_data(self, expected, actual):
1003+ """Validate user data.
1004+
1005+ Validate a list of actual user data vs a list of expected user
1006+ data.
1007+ """
1008+ self.log.debug('actual: {}'.format(repr(actual)))
1009+ for e in expected:
1010+ found = False
1011+ for act in actual:
1012+ a = {'enabled': act.enabled, 'name': act.name,
1013+ 'email': act.email, 'tenantId': act.tenantId,
1014+ 'id': act.id}
1015+ if e['name'] == a['name']:
1016+ found = True
1017+ ret = self._validate_dict_data(e, a)
1018+ if ret:
1019+ return "unexpected user data - {}".format(ret)
1020+ if not found:
1021+ return "user {} does not exist".format(e['name'])
1022+ return ret
1023+
1024+ def validate_flavor_data(self, expected, actual):
1025+ """Validate flavor data.
1026+
1027+ Validate a list of actual flavors vs a list of expected flavors.
1028+ """
1029+ self.log.debug('actual: {}'.format(repr(actual)))
1030+ act = [a.name for a in actual]
1031+ return self._validate_list_data(expected, act)
1032+
1033+ def tenant_exists(self, keystone, tenant):
1034+ """Return True if tenant exists."""
1035+ return tenant in [t.name for t in keystone.tenants.list()]
1036+
1037+ def authenticate_keystone_admin(self, keystone_sentry, user, password,
1038+ tenant):
1039+ """Authenticates admin user with the keystone admin endpoint."""
1040+ unit = keystone_sentry
1041+ service_ip = unit.relation('shared-db',
1042+ 'mysql:shared-db')['private-address']
1043+ ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
1044+ return keystone_client.Client(username=user, password=password,
1045+ tenant_name=tenant, auth_url=ep)
1046+
1047+ def authenticate_keystone_user(self, keystone, user, password, tenant):
1048+ """Authenticates a regular user with the keystone public endpoint."""
1049+ ep = keystone.service_catalog.url_for(service_type='identity',
1050+ endpoint_type='publicURL')
1051+ return keystone_client.Client(username=user, password=password,
1052+ tenant_name=tenant, auth_url=ep)
1053+
1054+ def authenticate_glance_admin(self, keystone):
1055+ """Authenticates admin user with glance."""
1056+ ep = keystone.service_catalog.url_for(service_type='image',
1057+ endpoint_type='adminURL')
1058+ return glance_client.Client(ep, token=keystone.auth_token)
1059+
1060+ def authenticate_nova_user(self, keystone, user, password, tenant):
1061+ """Authenticates a regular user with nova-api."""
1062+ ep = keystone.service_catalog.url_for(service_type='identity',
1063+ endpoint_type='publicURL')
1064+ return nova_client.Client(username=user, api_key=password,
1065+ project_id=tenant, auth_url=ep)
1066+
1067+ def create_cirros_image(self, glance, image_name):
1068+ """Download the latest cirros image and upload it to glance."""
1069+ http_proxy = os.getenv('AMULET_HTTP_PROXY')
1070+ self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
1071+ if http_proxy:
1072+ proxies = {'http': http_proxy}
1073+ opener = urllib.FancyURLopener(proxies)
1074+ else:
1075+ opener = urllib.FancyURLopener()
1076+
1077+ f = opener.open("http://download.cirros-cloud.net/version/released")
1078+ version = f.read().strip()
1079+ cirros_img = "cirros-{}-x86_64-disk.img".format(version)
1080+ local_path = os.path.join('tests', cirros_img)
1081+
1082+ if not os.path.exists(local_path):
1083+ cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
1084+ version, cirros_img)
1085+ opener.retrieve(cirros_url, local_path)
1086+ f.close()
1087+
1088+ with open(local_path) as f:
1089+ image = glance.images.create(name=image_name, is_public=True,
1090+ disk_format='qcow2',
1091+ container_format='bare', data=f)
1092+ count = 1
1093+ status = image.status
1094+ while status != 'active' and count < 10:
1095+ time.sleep(3)
1096+ image = glance.images.get(image.id)
1097+ status = image.status
1098+ self.log.debug('image status: {}'.format(status))
1099+ count += 1
1100+
1101+ if status != 'active':
1102+ self.log.error('image creation timed out')
1103+ return None
1104+
1105+ return image
1106+
1107+ def delete_image(self, glance, image):
1108+ """Delete the specified image."""
1109+ num_before = len(list(glance.images.list()))
1110+ glance.images.delete(image)
1111+
1112+ count = 1
1113+ num_after = len(list(glance.images.list()))
1114+ while num_after != (num_before - 1) and count < 10:
1115+ time.sleep(3)
1116+ num_after = len(list(glance.images.list()))
1117+ self.log.debug('number of images: {}'.format(num_after))
1118+ count += 1
1119+
1120+ if num_after != (num_before - 1):
1121+ self.log.error('image deletion timed out')
1122+ return False
1123+
1124+ return True
1125+
1126+ def create_instance(self, nova, image_name, instance_name, flavor):
1127+ """Create the specified instance."""
1128+ image = nova.images.find(name=image_name)
1129+ flavor = nova.flavors.find(name=flavor)
1130+ instance = nova.servers.create(name=instance_name, image=image,
1131+ flavor=flavor)
1132+
1133+ count = 1
1134+ status = instance.status
1135+ while status != 'ACTIVE' and count < 60:
1136+ time.sleep(3)
1137+ instance = nova.servers.get(instance.id)
1138+ status = instance.status
1139+ self.log.debug('instance status: {}'.format(status))
1140+ count += 1
1141+
1142+ if status != 'ACTIVE':
1143+ self.log.error('instance creation timed out')
1144+ return None
1145+
1146+ return instance
1147+
1148+ def delete_instance(self, nova, instance):
1149+ """Delete the specified instance."""
1150+ num_before = len(list(nova.servers.list()))
1151+ nova.servers.delete(instance)
1152+
1153+ count = 1
1154+ num_after = len(list(nova.servers.list()))
1155+ while num_after != (num_before - 1) and count < 10:
1156+ time.sleep(3)
1157+ num_after = len(list(nova.servers.list()))
1158+ self.log.debug('number of instances: {}'.format(num_after))
1159+ count += 1
1160+
1161+ if num_after != (num_before - 1):
1162+ self.log.error('instance deletion timed out')
1163+ return False
1164+
1165+ return True
