Merge lp:~corey.bryant/charms/trusty/nova-cloud-controller/amulet-basic into lp:~openstack-charmers-archive/charms/trusty/nova-cloud-controller/next

Proposed by Corey Bryant
Status: Merged
Merged at revision: 84
Proposed branch: lp:~corey.bryant/charms/trusty/nova-cloud-controller/amulet-basic
Merge into: lp:~openstack-charmers-archive/charms/trusty/nova-cloud-controller/next
Diff against target: 2296 lines (+1781/-86)
30 files modified
Makefile (+12/-4)
charm-helpers-hooks.yaml (+10/-0)
charm-helpers-tests.yaml (+5/-0)
charm-helpers.yaml (+0/-10)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+57/-0)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+253/-0)
hooks/charmhelpers/contrib/openstack/context.py (+45/-13)
hooks/charmhelpers/contrib/openstack/neutron.py (+14/-0)
hooks/charmhelpers/contrib/openstack/templating.py (+22/-23)
hooks/charmhelpers/contrib/openstack/utils.py (+5/-2)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+1/-1)
hooks/charmhelpers/contrib/storage/linux/utils.py (+1/-0)
hooks/charmhelpers/core/fstab.py (+116/-0)
hooks/charmhelpers/core/hookenv.py (+5/-4)
hooks/charmhelpers/core/host.py (+28/-12)
hooks/charmhelpers/fetch/__init__.py (+24/-16)
hooks/charmhelpers/fetch/bzrurl.py (+2/-1)
tests/00-setup (+10/-0)
tests/10-basic-precise-essex (+10/-0)
tests/11-basic-precise-folsom (+18/-0)
tests/12-basic-precise-grizzly (+12/-0)
tests/13-basic-precise-havana (+12/-0)
tests/14-basic-precise-icehouse (+12/-0)
tests/15-basic-trusty-icehouse (+10/-0)
tests/README (+47/-0)
tests/basic_deployment.py (+520/-0)
tests/charmhelpers/contrib/amulet/deployment.py (+63/-0)
tests/charmhelpers/contrib/amulet/utils.py (+157/-0)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+57/-0)
tests/charmhelpers/contrib/openstack/amulet/utils.py (+253/-0)
To merge this branch: bzr merge lp:~corey.bryant/charms/trusty/nova-cloud-controller/amulet-basic
Reviewer Review Type Date Requested Status
Liam Young (community) Needs Fixing
Review via email: mp+226503@code.launchpad.net
To post a comment you must log in.
84. By Corey Bryant

Add Amulet basic tests

Revision history for this message
Liam Young (gnuoy) wrote :

Looks good but some lint fixes are needed

review: Needs Fixing
Revision history for this message
Corey Bryant (corey.bryant) wrote :

Thanks for the review Liam. Good catch on the noqa issue. I'll fix that in the charm-helpers branch and will fix up any lint issues throughout the charm tests.

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
=== modified file 'Makefile'
--- Makefile 2014-05-21 10:14:28 +0000
+++ Makefile 2014-07-11 17:34:59 +0000
@@ -2,15 +2,23 @@
 PYTHON := /usr/bin/env python
 
 lint:
-	@flake8 --exclude hooks/charmhelpers hooks unit_tests
+	@flake8 --exclude hooks/charmhelpers hooks unit_tests tests
 	@charm proof
 
+unit_test:
+	@echo Starting unit tests...
+	@$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
+
 test:
-	@echo Starting tests...
-	@$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
+	@echo Starting Amulet tests...
+	# coreycb note: The -v should only be temporary until Amulet sends
+	# raise_status() messages to stderr:
+	# https://bugs.launchpad.net/amulet/+bug/1320357
+	@juju test -v -p AMULET_HTTP_PROXY
 
 sync:
-	@charm-helper-sync -c charm-helpers.yaml
+	@charm-helper-sync -c charm-helpers-hooks.yaml
+	@charm-helper-sync -c charm-helpers-tests.yaml
 
 publish: lint test
 	bzr push lp:charms/nova-cloud-controller
 
=== added file 'charm-helpers-hooks.yaml'
--- charm-helpers-hooks.yaml 1970-01-01 00:00:00 +0000
+++ charm-helpers-hooks.yaml 2014-07-11 17:34:59 +0000
@@ -0,0 +1,10 @@
+branch: lp:charm-helpers
+destination: hooks/charmhelpers
+include:
+    - core
+    - fetch
+    - contrib.openstack|inc=*
+    - contrib.storage
+    - contrib.hahelpers:
+        - apache
+    - payload.execd
=== added file 'charm-helpers-tests.yaml'
--- charm-helpers-tests.yaml 1970-01-01 00:00:00 +0000
+++ charm-helpers-tests.yaml 2014-07-11 17:34:59 +0000
@@ -0,0 +1,5 @@
+branch: lp:charm-helpers
+destination: tests/charmhelpers
+include:
+    - contrib.amulet
+    - contrib.openstack.amulet
=== removed file 'charm-helpers.yaml'
--- charm-helpers.yaml 2014-05-10 02:00:22 +0000
+++ charm-helpers.yaml 1970-01-01 00:00:00 +0000
@@ -1,10 +0,0 @@
-branch: lp:charm-helpers
-destination: hooks/charmhelpers
-include:
-    - core
-    - fetch
-    - contrib.openstack|inc=*
-    - contrib.storage
-    - contrib.hahelpers:
-        - apache
-    - payload.execd
=== added directory 'hooks/charmhelpers/contrib/openstack/amulet'
=== added file 'hooks/charmhelpers/contrib/openstack/amulet/__init__.py'
=== added file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-07-11 17:34:59 +0000
@@ -0,0 +1,57 @@
+from charmhelpers.contrib.amulet.deployment import (
+    AmuletDeployment
+)
+
+
+class OpenStackAmuletDeployment(AmuletDeployment):
+    """This class inherits from AmuletDeployment and has additional support
+    that is specifically for use by OpenStack charms."""
+
+    def __init__(self, series, openstack=None, source=None):
+        """Initialize the deployment environment."""
+        super(OpenStackAmuletDeployment, self).__init__(series)
+        self.openstack = openstack
+        self.source = source
+
+    def _add_services(self, this_service, other_services):
+        """Add services to the deployment and set openstack-origin."""
+        super(OpenStackAmuletDeployment, self)._add_services(this_service,
+                                                             other_services)
+        name = 0
+        services = other_services
+        services.append(this_service)
+        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']
+
+        if self.openstack:
+            for svc in services:
+                charm_name = self._get_charm_name(svc[name])
+                if charm_name not in use_source:
+                    config = {'openstack-origin': self.openstack}
+                    self.d.configure(svc[name], config)
+
+        if self.source:
+            for svc in services:
+                charm_name = self._get_charm_name(svc[name])
+                if charm_name in use_source:
+                    config = {'source': self.source}
+                    self.d.configure(svc[name], config)
+
+    def _configure_services(self, configs):
+        """Configure all of the services."""
+        for service, config in configs.iteritems():
+            self.d.configure(service, config)
+
+    def _get_openstack_release(self):
+        """Return an integer representing the enum value of the openstack
+        release."""
+        self.precise_essex, self.precise_folsom, self.precise_grizzly, \
+            self.precise_havana, self.precise_icehouse, \
+            self.trusty_icehouse = range(6)
+        releases = {
+            ('precise', None): self.precise_essex,
+            ('precise', 'cloud:precise-folsom'): self.precise_folsom,
+            ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
+            ('precise', 'cloud:precise-havana'): self.precise_havana,
+            ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
+            ('trusty', None): self.trusty_icehouse}
+        return releases[(self.series, self.openstack)]
=== added file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-07-11 17:34:59 +0000
@@ -0,0 +1,253 @@
+import logging
+import os
+import time
+import urllib
+
+import glanceclient.v1.client as glance_client
+import keystoneclient.v2_0 as keystone_client
+import novaclient.v1_1.client as nova_client
+
+from charmhelpers.contrib.amulet.utils import (
+    AmuletUtils
+)
+
+DEBUG = logging.DEBUG
+ERROR = logging.ERROR
+
+
+class OpenStackAmuletUtils(AmuletUtils):
+    """This class inherits from AmuletUtils and has additional support
+    that is specifically for use by OpenStack charms."""
+
+    def __init__(self, log_level=ERROR):
+        """Initialize the deployment environment."""
+        super(OpenStackAmuletUtils, self).__init__(log_level)
+
+    def validate_endpoint_data(self, endpoints, admin_port, internal_port,
+                               public_port, expected):
+        """Validate actual endpoint data vs expected endpoint data. The ports
+        are used to find the matching endpoint."""
+        found = False
+        for ep in endpoints:
+            self.log.debug('endpoint: {}'.format(repr(ep)))
+            if admin_port in ep.adminurl and internal_port in ep.internalurl \
+                    and public_port in ep.publicurl:
+                found = True
+                actual = {'id': ep.id,
+                          'region': ep.region,
+                          'adminurl': ep.adminurl,
+                          'internalurl': ep.internalurl,
+                          'publicurl': ep.publicurl,
+                          'service_id': ep.service_id}
+                ret = self._validate_dict_data(expected, actual)
+                if ret:
+                    return 'unexpected endpoint data - {}'.format(ret)
+
+        if not found:
+            return 'endpoint not found'
+
+    def validate_svc_catalog_endpoint_data(self, expected, actual):
+        """Validate a list of actual service catalog endpoints vs a list of
+        expected service catalog endpoints."""
+        self.log.debug('actual: {}'.format(repr(actual)))
+        for k, v in expected.iteritems():
+            if k in actual:
+                ret = self._validate_dict_data(expected[k][0], actual[k][0])
+                if ret:
+                    return self.endpoint_error(k, ret)
+            else:
+                return "endpoint {} does not exist".format(k)
+        return ret
+
+    def validate_tenant_data(self, expected, actual):
+        """Validate a list of actual tenant data vs list of expected tenant
+        data."""
+        self.log.debug('actual: {}'.format(repr(actual)))
+        for e in expected:
+            found = False
+            for act in actual:
+                a = {'enabled': act.enabled, 'description': act.description,
+                     'name': act.name, 'id': act.id}
+                if e['name'] == a['name']:
+                    found = True
+                    ret = self._validate_dict_data(e, a)
+                    if ret:
+                        return "unexpected tenant data - {}".format(ret)
+            if not found:
+                return "tenant {} does not exist".format(e['name'])
+        return ret
+
+    def validate_role_data(self, expected, actual):
+        """Validate a list of actual role data vs a list of expected role
+        data."""
+        self.log.debug('actual: {}'.format(repr(actual)))
+        for e in expected:
+            found = False
+            for act in actual:
+                a = {'name': act.name, 'id': act.id}
+                if e['name'] == a['name']:
+                    found = True
+                    ret = self._validate_dict_data(e, a)
+                    if ret:
+                        return "unexpected role data - {}".format(ret)
+            if not found:
+                return "role {} does not exist".format(e['name'])
+        return ret
+
+    def validate_user_data(self, expected, actual):
+        """Validate a list of actual user data vs a list of expected user
+        data."""
+        self.log.debug('actual: {}'.format(repr(actual)))
+        for e in expected:
+            found = False
+            for act in actual:
+                a = {'enabled': act.enabled, 'name': act.name,
+                     'email': act.email, 'tenantId': act.tenantId,
+                     'id': act.id}
+                if e['name'] == a['name']:
+                    found = True
+                    ret = self._validate_dict_data(e, a)
+                    if ret:
+                        return "unexpected user data - {}".format(ret)
+            if not found:
+                return "user {} does not exist".format(e['name'])
+        return ret
+
+    def validate_flavor_data(self, expected, actual):
+        """Validate a list of actual flavors vs a list of expected flavors."""
+        self.log.debug('actual: {}'.format(repr(actual)))
+        act = [a.name for a in actual]
+        return self._validate_list_data(expected, act)
+
+    def tenant_exists(self, keystone, tenant):
+        """Return True if tenant exists"""
+        return tenant in [t.name for t in keystone.tenants.list()]
+
+    def authenticate_keystone_admin(self, keystone_sentry, user, password,
+                                    tenant):
+        """Authenticates admin user with the keystone admin endpoint."""
+        service_ip = \
+            keystone_sentry.relation('shared-db',
+                                     'mysql:shared-db')['private-address']
+        ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
+        return keystone_client.Client(username=user, password=password,
+                                      tenant_name=tenant, auth_url=ep)
+
+    def authenticate_keystone_user(self, keystone, user, password, tenant):
+        """Authenticates a regular user with the keystone public endpoint."""
+        ep = keystone.service_catalog.url_for(service_type='identity',
+                                              endpoint_type='publicURL')
+        return keystone_client.Client(username=user, password=password,
+                                      tenant_name=tenant, auth_url=ep)
+
+    def authenticate_glance_admin(self, keystone):
+        """Authenticates admin user with glance."""
+        ep = keystone.service_catalog.url_for(service_type='image',
+                                              endpoint_type='adminURL')
+        return glance_client.Client(ep, token=keystone.auth_token)
+
+    def authenticate_nova_user(self, keystone, user, password, tenant):
+        """Authenticates a regular user with nova-api."""
+        ep = keystone.service_catalog.url_for(service_type='identity',
+                                              endpoint_type='publicURL')
+        return nova_client.Client(username=user, api_key=password,
+                                  project_id=tenant, auth_url=ep)
+
+    def create_cirros_image(self, glance, image_name):
+        """Download the latest cirros image and upload it to glance."""
+        http_proxy = os.getenv('AMULET_HTTP_PROXY')
+        self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
+        if http_proxy:
+            proxies = {'http': http_proxy}
+            opener = urllib.FancyURLopener(proxies)
+        else:
+            opener = urllib.FancyURLopener()
+
+        f = opener.open("http://download.cirros-cloud.net/version/released")
+        version = f.read().strip()
+        cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version)
+
+        if not os.path.exists(cirros_img):
+            cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
+                                                  version, cirros_img)
+            opener.retrieve(cirros_url, cirros_img)
+        f.close()
+
+        with open(cirros_img) as f:
+            image = glance.images.create(name=image_name, is_public=True,
+                                         disk_format='qcow2',
+                                         container_format='bare', data=f)
+        count = 1
+        status = image.status
+        while status != 'active' and count < 10:
+            time.sleep(3)
+            image = glance.images.get(image.id)
+            status = image.status
+            self.log.debug('image status: {}'.format(status))
+            count += 1
+
+        if status != 'active':
+            self.log.error('image creation timed out')
+            return None
+
+        return image
+
+    def delete_image(self, glance, image):
+        """Delete the specified image."""
+        num_before = len(list(glance.images.list()))
+        glance.images.delete(image)
+
+        count = 1
+        num_after = len(list(glance.images.list()))
+        while num_after != (num_before - 1) and count < 10:
+            time.sleep(3)
+            num_after = len(list(glance.images.list()))
+            self.log.debug('number of images: {}'.format(num_after))
+            count += 1
+
+        if num_after != (num_before - 1):
+            self.log.error('image deletion timed out')
+            return False
+
+        return True
+
+    def create_instance(self, nova, image_name, instance_name, flavor):
+        """Create the specified instance."""
+        image = nova.images.find(name=image_name)
+        flavor = nova.flavors.find(name=flavor)
+        instance = nova.servers.create(name=instance_name, image=image,
+                                       flavor=flavor)
+
+        count = 1
+        status = instance.status
+        while status != 'ACTIVE' and count < 60:
+            time.sleep(3)
+            instance = nova.servers.get(instance.id)
+            status = instance.status
+            self.log.debug('instance status: {}'.format(status))
+            count += 1
+
+        if status != 'ACTIVE':
+            self.log.error('instance creation timed out')
+            return None
+
+        return instance
+
+    def delete_instance(self, nova, instance):
+        """Delete the specified instance."""
+        num_before = len(list(nova.servers.list()))
+        nova.servers.delete(instance)
+
+        count = 1
+        num_after = len(list(nova.servers.list()))
+        while num_after != (num_before - 1) and count < 10:
+            time.sleep(3)
+            num_after = len(list(nova.servers.list()))
+            self.log.debug('number of instances: {}'.format(num_after))
+            count += 1
+
+        if num_after != (num_before - 1):
+            self.log.error('instance deletion timed out')
+            return False
+
+        return True
=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
--- hooks/charmhelpers/contrib/openstack/context.py 2014-05-21 10:28:14 +0000
+++ hooks/charmhelpers/contrib/openstack/context.py 2014-07-11 17:34:59 +0000
@@ -243,23 +243,31 @@
243243
244244
245class AMQPContext(OSContextGenerator):245class AMQPContext(OSContextGenerator):
246 interfaces = ['amqp']
247246
248 def __init__(self, ssl_dir=None):247 def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
249 self.ssl_dir = ssl_dir248 self.ssl_dir = ssl_dir
249 self.rel_name = rel_name
250 self.relation_prefix = relation_prefix
251 self.interfaces = [rel_name]
250252
251 def __call__(self):253 def __call__(self):
252 log('Generating template context for amqp')254 log('Generating template context for amqp')
253 conf = config()255 conf = config()
256 user_setting = 'rabbit-user'
257 vhost_setting = 'rabbit-vhost'
258 if self.relation_prefix:
259 user_setting = self.relation_prefix + '-rabbit-user'
260 vhost_setting = self.relation_prefix + '-rabbit-vhost'
261
254 try:262 try:
255 username = conf['rabbit-user']263 username = conf[user_setting]
256 vhost = conf['rabbit-vhost']264 vhost = conf[vhost_setting]
257 except KeyError as e:265 except KeyError as e:
258 log('Could not generate shared_db context. '266 log('Could not generate shared_db context. '
259 'Missing required charm config options: %s.' % e)267 'Missing required charm config options: %s.' % e)
260 raise OSContextError268 raise OSContextError
261 ctxt = {}269 ctxt = {}
262 for rid in relation_ids('amqp'):270 for rid in relation_ids(self.rel_name):
263 ha_vip_only = False271 ha_vip_only = False
264 for unit in related_units(rid):272 for unit in related_units(rid):
265 if relation_get('clustered', rid=rid, unit=unit):273 if relation_get('clustered', rid=rid, unit=unit):
@@ -418,12 +426,13 @@
418 """426 """
419 Generates a context for an apache vhost configuration that configures427 Generates a context for an apache vhost configuration that configures
420 HTTPS reverse proxying for one or many endpoints. Generated context428 HTTPS reverse proxying for one or many endpoints. Generated context
421 looks something like:429 looks something like::
422 {430
423 'namespace': 'cinder',431 {
424 'private_address': 'iscsi.mycinderhost.com',432 'namespace': 'cinder',
425 'endpoints': [(8776, 8766), (8777, 8767)]433 'private_address': 'iscsi.mycinderhost.com',
426 }434 'endpoints': [(8776, 8766), (8777, 8767)]
435 }
427436
428 The endpoints list consists of a tuples mapping external ports437 The endpoints list consists of a tuples mapping external ports
429 to internal ports.438 to internal ports.
@@ -541,6 +550,26 @@
541550
542 return nvp_ctxt551 return nvp_ctxt
543552
553 def n1kv_ctxt(self):
554 driver = neutron_plugin_attribute(self.plugin, 'driver',
555 self.network_manager)
556 n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
557 self.network_manager)
558 n1kv_ctxt = {
559 'core_plugin': driver,
560 'neutron_plugin': 'n1kv',
561 'neutron_security_groups': self.neutron_security_groups,
562 'local_ip': unit_private_ip(),
563 'config': n1kv_config,
564 'vsm_ip': config('n1kv-vsm-ip'),
565 'vsm_username': config('n1kv-vsm-username'),
566 'vsm_password': config('n1kv-vsm-password'),
567 'restrict_policy_profiles': config(
568 'n1kv_restrict_policy_profiles'),
569 }
570
571 return n1kv_ctxt
572
544 def neutron_ctxt(self):573 def neutron_ctxt(self):
545 if https():574 if https():
546 proto = 'https'575 proto = 'https'
@@ -572,6 +601,8 @@
572 ctxt.update(self.ovs_ctxt())601 ctxt.update(self.ovs_ctxt())
573 elif self.plugin in ['nvp', 'nsx']:602 elif self.plugin in ['nvp', 'nsx']:
574 ctxt.update(self.nvp_ctxt())603 ctxt.update(self.nvp_ctxt())
604 elif self.plugin == 'n1kv':
605 ctxt.update(self.n1kv_ctxt())
575606
576 alchemy_flags = config('neutron-alchemy-flags')607 alchemy_flags = config('neutron-alchemy-flags')
577 if alchemy_flags:608 if alchemy_flags:
@@ -611,7 +642,7 @@
611 The subordinate interface allows subordinates to export their642 The subordinate interface allows subordinates to export their
612 configuration requirements to the principle for multiple config643 configuration requirements to the principle for multiple config
613 files and multiple serivces. Ie, a subordinate that has interfaces644 files and multiple serivces. Ie, a subordinate that has interfaces
614 to both glance and nova may export to following yaml blob as json:645 to both glance and nova may export to following yaml blob as json::
615646
616 glance:647 glance:
617 /etc/glance/glance-api.conf:648 /etc/glance/glance-api.conf:
@@ -630,7 +661,8 @@
630661
631 It is then up to the principle charms to subscribe this context to662 It is then up to the principle charms to subscribe this context to
632 the service+config file it is interestd in. Configuration data will663 the service+config file it is interestd in. Configuration data will
633 be available in the template context, in glance's case, as:664 be available in the template context, in glance's case, as::
665
634 ctxt = {666 ctxt = {
635 ... other context ...667 ... other context ...
636 'subordinate_config': {668 'subordinate_config': {
637669
=== modified file 'hooks/charmhelpers/contrib/openstack/neutron.py'
--- hooks/charmhelpers/contrib/openstack/neutron.py 2014-05-21 10:28:14 +0000
+++ hooks/charmhelpers/contrib/openstack/neutron.py 2014-07-11 17:34:59 +0000
@@ -128,6 +128,20 @@
             'server_packages': ['neutron-server',
                                 'neutron-plugin-vmware'],
             'server_services': ['neutron-server']
+        },
+        'n1kv': {
+            'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
+            'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
+            'contexts': [
+                context.SharedDBContext(user=config('neutron-database-user'),
+                                        database=config('neutron-database'),
+                                        relation_prefix='neutron',
+                                        ssl_dir=NEUTRON_CONF_DIR)],
+            'services': [],
+            'packages': [['neutron-plugin-cisco']],
+            'server_packages': ['neutron-server',
+                                'neutron-plugin-cisco'],
+            'server_services': ['neutron-server']
         }
     }
     if release >= 'icehouse':
 
=== modified file 'hooks/charmhelpers/contrib/openstack/templating.py'
--- hooks/charmhelpers/contrib/openstack/templating.py 2014-02-24 19:31:57 +0000
+++ hooks/charmhelpers/contrib/openstack/templating.py 2014-07-11 17:34:59 +0000
@@ -30,17 +30,17 @@
30 loading dir.30 loading dir.
3131
32 A charm may also ship a templates dir with this module32 A charm may also ship a templates dir with this module
33 and it will be appended to the bottom of the search list, eg:33 and it will be appended to the bottom of the search list, eg::
34 hooks/charmhelpers/contrib/openstack/templates.34
3535 hooks/charmhelpers/contrib/openstack/templates
36 :param templates_dir: str: Base template directory containing release36
37 sub-directories.37 :param templates_dir (str): Base template directory containing release
38 :param os_release : str: OpenStack release codename to construct template38 sub-directories.
39 loader.39 :param os_release (str): OpenStack release codename to construct template
4040 loader.
41 :returns : jinja2.ChoiceLoader constructed with a list of41 :returns: jinja2.ChoiceLoader constructed with a list of
42 jinja2.FilesystemLoaders, ordered in descending42 jinja2.FilesystemLoaders, ordered in descending
43 order by OpenStack release.43 order by OpenStack release.
44 """44 """
45 tmpl_dirs = [(rel, os.path.join(templates_dir, rel))45 tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
46 for rel in OPENSTACK_CODENAMES.itervalues()]46 for rel in OPENSTACK_CODENAMES.itervalues()]
@@ -111,7 +111,8 @@
111 and ease the burden of managing config templates across multiple OpenStack111 and ease the burden of managing config templates across multiple OpenStack
112 releases.112 releases.
113113
114 Basic usage:114 Basic usage::
115
115 # import some common context generates from charmhelpers116 # import some common context generates from charmhelpers
116 from charmhelpers.contrib.openstack import context117 from charmhelpers.contrib.openstack import context
117118
@@ -131,21 +132,19 @@
131 # write out all registered configs132 # write out all registered configs
132 configs.write_all()133 configs.write_all()
133134
134 Details:135 **OpenStack Releases and template loading**
135136
136 OpenStack Releases and template loading
137 ---------------------------------------
138 When the object is instantiated, it is associated with a specific OS137 When the object is instantiated, it is associated with a specific OS
139 release. This dictates how the template loader will be constructed.138 release. This dictates how the template loader will be constructed.
140139
141 The constructed loader attempts to load the template from several places140 The constructed loader attempts to load the template from several places
142 in the following order:141 in the following order:
143 - from the most recent OS release-specific template dir (if one exists)142 - from the most recent OS release-specific template dir (if one exists)
144 - the base templates_dir143 - the base templates_dir
145 - a template directory shipped in the charm with this helper file.144 - a template directory shipped in the charm with this helper file.
146145
147146 For the example above, '/tmp/templates' contains the following structure::
148 For the example above, '/tmp/templates' contains the following structure:147
149 /tmp/templates/nova.conf148 /tmp/templates/nova.conf
150 /tmp/templates/api-paste.ini149 /tmp/templates/api-paste.ini
151 /tmp/templates/grizzly/api-paste.ini150 /tmp/templates/grizzly/api-paste.ini
@@ -169,8 +168,8 @@
169 $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows168 $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
170 us to ship common templates (haproxy, apache) with the helpers.169 us to ship common templates (haproxy, apache) with the helpers.
171170
172 Context generators171 **Context generators**
173 ---------------------------------------172
174 Context generators are used to generate template contexts during hook173 Context generators are used to generate template contexts during hook
175 execution. Doing so may require inspecting service relations, charm174 execution. Doing so may require inspecting service relations, charm
176 config, etc. When registered, a config file is associated with a list175 config, etc. When registered, a config file is associated with a list
177176
=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py'
--- hooks/charmhelpers/contrib/openstack/utils.py 2014-06-16 14:47:23 +0000
+++ hooks/charmhelpers/contrib/openstack/utils.py 2014-07-11 17:34:59 +0000
@@ -3,7 +3,6 @@
3# Common python helper functions used for OpenStack charms.3# Common python helper functions used for OpenStack charms.
4from collections import OrderedDict4from collections import OrderedDict
55
6import apt_pkg as apt
7import subprocess6import subprocess
8import os7import os
9import socket8import socket
@@ -85,6 +84,8 @@
85 '''Derive OpenStack release codename from a given installation source.'''84 '''Derive OpenStack release codename from a given installation source.'''
86 ubuntu_rel = lsb_release()['DISTRIB_CODENAME']85 ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
87 rel = ''86 rel = ''
87 if src is None:
88 return rel
88 if src in ['distro', 'distro-proposed']:89 if src in ['distro', 'distro-proposed']:
89 try:90 try:
90 rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]91 rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
@@ -132,6 +133,7 @@
132133
133def get_os_codename_package(package, fatal=True):134def get_os_codename_package(package, fatal=True):
134 '''Derive OpenStack release codename from an installed package.'''135 '''Derive OpenStack release codename from an installed package.'''
136 import apt_pkg as apt
135 apt.init()137 apt.init()
136138
137 # Tell apt to build an in-memory cache to prevent race conditions (if139 # Tell apt to build an in-memory cache to prevent race conditions (if
@@ -189,7 +191,7 @@
189 for version, cname in vers_map.iteritems():191 for version, cname in vers_map.iteritems():
190 if cname == codename:192 if cname == codename:
191 return version193 return version
192 #e = "Could not determine OpenStack version for package: %s" % pkg194 # e = "Could not determine OpenStack version for package: %s" % pkg
193 # error_out(e)195 # error_out(e)
194196
195197
@@ -325,6 +327,7 @@
325327
326 """328 """
327329
330 import apt_pkg as apt
328 src = config('openstack-origin')331 src = config('openstack-origin')
329 cur_vers = get_os_version_package(package)332 cur_vers = get_os_version_package(package)
330 available_vers = get_os_version_install_source(src)333 available_vers = get_os_version_install_source(src)
331334
=== modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
--- hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-03-27 11:02:24 +0000
+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-07-11 17:34:59 +0000
@@ -303,7 +303,7 @@
303 blk_device, fstype, system_services=[]):303 blk_device, fstype, system_services=[]):
304 """304 """
305 NOTE: This function must only be called from a single service unit for305 NOTE: This function must only be called from a single service unit for
306 the same rbd_img otherwise data loss will occur.306 the same rbd_img otherwise data loss will occur.
307307
308 Ensures given pool and RBD image exists, is mapped to a block device,308 Ensures given pool and RBD image exists, is mapped to a block device,
309 and the device is formatted and mounted at the given mount_point.309 and the device is formatted and mounted at the given mount_point.
310310
=== modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py'
--- hooks/charmhelpers/contrib/storage/linux/utils.py 2014-05-21 10:28:14 +0000
+++ hooks/charmhelpers/contrib/storage/linux/utils.py 2014-07-11 17:34:59 +0000
@@ -37,6 +37,7 @@
     check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
                 'bs=512', 'count=100', 'seek=%s' % (gpt_end)])
 
+
 def is_device_mounted(device):
     '''Given a device path, return True if that device is mounted, and False
     if it isn't.
 
=== added file 'hooks/charmhelpers/core/fstab.py'
--- hooks/charmhelpers/core/fstab.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/core/fstab.py 2014-07-11 17:34:59 +0000
@@ -0,0 +1,116 @@
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3
4__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
5
6import os
7
8
class Fstab(file):
    """This class extends file in order to implement a file reader/writer
    for file `/etc/fstab`
    """

    class Entry(object):
        """Entry class represents a non-comment line on the `/etc/fstab` file
        """
        def __init__(self, device, mountpoint, filesystem,
                     options, d=0, p=0):
            self.device = device
            self.mountpoint = mountpoint
            self.filesystem = filesystem

            # An empty options field is not valid in fstab; fall back to
            # the conventional "defaults" keyword.
            if not options:
                options = "defaults"

            self.options = options
            # d and p are the trailing two fstab columns (presumably the
            # dump and fsck-pass fields -- confirm against fstab(5)).
            self.d = d
            self.p = p

        def __eq__(self, o):
            # Entries compare by their rendered fstab line, so an entry
            # parsed from a file matches a freshly constructed equivalent.
            return str(self) == str(o)

        def __str__(self):
            return "{} {} {} {} {} {}".format(self.device,
                                              self.mountpoint,
                                              self.filesystem,
                                              self.options,
                                              self.d,
                                              self.p)

    DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')

    def __init__(self, path=None):
        """Open `path` (or /etc/fstab) for combined read/write access.

        An explicit path is useful for tests against a scratch file.
        """
        if path:
            self._path = path
        else:
            self._path = self.DEFAULT_PATH
        file.__init__(self, self._path, 'r+')

    def _hydrate_entry(self, line):
        # NOTE: use split with no arguments to split on any
        # whitespace including tabs
        return Fstab.Entry(*filter(
            lambda x: x not in ('', None),
            line.strip("\n").split()))

    @property
    def entries(self):
        """Generator over the parsed non-comment lines of the file."""
        self.seek(0)
        for line in self.readlines():
            try:
                if not line.startswith("#"):
                    yield self._hydrate_entry(line)
            except ValueError:
                # Lines whose field count does not fit Entry are skipped.
                pass

    def get_entry_by_attr(self, attr, value):
        """Return the first entry whose `attr` equals `value`, else None."""
        for entry in self.entries:
            e_attr = getattr(entry, attr)
            if e_attr == value:
                return entry
        return None

    def add_entry(self, entry):
        """Append `entry` to the file unless its device is already listed.

        Returns the entry on success, False when a duplicate device exists.
        """
        if self.get_entry_by_attr('device', entry.device):
            return False

        # The duplicate scan above read through to EOF, so the file
        # position is at the end and this write appends.
        self.write(str(entry) + '\n')
        self.truncate()
        return entry

    def remove_entry(self, entry):
        """Remove the line matching `entry`; return True if one was removed."""
        self.seek(0)

        lines = self.readlines()

        found = False
        for index, line in enumerate(lines):
            if not line.startswith("#"):
                if self._hydrate_entry(line) == entry:
                    found = True
                    break

        if not found:
            return False

        lines.remove(line)

        # Rewrite the remaining lines from the start and truncate the
        # leftover tail bytes of the old, longer content.
        self.seek(0)
        self.write(''.join(lines))
        self.truncate()
        return True

    @classmethod
    def remove_by_mountpoint(cls, mountpoint, path=None):
        """Remove the entry mounted at `mountpoint`; True if removed."""
        fstab = cls(path=path)
        entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
        if entry:
            return fstab.remove_entry(entry)
        return False

    @classmethod
    def add(cls, device, mountpoint, filesystem, options=None, path=None):
        """Append a new entry built from the given fields; see add_entry."""
        return cls(path=path).add_entry(Fstab.Entry(device,
                                        mountpoint, filesystem,
                                        options=options))
0117
=== modified file 'hooks/charmhelpers/core/hookenv.py'
--- hooks/charmhelpers/core/hookenv.py 2014-05-19 11:38:09 +0000
+++ hooks/charmhelpers/core/hookenv.py 2014-07-11 17:34:59 +0000
@@ -25,7 +25,7 @@
25def cached(func):25def cached(func):
26 """Cache return values for multiple executions of func + args26 """Cache return values for multiple executions of func + args
2727
28 For example:28 For example::
2929
30 @cached30 @cached
31 def unit_get(attribute):31 def unit_get(attribute):
@@ -445,18 +445,19 @@
445class Hooks(object):445class Hooks(object):
446 """A convenient handler for hook functions.446 """A convenient handler for hook functions.
447447
448 Example:448 Example::
449
449 hooks = Hooks()450 hooks = Hooks()
450451
451 # register a hook, taking its name from the function name452 # register a hook, taking its name from the function name
452 @hooks.hook()453 @hooks.hook()
453 def install():454 def install():
454 ...455 pass # your code here
455456
456 # register a hook, providing a custom hook name457 # register a hook, providing a custom hook name
457 @hooks.hook("config-changed")458 @hooks.hook("config-changed")
458 def config_changed():459 def config_changed():
459 ...460 pass # your code here
460461
461 if __name__ == "__main__":462 if __name__ == "__main__":
462 # execute a hook based on the name the program is called by463 # execute a hook based on the name the program is called by
463464
=== modified file 'hooks/charmhelpers/core/host.py'
--- hooks/charmhelpers/core/host.py 2014-05-19 11:38:09 +0000
+++ hooks/charmhelpers/core/host.py 2014-07-11 17:34:59 +0000
@@ -12,11 +12,11 @@
12import string12import string
13import subprocess13import subprocess
14import hashlib14import hashlib
15import apt_pkg
1615
17from collections import OrderedDict16from collections import OrderedDict
1817
19from hookenv import log18from hookenv import log
19from fstab import Fstab
2020
2121
22def service_start(service_name):22def service_start(service_name):
@@ -35,7 +35,8 @@
3535
3636
37def service_reload(service_name, restart_on_failure=False):37def service_reload(service_name, restart_on_failure=False):
38 """Reload a system service, optionally falling back to restart if reload fails"""38 """Reload a system service, optionally falling back to restart if
39 reload fails"""
39 service_result = service('reload', service_name)40 service_result = service('reload', service_name)
40 if not service_result and restart_on_failure:41 if not service_result and restart_on_failure:
41 service_result = service('restart', service_name)42 service_result = service('restart', service_name)
@@ -144,7 +145,19 @@
144 target.write(content)145 target.write(content)
145146
146147
147def mount(device, mountpoint, options=None, persist=False):148def fstab_remove(mp):
149 """Remove the given mountpoint entry from /etc/fstab
150 """
151 return Fstab.remove_by_mountpoint(mp)
152
153
154def fstab_add(dev, mp, fs, options=None):
155 """Adds the given device entry to the /etc/fstab file
156 """
157 return Fstab.add(dev, mp, fs, options=options)
158
159
160def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
148 """Mount a filesystem at a particular mountpoint"""161 """Mount a filesystem at a particular mountpoint"""
149 cmd_args = ['mount']162 cmd_args = ['mount']
150 if options is not None:163 if options is not None:
@@ -155,9 +168,9 @@
155 except subprocess.CalledProcessError, e:168 except subprocess.CalledProcessError, e:
156 log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))169 log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
157 return False170 return False
171
158 if persist:172 if persist:
159 # TODO: update fstab173 return fstab_add(device, mountpoint, filesystem, options=options)
160 pass
161 return True174 return True
162175
163176
@@ -169,9 +182,9 @@
169 except subprocess.CalledProcessError, e:182 except subprocess.CalledProcessError, e:
170 log('Error unmounting {}\n{}'.format(mountpoint, e.output))183 log('Error unmounting {}\n{}'.format(mountpoint, e.output))
171 return False184 return False
185
172 if persist:186 if persist:
173 # TODO: update fstab187 return fstab_remove(mountpoint)
174 pass
175 return True188 return True
176189
177190
@@ -198,13 +211,13 @@
198def restart_on_change(restart_map, stopstart=False):211def restart_on_change(restart_map, stopstart=False):
199 """Restart services based on configuration files changing212 """Restart services based on configuration files changing
200213
201 This function is used a decorator, for example214 This function is used a decorator, for example::
202215
203 @restart_on_change({216 @restart_on_change({
204 '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]217 '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
205 })218 })
206 def ceph_client_changed():219 def ceph_client_changed():
207 ...220 pass # your code here
208221
209 In this example, the cinder-api and cinder-volume services222 In this example, the cinder-api and cinder-volume services
210 would be restarted if /etc/ceph/ceph.conf is changed by the223 would be restarted if /etc/ceph/ceph.conf is changed by the
@@ -300,10 +313,13 @@
300313
301def cmp_pkgrevno(package, revno, pkgcache=None):314def cmp_pkgrevno(package, revno, pkgcache=None):
302 '''Compare supplied revno with the revno of the installed package315 '''Compare supplied revno with the revno of the installed package
303 1 => Installed revno is greater than supplied arg316
304 0 => Installed revno is the same as supplied arg317 * 1 => Installed revno is greater than supplied arg
305 -1 => Installed revno is less than supplied arg318 * 0 => Installed revno is the same as supplied arg
319 * -1 => Installed revno is less than supplied arg
320
306 '''321 '''
322 import apt_pkg
307 if not pkgcache:323 if not pkgcache:
308 apt_pkg.init()324 apt_pkg.init()
309 pkgcache = apt_pkg.Cache()325 pkgcache = apt_pkg.Cache()
310326
=== modified file 'hooks/charmhelpers/fetch/__init__.py'
--- hooks/charmhelpers/fetch/__init__.py 2014-06-04 13:06:13 +0000
+++ hooks/charmhelpers/fetch/__init__.py 2014-07-11 17:34:59 +0000
@@ -13,7 +13,6 @@
13 config,13 config,
14 log,14 log,
15)15)
16import apt_pkg
17import os16import os
1817
1918
@@ -117,6 +116,7 @@
117116
118def filter_installed_packages(packages):117def filter_installed_packages(packages):
119 """Returns a list of packages that require installation"""118 """Returns a list of packages that require installation"""
119 import apt_pkg
120 apt_pkg.init()120 apt_pkg.init()
121121
122 # Tell apt to build an in-memory cache to prevent race conditions (if122 # Tell apt to build an in-memory cache to prevent race conditions (if
@@ -235,31 +235,39 @@
235 sources_var='install_sources',235 sources_var='install_sources',
236 keys_var='install_keys'):236 keys_var='install_keys'):
237 """237 """
238 Configure multiple sources from charm configuration238 Configure multiple sources from charm configuration.
239
240 The lists are encoded as yaml fragments in the configuration.
241 The frament needs to be included as a string.
239242
240 Example config:243 Example config:
241 install_sources:244 install_sources: |
242 - "ppa:foo"245 - "ppa:foo"
243 - "http://example.com/repo precise main"246 - "http://example.com/repo precise main"
244 install_keys:247 install_keys: |
245 - null248 - null
246 - "a1b2c3d4"249 - "a1b2c3d4"
247250
248 Note that 'null' (a.k.a. None) should not be quoted.251 Note that 'null' (a.k.a. None) should not be quoted.
249 """252 """
250 sources = safe_load(config(sources_var))253 sources = safe_load((config(sources_var) or '').strip()) or []
251 keys = config(keys_var)254 keys = safe_load((config(keys_var) or '').strip()) or None
252 if keys is not None:255
253 keys = safe_load(keys)256 if isinstance(sources, basestring):
254 if isinstance(sources, basestring) and (257 sources = [sources]
255 keys is None or isinstance(keys, basestring)):258
256 add_source(sources, keys)259 if keys is None:
260 for source in sources:
261 add_source(source, None)
257 else:262 else:
258 if not len(sources) == len(keys):263 if isinstance(keys, basestring):
259 msg = 'Install sources and keys lists are different lengths'264 keys = [keys]
260 raise SourceConfigError(msg)265
261 for src_num in range(len(sources)):266 if len(sources) != len(keys):
262 add_source(sources[src_num], keys[src_num])267 raise SourceConfigError(
268 'Install sources and keys lists are different lengths')
269 for source, key in zip(sources, keys):
270 add_source(source, key)
263 if update:271 if update:
264 apt_update(fatal=True)272 apt_update(fatal=True)
265273
266274
=== modified file 'hooks/charmhelpers/fetch/bzrurl.py'
--- hooks/charmhelpers/fetch/bzrurl.py 2013-11-06 03:48:26 +0000
+++ hooks/charmhelpers/fetch/bzrurl.py 2014-07-11 17:34:59 +0000
@@ -39,7 +39,8 @@
39 def install(self, source):39 def install(self, source):
40 url_parts = self.parse_url(source)40 url_parts = self.parse_url(source)
41 branch_name = url_parts.path.strip("/").split("/")[-1]41 branch_name = url_parts.path.strip("/").split("/")[-1]
42 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name)42 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
43 branch_name)
43 if not os.path.exists(dest_dir):44 if not os.path.exists(dest_dir):
44 mkdir(dest_dir, perms=0755)45 mkdir(dest_dir, perms=0755)
45 try:46 try:
4647
=== added directory 'tests'
=== added file 'tests/00-setup'
--- tests/00-setup 1970-01-01 00:00:00 +0000
+++ tests/00-setup 2014-07-11 17:34:59 +0000
@@ -0,0 +1,10 @@
#!/bin/bash

set -ex

# Add the stable Juju PPA (current amulet lives there), refresh the package
# index, then install all test dependencies in a single apt transaction --
# one dependency-resolver run instead of four separate installs.
sudo add-apt-repository --yes ppa:juju/stable
sudo apt-get update --yes
sudo apt-get install --yes python-amulet \
                           python-glanceclient \
                           python-keystoneclient \
                           python-novaclient
011
=== added file 'tests/10-basic-precise-essex'
--- tests/10-basic-precise-essex 1970-01-01 00:00:00 +0000
+++ tests/10-basic-precise-essex 2014-07-11 17:34:59 +0000
@@ -0,0 +1,10 @@
#!/usr/bin/python

"""Amulet tests on a basic nova cloud controller deployment on
   precise-essex."""

from basic_deployment import NovaCCBasicDeployment

if __name__ == '__main__':
    # No openstack/source overrides: deploys the precise distro archive
    # version of OpenStack (essex).
    deployment = NovaCCBasicDeployment(series='precise')
    deployment.run_tests()
011
=== added file 'tests/11-basic-precise-folsom'
--- tests/11-basic-precise-folsom 1970-01-01 00:00:00 +0000
+++ tests/11-basic-precise-folsom 2014-07-11 17:34:59 +0000
@@ -0,0 +1,18 @@
#!/usr/bin/python

"""Amulet tests on a basic nova cloud controller deployment on
   precise-folsom."""

import amulet
from basic_deployment import NovaCCBasicDeployment

if __name__ == '__main__':
    # NOTE(coreycb): Skipping failing test until resolved. 'nova-manage db sync'
    # fails in shared-db-relation-changed (only fails on folsom)
    message = "Skipping failing test until resolved"
    amulet.raise_status(amulet.SKIP, msg=message)

    # Unreachable while the skip above is in place; remove the raise_status
    # call to re-enable the folsom deployment test.
    deployment = NovaCCBasicDeployment(series='precise',
                                       openstack='cloud:precise-folsom',
                                       source='cloud:precise-updates/folsom')
    deployment.run_tests()
019
=== added file 'tests/12-basic-precise-grizzly'
--- tests/12-basic-precise-grizzly 1970-01-01 00:00:00 +0000
+++ tests/12-basic-precise-grizzly 2014-07-11 17:34:59 +0000
@@ -0,0 +1,12 @@
#!/usr/bin/python

"""Amulet tests on a basic nova cloud controller deployment on
   precise-grizzly."""

from basic_deployment import NovaCCBasicDeployment

if __name__ == '__main__':
    # Deploy precise with the grizzly cloud archive enabled.
    deployment = NovaCCBasicDeployment(series='precise',
                                       openstack='cloud:precise-grizzly',
                                       source='cloud:precise-updates/grizzly')
    deployment.run_tests()
013
=== added file 'tests/13-basic-precise-havana'
--- tests/13-basic-precise-havana 1970-01-01 00:00:00 +0000
+++ tests/13-basic-precise-havana 2014-07-11 17:34:59 +0000
@@ -0,0 +1,12 @@
#!/usr/bin/python

"""Amulet tests on a basic nova cloud controller deployment on
   precise-havana."""

from basic_deployment import NovaCCBasicDeployment

if __name__ == '__main__':
    # Deploy precise with the havana cloud archive enabled.
    deployment = NovaCCBasicDeployment(series='precise',
                                       openstack='cloud:precise-havana',
                                       source='cloud:precise-updates/havana')
    deployment.run_tests()
013
=== added file 'tests/14-basic-precise-icehouse'
--- tests/14-basic-precise-icehouse 1970-01-01 00:00:00 +0000
+++ tests/14-basic-precise-icehouse 2014-07-11 17:34:59 +0000
@@ -0,0 +1,12 @@
#!/usr/bin/python

"""Amulet tests on a basic nova cloud controller deployment on
   precise-icehouse."""

from basic_deployment import NovaCCBasicDeployment

if __name__ == '__main__':
    # Deploy precise with the icehouse cloud archive enabled.
    deployment = NovaCCBasicDeployment(series='precise',
                                       openstack='cloud:precise-icehouse',
                                       source='cloud:precise-updates/icehouse')
    deployment.run_tests()
013
=== added file 'tests/15-basic-trusty-icehouse'
--- tests/15-basic-trusty-icehouse 1970-01-01 00:00:00 +0000
+++ tests/15-basic-trusty-icehouse 2014-07-11 17:34:59 +0000
@@ -0,0 +1,10 @@
#!/usr/bin/python

"""Amulet tests on a basic nova cloud controller deployment on
   trusty-icehouse."""

from basic_deployment import NovaCCBasicDeployment

if __name__ == '__main__':
    # No openstack/source overrides: deploys the trusty distro archive
    # version of OpenStack (icehouse).
    deployment = NovaCCBasicDeployment(series='trusty')
    deployment.run_tests()
011
=== added file 'tests/README'
--- tests/README 1970-01-01 00:00:00 +0000
+++ tests/README 2014-07-11 17:34:59 +0000
@@ -0,0 +1,47 @@
1This directory provides Amulet tests that focus on verification of Nova Cloud
2Controller deployments.
3
4If you use a web proxy server to access the web, you'll need to set the
5AMULET_HTTP_PROXY environment variable to the http URL of the proxy server.
6
7The following examples demonstrate different ways that tests can be executed.
8All examples are run from the charm's root directory.
9
10 * To run all tests (starting with 00-setup):
11
12 make test
13
14 * To run a specific test module (or modules):
15
16 juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
17
18 * To run a specific test module (or modules), and keep the environment
19 deployed after a failure:
20
21 juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
22
23 * To re-run a test module against an already deployed environment (one
24 that was deployed by a previous call to 'juju test --set-e'):
25
26 ./tests/15-basic-trusty-icehouse
27
28For debugging and test development purposes, all code should be idempotent.
29In other words, the code should have the ability to be re-run without changing
30the results beyond the initial run. This enables editing and re-running of a
31test module against an already deployed environment, as described above.
32
33Manual debugging tips:
34
35 * Set the following env vars before using the OpenStack CLI as admin:
36 export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
37 export OS_TENANT_NAME=admin
38 export OS_USERNAME=admin
39 export OS_PASSWORD=openstack
40 export OS_REGION_NAME=RegionOne
41
42 * Set the following env vars before using the OpenStack CLI as demoUser:
43 export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
44 export OS_TENANT_NAME=demoTenant
45 export OS_USERNAME=demoUser
46 export OS_PASSWORD=password
47 export OS_REGION_NAME=RegionOne
048
=== added file 'tests/basic_deployment.py'
--- tests/basic_deployment.py 1970-01-01 00:00:00 +0000
+++ tests/basic_deployment.py 2014-07-11 17:34:59 +0000
@@ -0,0 +1,520 @@
1#!/usr/bin/python
2
3import amulet
4
5from charmhelpers.contrib.openstack.amulet.deployment import (
6 OpenStackAmuletDeployment
7)
8
9from charmhelpers.contrib.openstack.amulet.utils import (
10 OpenStackAmuletUtils,
11 DEBUG, # flake8: noqa
12 ERROR
13)
14
15# Use DEBUG to turn on debug logging
16u = OpenStackAmuletUtils(ERROR)
17
18
19class NovaCCBasicDeployment(OpenStackAmuletDeployment):
20 """Amulet tests on a basic nova cloud controller deployment."""
21
22 def __init__(self, series=None, openstack=None, source=None):
23 """Deploy the entire test environment."""
24 super(NovaCCBasicDeployment, self).__init__(series, openstack, source)
25 self._add_services()
26 self._add_relations()
27 self._configure_services()
28 self._deploy()
29 self._initialize_tests()
30
31 def _add_services(self):
32 """Add the service that we're testing, including the number of units,
33 where nova-cloud-controller is local, and the other charms are from
34 the charm store."""
35 this_service = ('nova-cloud-controller', 1)
36 other_services = [('mysql', 1), ('rabbitmq-server', 1),
37 ('nova-compute', 2), ('keystone', 1), ('glance', 1)]
38 super(NovaCCBasicDeployment, self)._add_services(this_service,
39 other_services)
40
41 def _add_relations(self):
42 """Add all of the relations for the services."""
43 relations = {
44 'nova-cloud-controller:shared-db': 'mysql:shared-db',
45 'nova-cloud-controller:identity-service': 'keystone:identity-service',
46 'nova-cloud-controller:amqp': 'rabbitmq-server:amqp',
47 'nova-cloud-controller:cloud-compute': 'nova-compute:cloud-compute',
48 'nova-cloud-controller:image-service': 'glance:image-service',
49 'nova-compute:image-service': 'glance:image-service',
50 'nova-compute:shared-db': 'mysql:shared-db',
51 'nova-compute:amqp': 'rabbitmq-server:amqp',
52 'keystone:shared-db': 'mysql:shared-db',
53 'glance:identity-service': 'keystone:identity-service',
54 'glance:shared-db': 'mysql:shared-db',
55 'glance:amqp': 'rabbitmq-server:amqp'
56 }
57 super(NovaCCBasicDeployment, self)._add_relations(relations)
58
59 def _configure_services(self):
60 """Configure all of the services."""
61 keystone_config = {'admin-password': 'openstack',
62 'admin-token': 'ubuntutesting'}
63 configs = {'keystone': keystone_config}
64 super(NovaCCBasicDeployment, self)._configure_services(configs)
65
    def _initialize_tests(self):
        """Perform final initialization before tests get run."""
        # Access the sentries for inspecting service units
        self.mysql_sentry = self.d.sentry.unit['mysql/0']
        self.keystone_sentry = self.d.sentry.unit['keystone/0']
        self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0']
        self.nova_cc_sentry = self.d.sentry.unit['nova-cloud-controller/0']
        self.nova_compute_sentry = self.d.sentry.unit['nova-compute/0']
        self.glance_sentry = self.d.sentry.unit['glance/0']

        # Authenticate admin with keystone (must happen before the glance
        # and demo-user authentications below, which reuse this session)
        self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
                                                      user='admin',
                                                      password='openstack',
                                                      tenant='admin')

        # Authenticate admin with glance endpoint
        self.glance = u.authenticate_glance_admin(self.keystone)

        # Create a demo tenant/role/user; the existence check keeps re-runs
        # against an already-deployed environment idempotent
        self.demo_tenant = 'demoTenant'
        self.demo_role = 'demoRole'
        self.demo_user = 'demoUser'
        if not u.tenant_exists(self.keystone, self.demo_tenant):
            tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant,
                                                  description='demo tenant',
                                                  enabled=True)
            self.keystone.roles.create(name=self.demo_role)
            self.keystone.users.create(name=self.demo_user,
                                       password='password',
                                       tenant_id=tenant.id,
                                       email='demo@demo.com')

        # Authenticate demo user with keystone
        self.keystone_demo = \
            u.authenticate_keystone_user(self.keystone, user=self.demo_user,
                                         password='password',
                                         tenant=self.demo_tenant)

        # Authenticate demo user with nova-api
        self.nova_demo = u.authenticate_nova_user(self.keystone,
                                                  user=self.demo_user,
                                                  password='password',
                                                  tenant=self.demo_tenant)
110
111 def test_services(self):
112 """Verify the expected services are running on the corresponding
113 service units."""
114 commands = {
115 self.mysql_sentry: ['status mysql'],
116 self.rabbitmq_sentry: ['sudo service rabbitmq-server status'],
117 self.nova_cc_sentry: ['status nova-api-ec2',
118 'status nova-api-os-compute',
119 'status nova-objectstore',
120 'status nova-cert',
121 'status nova-scheduler'],
122 self.nova_compute_sentry: ['status nova-compute',
123 'status nova-network',
124 'status nova-api'],
125 self.keystone_sentry: ['status keystone'],
126 self.glance_sentry: ['status glance-registry', 'status glance-api']
127 }
128 if self._get_openstack_release() >= self.precise_grizzly:
129 commands[self.nova_cc_sentry] = ['status nova-conductor']
130
131 ret = u.validate_services(commands)
132 if ret:
133 amulet.raise_status(amulet.FAIL, msg=ret)
134
135 def test_service_catalog(self):
136 """Verify that the service catalog endpoint data is valid."""
137 endpoint_vol = {'adminURL': u.valid_url,
138 'region': 'RegionOne',
139 'publicURL': u.valid_url,
140 'internalURL': u.valid_url}
141 endpoint_id = {'adminURL': u.valid_url,
142 'region': 'RegionOne',
143 'publicURL': u.valid_url,
144 'internalURL': u.valid_url}
145 if self._get_openstack_release() >= self.precise_folsom:
146 endpoint_vol['id'] = u.not_null
147 endpoint_id['id'] = u.not_null
148 expected = {'s3': [endpoint_vol], 'compute': [endpoint_vol],
149 'ec2': [endpoint_vol], 'identity': [endpoint_id]}
150 actual = self.keystone_demo.service_catalog.get_endpoints()
151
152 ret = u.validate_svc_catalog_endpoint_data(expected, actual)
153 if ret:
154 amulet.raise_status(amulet.FAIL, msg=ret)
155
156 def test_openstack_compute_api_endpoint(self):
157 """Verify the openstack compute api (osapi) endpoint data."""
158 endpoints = self.keystone.endpoints.list()
159 admin_port = internal_port = public_port = '8774'
160 expected = {'id': u.not_null,
161 'region': 'RegionOne',
162 'adminurl': u.valid_url,
163 'internalurl': u.valid_url,
164 'publicurl': u.valid_url,
165 'service_id': u.not_null}
166
167 ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
168 public_port, expected)
169 if ret:
170 message = 'osapi endpoint: {}'.format(ret)
171 amulet.raise_status(amulet.FAIL, msg=message)
172
173 def test_ec2_api_endpoint(self):
174 """Verify the EC2 api endpoint data."""
175 endpoints = self.keystone.endpoints.list()
176 admin_port = internal_port = public_port = '8773'
177 expected = {'id': u.not_null,
178 'region': 'RegionOne',
179 'adminurl': u.valid_url,
180 'internalurl': u.valid_url,
181 'publicurl': u.valid_url,
182 'service_id': u.not_null}
183
184 ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
185 public_port, expected)
186 if ret:
187 message = 'EC2 endpoint: {}'.format(ret)
188 amulet.raise_status(amulet.FAIL, msg=message)
189
190 def test_s3_api_endpoint(self):
191 """Verify the S3 api endpoint data."""
192 endpoints = self.keystone.endpoints.list()
193 admin_port = internal_port = public_port = '3333'
194 expected = {'id': u.not_null,
195 'region': 'RegionOne',
196 'adminurl': u.valid_url,
197 'internalurl': u.valid_url,
198 'publicurl': u.valid_url,
199 'service_id': u.not_null}
200
201 ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
202 public_port, expected)
203 if ret:
204 message = 'S3 endpoint: {}'.format(ret)
205 amulet.raise_status(amulet.FAIL, msg=message)
206
207 def test_nova_cc_shared_db_relation(self):
208 """Verify the nova-cc to mysql shared-db relation data"""
209 unit = self.nova_cc_sentry
210 relation = ['shared-db', 'mysql:shared-db']
211 expected = {
212 'private-address': u.valid_ip,
213 'nova_database': 'nova',
214 'nova_username': 'nova',
215 'nova_hostname': u.valid_ip
216 }
217
218 ret = u.validate_relation_data(unit, relation, expected)
219 if ret:
220 message = u.relation_error('nova-cc shared-db', ret)
221 amulet.raise_status(amulet.FAIL, msg=message)
222
223 def test_mysql_shared_db_relation(self):
224 """Verify the mysql to nova-cc shared-db relation data"""
225 unit = self.mysql_sentry
226 relation = ['shared-db', 'nova-cloud-controller:shared-db']
227 expected = {
228 'private-address': u.valid_ip,
229 'nova_password': u.not_null,
230 'db_host': u.valid_ip
231 }
232
233 ret = u.validate_relation_data(unit, relation, expected)
234 if ret:
235 message = u.relation_error('mysql shared-db', ret)
236 amulet.raise_status(amulet.FAIL, msg=message)
237
238 def test_nova_cc_identity_service_relation(self):
239 """Verify the nova-cc to keystone identity-service relation data"""
240 unit = self.nova_cc_sentry
241 relation = ['identity-service', 'keystone:identity-service']
242 expected = {
243 'nova_internal_url': u.valid_url,
244 'nova_public_url': u.valid_url,
245 's3_public_url': u.valid_url,
246 's3_service': 's3',
247 'ec2_admin_url': u.valid_url,
248 'ec2_internal_url': u.valid_url,
249 'nova_service': 'nova',
250 's3_region': 'RegionOne',
251 'private-address': u.valid_ip,
252 'nova_region': 'RegionOne',
253 'ec2_public_url': u.valid_url,
254 'ec2_region': 'RegionOne',
255 's3_internal_url': u.valid_url,
256 's3_admin_url': u.valid_url,
257 'nova_admin_url': u.valid_url,
258 'ec2_service': 'ec2'
259 }
260
261 ret = u.validate_relation_data(unit, relation, expected)
262 if ret:
263 message = u.relation_error('nova-cc identity-service', ret)
264 amulet.raise_status(amulet.FAIL, msg=message)
265
266 def test_keystone_identity_service_relation(self):
267 """Verify the keystone to nova-cc identity-service relation data"""
268 unit = self.keystone_sentry
269 relation = ['identity-service',
270 'nova-cloud-controller:identity-service']
271 expected = {
272 'service_protocol': 'http',
273 'service_tenant': 'services',
274 'admin_token': 'ubuntutesting',
275 'service_password': u.not_null,
276 'service_port': '5000',
277 'auth_port': '35357',
278 'auth_protocol': 'http',
279 'private-address': u.valid_ip,
280 'https_keystone': 'False',
281 'auth_host': u.valid_ip,
282 'service_username': 's3_ec2_nova',
283 'service_tenant_id': u.not_null,
284 'service_host': u.valid_ip
285 }
286
287 ret = u.validate_relation_data(unit, relation, expected)
288 if ret:
289 message = u.relation_error('keystone identity-service', ret)
290 amulet.raise_status(amulet.FAIL, msg=message)
291
292 def test_nova_cc_amqp_relation(self):
293 """Verify the nova-cc to rabbitmq-server amqp relation data"""
294 unit = self.nova_cc_sentry
295 relation = ['amqp', 'rabbitmq-server:amqp']
296 expected = {
297 'username': 'nova',
298 'private-address': u.valid_ip,
299 'vhost': 'openstack'
300 }
301
302 ret = u.validate_relation_data(unit, relation, expected)
303 if ret:
304 message = u.relation_error('nova-cc amqp', ret)
305 amulet.raise_status(amulet.FAIL, msg=message)
306
307 def test_rabbitmq_amqp_relation(self):
308 """Verify the rabbitmq-server to nova-cc amqp relation data"""
309 unit = self.rabbitmq_sentry
310 relation = ['amqp', 'nova-cloud-controller:amqp']
311 expected = {
312 'private-address': u.valid_ip,
313 'password': u.not_null,
314 'hostname': u.valid_ip
315 }
316
317 ret = u.validate_relation_data(unit, relation, expected)
318 if ret:
319 message = u.relation_error('rabbitmq amqp', ret)
320 amulet.raise_status(amulet.FAIL, msg=message)
321
322 def test_nova_cc_cloud_compute_relation(self):
323 """Verify the nova-cc to nova-compute cloud-compute relation data"""
324 unit = self.nova_cc_sentry
325 relation = ['cloud-compute', 'nova-compute:cloud-compute']
326 expected = {
327 'volume_service': 'cinder',
328 'network_manager': 'flatdhcpmanager',
329 'ec2_host': u.valid_ip,
330 'private-address': u.valid_ip,
331 'restart_trigger': u.not_null
332 }
333 if self._get_openstack_release() == self.precise_essex:
334 expected['volume_service'] = 'nova-volume'
335
336 ret = u.validate_relation_data(unit, relation, expected)
337 if ret:
338 message = u.relation_error('nova-cc cloud-compute', ret)
339 amulet.raise_status(amulet.FAIL, msg=message)
340
341 def test_nova_cloud_compute_relation(self):
342 """Verify the nova-compute to nova-cc cloud-compute relation data"""
343 unit = self.nova_compute_sentry
344 relation = ['cloud-compute', 'nova-cloud-controller:cloud-compute']
345 expected = {
346 'private-address': u.valid_ip,
347 }
348
349 ret = u.validate_relation_data(unit, relation, expected)
350 if ret:
351 message = u.relation_error('nova-compute cloud-compute', ret)
352 amulet.raise_status(amulet.FAIL, msg=message)
353
354 def test_nova_cc_image_service_relation(self):
355 """Verify the nova-cc to glance image-service relation data"""
356 unit = self.nova_cc_sentry
357 relation = ['image-service', 'glance:image-service']
358 expected = {
359 'private-address': u.valid_ip,
360 }
361
362 ret = u.validate_relation_data(unit, relation, expected)
363 if ret:
364 message = u.relation_error('nova-cc image-service', ret)
365 amulet.raise_status(amulet.FAIL, msg=message)
366
367 def test_glance_image_service_relation(self):
368 """Verify the glance to nova-cc image-service relation data"""
369 unit = self.glance_sentry
370 relation = ['image-service', 'nova-cloud-controller:image-service']
371 expected = {
372 'private-address': u.valid_ip,
373 'glance-api-server': u.valid_url
374 }
375
376 ret = u.validate_relation_data(unit, relation, expected)
377 if ret:
378 message = u.relation_error('glance image-service', ret)
379 amulet.raise_status(amulet.FAIL, msg=message)
380
381 def test_restart_on_config_change(self):
382 """Verify that the specified services are restarted when the config
383 is changed."""
384 # NOTE(coreycb): Skipping failing test on essex until resolved.
385 # config-flags don't take effect on essex.
386 if self._get_openstack_release() == self.precise_essex:
387 u.log.error("Skipping failing test until resolved")
388 return
389
390 services = ['nova-api-ec2', 'nova-api-os-compute', 'nova-objectstore',
391 'nova-cert', 'nova-scheduler', 'nova-conductor']
392 self.d.configure('nova-cloud-controller',
393 {'config-flags': 'quota_cores=20,quota_instances=40,quota_ram=102400'})
394 pgrep_full = True
395
396 time = 20
397 conf = '/etc/nova/nova.conf'
398 for s in services:
399 if not u.service_restarted(self.nova_cc_sentry, s, conf,
400 pgrep_full=True, sleep_time=time):
401 msg = "service {} didn't restart after config change".format(s)
402 amulet.raise_status(amulet.FAIL, msg=msg)
403 time = 0
404
405 def test_nova_default_config(self):
406 """Verify the data in the nova config file's default section."""
407 # NOTE(coreycb): Currently no way to test on essex because config file
408 # has no section headers.
409 if self._get_openstack_release() == self.precise_essex:
410 return
411
412 unit = self.nova_cc_sentry
413 conf = '/etc/nova/nova.conf'
414 rabbitmq_relation = self.rabbitmq_sentry.relation('amqp',
415 'nova-cloud-controller:amqp')
416 glance_relation = self.glance_sentry.relation('image-service',
417 'nova-cloud-controller:image-service')
418 mysql_relation = self.mysql_sentry.relation('shared-db',
419 'nova-cloud-controller:shared-db')
420 db_uri = "mysql://{}:{}@{}/{}".format('nova',
421 mysql_relation['nova_password'],
422 mysql_relation['db_host'],
423 'nova')
424 keystone_ep = self.keystone_demo.service_catalog.url_for(\
425 service_type='identity',
426 endpoint_type='publicURL')
427 keystone_ec2 = "{}/ec2tokens".format(keystone_ep)
428
429 expected = {'dhcpbridge_flagfile': '/etc/nova/nova.conf',
430 'dhcpbridge': '/usr/bin/nova-dhcpbridge',
431 'logdir': '/var/log/nova',
432 'state_path': '/var/lib/nova',
433 'lock_path': '/var/lock/nova',
434 'force_dhcp_release': 'True',
435 'iscsi_helper': 'tgtadm',
436 'libvirt_use_virtio_for_bridges': 'True',
437 'connection_type': 'libvirt',
438 'root_helper': 'sudo nova-rootwrap /etc/nova/rootwrap.conf',
439 'verbose': 'True',
440 'ec2_private_dns_show_ip': 'True',
441 'api_paste_config': '/etc/nova/api-paste.ini',
442 'volumes_path': '/var/lib/nova/volumes',
443 'enabled_apis': 'ec2,osapi_compute,metadata',
444 'auth_strategy': 'keystone',
445 'compute_driver': 'libvirt.LibvirtDriver',
446 'keystone_ec2_url': keystone_ec2,
447 'sql_connection': db_uri,
448 'rabbit_userid': 'nova',
449 'rabbit_virtual_host': 'openstack',
450 'rabbit_password': rabbitmq_relation['password'],
451 'rabbit_host': rabbitmq_relation['hostname'],
452 'glance_api_servers': glance_relation['glance-api-server'],
453 'network_manager': 'nova.network.manager.FlatDHCPManager',
454 's3_listen_port': '3333',
455 'osapi_compute_listen_port': '8774',
456 'ec2_listen_port': '8773'}
457
458 ret = u.validate_config_data(unit, conf, 'DEFAULT', expected)
459 if ret:
460 message = "nova config error: {}".format(ret)
461 amulet.raise_status(amulet.FAIL, msg=message)
462
463
464 def test_nova_keystone_authtoken_config(self):
465 """Verify the data in the nova config file's keystone_authtoken
466 section. This data only exists since icehouse."""
467 if self._get_openstack_release() < self.precise_icehouse:
468 return
469
470 unit = self.nova_cc_sentry
471 conf = '/etc/nova/nova.conf'
472 keystone_relation = self.keystone_sentry.relation('identity-service',
473 'nova-cloud-controller:identity-service')
474 keystone_uri = "http://{}:{}/".format(keystone_relation['service_host'],
475 keystone_relation['service_port'])
476 expected = {'auth_uri': keystone_uri,
477 'auth_host': keystone_relation['service_host'],
478 'auth_port': keystone_relation['auth_port'],
479 'auth_protocol': keystone_relation['auth_protocol'],
480 'admin_tenant_name': keystone_relation['service_tenant'],
481 'admin_user': keystone_relation['service_username'],
482 'admin_password': keystone_relation['service_password']}
483
484 ret = u.validate_config_data(unit, conf, 'keystone_authtoken', expected)
485 if ret:
486 message = "nova config error: {}".format(ret)
487 amulet.raise_status(amulet.FAIL, msg=message)
488
489 def test_image_instance_create(self):
490 """Create an image/instance, verify they exist, and delete them."""
491 # NOTE(coreycb): Skipping failing test on essex until resolved. essex
492 # nova API calls are getting "Malformed request url (HTTP
493 # 400)".
494 if self._get_openstack_release() == self.precise_essex:
495 u.log.error("Skipping failing test until resolved")
496 return
497
498 image = u.create_cirros_image(self.glance, "cirros-image")
499 if not image:
500 amulet.raise_status(amulet.FAIL, msg="Image create failed")
501
502 instance = u.create_instance(self.nova_demo, "cirros-image", "cirros",
503 "m1.tiny")
504 if not instance:
505 amulet.raise_status(amulet.FAIL, msg="Instance create failed")
506
507 found = False
508 for instance in self.nova_demo.servers.list():
509 if instance.name == 'cirros':
510 found = True
511 if instance.status != 'ACTIVE':
512 msg = "cirros instance is not active"
513 amulet.raise_status(amulet.FAIL, msg=message)
514
515 if not found:
516 message = "nova cirros instance does not exist"
517 amulet.raise_status(amulet.FAIL, msg=message)
518
519 u.delete_image(self.glance, image)
520 u.delete_instance(self.nova_demo, instance)
0521
=== added directory 'tests/charmhelpers'
=== added file 'tests/charmhelpers/__init__.py'
=== added directory 'tests/charmhelpers/contrib'
=== added file 'tests/charmhelpers/contrib/__init__.py'
=== added directory 'tests/charmhelpers/contrib/amulet'
=== added file 'tests/charmhelpers/contrib/amulet/__init__.py'
=== added file 'tests/charmhelpers/contrib/amulet/deployment.py'
--- tests/charmhelpers/contrib/amulet/deployment.py 1970-01-01 00:00:00 +0000
+++ tests/charmhelpers/contrib/amulet/deployment.py 2014-07-11 17:34:59 +0000
@@ -0,0 +1,63 @@
1import amulet
2import re
3
4
class AmuletDeployment(object):
    """This class provides generic Amulet deployment and test runner
       methods."""

    def __init__(self, series):
        """Initialize the deployment environment."""
        self.series = series
        self.d = amulet.Deployment(series=self.series)

    def _get_charm_name(self, service_name):
        """Get the charm name from the service name.

        Unique service names can be specified with a '-service#' suffix
        (e.g. mysql-service1), which is stripped to recover the charm name.
        """
        if re.match(r"^.*-service\d{1,3}$", service_name):
            return re.sub(r'-service\d{1,3}$', '', service_name)
        return service_name

    def _add_services(self, this_service, other_services):
        """Add services to the deployment.

        this_service is the local charm that we're focused on testing;
        other_services are the other charms that come from the charm store.
        Each service is a (name, unit_count) pair.
        """
        name, units = range(2)

        # The local charm is deployed from the filesystem, so no charm
        # store URL (and hence no charm name) is needed for it. (The
        # original computed and discarded the charm name here.)
        self.d.add(this_service[name],
                   units=this_service[units])

        for svc in other_services:
            charm_name = self._get_charm_name(svc[name])
            self.d.add(svc[name],
                       charm='cs:{}/{}'.format(self.series, charm_name),
                       units=svc[units])

    def _add_relations(self, relations):
        """Add all of the relations for the services."""
        for k, v in relations.iteritems():
            self.d.relate(k, v)

    def _configure_services(self, configs):
        """Configure all of the services."""
        for service, config in configs.iteritems():
            self.d.configure(service, config)

    def _deploy(self):
        """Deploy environment and wait for all hooks to finish executing."""
        # NOTE: the original also had a bare 'except: raise' clause here,
        # which is a no-op and has been removed.
        try:
            self.d.setup()
            self.d.sentry.wait()
        except amulet.helpers.TimeoutError:
            amulet.raise_status(amulet.FAIL, msg="Deployment timed out")

    def run_tests(self):
        """Run all of the methods that are prefixed with 'test_'."""
        for test in dir(self):
            if test.startswith('test_'):
                getattr(self, test)()
064
=== added file 'tests/charmhelpers/contrib/amulet/utils.py'
--- tests/charmhelpers/contrib/amulet/utils.py 1970-01-01 00:00:00 +0000
+++ tests/charmhelpers/contrib/amulet/utils.py 2014-07-11 17:34:59 +0000
@@ -0,0 +1,157 @@
1import ConfigParser
2import io
3import logging
4import re
5import sys
6from time import sleep
7
8
class AmuletUtils(object):
    """This class provides common utility functions that are used by Amulet
       tests."""

    def __init__(self, log_level=logging.ERROR):
        self.log = self.get_logger(level=log_level)

    def get_logger(self, name="amulet-logger", level=logging.DEBUG):
        """Get a logger object that will log to stdout."""
        log = logging
        logger = log.getLogger(name)
        fmt = \
            log.Formatter("%(asctime)s %(funcName)s %(levelname)s: %(message)s")

        handler = log.StreamHandler(stream=sys.stdout)
        handler.setLevel(level)
        handler.setFormatter(fmt)

        logger.addHandler(handler)
        logger.setLevel(level)

        return logger

    def valid_ip(self, ip):
        """Return True if ip looks like a dotted-quad IPv4 address."""
        # NOTE: octets are not range-checked (e.g. 999.999.999.999 passes).
        return bool(re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip))

    def valid_url(self, url):
        """Return True if url looks like an http(s)/ftp(s) URL."""
        p = re.compile(
            r'^(?:http|ftp)s?://'
            r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # flake8: noqa
            r'localhost|'
            r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
            r'(?::\d+)?'
            r'(?:/?|[/?]\S+)$',
            re.IGNORECASE)
        return bool(p.match(url))

    def validate_services(self, commands):
        """Verify the specified services are running on the corresponding
           service units.

        commands maps a sentry unit to a list of shell commands, each of
        which must exit 0. Returns an error string, or None on success.
        """
        for k, v in commands.iteritems():
            for cmd in v:
                output, code = k.run(cmd)
                if code != 0:
                    return "command `{}` returned {}".format(cmd, str(code))
        return None

    def _get_config(self, unit, filename):
        """Get a ConfigParser object for parsing a unit's config file."""
        file_contents = unit.file_contents(filename)
        config = ConfigParser.ConfigParser()
        config.readfp(io.StringIO(file_contents))
        return config

    def validate_config_data(self, sentry_unit, config_file, section, expected):
        """Verify that the specified section of the config file contains
           the expected option key:value pairs.

        Returns an error string, or None on success.
        """
        config = self._get_config(sentry_unit, config_file)

        # ConfigParser treats DEFAULT specially; has_section() is False
        # for it even though options can be read from it.
        if section != 'DEFAULT' and not config.has_section(section):
            return "section [{}] does not exist".format(section)

        for k in expected.keys():
            if not config.has_option(section, k):
                return "section [{}] is missing option {}".format(section, k)
            if config.get(section, k) != expected[k]:
                return "section [{}] {}:{} != expected {}:{}".format(section,
                       k, config.get(section, k), k, expected[k])
        return None

    def _validate_dict_data(self, expected, actual):
        """Compare expected dictionary data vs actual dictionary data.

        The values in the 'expected' dictionary can be strings, bools, ints,
        longs, or can be a function that evaluates a variable and returns a
        bool. Returns an error string, or None on success.
        """
        for k, v in expected.iteritems():
            if k in actual:
                if isinstance(v, basestring) or \
                        isinstance(v, bool) or \
                        isinstance(v, (int, long)):
                    if v != actual[k]:
                        return "{}:{}".format(k, actual[k])
                elif not v(actual[k]):
                    # v is a validator callable (e.g. valid_ip).
                    return "{}:{}".format(k, actual[k])
            else:
                return "key '{}' does not exist".format(k)
        return None

    def validate_relation_data(self, sentry_unit, relation, expected):
        """Validate actual relation data based on expected relation data."""
        actual = sentry_unit.relation(relation[0], relation[1])
        self.log.debug('actual: {}'.format(repr(actual)))
        return self._validate_dict_data(expected, actual)

    def _validate_list_data(self, expected, actual):
        """Compare expected list vs actual list data."""
        for e in expected:
            if e not in actual:
                return "expected item {} not found in actual list".format(e)
        return None

    def not_null(self, string):
        """Return True if string is not None."""
        # BUG FIX: was 'string != None'; None checks must use identity
        # comparison (PEP 8).
        return string is not None

    def _get_file_mtime(self, sentry_unit, filename):
        """Get last modification time of file."""
        return sentry_unit.file_stat(filename)['mtime']

    def _get_dir_mtime(self, sentry_unit, directory):
        """Get last modification time of directory."""
        return sentry_unit.directory_stat(directory)['mtime']

    def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
        """Determine start time of the process based on the last modification
           time of the /proc/pid directory. If pgrep_full is True, the process
           name is matched against the full command line."""
        if pgrep_full:
            cmd = 'pgrep -o -f {}'.format(service)
        else:
            cmd = 'pgrep -o {}'.format(service)
        # pgrep -o returns the oldest matching pid.
        proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip())
        return self._get_dir_mtime(sentry_unit, proc_dir)

    def service_restarted(self, sentry_unit, service, filename,
                          pgrep_full=False, sleep_time=20):
        """Compare a service's start time vs a file's last modification time
           (such as a config file for that service) to determine if the service
           has been restarted."""
        sleep(sleep_time)
        return (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
                self._get_file_mtime(sentry_unit, filename))

    def relation_error(self, name, data):
        return 'unexpected relation data in {} - {}'.format(name, data)

    def endpoint_error(self, name, data):
        return 'unexpected endpoint data in {} - {}'.format(name, data)
0158
=== added directory 'tests/charmhelpers/contrib/openstack'
=== added file 'tests/charmhelpers/contrib/openstack/__init__.py'
=== added directory 'tests/charmhelpers/contrib/openstack/amulet'
=== added file 'tests/charmhelpers/contrib/openstack/amulet/__init__.py'
=== added file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py'
--- tests/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
+++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2014-07-11 17:34:59 +0000
@@ -0,0 +1,57 @@
1from charmhelpers.contrib.amulet.deployment import (
2 AmuletDeployment
3)
4
5
class OpenStackAmuletDeployment(AmuletDeployment):
    """This class inherits from AmuletDeployment and has additional support
       that is specifically for use by OpenStack charms."""

    def __init__(self, series, openstack=None, source=None):
        """Initialize the deployment environment.

        openstack is an openstack-origin value (e.g.
        'cloud:precise-folsom'); source is the equivalent for charms that
        take a 'source' option instead.
        """
        super(OpenStackAmuletDeployment, self).__init__(series)
        self.openstack = openstack
        self.source = source

    def _add_services(self, this_service, other_services):
        """Add services to the deployment and set openstack-origin/source."""
        super(OpenStackAmuletDeployment, self)._add_services(this_service,
                                                             other_services)
        name = 0
        # BUG FIX: build a new list instead of appending to other_services;
        # the original mutated the caller's list as a side effect.
        services = other_services + [this_service]
        # Charms configured via 'source' rather than 'openstack-origin'.
        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']

        if self.openstack:
            for svc in services:
                charm_name = self._get_charm_name(svc[name])
                if charm_name not in use_source:
                    config = {'openstack-origin': self.openstack}
                    self.d.configure(svc[name], config)

        if self.source:
            for svc in services:
                charm_name = self._get_charm_name(svc[name])
                if charm_name in use_source:
                    config = {'source': self.source}
                    self.d.configure(svc[name], config)

    def _configure_services(self, configs):
        """Configure all of the services."""
        for service, config in configs.iteritems():
            self.d.configure(service, config)

    def _get_openstack_release(self):
        """Return an integer representing the enum value of the openstack
           release.

        Also sets self.precise_essex .. self.trusty_icehouse as a side
        effect so tests can compare against them by name.
        """
        self.precise_essex, self.precise_folsom, self.precise_grizzly, \
            self.precise_havana, self.precise_icehouse, \
            self.trusty_icehouse = range(6)
        releases = {
            ('precise', None): self.precise_essex,
            ('precise', 'cloud:precise-folsom'): self.precise_folsom,
            ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
            ('precise', 'cloud:precise-havana'): self.precise_havana,
            ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
            ('trusty', None): self.trusty_icehouse}
        return releases[(self.series, self.openstack)]
058
=== added file 'tests/charmhelpers/contrib/openstack/amulet/utils.py'
--- tests/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
+++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2014-07-11 17:34:59 +0000
@@ -0,0 +1,253 @@
1import logging
2import os
3import time
4import urllib
5
6import glanceclient.v1.client as glance_client
7import keystoneclient.v2_0 as keystone_client
8import novaclient.v1_1.client as nova_client
9
10from charmhelpers.contrib.amulet.utils import (
11 AmuletUtils
12)
13
14DEBUG = logging.DEBUG
15ERROR = logging.ERROR
16
17
class OpenStackAmuletUtils(AmuletUtils):
    """This class inherits from AmuletUtils and has additional support
       that is specifically for use by OpenStack charms."""

    def __init__(self, log_level=ERROR):
        """Initialize the deployment environment."""
        super(OpenStackAmuletUtils, self).__init__(log_level)

    def validate_endpoint_data(self, endpoints, admin_port, internal_port,
                               public_port, expected):
        """Validate actual endpoint data vs expected endpoint data. The ports
           are used to find the matching endpoint.

        Returns an error string, or None when a matching endpoint is found
        and its data validates.
        """
        found = False
        for ep in endpoints:
            self.log.debug('endpoint: {}'.format(repr(ep)))
            # Ports are matched by substring against the endpoint URLs.
            if admin_port in ep.adminurl and internal_port in ep.internalurl \
                    and public_port in ep.publicurl:
                found = True
                actual = {'id': ep.id,
                          'region': ep.region,
                          'adminurl': ep.adminurl,
                          'internalurl': ep.internalurl,
                          'publicurl': ep.publicurl,
                          'service_id': ep.service_id}
                ret = self._validate_dict_data(expected, actual)
                if ret:
                    return 'unexpected endpoint data - {}'.format(ret)

        if not found:
            return 'endpoint not found'

    def validate_svc_catalog_endpoint_data(self, expected, actual):
        """Validate a list of actual service catalog endpoints vs a list of
           expected service catalog endpoints.

        Returns an error string, or None on success.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        # BUG FIX: 'ret' was only bound inside the loop, so an empty
        # 'expected' dict raised NameError on the final return.
        ret = None
        for k, v in expected.iteritems():
            if k in actual:
                ret = self._validate_dict_data(expected[k][0], actual[k][0])
                if ret:
                    return self.endpoint_error(k, ret)
            else:
                return "endpoint {} does not exist".format(k)
        return ret

    def validate_tenant_data(self, expected, actual):
        """Validate a list of actual tenant data vs list of expected tenant
           data.

        Returns an error string, or None on success.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        # BUG FIX: initialize ret so an empty 'expected' list cannot raise
        # NameError on the final return.
        ret = None
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'description': act.description,
                     'name': act.name, 'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected tenant data - {}".format(ret)
            if not found:
                return "tenant {} does not exist".format(e['name'])
        return ret

    def validate_role_data(self, expected, actual):
        """Validate a list of actual role data vs a list of expected role
           data.

        Returns an error string, or None on success.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        # BUG FIX: initialize ret (see validate_tenant_data).
        ret = None
        for e in expected:
            found = False
            for act in actual:
                a = {'name': act.name, 'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected role data - {}".format(ret)
            if not found:
                return "role {} does not exist".format(e['name'])
        return ret

    def validate_user_data(self, expected, actual):
        """Validate a list of actual user data vs a list of expected user
           data.

        Returns an error string, or None on success.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        # BUG FIX: initialize ret (see validate_tenant_data).
        ret = None
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'name': act.name,
                     'email': act.email, 'tenantId': act.tenantId,
                     'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected user data - {}".format(ret)
            if not found:
                return "user {} does not exist".format(e['name'])
        return ret

    def validate_flavor_data(self, expected, actual):
        """Validate a list of actual flavors vs a list of expected flavors."""
        self.log.debug('actual: {}'.format(repr(actual)))
        act = [a.name for a in actual]
        return self._validate_list_data(expected, act)

    def tenant_exists(self, keystone, tenant):
        """Return True if tenant exists"""
        return tenant in [t.name for t in keystone.tenants.list()]

    def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                    tenant):
        """Authenticates admin user with the keystone admin endpoint."""
        service_ip = \
            keystone_sentry.relation('shared-db',
                                     'mysql:shared-db')['private-address']
        ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_keystone_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with the keystone public endpoint."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_glance_admin(self, keystone):
        """Authenticates admin user with glance."""
        ep = keystone.service_catalog.url_for(service_type='image',
                                              endpoint_type='adminURL')
        return glance_client.Client(ep, token=keystone.auth_token)

    def authenticate_nova_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with nova-api."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return nova_client.Client(username=user, api_key=password,
                                  project_id=tenant, auth_url=ep)

    def create_cirros_image(self, glance, image_name):
        """Download the latest cirros image and upload it to glance.

        Returns the glance image object, or None on timeout.
        """
        http_proxy = os.getenv('AMULET_HTTP_PROXY')
        self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
        if http_proxy:
            proxies = {'http': http_proxy}
            opener = urllib.FancyURLopener(proxies)
        else:
            opener = urllib.FancyURLopener()

        f = opener.open("http://download.cirros-cloud.net/version/released")
        version = f.read().strip()
        # BUG FIX: keep the remote filename separate from the local path;
        # the original embedded 'tests/' in the name and then built the
        # download URL from it, yielding a non-existent
        # .../<version>/tests/cirros-... URL.
        cirros_img = "cirros-{}-x86_64-disk.img".format(version)
        local_path = os.path.join('tests', cirros_img)

        if not os.path.exists(local_path):
            cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
                                                  version, cirros_img)
            opener.retrieve(cirros_url, local_path)
        f.close()

        with open(local_path) as f:
            image = glance.images.create(name=image_name, is_public=True,
                                         disk_format='qcow2',
                                         container_format='bare', data=f)
        count = 1
        status = image.status
        while status != 'active' and count < 10:
            time.sleep(3)
            image = glance.images.get(image.id)
            status = image.status
            self.log.debug('image status: {}'.format(status))
            count += 1

        if status != 'active':
            self.log.error('image creation timed out')
            return None

        return image

    def delete_image(self, glance, image):
        """Delete the specified image. Returns False on timeout."""
        num_before = len(list(glance.images.list()))
        glance.images.delete(image)

        count = 1
        num_after = len(list(glance.images.list()))
        while num_after != (num_before - 1) and count < 10:
            time.sleep(3)
            num_after = len(list(glance.images.list()))
            self.log.debug('number of images: {}'.format(num_after))
            count += 1

        if num_after != (num_before - 1):
            self.log.error('image deletion timed out')
            return False

        return True

    def create_instance(self, nova, image_name, instance_name, flavor):
        """Create the specified instance. Returns None on timeout."""
        image = nova.images.find(name=image_name)
        flavor = nova.flavors.find(name=flavor)
        instance = nova.servers.create(name=instance_name, image=image,
                                       flavor=flavor)

        count = 1
        status = instance.status
        while status != 'ACTIVE' and count < 60:
            time.sleep(3)
            instance = nova.servers.get(instance.id)
            status = instance.status
            self.log.debug('instance status: {}'.format(status))
            count += 1

        if status != 'ACTIVE':
            self.log.error('instance creation timed out')
            return None

        return instance

    def delete_instance(self, nova, instance):
        """Delete the specified instance. Returns False on timeout."""
        num_before = len(list(nova.servers.list()))
        nova.servers.delete(instance)

        count = 1
        num_after = len(list(nova.servers.list()))
        while num_after != (num_before - 1) and count < 10:
            time.sleep(3)
            num_after = len(list(nova.servers.list()))
            self.log.debug('number of instances: {}'.format(num_after))
            count += 1

        if num_after != (num_before - 1):
            self.log.error('instance deletion timed out')
            return False

        return True

Subscribers

People subscribed via source and target branches