Merge lp:~corey.bryant/charms/trusty/quantum-gateway/amulet-basic into lp:~openstack-charmers/charms/trusty/quantum-gateway/next

Proposed by Corey Bryant
Status: Merged
Merged at revision: 54
Proposed branch: lp:~corey.bryant/charms/trusty/quantum-gateway/amulet-basic
Merge into: lp:~openstack-charmers/charms/trusty/quantum-gateway/next
Diff against target: 2073 lines (+1690/-80)
24 files modified
Makefile (+12/-4)
charm-helpers-hooks.yaml (+10/-0)
charm-helpers-sync.yaml (+0/-10)
charm-helpers-tests.yaml (+5/-0)
hooks/charmhelpers/contrib/hahelpers/cluster.py (+1/-0)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+26/-7)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+105/-3)
hooks/charmhelpers/contrib/openstack/context.py (+10/-8)
hooks/charmhelpers/contrib/openstack/templating.py (+22/-23)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+1/-1)
hooks/charmhelpers/core/hookenv.py (+5/-4)
hooks/charmhelpers/core/host.py (+7/-5)
hooks/charmhelpers/fetch/__init__.py (+23/-15)
tests/00-setup (+10/-0)
tests/12-basic-precise-grizzly (+11/-0)
tests/13-basic-precise-havana (+11/-0)
tests/14-basic-precise-icehouse (+11/-0)
tests/15-basic-trusty-icehouse (+9/-0)
tests/README (+47/-0)
tests/basic_deployment.py (+834/-0)
tests/charmhelpers/contrib/amulet/deployment.py (+63/-0)
tests/charmhelpers/contrib/amulet/utils.py (+157/-0)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+57/-0)
tests/charmhelpers/contrib/openstack/amulet/utils.py (+253/-0)
To merge this branch: bzr merge lp:~corey.bryant/charms/trusty/quantum-gateway/amulet-basic
Reviewer Review Type Date Requested Status
James Page Approve
Review via email: mp+226489@code.launchpad.net
To post a comment you must log in.
Revision history for this message
James Page (james-page) wrote :

Not relevant for essex and folsom - I'd just drop the tests.

56. By Corey Bryant

Add Amulet basic tests

Revision history for this message
Corey Bryant (corey.bryant) wrote :

Ok. I pushed a new version with essex/folsom tests dropped.

Revision history for this message
Corey Bryant (corey.bryant) wrote :

> Not relevant for essex and folsom - I'd just drop the tests.

Ok. I pushed a new version with essex/folsom tests dropped.

Revision history for this message
James Page (james-page) wrote :

I'm going to merge this as I think the tests are all 100% OK; however I do keep hitting a race where sentry.wait() in _deploy is not actually waiting for all hook execution to complete; resulting in the neutron-server on the nova-cc being restarted and connections from the client in the tests failing.

review: Approve
Revision history for this message
Corey Bryant (corey.bryant) wrote :

Thanks James. I'll add support to charm-helpers for the sentry.wait() issue.

Revision history for this message
Stuart Bishop (stub) wrote :

sentry.wait() is likely Bug #1254766

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
=== modified file 'Makefile'
--- Makefile 2014-05-21 10:07:03 +0000
+++ Makefile 2014-07-17 15:16:47 +0000
@@ -3,15 +3,23 @@
33
4lint:4lint:
5 @flake8 --exclude hooks/charmhelpers hooks5 @flake8 --exclude hooks/charmhelpers hooks
6 @flake8 --exclude hooks/charmhelpers unit_tests6 @flake8 --exclude hooks/charmhelpers unit_tests tests
7 @charm proof7 @charm proof
88
9unit_test:
10 @echo Starting unit tests...
11 @$(PYTHON) /usr/bin/nosetests --nologcapture unit_tests
12
9test:13test:
10 @echo Starting tests...14 @echo Starting Amulet tests...
11 @$(PYTHON) /usr/bin/nosetests --nologcapture unit_tests15 # coreycb note: The -v should only be temporary until Amulet sends
16 # raise_status() messages to stderr:
17 # https://bugs.launchpad.net/amulet/+bug/1320357
18 @juju test -v -p AMULET_HTTP_PROXY
1219
13sync:20sync:
14 @charm-helper-sync -c charm-helpers-sync.yaml21 @charm-helper-sync -c charm-helpers-hooks.yaml
22 @charm-helper-sync -c charm-helpers-tests.yaml
1523
16publish: lint test24publish: lint test
17 bzr push lp:charms/quantum-gateway25 bzr push lp:charms/quantum-gateway
1826
=== added file 'charm-helpers-hooks.yaml'
--- charm-helpers-hooks.yaml 1970-01-01 00:00:00 +0000
+++ charm-helpers-hooks.yaml 2014-07-17 15:16:47 +0000
@@ -0,0 +1,10 @@
1branch: lp:charm-helpers
2destination: hooks/charmhelpers
3include:
4 - core
5 - fetch
6 - contrib.openstack
7 - contrib.hahelpers
8 - contrib.network.ovs
9 - contrib.storage.linux
10 - payload.execd
011
=== removed file 'charm-helpers-sync.yaml'
--- charm-helpers-sync.yaml 2014-03-27 11:20:28 +0000
+++ charm-helpers-sync.yaml 1970-01-01 00:00:00 +0000
@@ -1,10 +0,0 @@
1branch: lp:charm-helpers
2destination: hooks/charmhelpers
3include:
4 - core
5 - fetch
6 - contrib.openstack
7 - contrib.hahelpers
8 - contrib.network.ovs
9 - contrib.storage.linux
10 - payload.execd
110
=== added file 'charm-helpers-tests.yaml'
--- charm-helpers-tests.yaml 1970-01-01 00:00:00 +0000
+++ charm-helpers-tests.yaml 2014-07-17 15:16:47 +0000
@@ -0,0 +1,5 @@
1branch: lp:charm-helpers
2destination: tests/charmhelpers
3include:
4 - contrib.amulet
5 - contrib.openstack.amulet
06
=== modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py'
--- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-03-27 11:20:28 +0000
+++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-07-17 15:16:47 +0000
@@ -170,6 +170,7 @@
170170
171 :configs : OSTemplateRenderer: A config templating object to inspect for171 a complete https context.
172 a complete https context.172 a complete https context.
173
173 :vip_setting: str: Setting in charm config that specifies174 :vip_setting: str: Setting in charm config that specifies
174 VIP address.175 VIP address.
175 '''176 '''
176177
=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-06-24 13:40:39 +0000
+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-07-17 15:16:47 +0000
@@ -7,19 +7,38 @@
7 """This class inherits from AmuletDeployment and has additional support7 """This class inherits from AmuletDeployment and has additional support
8 that is specifically for use by OpenStack charms."""8 that is specifically for use by OpenStack charms."""
99
10 def __init__(self, series=None, openstack=None):10 def __init__(self, series, openstack=None, source=None):
11 """Initialize the deployment environment."""11 """Initialize the deployment environment."""
12 self.openstack = None
13 super(OpenStackAmuletDeployment, self).__init__(series)12 super(OpenStackAmuletDeployment, self).__init__(series)
1413 self.openstack = openstack
15 if openstack:14 self.source = source
16 self.openstack = openstack15
16 def _add_services(self, this_service, other_services):
17 """Add services to the deployment and set openstack-origin."""
18 super(OpenStackAmuletDeployment, self)._add_services(this_service,
19 other_services)
20 name = 0
21 services = other_services
22 services.append(this_service)
23 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']
24
25 if self.openstack:
26 for svc in services:
27 charm_name = self._get_charm_name(svc[name])
28 if charm_name not in use_source:
29 config = {'openstack-origin': self.openstack}
30 self.d.configure(svc[name], config)
31
32 if self.source:
33 for svc in services:
34 charm_name = self._get_charm_name(svc[name])
35 if charm_name in use_source:
36 config = {'source': self.source}
37 self.d.configure(svc[name], config)
1738
18 def _configure_services(self, configs):39 def _configure_services(self, configs):
19 """Configure all of the services."""40 """Configure all of the services."""
20 for service, config in configs.iteritems():41 for service, config in configs.iteritems():
21 if service == self.this_service:
22 config['openstack-origin'] = self.openstack
23 self.d.configure(service, config)42 self.d.configure(service, config)
2443
25 def _get_openstack_release(self):44 def _get_openstack_release(self):
2645
=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-06-24 13:40:39 +0000
+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-07-17 15:16:47 +0000
@@ -1,4 +1,7 @@
1import logging1import logging
2import os
3import time
4import urllib
25
3import glanceclient.v1.client as glance_client6import glanceclient.v1.client as glance_client
4import keystoneclient.v2_0 as keystone_client7import keystoneclient.v2_0 as keystone_client
@@ -71,7 +74,7 @@
71 if ret:74 if ret:
72 return "unexpected tenant data - {}".format(ret)75 return "unexpected tenant data - {}".format(ret)
73 if not found:76 if not found:
74 return "tenant {} does not exist".format(e.name)77 return "tenant {} does not exist".format(e['name'])
75 return ret78 return ret
7679
77 def validate_role_data(self, expected, actual):80 def validate_role_data(self, expected, actual):
@@ -88,7 +91,7 @@
88 if ret:91 if ret:
89 return "unexpected role data - {}".format(ret)92 return "unexpected role data - {}".format(ret)
90 if not found:93 if not found:
91 return "role {} does not exist".format(e.name)94 return "role {} does not exist".format(e['name'])
92 return ret95 return ret
9396
94 def validate_user_data(self, expected, actual):97 def validate_user_data(self, expected, actual):
@@ -107,7 +110,7 @@
107 if ret:110 if ret:
108 return "unexpected user data - {}".format(ret)111 return "unexpected user data - {}".format(ret)
109 if not found:112 if not found:
110 return "user {} does not exist".format(e.name)113 return "user {} does not exist".format(e['name'])
111 return ret114 return ret
112115
113 def validate_flavor_data(self, expected, actual):116 def validate_flavor_data(self, expected, actual):
@@ -149,3 +152,102 @@
149 endpoint_type='publicURL')152 endpoint_type='publicURL')
150 return nova_client.Client(username=user, api_key=password,153 return nova_client.Client(username=user, api_key=password,
151 project_id=tenant, auth_url=ep)154 project_id=tenant, auth_url=ep)
155
156 def create_cirros_image(self, glance, image_name):
157 """Download the latest cirros image and upload it to glance."""
158 http_proxy = os.getenv('AMULET_HTTP_PROXY')
159 self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
160 if http_proxy:
161 proxies = {'http': http_proxy}
162 opener = urllib.FancyURLopener(proxies)
163 else:
164 opener = urllib.FancyURLopener()
165
166 f = opener.open("http://download.cirros-cloud.net/version/released")
167 version = f.read().strip()
168 cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version)
169
170 if not os.path.exists(cirros_img):
171 cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
172 version, cirros_img)
173 opener.retrieve(cirros_url, cirros_img)
174 f.close()
175
176 with open(cirros_img) as f:
177 image = glance.images.create(name=image_name, is_public=True,
178 disk_format='qcow2',
179 container_format='bare', data=f)
180 count = 1
181 status = image.status
182 while status != 'active' and count < 10:
183 time.sleep(3)
184 image = glance.images.get(image.id)
185 status = image.status
186 self.log.debug('image status: {}'.format(status))
187 count += 1
188
189 if status != 'active':
190 self.log.error('image creation timed out')
191 return None
192
193 return image
194
195 def delete_image(self, glance, image):
196 """Delete the specified image."""
197 num_before = len(list(glance.images.list()))
198 glance.images.delete(image)
199
200 count = 1
201 num_after = len(list(glance.images.list()))
202 while num_after != (num_before - 1) and count < 10:
203 time.sleep(3)
204 num_after = len(list(glance.images.list()))
205 self.log.debug('number of images: {}'.format(num_after))
206 count += 1
207
208 if num_after != (num_before - 1):
209 self.log.error('image deletion timed out')
210 return False
211
212 return True
213
214 def create_instance(self, nova, image_name, instance_name, flavor):
215 """Create the specified instance."""
216 image = nova.images.find(name=image_name)
217 flavor = nova.flavors.find(name=flavor)
218 instance = nova.servers.create(name=instance_name, image=image,
219 flavor=flavor)
220
221 count = 1
222 status = instance.status
223 while status != 'ACTIVE' and count < 60:
224 time.sleep(3)
225 instance = nova.servers.get(instance.id)
226 status = instance.status
227 self.log.debug('instance status: {}'.format(status))
228 count += 1
229
230 if status != 'ACTIVE':
231 self.log.error('instance creation timed out')
232 return None
233
234 return instance
235
236 def delete_instance(self, nova, instance):
237 """Delete the specified instance."""
238 num_before = len(list(nova.servers.list()))
239 nova.servers.delete(instance)
240
241 count = 1
242 num_after = len(list(nova.servers.list()))
243 while num_after != (num_before - 1) and count < 10:
244 time.sleep(3)
245 num_after = len(list(nova.servers.list()))
246 self.log.debug('number of instances: {}'.format(num_after))
247 count += 1
248
249 if num_after != (num_before - 1):
250 self.log.error('instance deletion timed out')
251 return False
252
253 return True
152254
=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
--- hooks/charmhelpers/contrib/openstack/context.py 2014-06-24 13:40:39 +0000
+++ hooks/charmhelpers/contrib/openstack/context.py 2014-07-17 15:16:47 +0000
@@ -426,12 +426,13 @@
426 """426 """
427 Generates a context for an apache vhost configuration that configures427 Generates a context for an apache vhost configuration that configures
428 HTTPS reverse proxying for one or many endpoints. Generated context428 HTTPS reverse proxying for one or many endpoints. Generated context
429 looks something like:429 looks something like::
430 {430
431 'namespace': 'cinder',431 {
432 'private_address': 'iscsi.mycinderhost.com',432 'namespace': 'cinder',
433 'endpoints': [(8776, 8766), (8777, 8767)]433 'private_address': 'iscsi.mycinderhost.com',
434 }434 'endpoints': [(8776, 8766), (8777, 8767)]
435 }
435436
436 The endpoints list consists of a tuples mapping external ports437 The endpoints list consists of a tuples mapping external ports
437 to internal ports.438 to internal ports.
@@ -641,7 +642,7 @@
641 The subordinate interface allows subordinates to export their642 The subordinate interface allows subordinates to export their
642 configuration requirements to the principle for multiple config643 configuration requirements to the principle for multiple config
643 files and multiple services. Ie, a subordinate that has interfaces644 files and multiple services. Ie, a subordinate that has interfaces
644 to both glance and nova may export to following yaml blob as json:645 to both glance and nova may export to following yaml blob as json::
645646
646 glance:647 glance:
647 /etc/glance/glance-api.conf:648 /etc/glance/glance-api.conf:
@@ -660,7 +661,8 @@
660661
661 It is then up to the principle charms to subscribe this context to662 It is then up to the principle charms to subscribe this context to
662 the service+config file it is interested in. Configuration data will663 the service+config file it is interested in. Configuration data will
663 be available in the template context, in glance's case, as:664 be available in the template context, in glance's case, as::
665
664 ctxt = {666 ctxt = {
665 ... other context ...667 ... other context ...
666 'subordinate_config': {668 'subordinate_config': {
667669
=== modified file 'hooks/charmhelpers/contrib/openstack/templating.py'
--- hooks/charmhelpers/contrib/openstack/templating.py 2013-09-25 15:27:00 +0000
+++ hooks/charmhelpers/contrib/openstack/templating.py 2014-07-17 15:16:47 +0000
@@ -30,17 +30,17 @@
30 loading dir.30 loading dir.
3131
32 A charm may also ship a templates dir with this module32 A charm may also ship a templates dir with this module
33 and it will be appended to the bottom of the search list, eg:33 and it will be appended to the bottom of the search list, eg::
34 hooks/charmhelpers/contrib/openstack/templates.34
3535 hooks/charmhelpers/contrib/openstack/templates
36 :param templates_dir: str: Base template directory containing release36
37 sub-directories.37 :param templates_dir (str): Base template directory containing release
38 :param os_release : str: OpenStack release codename to construct template38 sub-directories.
39 loader.39 :param os_release (str): OpenStack release codename to construct template
4040 loader.
41 :returns : jinja2.ChoiceLoader constructed with a list of41 :returns: jinja2.ChoiceLoader constructed with a list of
42 jinja2.FilesystemLoaders, ordered in descending42 jinja2.FilesystemLoaders, ordered in descending
43 order by OpenStack release.43 order by OpenStack release.
44 """44 """
45 tmpl_dirs = [(rel, os.path.join(templates_dir, rel))45 tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
46 for rel in OPENSTACK_CODENAMES.itervalues()]46 for rel in OPENSTACK_CODENAMES.itervalues()]
@@ -111,7 +111,8 @@
111 and ease the burden of managing config templates across multiple OpenStack111 and ease the burden of managing config templates across multiple OpenStack
112 releases.112 releases.
113113
114 Basic usage:114 Basic usage::
115
115 # import some common context generators from charmhelpers116 # import some common context generators from charmhelpers
116 from charmhelpers.contrib.openstack import context117 from charmhelpers.contrib.openstack import context
117118
@@ -131,21 +132,19 @@
131 # write out all registered configs132 # write out all registered configs
132 configs.write_all()133 configs.write_all()
133134
134 Details:135 **OpenStack Releases and template loading**
135136
136 OpenStack Releases and template loading
137 ---------------------------------------
138 When the object is instantiated, it is associated with a specific OS137 When the object is instantiated, it is associated with a specific OS
139 release. This dictates how the template loader will be constructed.138 release. This dictates how the template loader will be constructed.
140139
141 The constructed loader attempts to load the template from several places140 The constructed loader attempts to load the template from several places
142 in the following order:141 in the following order:
143 - from the most recent OS release-specific template dir (if one exists)142 - from the most recent OS release-specific template dir (if one exists)
144 - the base templates_dir143 - the base templates_dir
145 - a template directory shipped in the charm with this helper file.144 - a template directory shipped in the charm with this helper file.
146145
147146 For the example above, '/tmp/templates' contains the following structure::
148 For the example above, '/tmp/templates' contains the following structure:147
149 /tmp/templates/nova.conf148 /tmp/templates/nova.conf
150 /tmp/templates/api-paste.ini149 /tmp/templates/api-paste.ini
151 /tmp/templates/grizzly/api-paste.ini150 /tmp/templates/grizzly/api-paste.ini
@@ -169,8 +168,8 @@
169 $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows168 $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
170 us to ship common templates (haproxy, apache) with the helpers.169 us to ship common templates (haproxy, apache) with the helpers.
171170
172 Context generators171 **Context generators**
173 ---------------------------------------172
174 Context generators are used to generate template contexts during hook173 Context generators are used to generate template contexts during hook
175 execution. Doing so may require inspecting service relations, charm174 execution. Doing so may require inspecting service relations, charm
176 config, etc. When registered, a config file is associated with a list175 config, etc. When registered, a config file is associated with a list
177176
=== modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
--- hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-03-27 11:20:28 +0000
+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-07-17 15:16:47 +0000
@@ -303,7 +303,7 @@
303 blk_device, fstype, system_services=[]):303 blk_device, fstype, system_services=[]):
304 """304 """
305 NOTE: This function must only be called from a single service unit for305 NOTE: This function must only be called from a single service unit for
306 the same rbd_img otherwise data loss will occur.306 the same rbd_img otherwise data loss will occur.
307307
308 Ensures given pool and RBD image exists, is mapped to a block device,308 Ensures given pool and RBD image exists, is mapped to a block device,
309 and the device is formatted and mounted at the given mount_point.309 and the device is formatted and mounted at the given mount_point.
310310
=== modified file 'hooks/charmhelpers/core/hookenv.py'
--- hooks/charmhelpers/core/hookenv.py 2014-05-19 11:43:55 +0000
+++ hooks/charmhelpers/core/hookenv.py 2014-07-17 15:16:47 +0000
@@ -25,7 +25,7 @@
25def cached(func):25def cached(func):
26 """Cache return values for multiple executions of func + args26 """Cache return values for multiple executions of func + args
2727
28 For example:28 For example::
2929
30 @cached30 @cached
31 def unit_get(attribute):31 def unit_get(attribute):
@@ -445,18 +445,19 @@
445class Hooks(object):445class Hooks(object):
446 """A convenient handler for hook functions.446 """A convenient handler for hook functions.
447447
448 Example:448 Example::
449
449 hooks = Hooks()450 hooks = Hooks()
450451
451 # register a hook, taking its name from the function name452 # register a hook, taking its name from the function name
452 @hooks.hook()453 @hooks.hook()
453 def install():454 def install():
454 ...455 pass # your code here
455456
456 # register a hook, providing a custom hook name457 # register a hook, providing a custom hook name
457 @hooks.hook("config-changed")458 @hooks.hook("config-changed")
458 def config_changed():459 def config_changed():
459 ...460 pass # your code here
460461
461 if __name__ == "__main__":462 if __name__ == "__main__":
462 # execute a hook based on the name the program is called by463 # execute a hook based on the name the program is called by
463464
=== modified file 'hooks/charmhelpers/core/host.py'
--- hooks/charmhelpers/core/host.py 2014-06-24 13:40:39 +0000
+++ hooks/charmhelpers/core/host.py 2014-07-17 15:16:47 +0000
@@ -211,13 +211,13 @@
211def restart_on_change(restart_map, stopstart=False):211def restart_on_change(restart_map, stopstart=False):
212 """Restart services based on configuration files changing212 """Restart services based on configuration files changing
213213
214 This function is used a decorator, for example214 This function is used a decorator, for example::
215215
216 @restart_on_change({216 @restart_on_change({
217 '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]217 '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
218 })218 })
219 def ceph_client_changed():219 def ceph_client_changed():
220 ...220 pass # your code here
221221
222 In this example, the cinder-api and cinder-volume services222 In this example, the cinder-api and cinder-volume services
223 would be restarted if /etc/ceph/ceph.conf is changed by the223 would be restarted if /etc/ceph/ceph.conf is changed by the
@@ -313,9 +313,11 @@
313313
314def cmp_pkgrevno(package, revno, pkgcache=None):314def cmp_pkgrevno(package, revno, pkgcache=None):
315 '''Compare supplied revno with the revno of the installed package315 '''Compare supplied revno with the revno of the installed package
316 1 => Installed revno is greater than supplied arg316
317 0 => Installed revno is the same as supplied arg317 * 1 => Installed revno is greater than supplied arg
318 -1 => Installed revno is less than supplied arg318 * 0 => Installed revno is the same as supplied arg
319 * -1 => Installed revno is less than supplied arg
320
319 '''321 '''
320 import apt_pkg322 import apt_pkg
321 if not pkgcache:323 if not pkgcache:
322324
=== modified file 'hooks/charmhelpers/fetch/__init__.py'
--- hooks/charmhelpers/fetch/__init__.py 2014-06-24 13:40:39 +0000
+++ hooks/charmhelpers/fetch/__init__.py 2014-07-17 15:16:47 +0000
@@ -235,31 +235,39 @@
235 sources_var='install_sources',235 sources_var='install_sources',
236 keys_var='install_keys'):236 keys_var='install_keys'):
237 """237 """
238 Configure multiple sources from charm configuration238 Configure multiple sources from charm configuration.
239
240 The lists are encoded as yaml fragments in the configuration.
241 The fragment needs to be included as a string.
239242
240 Example config:243 Example config:
241 install_sources:244 install_sources: |
242 - "ppa:foo"245 - "ppa:foo"
243 - "http://example.com/repo precise main"246 - "http://example.com/repo precise main"
244 install_keys:247 install_keys: |
245 - null248 - null
246 - "a1b2c3d4"249 - "a1b2c3d4"
247250
248 Note that 'null' (a.k.a. None) should not be quoted.251 Note that 'null' (a.k.a. None) should not be quoted.
249 """252 """
250 sources = safe_load(config(sources_var))253 sources = safe_load((config(sources_var) or '').strip()) or []
251 keys = config(keys_var)254 keys = safe_load((config(keys_var) or '').strip()) or None
252 if keys is not None:255
253 keys = safe_load(keys)256 if isinstance(sources, basestring):
254 if isinstance(sources, basestring) and (257 sources = [sources]
255 keys is None or isinstance(keys, basestring)):258
256 add_source(sources, keys)259 if keys is None:
260 for source in sources:
261 add_source(source, None)
257 else:262 else:
258 if not len(sources) == len(keys):263 if isinstance(keys, basestring):
259 msg = 'Install sources and keys lists are different lengths'264 keys = [keys]
260 raise SourceConfigError(msg)265
261 for src_num in range(len(sources)):266 if len(sources) != len(keys):
262 add_source(sources[src_num], keys[src_num])267 raise SourceConfigError(
268 'Install sources and keys lists are different lengths')
269 for source, key in zip(sources, keys):
270 add_source(source, key)
263 if update:271 if update:
264 apt_update(fatal=True)272 apt_update(fatal=True)
265273
266274
=== added directory 'tests'
=== added file 'tests/00-setup'
--- tests/00-setup 1970-01-01 00:00:00 +0000
+++ tests/00-setup 2014-07-17 15:16:47 +0000
@@ -0,0 +1,10 @@
1#!/bin/bash
2
3set -ex
4
5sudo add-apt-repository --yes ppa:juju/stable
6sudo apt-get update --yes
7sudo apt-get install --yes python-amulet
8sudo apt-get install --yes python-neutronclient
9sudo apt-get install --yes python-keystoneclient
10sudo apt-get install --yes python-novaclient
011
=== added file 'tests/12-basic-precise-grizzly'
--- tests/12-basic-precise-grizzly 1970-01-01 00:00:00 +0000
+++ tests/12-basic-precise-grizzly 2014-07-17 15:16:47 +0000
@@ -0,0 +1,11 @@
1#!/usr/bin/python
2
3"""Amulet tests on a basic quantum-gateway deployment on precise-grizzly."""
4
5from basic_deployment import QuantumGatewayBasicDeployment
6
7if __name__ == '__main__':
8 deployment = QuantumGatewayBasicDeployment(series='precise',
9 openstack='cloud:precise-grizzly',
10 source='cloud:precise-updates/grizzly')
11 deployment.run_tests()
012
=== added file 'tests/13-basic-precise-havana'
--- tests/13-basic-precise-havana 1970-01-01 00:00:00 +0000
+++ tests/13-basic-precise-havana 2014-07-17 15:16:47 +0000
@@ -0,0 +1,11 @@
1#!/usr/bin/python
2
3"""Amulet tests on a basic quantum-gateway deployment on precise-havana."""
4
5from basic_deployment import QuantumGatewayBasicDeployment
6
7if __name__ == '__main__':
8 deployment = QuantumGatewayBasicDeployment(series='precise',
9 openstack='cloud:precise-havana',
10 source='cloud:precise-updates/havana')
11 deployment.run_tests()
012
=== added file 'tests/14-basic-precise-icehouse'
--- tests/14-basic-precise-icehouse 1970-01-01 00:00:00 +0000
+++ tests/14-basic-precise-icehouse 2014-07-17 15:16:47 +0000
@@ -0,0 +1,11 @@
1#!/usr/bin/python
2
3"""Amulet tests on a basic quantum-gateway deployment on precise-icehouse."""
4
5from basic_deployment import QuantumGatewayBasicDeployment
6
7if __name__ == '__main__':
8 deployment = QuantumGatewayBasicDeployment(series='precise',
9 openstack='cloud:precise-icehouse',
10 source='cloud:precise-updates/icehouse')
11 deployment.run_tests()
012
=== added file 'tests/15-basic-trusty-icehouse'
--- tests/15-basic-trusty-icehouse 1970-01-01 00:00:00 +0000
+++ tests/15-basic-trusty-icehouse 2014-07-17 15:16:47 +0000
@@ -0,0 +1,9 @@
1#!/usr/bin/python
2
3"""Amulet tests on a basic quantum-gateway deployment on trusty-icehouse."""
4
5from basic_deployment import QuantumGatewayBasicDeployment
6
7if __name__ == '__main__':
8 deployment = QuantumGatewayBasicDeployment(series='trusty')
9 deployment.run_tests()
010
=== added file 'tests/README'
--- tests/README 1970-01-01 00:00:00 +0000
+++ tests/README 2014-07-17 15:16:47 +0000
@@ -0,0 +1,47 @@
1This directory provides Amulet tests that focus on verification of
2quantum-gateway deployments.
3
4If you use a web proxy server to access the web, you'll need to set the
5AMULET_HTTP_PROXY environment variable to the http URL of the proxy server.
6
7The following examples demonstrate different ways that tests can be executed.
8All examples are run from the charm's root directory.
9
10 * To run all tests (starting with 00-setup):
11
12 make test
13
14 * To run a specific test module (or modules):
15
16 juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
17
18 * To run a specific test module (or modules), and keep the environment
19 deployed after a failure:
20
21 juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
22
23 * To re-run a test module against an already deployed environment (one
24 that was deployed by a previous call to 'juju test --set-e'):
25
26 ./tests/15-basic-trusty-icehouse
27
28For debugging and test development purposes, all code should be idempotent.
29In other words, the code should have the ability to be re-run without changing
30the results beyond the initial run. This enables editing and re-running of a
31test module against an already deployed environment, as described above.
32
33Manual debugging tips:
34
35 * Set the following env vars before using the OpenStack CLI as admin:
36 export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
37 export OS_TENANT_NAME=admin
38 export OS_USERNAME=admin
39 export OS_PASSWORD=openstack
40 export OS_REGION_NAME=RegionOne
41
42 * Set the following env vars before using the OpenStack CLI as demoUser:
43 export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
44 export OS_TENANT_NAME=demoTenant
45 export OS_USERNAME=demoUser
46 export OS_PASSWORD=password
47 export OS_REGION_NAME=RegionOne
048
=== added file 'tests/basic_deployment.py'
--- tests/basic_deployment.py 1970-01-01 00:00:00 +0000
+++ tests/basic_deployment.py 2014-07-17 15:16:47 +0000
@@ -0,0 +1,834 @@
1#!/usr/bin/python
2
3import amulet
4try:
5 from quantumclient.v2_0 import client as neutronclient
6except ImportError:
7 from neutronclient.v2_0 import client as neutronclient
8
9from charmhelpers.contrib.openstack.amulet.deployment import (
10 OpenStackAmuletDeployment
11)
12
13from charmhelpers.contrib.openstack.amulet.utils import (
14 OpenStackAmuletUtils,
15 DEBUG, # flake8: noqa
16 ERROR
17)
18
19# Use DEBUG to turn on debug logging
20u = OpenStackAmuletUtils(ERROR)
21
22
23class QuantumGatewayBasicDeployment(OpenStackAmuletDeployment):
24 """Amulet tests on a basic quantum-gateway deployment."""
25
26 def __init__(self, series, openstack=None, source=None):
27 """Deploy the entire test environment."""
28 super(QuantumGatewayBasicDeployment, self).__init__(series, openstack,
29 source)
30 self._add_services()
31 self._add_relations()
32 self._configure_services()
33 self._deploy()
34 self._initialize_tests()
35
36 def _add_services(self):
37 """Add the service that we're testing, including the number of units,
38 where quantum-gateway is local, and the other charms are from
39 the charm store."""
40 this_service = ('quantum-gateway', 1)
41 other_services = [('mysql', 1),
42 ('rabbitmq-server', 1), ('keystone', 1),
43 ('nova-cloud-controller', 1)]
44 super(QuantumGatewayBasicDeployment, self)._add_services(this_service,
45 other_services)
46
47 def _add_relations(self):
48 """Add all of the relations for the services."""
49 relations = {
50 'keystone:shared-db': 'mysql:shared-db',
51 'quantum-gateway:shared-db': 'mysql:shared-db',
52 'quantum-gateway:amqp': 'rabbitmq-server:amqp',
53 'nova-cloud-controller:quantum-network-service': \
54 'quantum-gateway:quantum-network-service',
55 'nova-cloud-controller:shared-db': 'mysql:shared-db',
56 'nova-cloud-controller:identity-service': 'keystone:identity-service',
57 'nova-cloud-controller:amqp': 'rabbitmq-server:amqp'
58 }
59 super(QuantumGatewayBasicDeployment, self)._add_relations(relations)
60
61 def _configure_services(self):
62 """Configure all of the services."""
63 keystone_config = {'admin-password': 'openstack',
64 'admin-token': 'ubuntutesting'}
65 nova_cc_config = {'network-manager': 'Quantum',
66 'quantum-security-groups': 'yes'}
67 configs = {'keystone': keystone_config,
68 'nova-cloud-controller': nova_cc_config}
69 super(QuantumGatewayBasicDeployment, self)._configure_services(configs)
70
71 def _initialize_tests(self):
72 """Perform final initialization before tests get run."""
73 # Access the sentries for inspecting service units
74 self.mysql_sentry = self.d.sentry.unit['mysql/0']
75 self.keystone_sentry = self.d.sentry.unit['keystone/0']
76 self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0']
77 self.nova_cc_sentry = self.d.sentry.unit['nova-cloud-controller/0']
78 self.quantum_gateway_sentry = self.d.sentry.unit['quantum-gateway/0']
79
80 # Authenticate admin with keystone
81 self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
82 user='admin',
83 password='openstack',
84 tenant='admin')
85
86
87 # Authenticate admin with neutron
88 ep = self.keystone.service_catalog.url_for(service_type='identity',
89 endpoint_type='publicURL')
90 self.neutron = neutronclient.Client(auth_url=ep,
91 username='admin',
92 password='openstack',
93 tenant_name='admin',
94 region_name='RegionOne')
95
96 def test_services(self):
97 """Verify the expected services are running on the corresponding
98 service units."""
99 if self._get_openstack_release() >= self.precise_havana:
100 neutron_services = ['status neutron-dhcp-agent',
101 'status neutron-lbaas-agent',
102 'status neutron-metadata-agent',
103 'status neutron-plugin-openvswitch-agent']
104 if self._get_openstack_release() == self.precise_havana:
105 neutron_services.append('status neutron-l3-agent')
106 else:
107 neutron_services.append('status neutron-vpn-agent')
108 neutron_services.append('status neutron-metering-agent')
109 neutron_services.append('status neutron-ovs-cleanup')
110 else:
111 neutron_services = ['status quantum-dhcp-agent',
112 'status quantum-l3-agent',
113 'status quantum-metadata-agent',
114 'status quantum-plugin-openvswitch-agent']
115
116 nova_cc_services = ['status nova-api-ec2',
117 'status nova-api-os-compute',
118 'status nova-objectstore',
119 'status nova-cert',
120 'status nova-scheduler']
121 if self._get_openstack_release() >= self.precise_grizzly:
122 nova_cc_services.append('status nova-conductor')
123
124 commands = {
125 self.mysql_sentry: ['status mysql'],
126 self.keystone_sentry: ['status keystone'],
127 self.nova_cc_sentry: nova_cc_services,
128 self.quantum_gateway_sentry: neutron_services
129 }
130
131 ret = u.validate_services(commands)
132 if ret:
133 amulet.raise_status(amulet.FAIL, msg=ret)
134
135 def test_quantum_gateway_shared_db_relation(self):
136 """Verify the quantum-gateway to mysql shared-db relation data"""
137 unit = self.quantum_gateway_sentry
138 relation = ['shared-db', 'mysql:shared-db']
139 expected = {
140 'private-address': u.valid_ip,
141 'database': 'nova',
142 'username': 'nova',
143 'hostname': u.valid_ip
144 }
145
146 ret = u.validate_relation_data(unit, relation, expected)
147 if ret:
148 message = u.relation_error('quantum-gateway shared-db', ret)
149 amulet.raise_status(amulet.FAIL, msg=message)
150
151 def test_mysql_shared_db_relation(self):
152 """Verify the mysql to quantum-gateway shared-db relation data"""
153 unit = self.mysql_sentry
154 relation = ['shared-db', 'quantum-gateway:shared-db']
155 expected = {
156 'private-address': u.valid_ip,
157 'password': u.not_null,
158 'db_host': u.valid_ip
159 }
160
161 ret = u.validate_relation_data(unit, relation, expected)
162 if ret:
163 message = u.relation_error('mysql shared-db', ret)
164 amulet.raise_status(amulet.FAIL, msg=message)
165
166 def test_quantum_gateway_amqp_relation(self):
167 """Verify the quantum-gateway to rabbitmq-server amqp relation data"""
168 unit = self.quantum_gateway_sentry
169 relation = ['amqp', 'rabbitmq-server:amqp']
170 expected = {
171 'username': 'neutron',
172 'private-address': u.valid_ip,
173 'vhost': 'openstack'
174 }
175
176 ret = u.validate_relation_data(unit, relation, expected)
177 if ret:
178 message = u.relation_error('quantum-gateway amqp', ret)
179 amulet.raise_status(amulet.FAIL, msg=message)
180
181 def test_rabbitmq_amqp_relation(self):
182 """Verify the rabbitmq-server to quantum-gateway amqp relation data"""
183 unit = self.rabbitmq_sentry
184 relation = ['amqp', 'quantum-gateway:amqp']
185 expected = {
186 'private-address': u.valid_ip,
187 'password': u.not_null,
188 'hostname': u.valid_ip
189 }
190
191 ret = u.validate_relation_data(unit, relation, expected)
192 if ret:
193 message = u.relation_error('rabbitmq amqp', ret)
194 amulet.raise_status(amulet.FAIL, msg=message)
195
196 def test_quantum_gateway_network_service_relation(self):
197 """Verify the quantum-gateway to nova-cc quantum-network-service
198 relation data"""
199 unit = self.quantum_gateway_sentry
200 relation = ['quantum-network-service',
201 'nova-cloud-controller:quantum-network-service']
202 expected = {
203 'private-address': u.valid_ip
204 }
205
206 ret = u.validate_relation_data(unit, relation, expected)
207 if ret:
208 message = u.relation_error('quantum-gateway network-service', ret)
209 amulet.raise_status(amulet.FAIL, msg=message)
210
211 def test_nova_cc_network_service_relation(self):
212 """Verify the nova-cc to quantum-gateway quantum-network-service
213 relation data"""
214 unit = self.nova_cc_sentry
215 relation = ['quantum-network-service',
216 'quantum-gateway:quantum-network-service']
217 expected = {
218 'service_protocol': 'http',
219 'service_tenant': 'services',
220 'quantum_url': u.valid_url,
221 'quantum_port': '9696',
222 'service_port': '5000',
223 'region': 'RegionOne',
224 'service_password': u.not_null,
225 'quantum_host': u.valid_ip,
226 'auth_port': '35357',
227 'auth_protocol': 'http',
228 'private-address': u.valid_ip,
229 'keystone_host': u.valid_ip,
230 'quantum_plugin': 'ovs',
231 'auth_host': u.valid_ip,
232 'service_username': 'quantum_s3_ec2_nova',
233 'service_tenant_name': 'services'
234 }
235
236 ret = u.validate_relation_data(unit, relation, expected)
237 if ret:
238 message = u.relation_error('nova-cc network-service', ret)
239 amulet.raise_status(amulet.FAIL, msg=message)
240
241 def test_restart_on_config_change(self):
242 """Verify that the specified services are restarted when the config
243 is changed."""
244 if self._get_openstack_release() >= self.precise_havana:
245 conf = '/etc/neutron/neutron.conf'
246 services = ['neutron-dhcp-agent', 'neutron-openvswitch-agent',
247 'neutron-metering-agent', 'neutron-lbaas-agent',
248 'neutron-metadata-agent']
249 if self._get_openstack_release() == self.precise_havana:
250 services.append('neutron-l3-agent')
251 else:
252 services.append('neutron-vpn-agent')
253 else:
254 conf = '/etc/quantum/quantum.conf'
255 services = ['quantum-dhcp-agent', 'quantum-openvswitch-agent',
256 'quantum-metadata-agent', 'quantum-l3-agent']
257
258 self.d.configure('quantum-gateway', {'debug': 'True'})
259
260 time = 20
261 for s in services:
262 if not u.service_restarted(self.quantum_gateway_sentry, s, conf,
263 pgrep_full=True, sleep_time=time):
264 msg = "service {} didn't restart after config change".format(s)
265 amulet.raise_status(amulet.FAIL, msg=msg)
266 time = 0
267
268 self.d.configure('quantum-gateway', {'debug': 'False'})
269
    def test_neutron_config(self):
        """Verify the data in the neutron config file.

        The expected rabbit credentials are read live off the amqp
        relation, so this also verifies relation data was rendered into
        the config.
        """
        unit = self.quantum_gateway_sentry
        rabbitmq_relation = self.rabbitmq_sentry.relation(
            'amqp', 'quantum-gateway:amqp')

        if self._get_openstack_release() >= self.precise_havana:
            # havana onwards the project was renamed quantum -> neutron.
            conf = '/etc/neutron/neutron.conf'
            expected = {
                'DEFAULT': {
                    'verbose': 'False',
                    'debug': 'False',
                    'lock_path': '/var/lock/neutron',
                    'rabbit_userid': 'neutron',
                    'rabbit_virtual_host': 'openstack',
                    'rabbit_password': rabbitmq_relation['password'],
                    'rabbit_host': rabbitmq_relation['hostname'],
                    'control_exchange': 'neutron',
                    'notification_driver': 'neutron.openstack.common.notifier.'
                                           'list_notifier',
                    'list_notifier_drivers': 'neutron.openstack.common.'
                                             'notifier.rabbit_notifier'
                },
                'agent': {
                    'root_helper': 'sudo /usr/bin/neutron-rootwrap '
                                   '/etc/neutron/rootwrap.conf'
                }
            }
        else:
            conf = '/etc/quantum/quantum.conf'
            expected = {
                'DEFAULT': {
                    'verbose': 'False',
                    'debug': 'False',
                    'lock_path': '/var/lock/quantum',
                    'rabbit_userid': 'neutron',
                    'rabbit_virtual_host': 'openstack',
                    'rabbit_password': rabbitmq_relation['password'],
                    'rabbit_host': rabbitmq_relation['hostname'],
                    'control_exchange': 'quantum',
                    'notification_driver': 'quantum.openstack.common.notifier.'
                                           'list_notifier',
                    'list_notifier_drivers': 'quantum.openstack.common.'
                                             'notifier.rabbit_notifier'
                },
                'AGENT': {
                    'root_helper': 'sudo /usr/bin/quantum-rootwrap '
                                   '/etc/quantum/rootwrap.conf'
                }
            }

        # The core plugin changed per release: quantum ovs plugin before
        # havana, neutron ovs plugin for havana, ml2 from icehouse on.
        if self._get_openstack_release() >= self.precise_icehouse:
            expected['DEFAULT']['core_plugin'] = \
                'neutron.plugins.ml2.plugin.Ml2Plugin'
        elif self._get_openstack_release() >= self.precise_havana:
            expected['DEFAULT']['core_plugin'] = \
                'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2'
        else:
            expected['DEFAULT']['core_plugin'] = \
                'quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2'

        # NOTE: iteritems is python 2 only.
        for section, pairs in expected.iteritems():
            ret = u.validate_config_data(unit, conf, section, pairs)
            if ret:
                message = "neutron config error: {}".format(ret)
                amulet.raise_status(amulet.FAIL, msg=message)
336
    def test_ml2_config(self):
        """Verify the data in the ml2 config file. This is only available
        since icehouse."""
        if self._get_openstack_release() < self.precise_icehouse:
            return

        unit = self.quantum_gateway_sentry
        conf = '/etc/neutron/plugins/ml2/ml2_conf.ini'
        # The shared-db relation is used only to learn this unit's own
        # private address, which should appear as the ovs local_ip.
        quantum_gateway_relation = unit.relation('shared-db', 'mysql:shared-db')
        expected = {
            'ml2': {
                'type_drivers': 'gre,vxlan',
                'tenant_network_types': 'gre,vxlan',
                'mechanism_drivers': 'openvswitch'
            },
            'ml2_type_gre': {
                'tunnel_id_ranges': '1:1000'
            },
            'ml2_type_vxlan': {
                'vni_ranges': '1001:2000'
            },
            'ovs': {
                'enable_tunneling': 'True',
                'local_ip': quantum_gateway_relation['private-address']
            },
            'agent': {
                'tunnel_types': 'gre'
            },
            'securitygroup': {
                'firewall_driver': 'neutron.agent.linux.iptables_firewall.'
                                   'OVSHybridIptablesFirewallDriver'
            }
        }

        # NOTE: iteritems is python 2 only.
        for section, pairs in expected.iteritems():
            ret = u.validate_config_data(unit, conf, section, pairs)
            if ret:
                message = "ml2 config error: {}".format(ret)
                amulet.raise_status(amulet.FAIL, msg=message)
376
    def test_api_paste_config(self):
        """Verify the data in the api paste config file.

        Sections common to all havana+ releases are listed in `expected`;
        the v2.0 composite pipeline differs per release and is merged in
        afterwards.
        """
        unit = self.quantum_gateway_sentry
        if self._get_openstack_release() >= self.precise_havana:
            conf = '/etc/neutron/api-paste.ini'
            expected = {
                'composite:neutron': {
                    'use': 'egg:Paste#urlmap',
                    '/': 'neutronversions',
                    '/v2.0': 'neutronapi_v2_0'
                },
                'filter:keystonecontext': {
                    'paste.filter_factory': 'neutron.auth:'
                                            'NeutronKeystoneContext.factory'
                },
                'filter:authtoken': {
                    'paste.filter_factory': 'keystoneclient.middleware.'
                                            'auth_token:filter_factory'
                },
                'filter:extensions': {
                    'paste.filter_factory': 'neutron.api.extensions:'
                                            'plugin_aware_extension_middleware_'
                                            'factory'
                },
                'app:neutronversions': {
                    'paste.app_factory': 'neutron.api.versions:Versions.factory'
                },
                'app:neutronapiapp_v2_0': {
                    'paste.app_factory': 'neutron.api.v2.router:APIRouter.'
                                         'factory'
                }
            }
            # The pipeline grew request_id/catch_errors middleware after
            # havana.
            if self._get_openstack_release() == self.precise_havana:
                expected_additional = {
                    'composite:neutronapi_v2_0': {
                        'use': 'call:neutron.auth:pipeline_factory',
                        'noauth': 'extensions neutronapiapp_v2_0',
                        'keystone': 'authtoken keystonecontext extensions '
                                    'neutronapiapp_v2_0'
                    }
                }
            else:
                expected_additional = {
                    'composite:neutronapi_v2_0': {
                        'use': 'call:neutron.auth:pipeline_factory',
                        'noauth': 'request_id catch_errors extensions '
                                  'neutronapiapp_v2_0',
                        'keystone': 'request_id catch_errors authtoken '
                                    'keystonecontext extensions '
                                    'neutronapiapp_v2_0'
                    }
                }
            # NOTE: merging dicts by adding items() lists is python 2 only.
            expected = dict(expected.items() + expected_additional.items())
        else:
            conf = '/etc/quantum/api-paste.ini'
            expected = {
                'composite:quantum': {
                    'use': 'egg:Paste#urlmap',
                    '/': 'quantumversions',
                    '/v2.0': 'quantumapi_v2_0'
                },
                'composite:quantumapi_v2_0': {
                    'use': 'call:quantum.auth:pipeline_factory',
                    'noauth': 'extensions quantumapiapp_v2_0',
                    'keystone': 'authtoken keystonecontext extensions '
                                'quantumapiapp_v2_0',
                },
                'filter:keystonecontext': {
                    'paste.filter_factory': 'quantum.auth:'
                                            'QuantumKeystoneContext.factory'
                },
                'filter:authtoken': {
                    'paste.filter_factory': 'keystoneclient.middleware.'
                                            'auth_token:filter_factory'
                },
                'filter:extensions': {
                    'paste.filter_factory': 'quantum.api.extensions:'
                                            'plugin_aware_extension_middleware_'
                                            'factory'
                },
                'app:quantumversions': {
                    'paste.app_factory': 'quantum.api.versions:Versions.factory'
                },
                'app:quantumapiapp_v2_0': {
                    'paste.app_factory': 'quantum.api.v2.router:APIRouter.'
                                         'factory'
                }
            }

        # NOTE: iteritems is python 2 only.
        for section, pairs in expected.iteritems():
            ret = u.validate_config_data(unit, conf, section, pairs)
            if ret:
                message = "api paste config error: {}".format(ret)
                amulet.raise_status(amulet.FAIL, msg=message)
471
    def test_dhcp_agent_config(self):
        """Verify the data in the dhcp agent config file."""
        unit = self.quantum_gateway_sentry
        if self._get_openstack_release() >= self.precise_havana:
            conf = '/etc/neutron/dhcp_agent.ini'
            expected = {
                'state_path': '/var/lib/neutron',
                'interface_driver': 'neutron.agent.linux.interface.'
                                    'OVSInterfaceDriver',
                'dhcp_driver': 'neutron.agent.linux.dhcp.Dnsmasq',
                'root_helper': 'sudo /usr/bin/neutron-rootwrap '
                               '/etc/neutron/rootwrap.conf',
                # ovs_use_veth is only rendered on havana and later.
                'ovs_use_veth': 'True'
            }
        else:
            conf = '/etc/quantum/dhcp_agent.ini'
            expected = {
                'state_path': '/var/lib/quantum',
                'interface_driver': 'quantum.agent.linux.interface.'
                                    'OVSInterfaceDriver',
                'dhcp_driver': 'quantum.agent.linux.dhcp.Dnsmasq',
                'root_helper': 'sudo /usr/bin/quantum-rootwrap '
                               '/etc/quantum/rootwrap.conf'
            }

        ret = u.validate_config_data(unit, conf, 'DEFAULT', expected)
        if ret:
            message = "dhcp agent config error: {}".format(ret)
            amulet.raise_status(amulet.FAIL, msg=message)
501
502 def test_fwaas_driver_config(self):
503 """Verify the data in the fwaas driver config file. This is only
504 available since havana."""
505 if self._get_openstack_release() < self.precise_havana:
506 return
507
508 unit = self.quantum_gateway_sentry
509 conf = '/etc/neutron/fwaas_driver.ini'
510 expected = {
511 'driver': 'neutron.services.firewall.drivers.linux.'
512 'iptables_fwaas.IptablesFwaasDriver',
513 'enabled': 'True'
514 }
515
516 ret = u.validate_config_data(unit, conf, 'fwaas', expected)
517 if ret:
518 message = "fwaas driver config error: {}".format(ret)
519 amulet.raise_status(amulet.FAIL, msg=message)
520
    def test_l3_agent_config(self):
        """Verify the data in the l3 agent config file.

        Keystone credentials come from the nova-cc relation; the auth
        endpoint is resolved from the live service catalog.
        """
        unit = self.quantum_gateway_sentry
        nova_cc_relation = self.nova_cc_sentry.relation(
            'quantum-network-service',
            'quantum-gateway:quantum-network-service')
        ep = self.keystone.service_catalog.url_for(service_type='identity',
                                                   endpoint_type='publicURL')

        if self._get_openstack_release() >= self.precise_havana:
            conf = '/etc/neutron/l3_agent.ini'
            expected = {
                'interface_driver': 'neutron.agent.linux.interface.'
                                    'OVSInterfaceDriver',
                'auth_url': ep,
                'auth_region': 'RegionOne',
                'admin_tenant_name': 'services',
                'admin_user': 'quantum_s3_ec2_nova',
                'admin_password': nova_cc_relation['service_password'],
                'root_helper': 'sudo /usr/bin/neutron-rootwrap '
                               '/etc/neutron/rootwrap.conf',
                # These two keys are rendered on havana+ only.
                'ovs_use_veth': 'True',
                'handle_internal_only_routers': 'True'
            }
        else:
            conf = '/etc/quantum/l3_agent.ini'
            expected = {
                'interface_driver': 'quantum.agent.linux.interface.'
                                    'OVSInterfaceDriver',
                'auth_url': ep,
                'auth_region': 'RegionOne',
                'admin_tenant_name': 'services',
                'admin_user': 'quantum_s3_ec2_nova',
                'admin_password': nova_cc_relation['service_password'],
                'root_helper': 'sudo /usr/bin/quantum-rootwrap '
                               '/etc/quantum/rootwrap.conf'
            }

        ret = u.validate_config_data(unit, conf, 'DEFAULT', expected)
        if ret:
            message = "l3 agent config error: {}".format(ret)
            amulet.raise_status(amulet.FAIL, msg=message)
563
    def test_lbaas_agent_config(self):
        """Verify the data in the lbaas agent config file. This is only
        available since havana."""
        if self._get_openstack_release() < self.precise_havana:
            return

        unit = self.quantum_gateway_sentry
        conf = '/etc/neutron/lbaas_agent.ini'
        expected = {
            'DEFAULT': {
                'periodic_interval': '10',
                'interface_driver': 'neutron.agent.linux.interface.'
                                    'OVSInterfaceDriver',
                'ovs_use_veth': 'False',
                'device_driver': 'neutron.services.loadbalancer.drivers.'
                                 'haproxy.namespace_driver.HaproxyNSDriver'
            },
            'haproxy': {
                # $state_path is expanded by oslo.config at runtime.
                'loadbalancer_state_path': '$state_path/lbaas',
                'user_group': 'nogroup'
            }
        }

        # NOTE: iteritems is python 2 only.
        for section, pairs in expected.iteritems():
            ret = u.validate_config_data(unit, conf, section, pairs)
            if ret:
                message = "lbaas agent config error: {}".format(ret)
                amulet.raise_status(amulet.FAIL, msg=message)
592
    def test_metadata_agent_config(self):
        """Verify the data in the metadata agent config file.

        Credentials come from the nova-cc relation; the metadata IP is
        this unit's own private address (multi_host deployments run a
        metadata service on each gateway).
        """
        unit = self.quantum_gateway_sentry
        ep = self.keystone.service_catalog.url_for(service_type='identity',
                                                   endpoint_type='publicURL')
        quantum_gateway_relation = unit.relation('shared-db', 'mysql:shared-db')
        nova_cc_relation = self.nova_cc_sentry.relation(
            'quantum-network-service',
            'quantum-gateway:quantum-network-service')

        if self._get_openstack_release() >= self.precise_havana:
            conf = '/etc/neutron/metadata_agent.ini'
            expected = {
                'auth_url': ep,
                'auth_region': 'RegionOne',
                'admin_tenant_name': 'services',
                'admin_user': 'quantum_s3_ec2_nova',
                'admin_password': nova_cc_relation['service_password'],
                'root_helper': 'sudo neutron-rootwrap '
                               '/etc/neutron/rootwrap.conf',
                'state_path': '/var/lib/neutron',
                'nova_metadata_ip': quantum_gateway_relation['private-address'],
                'nova_metadata_port': '8775'
            }
        else:
            conf = '/etc/quantum/metadata_agent.ini'
            expected = {
                'auth_url': ep,
                'auth_region': 'RegionOne',
                'admin_tenant_name': 'services',
                'admin_user': 'quantum_s3_ec2_nova',
                'admin_password': nova_cc_relation['service_password'],
                'root_helper': 'sudo quantum-rootwrap '
                               '/etc/quantum/rootwrap.conf',
                'state_path': '/var/lib/quantum',
                'nova_metadata_ip': quantum_gateway_relation['private-address'],
                'nova_metadata_port': '8775'
            }

        ret = u.validate_config_data(unit, conf, 'DEFAULT', expected)
        if ret:
            message = "metadata agent config error: {}".format(ret)
            amulet.raise_status(amulet.FAIL, msg=message)
636
637 def test_metering_agent_config(self):
638 """Verify the data in the metering agent config file. This is only
639 available since havana."""
640 if self._get_openstack_release() < self.precise_havana:
641 return
642
643 unit = self.quantum_gateway_sentry
644 conf = '/etc/neutron/metering_agent.ini'
645 expected = {
646 'driver': 'neutron.services.metering.drivers.iptables.'
647 'iptables_driver.IptablesMeteringDriver',
648 'measure_interval': '30',
649 'report_interval': '300',
650 'interface_driver': 'neutron.agent.linux.interface.'
651 'OVSInterfaceDriver',
652 'use_namespaces': 'True'
653 }
654
655 ret = u.validate_config_data(unit, conf, 'DEFAULT', expected)
656 if ret:
657 message = "metering agent config error: {}".format(ret)
658
    def test_nova_config(self):
        """Verify the data in the nova config file.

        The gateway runs nova's metadata API only; database, rabbit and
        neutron/quantum credentials are all cross-checked against live
        relation data.
        """
        unit = self.quantum_gateway_sentry
        conf = '/etc/nova/nova.conf'
        mysql_relation = self.mysql_sentry.relation(
            'shared-db', 'quantum-gateway:shared-db')
        db_uri = "mysql://{}:{}@{}/{}".format('nova',
                                              mysql_relation['password'],
                                              mysql_relation['db_host'],
                                              'nova')
        rabbitmq_relation = self.rabbitmq_sentry.relation(
            'amqp', 'quantum-gateway:amqp')
        nova_cc_relation = self.nova_cc_sentry.relation(
            'quantum-network-service',
            'quantum-gateway:quantum-network-service')
        ep = self.keystone.service_catalog.url_for(service_type='identity',
                                                   endpoint_type='publicURL')

        if self._get_openstack_release() >= self.precise_havana:
            expected = {
                'logdir': '/var/log/nova',
                'state_path': '/var/lib/nova',
                'lock_path': '/var/lock/nova',
                'root_helper': 'sudo nova-rootwrap /etc/nova/rootwrap.conf',
                'verbose': 'False',
                'use_syslog': 'False',
                'api_paste_config': '/etc/nova/api-paste.ini',
                'enabled_apis': 'metadata',
                'multi_host': 'True',
                'sql_connection': db_uri,
                'service_neutron_metadata_proxy': 'True',
                'rabbit_userid': 'neutron',
                'rabbit_virtual_host': 'openstack',
                'rabbit_password': rabbitmq_relation['password'],
                'rabbit_host': rabbitmq_relation['hostname'],
                'network_api_class': 'nova.network.neutronv2.api.API',
                'neutron_auth_strategy': 'keystone',
                'neutron_url': nova_cc_relation['quantum_url'],
                'neutron_admin_tenant_name': 'services',
                'neutron_admin_username': 'quantum_s3_ec2_nova',
                'neutron_admin_password': nova_cc_relation['service_password'],
                'neutron_admin_auth_url': ep

            }
        else:
            # Pre-havana templates use the quantum_* option names.
            expected = {
                'logdir': '/var/log/nova',
                'state_path': '/var/lib/nova',
                'lock_path': '/var/lock/nova',
                'root_helper': 'sudo nova-rootwrap /etc/nova/rootwrap.conf',
                'verbose': 'True',
                'api_paste_config': '/etc/nova/api-paste.ini',
                'enabled_apis': 'metadata',
                'multi_host': 'True',
                'sql_connection': db_uri,
                'service_quantum_metadata_proxy': 'True',
                'rabbit_userid': 'neutron',
                'rabbit_virtual_host': 'openstack',
                'rabbit_password': rabbitmq_relation['password'],
                'rabbit_host': rabbitmq_relation['hostname'],
                'network_api_class': 'nova.network.quantumv2.api.API',
                'quantum_auth_strategy': 'keystone',
                'quantum_url': nova_cc_relation['quantum_url'],
                'quantum_admin_tenant_name': 'services',
                'quantum_admin_username': 'quantum_s3_ec2_nova',
                'quantum_admin_password': nova_cc_relation['service_password'],
                'quantum_admin_auth_url': ep
            }

        ret = u.validate_config_data(unit, conf, 'DEFAULT', expected)
        if ret:
            message = "nova config error: {}".format(ret)
            amulet.raise_status(amulet.FAIL, msg=message)
732
    def test_ovs_neutron_plugin_config(self):
        """Verify the data in the ovs neutron plugin config file. The ovs
        plugin is not used by default since icehouse."""
        if self._get_openstack_release() >= self.precise_icehouse:
            return

        unit = self.quantum_gateway_sentry
        # local_ip is expected to be this unit's own private address.
        quantum_gateway_relation = unit.relation('shared-db', 'mysql:shared-db')

        if self._get_openstack_release() >= self.precise_havana:
            conf = '/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini'
            expected = {
                'ovs': {
                    'local_ip': quantum_gateway_relation['private-address'],
                    'tenant_network_type': 'gre',
                    'enable_tunneling': 'True',
                    'tunnel_id_ranges': '1:1000'
                }
            }
            # The agent section is only rendered after havana.
            if self._get_openstack_release() > self.precise_havana:
                expected_additional = {
                    'agent': {
                        'polling_interval': '10',
                        'root_helper': 'sudo /usr/bin/neutron-rootwrap '
                                       '/etc/neutron/rootwrap.conf'
                    }
                }
                # NOTE: merging dicts by adding items() lists is python 2
                # only.
                expected = dict(expected.items() + expected_additional.items())
        else:
            conf = '/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini'
            expected = {
                'OVS': {
                    'local_ip': quantum_gateway_relation['private-address'],
                    'tenant_network_type': 'gre',
                    'enable_tunneling': 'True',
                    'tunnel_id_ranges': '1:1000'
                },
                'AGENT': {
                    'polling_interval': '10',
                    'root_helper': 'sudo /usr/bin/quantum-rootwrap '
                                   '/etc/quantum/rootwrap.conf'
                }
            }

        # NOTE: iteritems is python 2 only.
        for section, pairs in expected.iteritems():
            ret = u.validate_config_data(unit, conf, section, pairs)
            if ret:
                message = "ovs neutron plugin config error: {}".format(ret)
                amulet.raise_status(amulet.FAIL, msg=message)
782
783 def test_vpn_agent_config(self):
784 """Verify the data in the vpn agent config file. This isn't available
785 prior to havana."""
786 if self._get_openstack_release() < self.precise_havana:
787 return
788
789 unit = self.quantum_gateway_sentry
790 conf = '/etc/neutron/vpn_agent.ini'
791 expected = {
792 'vpnagent': {
793 'vpn_device_driver': 'neutron.services.vpn.device_drivers.'
794 'ipsec.OpenSwanDriver'
795 },
796 'ipsec': {
797 'ipsec_status_check_interval': '60'
798 }
799 }
800
801 for section, pairs in expected.iteritems():
802 ret = u.validate_config_data(unit, conf, section, pairs)
803 if ret:
804 message = "vpn agent config error: {}".format(ret)
805 amulet.raise_status(amulet.FAIL, msg=message)
806
807 def test_create_network(self):
808 """Create a network, verify that it exists, and then delete it."""
809 self.neutron.format = 'json'
810 net_name = 'ext_net'
811
812 #Verify that the network doesn't exist
813 networks = self.neutron.list_networks(name=net_name)
814 net_count = len(networks['networks'])
815 if net_count != 0:
816 msg = "Expected zero networks, found {}".format(net_count)
817 amulet.raise_status(amulet.FAIL, msg=msg)
818
819 # Create a network and verify that it exists
820 network = {'name': net_name}
821 self.neutron.create_network({'network':network})
822
823 networks = self.neutron.list_networks(name=net_name)
824 net_len = len(networks['networks'])
825 if net_len != 1:
826 msg = "Expected 1 network, found {}".format(net_len)
827 amulet.raise_status(amulet.FAIL, msg=msg)
828
829 network = networks['networks'][0]
830 if network['name'] != net_name:
831 amulet.raise_status(amulet.FAIL, msg="network ext_net not found")
832
833 #Cleanup
834 self.neutron.delete_network(network['id'])
0835
=== added directory 'tests/charmhelpers'
=== added file 'tests/charmhelpers/__init__.py'
=== added directory 'tests/charmhelpers/contrib'
=== added file 'tests/charmhelpers/contrib/__init__.py'
=== added directory 'tests/charmhelpers/contrib/amulet'
=== added file 'tests/charmhelpers/contrib/amulet/__init__.py'
=== added file 'tests/charmhelpers/contrib/amulet/deployment.py'
--- tests/charmhelpers/contrib/amulet/deployment.py 1970-01-01 00:00:00 +0000
+++ tests/charmhelpers/contrib/amulet/deployment.py 2014-07-17 15:16:47 +0000
@@ -0,0 +1,63 @@
1import amulet
2import re
3
4
class AmuletDeployment(object):
    """This class provides generic Amulet deployment and test runner
    methods."""

    def __init__(self, series):
        """Initialize the deployment environment for the given series."""
        self.series = series
        self.d = amulet.Deployment(series=self.series)

    def _get_charm_name(self, service_name):
        """Gets the charm name from the service name. Unique service names
        can be specified with a '-service#' suffix (e.g. mysql-service1)."""
        # Raw strings avoid the invalid '\-' escape in the original sub().
        if re.match(r"^.*-service\d{1,3}$", service_name):
            return re.sub(r'-service\d{1,3}$', '', service_name)
        return service_name

    def _add_services(self, this_service, other_services):
        """Add services to the deployment.

        this_service is the local charm that we're focused on testing;
        other_services come from the charm store.
        """
        name, units = range(2)

        # The local charm is added by name only; no store URL is needed
        # (the original computed an unused charm_name here).
        self.d.add(this_service[name],
                   units=this_service[units])

        for svc in other_services:
            charm_name = self._get_charm_name(svc[name])
            self.d.add(svc[name],
                       charm='cs:{}/{}'.format(self.series, charm_name),
                       units=svc[units])

    def _add_relations(self, relations):
        """Add all of the relations for the services."""
        # items() iterates identically on python 2 and also works on 3.
        for k, v in relations.items():
            self.d.relate(k, v)

    def _configure_services(self, configs):
        """Configure all of the services."""
        for service, config in configs.items():
            self.d.configure(service, config)

    def _deploy(self):
        """Deploy environment and wait for all hooks to finish executing."""
        try:
            self.d.setup()
            self.d.sentry.wait()
        except amulet.helpers.TimeoutError:
            amulet.raise_status(amulet.FAIL, msg="Deployment timed out")
        # Any other exception propagates naturally; the previous
        # 'except: raise' clause was a no-op and has been removed.

    def run_tests(self):
        """Run all of the methods that are prefixed with 'test_'."""
        for test in dir(self):
            if test.startswith('test_'):
                getattr(self, test)()
064
=== added file 'tests/charmhelpers/contrib/amulet/utils.py'
--- tests/charmhelpers/contrib/amulet/utils.py 1970-01-01 00:00:00 +0000
+++ tests/charmhelpers/contrib/amulet/utils.py 2014-07-17 15:16:47 +0000
@@ -0,0 +1,157 @@
1import ConfigParser
2import io
3import logging
4import re
5import sys
6from time import sleep
7
8
class AmuletUtils(object):
    """Common utility functions used by Amulet tests.

    Methods that validate data return None on success and an error
    string describing the first mismatch on failure.
    """

    def __init__(self, log_level=logging.ERROR):
        self.log = self.get_logger(level=log_level)

    def get_logger(self, name="amulet-logger", level=logging.DEBUG):
        """Get a logger object that will log to stdout."""
        # The original aliased 'log = logging' for no benefit; use the
        # logging module directly.
        logger = logging.getLogger(name)
        fmt = logging.Formatter(
            "%(asctime)s %(funcName)s %(levelname)s: %(message)s")

        handler = logging.StreamHandler(stream=sys.stdout)
        handler.setLevel(level)
        handler.setFormatter(fmt)

        logger.addHandler(handler)
        logger.setLevel(level)

        return logger

    def valid_ip(self, ip):
        """Return True if ip looks like a dotted-quad IPv4 address.

        NOTE: only the format is checked; octets above 255 are still
        accepted (behavior preserved from the original).
        """
        return re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$",
                        ip) is not None

    def valid_url(self, url):
        """Return True if url is a syntactically valid http(s)/ftp(s) URL."""
        p = re.compile(
            r'^(?:http|ftp)s?://'
            r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # flake8: noqa
            r'localhost|'
            r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
            r'(?::\d+)?'
            r'(?:/?|[/?]\S+)$',
            re.IGNORECASE)
        return p.match(url) is not None

    def validate_services(self, commands):
        """Verify the specified services are running on the corresponding
        service units.

        commands maps a sentry unit -> list of shell commands expected
        to exit 0 on that unit.
        """
        # items() keeps this usable under both Python 2 and 3.
        for unit, cmds in commands.items():
            for cmd in cmds:
                output, code = unit.run(cmd)
                if code != 0:
                    return "command `{}` returned {}".format(cmd, str(code))
        return None

    def _get_config(self, unit, filename):
        """Get a ConfigParser object for parsing a unit's config file."""
        # NOTE(review): ConfigParser is the Python2 module name; this
        # helper (like the original) only runs under Python 2.
        file_contents = unit.file_contents(filename)
        config = ConfigParser.ConfigParser()
        config.readfp(io.StringIO(file_contents))
        return config

    def validate_config_data(self, sentry_unit, config_file, section, expected):
        """Verify that the specified section of the config file contains
        the expected option key:value pairs."""
        config = self._get_config(sentry_unit, config_file)

        # ConfigParser does not report the implicit DEFAULT section via
        # has_section(), so it is exempt from the existence check.
        if section != 'DEFAULT' and not config.has_section(section):
            return "section [{}] does not exist".format(section)

        for k in expected.keys():
            if not config.has_option(section, k):
                return "section [{}] is missing option {}".format(section, k)
            if config.get(section, k) != expected[k]:
                return "section [{}] {}:{} != expected {}:{}".format(section,
                       k, config.get(section, k), k, expected[k])
        return None

    def _validate_dict_data(self, expected, actual):
        """Compare expected dictionary data vs actual dictionary data.

        The values in the 'expected' dictionary can be strings, bools,
        ints, longs, or a callable that evaluates the actual value and
        returns a bool.
        """
        for k, v in expected.items():
            if k not in actual:
                return "key '{}' does not exist".format(k)
            # NOTE(review): basestring/long are Python2-only names; this
            # method (like the original) only runs under Python 2.
            if isinstance(v, (basestring, bool, int, long)):
                if v != actual[k]:
                    return "{}:{}".format(k, actual[k])
            elif not v(actual[k]):
                return "{}:{}".format(k, actual[k])
        return None

    def validate_relation_data(self, sentry_unit, relation, expected):
        """Validate actual relation data based on expected relation data."""
        actual = sentry_unit.relation(relation[0], relation[1])
        self.log.debug('actual: {}'.format(repr(actual)))
        return self._validate_dict_data(expected, actual)

    def _validate_list_data(self, expected, actual):
        """Compare expected list vs actual list data."""
        for e in expected:
            if e not in actual:
                return "expected item {} not found in actual list".format(e)
        return None

    def not_null(self, string):
        """Return True if string is not None.

        Uses the identity test ('is not None') rather than the
        original's '!= None' comparison, per PEP 8.
        """
        return string is not None

    def _get_file_mtime(self, sentry_unit, filename):
        """Get last modification time of file."""
        return sentry_unit.file_stat(filename)['mtime']

    def _get_dir_mtime(self, sentry_unit, directory):
        """Get last modification time of directory."""
        return sentry_unit.directory_stat(directory)['mtime']

    def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
        """Determine start time of the process based on the last modification
        time of the /proc/pid directory. If pgrep_full is True, the process
        name is matched against the full command line."""
        if pgrep_full:
            cmd = 'pgrep -o -f {}'.format(service)
        else:
            cmd = 'pgrep -o {}'.format(service)
        # pgrep -o prints the oldest matching pid; run() returns
        # (output, return-code).
        proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip())
        return self._get_dir_mtime(sentry_unit, proc_dir)

    def service_restarted(self, sentry_unit, service, filename,
                          pgrep_full=False, sleep_time=20):
        """Compare a service's start time vs a file's last modification time
        (such as a config file for that service) to determine if the service
        has been restarted."""
        # Give the remote unit time to finish restarting before sampling.
        sleep(sleep_time)
        return (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
                self._get_file_mtime(sentry_unit, filename))

    def relation_error(self, name, data):
        """Standard message for unexpected relation data."""
        return 'unexpected relation data in {} - {}'.format(name, data)

    def endpoint_error(self, name, data):
        """Standard message for unexpected endpoint data."""
        return 'unexpected endpoint data in {} - {}'.format(name, data)
0158
=== added directory 'tests/charmhelpers/contrib/openstack'
=== added file 'tests/charmhelpers/contrib/openstack/__init__.py'
=== added directory 'tests/charmhelpers/contrib/openstack/amulet'
=== added file 'tests/charmhelpers/contrib/openstack/amulet/__init__.py'
=== added file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py'
--- tests/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
+++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2014-07-17 15:16:47 +0000
@@ -0,0 +1,57 @@
1from charmhelpers.contrib.amulet.deployment import (
2 AmuletDeployment
3)
4
5
class OpenStackAmuletDeployment(AmuletDeployment):
    """This class inherits from AmuletDeployment and has additional support
    that is specifically for use by OpenStack charms."""

    def __init__(self, series, openstack=None, source=None):
        """Initialize the deployment environment.

        openstack is an 'openstack-origin' value (e.g.
        'cloud:precise-havana'); source is a 'source' value for the
        non-OpenStack support charms.  Either may be None.
        """
        super(OpenStackAmuletDeployment, self).__init__(series)
        self.openstack = openstack
        self.source = source

    def _add_services(self, this_service, other_services):
        """Add services to the deployment and set openstack-origin.

        Charms listed in use_source take a 'source' option; all other
        charms take 'openstack-origin'.
        """
        super(OpenStackAmuletDeployment, self)._add_services(this_service,
                                                             other_services)
        name = 0
        # Build a new list: the original appended this_service to
        # other_services, silently mutating the caller's list.
        services = other_services + [this_service]
        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']

        if self.openstack:
            for svc in services:
                charm_name = self._get_charm_name(svc[name])
                if charm_name not in use_source:
                    config = {'openstack-origin': self.openstack}
                    self.d.configure(svc[name], config)

        if self.source:
            for svc in services:
                charm_name = self._get_charm_name(svc[name])
                if charm_name in use_source:
                    config = {'source': self.source}
                    self.d.configure(svc[name], config)

    def _configure_services(self, configs):
        """Configure all of the services.

        configs maps service name -> dict of charm config options.
        """
        # items() keeps this usable under both Python 2 and 3.
        for service, config in configs.items():
            self.d.configure(service, config)

    def _get_openstack_release(self):
        """Return an integer representing the enum value of the openstack
        release.

        The enum attributes (precise_essex .. trusty_icehouse) are also
        set on the instance so tests can compare against them by name.
        """
        self.precise_essex, self.precise_folsom, self.precise_grizzly, \
            self.precise_havana, self.precise_icehouse, \
            self.trusty_icehouse = range(6)
        releases = {
            ('precise', None): self.precise_essex,
            ('precise', 'cloud:precise-folsom'): self.precise_folsom,
            ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
            ('precise', 'cloud:precise-havana'): self.precise_havana,
            ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
            ('trusty', None): self.trusty_icehouse}
        return releases[(self.series, self.openstack)]
058
=== added file 'tests/charmhelpers/contrib/openstack/amulet/utils.py'
--- tests/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
+++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2014-07-17 15:16:47 +0000
@@ -0,0 +1,253 @@
1import logging
2import os
3import time
4import urllib
5
6import glanceclient.v1.client as glance_client
7import keystoneclient.v2_0 as keystone_client
8import novaclient.v1_1.client as nova_client
9
10from charmhelpers.contrib.amulet.utils import (
11 AmuletUtils
12)
13
14DEBUG = logging.DEBUG
15ERROR = logging.ERROR
16
17
class OpenStackAmuletUtils(AmuletUtils):
    """This class inherits from AmuletUtils and has additional support
    that is specifically for use by OpenStack charms.

    Validation methods return None on success and an error string on
    failure.
    """

    def __init__(self, log_level=ERROR):
        """Initialize the deployment environment."""
        super(OpenStackAmuletUtils, self).__init__(log_level)

    def validate_endpoint_data(self, endpoints, admin_port, internal_port,
                               public_port, expected):
        """Validate actual endpoint data vs expected endpoint data. The ports
        are used to find the matching endpoint."""
        found = False
        for ep in endpoints:
            self.log.debug('endpoint: {}'.format(repr(ep)))
            # Ports are matched by substring against the endpoint URLs.
            if admin_port in ep.adminurl and internal_port in ep.internalurl \
                    and public_port in ep.publicurl:
                found = True
                actual = {'id': ep.id,
                          'region': ep.region,
                          'adminurl': ep.adminurl,
                          'internalurl': ep.internalurl,
                          'publicurl': ep.publicurl,
                          'service_id': ep.service_id}
                ret = self._validate_dict_data(expected, actual)
                if ret:
                    return 'unexpected endpoint data - {}'.format(ret)

        if not found:
            return 'endpoint not found'

    def validate_svc_catalog_endpoint_data(self, expected, actual):
        """Validate a list of actual service catalog endpoints vs a list of
        expected service catalog endpoints."""
        self.log.debug('actual: {}'.format(repr(actual)))
        # Initialize so an empty 'expected' dict returns None instead of
        # raising NameError at 'return ret' (bug in the original).
        ret = None
        for k, v in expected.items():
            if k not in actual:
                return "endpoint {} does not exist".format(k)
            ret = self._validate_dict_data(expected[k][0], actual[k][0])
            if ret:
                return self.endpoint_error(k, ret)
        return ret

    def validate_tenant_data(self, expected, actual):
        """Validate a list of actual tenant data vs list of expected tenant
        data."""
        self.log.debug('actual: {}'.format(repr(actual)))
        ret = None  # avoids NameError when 'expected' is empty
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'description': act.description,
                     'name': act.name, 'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected tenant data - {}".format(ret)
            if not found:
                return "tenant {} does not exist".format(e['name'])
        return ret

    def validate_role_data(self, expected, actual):
        """Validate a list of actual role data vs a list of expected role
        data."""
        self.log.debug('actual: {}'.format(repr(actual)))
        ret = None  # avoids NameError when 'expected' is empty
        for e in expected:
            found = False
            for act in actual:
                a = {'name': act.name, 'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected role data - {}".format(ret)
            if not found:
                return "role {} does not exist".format(e['name'])
        return ret

    def validate_user_data(self, expected, actual):
        """Validate a list of actual user data vs a list of expected user
        data."""
        self.log.debug('actual: {}'.format(repr(actual)))
        ret = None  # avoids NameError when 'expected' is empty
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'name': act.name,
                     'email': act.email, 'tenantId': act.tenantId,
                     'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected user data - {}".format(ret)
            if not found:
                return "user {} does not exist".format(e['name'])
        return ret

    def validate_flavor_data(self, expected, actual):
        """Validate a list of actual flavors vs a list of expected flavors."""
        self.log.debug('actual: {}'.format(repr(actual)))
        act = [a.name for a in actual]
        return self._validate_list_data(expected, act)

    def tenant_exists(self, keystone, tenant):
        """Return True if tenant exists"""
        return tenant in [t.name for t in keystone.tenants.list()]

    def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                    tenant):
        """Authenticates admin user with the keystone admin endpoint."""
        # The unit's private address is recovered from its shared-db
        # relation with mysql rather than from juju status.
        service_ip = \
            keystone_sentry.relation('shared-db',
                                     'mysql:shared-db')['private-address']
        ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_keystone_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with the keystone public endpoint."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_glance_admin(self, keystone):
        """Authenticates admin user with glance."""
        ep = keystone.service_catalog.url_for(service_type='image',
                                              endpoint_type='adminURL')
        return glance_client.Client(ep, token=keystone.auth_token)

    def authenticate_nova_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with nova-api."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return nova_client.Client(username=user, api_key=password,
                                  project_id=tenant, auth_url=ep)

    def create_cirros_image(self, glance, image_name):
        """Download the latest cirros image and upload it to glance.

        Returns the glance image on success, None if the image does not
        reach 'active' status within ~30 seconds.
        """
        http_proxy = os.getenv('AMULET_HTTP_PROXY')
        self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
        if http_proxy:
            proxies = {'http': http_proxy}
            opener = urllib.FancyURLopener(proxies)
        else:
            opener = urllib.FancyURLopener()

        f = opener.open("http://download.cirros-cloud.net/version/released")
        version = f.read().strip()
        f.close()

        # Keep the remote file name separate from the local cache path.
        # The original passed the local 'tests/...' path into the URL,
        # producing a non-existent download location
        # (http://download.cirros-cloud.net/<ver>/tests/cirros-...).
        cirros_src = "cirros-{}-x86_64-disk.img".format(version)
        local_path = os.path.join('tests', cirros_src)

        if not os.path.exists(local_path):
            cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
                                                  version, cirros_src)
            opener.retrieve(cirros_url, local_path)

        with open(local_path) as f:
            image = glance.images.create(name=image_name, is_public=True,
                                         disk_format='qcow2',
                                         container_format='bare', data=f)

        # Poll until the image goes active, at most 10 x 3 seconds.
        count = 1
        status = image.status
        while status != 'active' and count < 10:
            time.sleep(3)
            image = glance.images.get(image.id)
            status = image.status
            self.log.debug('image status: {}'.format(status))
            count += 1

        if status != 'active':
            self.log.error('image creation timed out')
            return None

        return image

    def delete_image(self, glance, image):
        """Delete the specified image.

        Returns True when the image count has dropped by one, False on
        timeout (10 x 3 seconds).
        """
        num_before = len(list(glance.images.list()))
        glance.images.delete(image)

        count = 1
        num_after = len(list(glance.images.list()))
        while num_after != (num_before - 1) and count < 10:
            time.sleep(3)
            num_after = len(list(glance.images.list()))
            self.log.debug('number of images: {}'.format(num_after))
            count += 1

        if num_after != (num_before - 1):
            self.log.error('image deletion timed out')
            return False

        return True

    def create_instance(self, nova, image_name, instance_name, flavor):
        """Create the specified instance.

        Returns the nova server on success, None if it does not reach
        ACTIVE within ~3 minutes (60 x 3 seconds).
        """
        image = nova.images.find(name=image_name)
        flavor = nova.flavors.find(name=flavor)
        instance = nova.servers.create(name=instance_name, image=image,
                                       flavor=flavor)

        count = 1
        status = instance.status
        while status != 'ACTIVE' and count < 60:
            time.sleep(3)
            instance = nova.servers.get(instance.id)
            status = instance.status
            self.log.debug('instance status: {}'.format(status))
            count += 1

        if status != 'ACTIVE':
            self.log.error('instance creation timed out')
            return None

        return instance

    def delete_instance(self, nova, instance):
        """Delete the specified instance.

        Returns True when the server count has dropped by one, False on
        timeout (10 x 3 seconds).
        """
        num_before = len(list(nova.servers.list()))
        nova.servers.delete(instance)

        count = 1
        num_after = len(list(nova.servers.list()))
        while num_after != (num_before - 1) and count < 10:
            time.sleep(3)
            num_after = len(list(nova.servers.list()))
            self.log.debug('number of instances: {}'.format(num_after))
            count += 1

        if num_after != (num_before - 1):
            self.log.error('instance deletion timed out')
            return False

        return True

Subscribers

People subscribed via source and target branches