Merge lp:~corey.bryant/charms/trusty/quantum-gateway/amulet-basic into lp:~openstack-charmers/charms/trusty/quantum-gateway/next

Proposed by Corey Bryant
Status: Merged
Merged at revision: 54
Proposed branch: lp:~corey.bryant/charms/trusty/quantum-gateway/amulet-basic
Merge into: lp:~openstack-charmers/charms/trusty/quantum-gateway/next
Diff against target: 2073 lines (+1690/-80)
24 files modified
Makefile (+12/-4)
charm-helpers-hooks.yaml (+10/-0)
charm-helpers-sync.yaml (+0/-10)
charm-helpers-tests.yaml (+5/-0)
hooks/charmhelpers/contrib/hahelpers/cluster.py (+1/-0)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+26/-7)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+105/-3)
hooks/charmhelpers/contrib/openstack/context.py (+10/-8)
hooks/charmhelpers/contrib/openstack/templating.py (+22/-23)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+1/-1)
hooks/charmhelpers/core/hookenv.py (+5/-4)
hooks/charmhelpers/core/host.py (+7/-5)
hooks/charmhelpers/fetch/__init__.py (+23/-15)
tests/00-setup (+10/-0)
tests/12-basic-precise-grizzly (+11/-0)
tests/13-basic-precise-havana (+11/-0)
tests/14-basic-precise-icehouse (+11/-0)
tests/15-basic-trusty-icehouse (+9/-0)
tests/README (+47/-0)
tests/basic_deployment.py (+834/-0)
tests/charmhelpers/contrib/amulet/deployment.py (+63/-0)
tests/charmhelpers/contrib/amulet/utils.py (+157/-0)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+57/-0)
tests/charmhelpers/contrib/openstack/amulet/utils.py (+253/-0)
To merge this branch: bzr merge lp:~corey.bryant/charms/trusty/quantum-gateway/amulet-basic
Reviewer Review Type Date Requested Status
James Page Approve
Review via email: mp+226489@code.launchpad.net
To post a comment you must log in.
Revision history for this message
James Page (james-page) wrote :

Not relevant for essex and folsom - I'd just drop the tests.

56. By Corey Bryant

Add Amulet basic tests

Revision history for this message
Corey Bryant (corey.bryant) wrote :

Ok. I pushed a new version with essex/folsom tests dropped.

Revision history for this message
Corey Bryant (corey.bryant) wrote :

> Not relevant for essex and folsom - I'd just drop the tests.

Ok. I pushed a new version with essex/folsom tests dropped.

Revision history for this message
James Page (james-page) wrote :

I'm going to merge this as I think the tests are all 100% OK; however I do keep hitting a race where sentry.wait() in _deploy is not actually waiting for all hook execution to complete; resulting in the neutron-server on the nova-cc being restarted and connections from the client in the tests failing.

review: Approve
Revision history for this message
Corey Bryant (corey.bryant) wrote :

Thanks James. I'll add support to charm-helpers for the sentry.wait() issue.

Revision history for this message
Stuart Bishop (stub) wrote :

sentry.wait() is likely Bug #1254766

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1=== modified file 'Makefile'
2--- Makefile 2014-05-21 10:07:03 +0000
3+++ Makefile 2014-07-17 15:16:47 +0000
4@@ -3,15 +3,23 @@
5
6 lint:
7 @flake8 --exclude hooks/charmhelpers hooks
8- @flake8 --exclude hooks/charmhelpers unit_tests
9+ @flake8 --exclude hooks/charmhelpers unit_tests tests
10 @charm proof
11
12+unit_test:
13+ @echo Starting unit tests...
14+ @$(PYTHON) /usr/bin/nosetests --nologcapture unit_tests
15+
16 test:
17- @echo Starting tests...
18- @$(PYTHON) /usr/bin/nosetests --nologcapture unit_tests
19+ @echo Starting Amulet tests...
20+ # coreycb note: The -v should only be temporary until Amulet sends
21+ # raise_status() messages to stderr:
22+ # https://bugs.launchpad.net/amulet/+bug/1320357
23+ @juju test -v -p AMULET_HTTP_PROXY
24
25 sync:
26- @charm-helper-sync -c charm-helpers-sync.yaml
27+ @charm-helper-sync -c charm-helpers-hooks.yaml
28+ @charm-helper-sync -c charm-helpers-tests.yaml
29
30 publish: lint test
31 bzr push lp:charms/quantum-gateway
32
33=== added file 'charm-helpers-hooks.yaml'
34--- charm-helpers-hooks.yaml 1970-01-01 00:00:00 +0000
35+++ charm-helpers-hooks.yaml 2014-07-17 15:16:47 +0000
36@@ -0,0 +1,10 @@
37+branch: lp:charm-helpers
38+destination: hooks/charmhelpers
39+include:
40+ - core
41+ - fetch
42+ - contrib.openstack
43+ - contrib.hahelpers
44+ - contrib.network.ovs
45+ - contrib.storage.linux
46+ - payload.execd
47
48=== removed file 'charm-helpers-sync.yaml'
49--- charm-helpers-sync.yaml 2014-03-27 11:20:28 +0000
50+++ charm-helpers-sync.yaml 1970-01-01 00:00:00 +0000
51@@ -1,10 +0,0 @@
52-branch: lp:charm-helpers
53-destination: hooks/charmhelpers
54-include:
55- - core
56- - fetch
57- - contrib.openstack
58- - contrib.hahelpers
59- - contrib.network.ovs
60- - contrib.storage.linux
61- - payload.execd
62
63=== added file 'charm-helpers-tests.yaml'
64--- charm-helpers-tests.yaml 1970-01-01 00:00:00 +0000
65+++ charm-helpers-tests.yaml 2014-07-17 15:16:47 +0000
66@@ -0,0 +1,5 @@
67+branch: lp:charm-helpers
68+destination: tests/charmhelpers
69+include:
70+ - contrib.amulet
71+ - contrib.openstack.amulet
72
73=== modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py'
74--- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-03-27 11:20:28 +0000
75+++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-07-17 15:16:47 +0000
76@@ -170,6 +170,7 @@
77
78 :configs : OSTemplateRenderer: A config tempating object to inspect for
79 a complete https context.
80+
81 :vip_setting: str: Setting in charm config that specifies
82 VIP address.
83 '''
84
85=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
86--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-06-24 13:40:39 +0000
87+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-07-17 15:16:47 +0000
88@@ -7,19 +7,38 @@
89 """This class inherits from AmuletDeployment and has additional support
90 that is specifically for use by OpenStack charms."""
91
92- def __init__(self, series=None, openstack=None):
93+ def __init__(self, series, openstack=None, source=None):
94 """Initialize the deployment environment."""
95- self.openstack = None
96 super(OpenStackAmuletDeployment, self).__init__(series)
97-
98- if openstack:
99- self.openstack = openstack
100+ self.openstack = openstack
101+ self.source = source
102+
103+ def _add_services(self, this_service, other_services):
104+ """Add services to the deployment and set openstack-origin."""
105+ super(OpenStackAmuletDeployment, self)._add_services(this_service,
106+ other_services)
107+ name = 0
108+ services = other_services
109+ services.append(this_service)
110+ use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']
111+
112+ if self.openstack:
113+ for svc in services:
114+ charm_name = self._get_charm_name(svc[name])
115+ if charm_name not in use_source:
116+ config = {'openstack-origin': self.openstack}
117+ self.d.configure(svc[name], config)
118+
119+ if self.source:
120+ for svc in services:
121+ charm_name = self._get_charm_name(svc[name])
122+ if charm_name in use_source:
123+ config = {'source': self.source}
124+ self.d.configure(svc[name], config)
125
126 def _configure_services(self, configs):
127 """Configure all of the services."""
128 for service, config in configs.iteritems():
129- if service == self.this_service:
130- config['openstack-origin'] = self.openstack
131 self.d.configure(service, config)
132
133 def _get_openstack_release(self):
134
135=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
136--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-06-24 13:40:39 +0000
137+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-07-17 15:16:47 +0000
138@@ -1,4 +1,7 @@
139 import logging
140+import os
141+import time
142+import urllib
143
144 import glanceclient.v1.client as glance_client
145 import keystoneclient.v2_0 as keystone_client
146@@ -71,7 +74,7 @@
147 if ret:
148 return "unexpected tenant data - {}".format(ret)
149 if not found:
150- return "tenant {} does not exist".format(e.name)
151+ return "tenant {} does not exist".format(e['name'])
152 return ret
153
154 def validate_role_data(self, expected, actual):
155@@ -88,7 +91,7 @@
156 if ret:
157 return "unexpected role data - {}".format(ret)
158 if not found:
159- return "role {} does not exist".format(e.name)
160+ return "role {} does not exist".format(e['name'])
161 return ret
162
163 def validate_user_data(self, expected, actual):
164@@ -107,7 +110,7 @@
165 if ret:
166 return "unexpected user data - {}".format(ret)
167 if not found:
168- return "user {} does not exist".format(e.name)
169+ return "user {} does not exist".format(e['name'])
170 return ret
171
172 def validate_flavor_data(self, expected, actual):
173@@ -149,3 +152,102 @@
174 endpoint_type='publicURL')
175 return nova_client.Client(username=user, api_key=password,
176 project_id=tenant, auth_url=ep)
177+
178+ def create_cirros_image(self, glance, image_name):
179+ """Download the latest cirros image and upload it to glance."""
180+ http_proxy = os.getenv('AMULET_HTTP_PROXY')
181+ self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
182+ if http_proxy:
183+ proxies = {'http': http_proxy}
184+ opener = urllib.FancyURLopener(proxies)
185+ else:
186+ opener = urllib.FancyURLopener()
187+
188+ f = opener.open("http://download.cirros-cloud.net/version/released")
189+ version = f.read().strip()
190+ cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version)
191+
192+ if not os.path.exists(cirros_img):
193+ cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
194+ version, cirros_img)
195+ opener.retrieve(cirros_url, cirros_img)
196+ f.close()
197+
198+ with open(cirros_img) as f:
199+ image = glance.images.create(name=image_name, is_public=True,
200+ disk_format='qcow2',
201+ container_format='bare', data=f)
202+ count = 1
203+ status = image.status
204+ while status != 'active' and count < 10:
205+ time.sleep(3)
206+ image = glance.images.get(image.id)
207+ status = image.status
208+ self.log.debug('image status: {}'.format(status))
209+ count += 1
210+
211+ if status != 'active':
212+ self.log.error('image creation timed out')
213+ return None
214+
215+ return image
216+
217+ def delete_image(self, glance, image):
218+ """Delete the specified image."""
219+ num_before = len(list(glance.images.list()))
220+ glance.images.delete(image)
221+
222+ count = 1
223+ num_after = len(list(glance.images.list()))
224+ while num_after != (num_before - 1) and count < 10:
225+ time.sleep(3)
226+ num_after = len(list(glance.images.list()))
227+ self.log.debug('number of images: {}'.format(num_after))
228+ count += 1
229+
230+ if num_after != (num_before - 1):
231+ self.log.error('image deletion timed out')
232+ return False
233+
234+ return True
235+
236+ def create_instance(self, nova, image_name, instance_name, flavor):
237+ """Create the specified instance."""
238+ image = nova.images.find(name=image_name)
239+ flavor = nova.flavors.find(name=flavor)
240+ instance = nova.servers.create(name=instance_name, image=image,
241+ flavor=flavor)
242+
243+ count = 1
244+ status = instance.status
245+ while status != 'ACTIVE' and count < 60:
246+ time.sleep(3)
247+ instance = nova.servers.get(instance.id)
248+ status = instance.status
249+ self.log.debug('instance status: {}'.format(status))
250+ count += 1
251+
252+ if status != 'ACTIVE':
253+ self.log.error('instance creation timed out')
254+ return None
255+
256+ return instance
257+
258+ def delete_instance(self, nova, instance):
259+ """Delete the specified instance."""
260+ num_before = len(list(nova.servers.list()))
261+ nova.servers.delete(instance)
262+
263+ count = 1
264+ num_after = len(list(nova.servers.list()))
265+ while num_after != (num_before - 1) and count < 10:
266+ time.sleep(3)
267+ num_after = len(list(nova.servers.list()))
268+ self.log.debug('number of instances: {}'.format(num_after))
269+ count += 1
270+
271+ if num_after != (num_before - 1):
272+ self.log.error('instance deletion timed out')
273+ return False
274+
275+ return True
276
277=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
278--- hooks/charmhelpers/contrib/openstack/context.py 2014-06-24 13:40:39 +0000
279+++ hooks/charmhelpers/contrib/openstack/context.py 2014-07-17 15:16:47 +0000
280@@ -426,12 +426,13 @@
281 """
282 Generates a context for an apache vhost configuration that configures
283 HTTPS reverse proxying for one or many endpoints. Generated context
284- looks something like:
285- {
286- 'namespace': 'cinder',
287- 'private_address': 'iscsi.mycinderhost.com',
288- 'endpoints': [(8776, 8766), (8777, 8767)]
289- }
290+ looks something like::
291+
292+ {
293+ 'namespace': 'cinder',
294+ 'private_address': 'iscsi.mycinderhost.com',
295+ 'endpoints': [(8776, 8766), (8777, 8767)]
296+ }
297
298 The endpoints list consists of a tuples mapping external ports
299 to internal ports.
300@@ -641,7 +642,7 @@
301 The subordinate interface allows subordinates to export their
302 configuration requirements to the principle for multiple config
303 files and multiple serivces. Ie, a subordinate that has interfaces
304- to both glance and nova may export to following yaml blob as json:
305+ to both glance and nova may export to following yaml blob as json::
306
307 glance:
308 /etc/glance/glance-api.conf:
309@@ -660,7 +661,8 @@
310
311 It is then up to the principle charms to subscribe this context to
312 the service+config file it is interestd in. Configuration data will
313- be available in the template context, in glance's case, as:
314+ be available in the template context, in glance's case, as::
315+
316 ctxt = {
317 ... other context ...
318 'subordinate_config': {
319
320=== modified file 'hooks/charmhelpers/contrib/openstack/templating.py'
321--- hooks/charmhelpers/contrib/openstack/templating.py 2013-09-25 15:27:00 +0000
322+++ hooks/charmhelpers/contrib/openstack/templating.py 2014-07-17 15:16:47 +0000
323@@ -30,17 +30,17 @@
324 loading dir.
325
326 A charm may also ship a templates dir with this module
327- and it will be appended to the bottom of the search list, eg:
328- hooks/charmhelpers/contrib/openstack/templates.
329-
330- :param templates_dir: str: Base template directory containing release
331- sub-directories.
332- :param os_release : str: OpenStack release codename to construct template
333- loader.
334-
335- :returns : jinja2.ChoiceLoader constructed with a list of
336- jinja2.FilesystemLoaders, ordered in descending
337- order by OpenStack release.
338+ and it will be appended to the bottom of the search list, eg::
339+
340+ hooks/charmhelpers/contrib/openstack/templates
341+
342+ :param templates_dir (str): Base template directory containing release
343+ sub-directories.
344+ :param os_release (str): OpenStack release codename to construct template
345+ loader.
346+ :returns: jinja2.ChoiceLoader constructed with a list of
347+ jinja2.FilesystemLoaders, ordered in descending
348+ order by OpenStack release.
349 """
350 tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
351 for rel in OPENSTACK_CODENAMES.itervalues()]
352@@ -111,7 +111,8 @@
353 and ease the burden of managing config templates across multiple OpenStack
354 releases.
355
356- Basic usage:
357+ Basic usage::
358+
359 # import some common context generates from charmhelpers
360 from charmhelpers.contrib.openstack import context
361
362@@ -131,21 +132,19 @@
363 # write out all registered configs
364 configs.write_all()
365
366- Details:
367+ **OpenStack Releases and template loading**
368
369- OpenStack Releases and template loading
370- ---------------------------------------
371 When the object is instantiated, it is associated with a specific OS
372 release. This dictates how the template loader will be constructed.
373
374 The constructed loader attempts to load the template from several places
375 in the following order:
376- - from the most recent OS release-specific template dir (if one exists)
377- - the base templates_dir
378- - a template directory shipped in the charm with this helper file.
379-
380-
381- For the example above, '/tmp/templates' contains the following structure:
382+ - from the most recent OS release-specific template dir (if one exists)
383+ - the base templates_dir
384+ - a template directory shipped in the charm with this helper file.
385+
386+ For the example above, '/tmp/templates' contains the following structure::
387+
388 /tmp/templates/nova.conf
389 /tmp/templates/api-paste.ini
390 /tmp/templates/grizzly/api-paste.ini
391@@ -169,8 +168,8 @@
392 $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
393 us to ship common templates (haproxy, apache) with the helpers.
394
395- Context generators
396- ---------------------------------------
397+ **Context generators**
398+
399 Context generators are used to generate template contexts during hook
400 execution. Doing so may require inspecting service relations, charm
401 config, etc. When registered, a config file is associated with a list
402
403=== modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
404--- hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-03-27 11:20:28 +0000
405+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-07-17 15:16:47 +0000
406@@ -303,7 +303,7 @@
407 blk_device, fstype, system_services=[]):
408 """
409 NOTE: This function must only be called from a single service unit for
410- the same rbd_img otherwise data loss will occur.
411+ the same rbd_img otherwise data loss will occur.
412
413 Ensures given pool and RBD image exists, is mapped to a block device,
414 and the device is formatted and mounted at the given mount_point.
415
416=== modified file 'hooks/charmhelpers/core/hookenv.py'
417--- hooks/charmhelpers/core/hookenv.py 2014-05-19 11:43:55 +0000
418+++ hooks/charmhelpers/core/hookenv.py 2014-07-17 15:16:47 +0000
419@@ -25,7 +25,7 @@
420 def cached(func):
421 """Cache return values for multiple executions of func + args
422
423- For example:
424+ For example::
425
426 @cached
427 def unit_get(attribute):
428@@ -445,18 +445,19 @@
429 class Hooks(object):
430 """A convenient handler for hook functions.
431
432- Example:
433+ Example::
434+
435 hooks = Hooks()
436
437 # register a hook, taking its name from the function name
438 @hooks.hook()
439 def install():
440- ...
441+ pass # your code here
442
443 # register a hook, providing a custom hook name
444 @hooks.hook("config-changed")
445 def config_changed():
446- ...
447+ pass # your code here
448
449 if __name__ == "__main__":
450 # execute a hook based on the name the program is called by
451
452=== modified file 'hooks/charmhelpers/core/host.py'
453--- hooks/charmhelpers/core/host.py 2014-06-24 13:40:39 +0000
454+++ hooks/charmhelpers/core/host.py 2014-07-17 15:16:47 +0000
455@@ -211,13 +211,13 @@
456 def restart_on_change(restart_map, stopstart=False):
457 """Restart services based on configuration files changing
458
459- This function is used a decorator, for example
460+ This function is used as a decorator, for example::
461
462 @restart_on_change({
463 '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
464 })
465 def ceph_client_changed():
466- ...
467+ pass # your code here
468
469 In this example, the cinder-api and cinder-volume services
470 would be restarted if /etc/ceph/ceph.conf is changed by the
471@@ -313,9 +313,11 @@
472
473 def cmp_pkgrevno(package, revno, pkgcache=None):
474 '''Compare supplied revno with the revno of the installed package
475- 1 => Installed revno is greater than supplied arg
476- 0 => Installed revno is the same as supplied arg
477- -1 => Installed revno is less than supplied arg
478+
479+ * 1 => Installed revno is greater than supplied arg
480+ * 0 => Installed revno is the same as supplied arg
481+ * -1 => Installed revno is less than supplied arg
482+
483 '''
484 import apt_pkg
485 if not pkgcache:
486
487=== modified file 'hooks/charmhelpers/fetch/__init__.py'
488--- hooks/charmhelpers/fetch/__init__.py 2014-06-24 13:40:39 +0000
489+++ hooks/charmhelpers/fetch/__init__.py 2014-07-17 15:16:47 +0000
490@@ -235,31 +235,39 @@
491 sources_var='install_sources',
492 keys_var='install_keys'):
493 """
494- Configure multiple sources from charm configuration
495+ Configure multiple sources from charm configuration.
496+
497+ The lists are encoded as yaml fragments in the configuration.
498+ The fragment needs to be included as a string.
499
500 Example config:
501- install_sources:
502+ install_sources: |
503 - "ppa:foo"
504 - "http://example.com/repo precise main"
505- install_keys:
506+ install_keys: |
507 - null
508 - "a1b2c3d4"
509
510 Note that 'null' (a.k.a. None) should not be quoted.
511 """
512- sources = safe_load(config(sources_var))
513- keys = config(keys_var)
514- if keys is not None:
515- keys = safe_load(keys)
516- if isinstance(sources, basestring) and (
517- keys is None or isinstance(keys, basestring)):
518- add_source(sources, keys)
519+ sources = safe_load((config(sources_var) or '').strip()) or []
520+ keys = safe_load((config(keys_var) or '').strip()) or None
521+
522+ if isinstance(sources, basestring):
523+ sources = [sources]
524+
525+ if keys is None:
526+ for source in sources:
527+ add_source(source, None)
528 else:
529- if not len(sources) == len(keys):
530- msg = 'Install sources and keys lists are different lengths'
531- raise SourceConfigError(msg)
532- for src_num in range(len(sources)):
533- add_source(sources[src_num], keys[src_num])
534+ if isinstance(keys, basestring):
535+ keys = [keys]
536+
537+ if len(sources) != len(keys):
538+ raise SourceConfigError(
539+ 'Install sources and keys lists are different lengths')
540+ for source, key in zip(sources, keys):
541+ add_source(source, key)
542 if update:
543 apt_update(fatal=True)
544
545
546=== added directory 'tests'
547=== added file 'tests/00-setup'
548--- tests/00-setup 1970-01-01 00:00:00 +0000
549+++ tests/00-setup 2014-07-17 15:16:47 +0000
550@@ -0,0 +1,10 @@
551+#!/bin/bash
552+
553+set -ex
554+
555+sudo add-apt-repository --yes ppa:juju/stable
556+sudo apt-get update --yes
557+sudo apt-get install --yes python-amulet
558+sudo apt-get install --yes python-neutronclient
559+sudo apt-get install --yes python-keystoneclient
560+sudo apt-get install --yes python-novaclient
561
562=== added file 'tests/12-basic-precise-grizzly'
563--- tests/12-basic-precise-grizzly 1970-01-01 00:00:00 +0000
564+++ tests/12-basic-precise-grizzly 2014-07-17 15:16:47 +0000
565@@ -0,0 +1,11 @@
566+#!/usr/bin/python
567+
568+"""Amulet tests on a basic quantum-gateway deployment on precise-grizzly."""
569+
570+from basic_deployment import QuantumGatewayBasicDeployment
571+
572+if __name__ == '__main__':
573+ deployment = QuantumGatewayBasicDeployment(series='precise',
574+ openstack='cloud:precise-grizzly',
575+ source='cloud:precise-updates/grizzly')
576+ deployment.run_tests()
577
578=== added file 'tests/13-basic-precise-havana'
579--- tests/13-basic-precise-havana 1970-01-01 00:00:00 +0000
580+++ tests/13-basic-precise-havana 2014-07-17 15:16:47 +0000
581@@ -0,0 +1,11 @@
582+#!/usr/bin/python
583+
584+"""Amulet tests on a basic quantum-gateway deployment on precise-havana."""
585+
586+from basic_deployment import QuantumGatewayBasicDeployment
587+
588+if __name__ == '__main__':
589+ deployment = QuantumGatewayBasicDeployment(series='precise',
590+ openstack='cloud:precise-havana',
591+ source='cloud:precise-updates/havana')
592+ deployment.run_tests()
593
594=== added file 'tests/14-basic-precise-icehouse'
595--- tests/14-basic-precise-icehouse 1970-01-01 00:00:00 +0000
596+++ tests/14-basic-precise-icehouse 2014-07-17 15:16:47 +0000
597@@ -0,0 +1,11 @@
598+#!/usr/bin/python
599+
600+"""Amulet tests on a basic quantum-gateway deployment on precise-icehouse."""
601+
602+from basic_deployment import QuantumGatewayBasicDeployment
603+
604+if __name__ == '__main__':
605+ deployment = QuantumGatewayBasicDeployment(series='precise',
606+ openstack='cloud:precise-icehouse',
607+ source='cloud:precise-updates/icehouse')
608+ deployment.run_tests()
609
610=== added file 'tests/15-basic-trusty-icehouse'
611--- tests/15-basic-trusty-icehouse 1970-01-01 00:00:00 +0000
612+++ tests/15-basic-trusty-icehouse 2014-07-17 15:16:47 +0000
613@@ -0,0 +1,9 @@
614+#!/usr/bin/python
615+
616+"""Amulet tests on a basic quantum-gateway deployment on trusty-icehouse."""
617+
618+from basic_deployment import QuantumGatewayBasicDeployment
619+
620+if __name__ == '__main__':
621+ deployment = QuantumGatewayBasicDeployment(series='trusty')
622+ deployment.run_tests()
623
624=== added file 'tests/README'
625--- tests/README 1970-01-01 00:00:00 +0000
626+++ tests/README 2014-07-17 15:16:47 +0000
627@@ -0,0 +1,47 @@
628+This directory provides Amulet tests that focus on verification of
629+quantum-gateway deployments.
630+
631+If you use a web proxy server to access the web, you'll need to set the
632+AMULET_HTTP_PROXY environment variable to the http URL of the proxy server.
633+
634+The following examples demonstrate different ways that tests can be executed.
635+All examples are run from the charm's root directory.
636+
637+ * To run all tests (starting with 00-setup):
638+
639+ make test
640+
641+ * To run a specific test module (or modules):
642+
643+ juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
644+
645+ * To run a specific test module (or modules), and keep the environment
646+ deployed after a failure:
647+
648+ juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
649+
650+ * To re-run a test module against an already deployed environment (one
651+ that was deployed by a previous call to 'juju test --set-e'):
652+
653+ ./tests/15-basic-trusty-icehouse
654+
655+For debugging and test development purposes, all code should be idempotent.
656+In other words, the code should have the ability to be re-run without changing
657+the results beyond the initial run. This enables editing and re-running of a
658+test module against an already deployed environment, as described above.
659+
660+Manual debugging tips:
661+
662+ * Set the following env vars before using the OpenStack CLI as admin:
663+ export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
664+ export OS_TENANT_NAME=admin
665+ export OS_USERNAME=admin
666+ export OS_PASSWORD=openstack
667+ export OS_REGION_NAME=RegionOne
668+
669+ * Set the following env vars before using the OpenStack CLI as demoUser:
670+ export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
671+ export OS_TENANT_NAME=demoTenant
672+ export OS_USERNAME=demoUser
673+ export OS_PASSWORD=password
674+ export OS_REGION_NAME=RegionOne
675
676=== added file 'tests/basic_deployment.py'
677--- tests/basic_deployment.py 1970-01-01 00:00:00 +0000
678+++ tests/basic_deployment.py 2014-07-17 15:16:47 +0000
679@@ -0,0 +1,834 @@
680+#!/usr/bin/python
681+
682+import amulet
683+try:
684+ from quantumclient.v2_0 import client as neutronclient
685+except ImportError:
686+ from neutronclient.v2_0 import client as neutronclient
687+
688+from charmhelpers.contrib.openstack.amulet.deployment import (
689+ OpenStackAmuletDeployment
690+)
691+
692+from charmhelpers.contrib.openstack.amulet.utils import (
693+ OpenStackAmuletUtils,
694+ DEBUG, # flake8: noqa
695+ ERROR
696+)
697+
698+# Use DEBUG to turn on debug logging
699+u = OpenStackAmuletUtils(ERROR)
700+
701+
702+class QuantumGatewayBasicDeployment(OpenStackAmuletDeployment):
703+ """Amulet tests on a basic quantum-gateway deployment."""
704+
705+ def __init__(self, series, openstack=None, source=None):
706+ """Deploy the entire test environment."""
707+ super(QuantumGatewayBasicDeployment, self).__init__(series, openstack,
708+ source)
709+ self._add_services()
710+ self._add_relations()
711+ self._configure_services()
712+ self._deploy()
713+ self._initialize_tests()
714+
715+ def _add_services(self):
716+ """Add the service that we're testing, including the number of units,
717+ where quantum-gateway is local, and the other charms are from
718+ the charm store."""
719+ this_service = ('quantum-gateway', 1)
720+ other_services = [('mysql', 1),
721+ ('rabbitmq-server', 1), ('keystone', 1),
722+ ('nova-cloud-controller', 1)]
723+ super(QuantumGatewayBasicDeployment, self)._add_services(this_service,
724+ other_services)
725+
726+ def _add_relations(self):
727+ """Add all of the relations for the services."""
728+ relations = {
729+ 'keystone:shared-db': 'mysql:shared-db',
730+ 'quantum-gateway:shared-db': 'mysql:shared-db',
731+ 'quantum-gateway:amqp': 'rabbitmq-server:amqp',
732+ 'nova-cloud-controller:quantum-network-service': \
733+ 'quantum-gateway:quantum-network-service',
734+ 'nova-cloud-controller:shared-db': 'mysql:shared-db',
735+ 'nova-cloud-controller:identity-service': 'keystone:identity-service',
736+ 'nova-cloud-controller:amqp': 'rabbitmq-server:amqp'
737+ }
738+ super(QuantumGatewayBasicDeployment, self)._add_relations(relations)
739+
740+ def _configure_services(self):
741+ """Configure all of the services."""
742+ keystone_config = {'admin-password': 'openstack',
743+ 'admin-token': 'ubuntutesting'}
744+ nova_cc_config = {'network-manager': 'Quantum',
745+ 'quantum-security-groups': 'yes'}
746+ configs = {'keystone': keystone_config,
747+ 'nova-cloud-controller': nova_cc_config}
748+ super(QuantumGatewayBasicDeployment, self)._configure_services(configs)
749+
750+ def _initialize_tests(self):
751+ """Perform final initialization before tests get run."""
752+ # Access the sentries for inspecting service units
753+ self.mysql_sentry = self.d.sentry.unit['mysql/0']
754+ self.keystone_sentry = self.d.sentry.unit['keystone/0']
755+ self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0']
756+ self.nova_cc_sentry = self.d.sentry.unit['nova-cloud-controller/0']
757+ self.quantum_gateway_sentry = self.d.sentry.unit['quantum-gateway/0']
758+
759+ # Authenticate admin with keystone
760+ self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
761+ user='admin',
762+ password='openstack',
763+ tenant='admin')
764+
765+
766+ # Authenticate admin with neutron
767+ ep = self.keystone.service_catalog.url_for(service_type='identity',
768+ endpoint_type='publicURL')
769+ self.neutron = neutronclient.Client(auth_url=ep,
770+ username='admin',
771+ password='openstack',
772+ tenant_name='admin',
773+ region_name='RegionOne')
774+
775+ def test_services(self):
776+ """Verify the expected services are running on the corresponding
777+ service units."""
778+ if self._get_openstack_release() >= self.precise_havana:
779+ neutron_services = ['status neutron-dhcp-agent',
780+ 'status neutron-lbaas-agent',
781+ 'status neutron-metadata-agent',
782+ 'status neutron-plugin-openvswitch-agent']
783+ if self._get_openstack_release() == self.precise_havana:
784+ neutron_services.append('status neutron-l3-agent')
785+ else:
786+ neutron_services.append('status neutron-vpn-agent')
787+ neutron_services.append('status neutron-metering-agent')
788+ neutron_services.append('status neutron-ovs-cleanup')
789+ else:
790+ neutron_services = ['status quantum-dhcp-agent',
791+ 'status quantum-l3-agent',
792+ 'status quantum-metadata-agent',
793+ 'status quantum-plugin-openvswitch-agent']
794+
795+ nova_cc_services = ['status nova-api-ec2',
796+ 'status nova-api-os-compute',
797+ 'status nova-objectstore',
798+ 'status nova-cert',
799+ 'status nova-scheduler']
800+ if self._get_openstack_release() >= self.precise_grizzly:
801+ nova_cc_services.append('status nova-conductor')
802+
803+ commands = {
804+ self.mysql_sentry: ['status mysql'],
805+ self.keystone_sentry: ['status keystone'],
806+ self.nova_cc_sentry: nova_cc_services,
807+ self.quantum_gateway_sentry: neutron_services
808+ }
809+
810+ ret = u.validate_services(commands)
811+ if ret:
812+ amulet.raise_status(amulet.FAIL, msg=ret)
813+
814+ def test_quantum_gateway_shared_db_relation(self):
815+ """Verify the quantum-gateway to mysql shared-db relation data"""
816+ unit = self.quantum_gateway_sentry
817+ relation = ['shared-db', 'mysql:shared-db']
818+ expected = {
819+ 'private-address': u.valid_ip,
820+ 'database': 'nova',
821+ 'username': 'nova',
822+ 'hostname': u.valid_ip
823+ }
824+
825+ ret = u.validate_relation_data(unit, relation, expected)
826+ if ret:
827+ message = u.relation_error('quantum-gateway shared-db', ret)
828+ amulet.raise_status(amulet.FAIL, msg=message)
829+
830+ def test_mysql_shared_db_relation(self):
831+ """Verify the mysql to quantum-gateway shared-db relation data"""
832+ unit = self.mysql_sentry
833+ relation = ['shared-db', 'quantum-gateway:shared-db']
834+ expected = {
835+ 'private-address': u.valid_ip,
836+ 'password': u.not_null,
837+ 'db_host': u.valid_ip
838+ }
839+
840+ ret = u.validate_relation_data(unit, relation, expected)
841+ if ret:
842+ message = u.relation_error('mysql shared-db', ret)
843+ amulet.raise_status(amulet.FAIL, msg=message)
844+
845+ def test_quantum_gateway_amqp_relation(self):
846+ """Verify the quantum-gateway to rabbitmq-server amqp relation data"""
847+ unit = self.quantum_gateway_sentry
848+ relation = ['amqp', 'rabbitmq-server:amqp']
849+ expected = {
850+ 'username': 'neutron',
851+ 'private-address': u.valid_ip,
852+ 'vhost': 'openstack'
853+ }
854+
855+ ret = u.validate_relation_data(unit, relation, expected)
856+ if ret:
857+ message = u.relation_error('quantum-gateway amqp', ret)
858+ amulet.raise_status(amulet.FAIL, msg=message)
859+
860+ def test_rabbitmq_amqp_relation(self):
861+ """Verify the rabbitmq-server to quantum-gateway amqp relation data"""
862+ unit = self.rabbitmq_sentry
863+ relation = ['amqp', 'quantum-gateway:amqp']
864+ expected = {
865+ 'private-address': u.valid_ip,
866+ 'password': u.not_null,
867+ 'hostname': u.valid_ip
868+ }
869+
870+ ret = u.validate_relation_data(unit, relation, expected)
871+ if ret:
872+ message = u.relation_error('rabbitmq amqp', ret)
873+ amulet.raise_status(amulet.FAIL, msg=message)
874+
875+ def test_quantum_gateway_network_service_relation(self):
876+ """Verify the quantum-gateway to nova-cc quantum-network-service
877+ relation data"""
878+ unit = self.quantum_gateway_sentry
879+ relation = ['quantum-network-service',
880+ 'nova-cloud-controller:quantum-network-service']
881+ expected = {
882+ 'private-address': u.valid_ip
883+ }
884+
885+ ret = u.validate_relation_data(unit, relation, expected)
886+ if ret:
887+ message = u.relation_error('quantum-gateway network-service', ret)
888+ amulet.raise_status(amulet.FAIL, msg=message)
889+
890+ def test_nova_cc_network_service_relation(self):
891+ """Verify the nova-cc to quantum-gateway quantum-network-service
892+ relation data"""
893+ unit = self.nova_cc_sentry
894+ relation = ['quantum-network-service',
895+ 'quantum-gateway:quantum-network-service']
896+ expected = {
897+ 'service_protocol': 'http',
898+ 'service_tenant': 'services',
899+ 'quantum_url': u.valid_url,
900+ 'quantum_port': '9696',
901+ 'service_port': '5000',
902+ 'region': 'RegionOne',
903+ 'service_password': u.not_null,
904+ 'quantum_host': u.valid_ip,
905+ 'auth_port': '35357',
906+ 'auth_protocol': 'http',
907+ 'private-address': u.valid_ip,
908+ 'keystone_host': u.valid_ip,
909+ 'quantum_plugin': 'ovs',
910+ 'auth_host': u.valid_ip,
911+ 'service_username': 'quantum_s3_ec2_nova',
912+ 'service_tenant_name': 'services'
913+ }
914+
915+ ret = u.validate_relation_data(unit, relation, expected)
916+ if ret:
917+ message = u.relation_error('nova-cc network-service', ret)
918+ amulet.raise_status(amulet.FAIL, msg=message)
919+
920+ def test_restart_on_config_change(self):
921+ """Verify that the specified services are restarted when the config
922+ is changed."""
923+ if self._get_openstack_release() >= self.precise_havana:
924+ conf = '/etc/neutron/neutron.conf'
925+ services = ['neutron-dhcp-agent', 'neutron-openvswitch-agent',
926+ 'neutron-metering-agent', 'neutron-lbaas-agent',
927+ 'neutron-metadata-agent']
928+ if self._get_openstack_release() == self.precise_havana:
929+ services.append('neutron-l3-agent')
930+ else:
931+ services.append('neutron-vpn-agent')
932+ else:
933+ conf = '/etc/quantum/quantum.conf'
934+ services = ['quantum-dhcp-agent', 'quantum-openvswitch-agent',
935+ 'quantum-metadata-agent', 'quantum-l3-agent']
936+
937+ self.d.configure('quantum-gateway', {'debug': 'True'})
938+
939+ time = 20
940+ for s in services:
941+ if not u.service_restarted(self.quantum_gateway_sentry, s, conf,
942+ pgrep_full=True, sleep_time=time):
943+ msg = "service {} didn't restart after config change".format(s)
944+ amulet.raise_status(amulet.FAIL, msg=msg)
945+ time = 0
946+
947+ self.d.configure('quantum-gateway', {'debug': 'False'})
948+
949+ def test_neutron_config(self):
950+ """Verify the data in the neutron config file."""
951+ unit = self.quantum_gateway_sentry
952+ rabbitmq_relation = self.rabbitmq_sentry.relation('amqp',
953+ 'quantum-gateway:amqp')
954+
955+ if self._get_openstack_release() >= self.precise_havana:
956+ conf = '/etc/neutron/neutron.conf'
957+ expected = {
958+ 'DEFAULT': {
959+ 'verbose': 'False',
960+ 'debug': 'False',
961+ 'lock_path': '/var/lock/neutron',
962+ 'rabbit_userid': 'neutron',
963+ 'rabbit_virtual_host': 'openstack',
964+ 'rabbit_password': rabbitmq_relation['password'],
965+ 'rabbit_host': rabbitmq_relation['hostname'],
966+ 'control_exchange': 'neutron',
967+ 'notification_driver': 'neutron.openstack.common.notifier.'
968+ 'list_notifier',
969+ 'list_notifier_drivers': 'neutron.openstack.common.'
970+ 'notifier.rabbit_notifier'
971+ },
972+ 'agent': {
973+ 'root_helper': 'sudo /usr/bin/neutron-rootwrap '
974+ '/etc/neutron/rootwrap.conf'
975+ }
976+ }
977+ else:
978+ conf = '/etc/quantum/quantum.conf'
979+ expected = {
980+ 'DEFAULT': {
981+ 'verbose': 'False',
982+ 'debug': 'False',
983+ 'lock_path': '/var/lock/quantum',
984+ 'rabbit_userid': 'neutron',
985+ 'rabbit_virtual_host': 'openstack',
986+ 'rabbit_password': rabbitmq_relation['password'],
987+ 'rabbit_host': rabbitmq_relation['hostname'],
988+ 'control_exchange': 'quantum',
989+ 'notification_driver': 'quantum.openstack.common.notifier.'
990+ 'list_notifier',
991+ 'list_notifier_drivers': 'quantum.openstack.common.'
992+ 'notifier.rabbit_notifier'
993+ },
994+ 'AGENT': {
995+ 'root_helper': 'sudo /usr/bin/quantum-rootwrap '
996+ '/etc/quantum/rootwrap.conf'
997+ }
998+ }
999+
1000+ if self._get_openstack_release() >= self.precise_icehouse:
1001+ expected['DEFAULT']['core_plugin'] = \
1002+ 'neutron.plugins.ml2.plugin.Ml2Plugin'
1003+ elif self._get_openstack_release() >= self.precise_havana:
1004+ expected['DEFAULT']['core_plugin'] = \
1005+ 'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2'
1006+ else:
1007+ expected['DEFAULT']['core_plugin'] = \
1008+ 'quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2'
1009+
1010+ for section, pairs in expected.iteritems():
1011+ ret = u.validate_config_data(unit, conf, section, pairs)
1012+ if ret:
1013+ message = "neutron config error: {}".format(ret)
1014+ amulet.raise_status(amulet.FAIL, msg=message)
1015+
1016+ def test_ml2_config(self):
1017+ """Verify the data in the ml2 config file. This is only available
1018+ since icehouse."""
1019+ if self._get_openstack_release() < self.precise_icehouse:
1020+ return
1021+
1022+ unit = self.quantum_gateway_sentry
1023+ conf = '/etc/neutron/plugins/ml2/ml2_conf.ini'
1024+ quantum_gateway_relation = unit.relation('shared-db', 'mysql:shared-db')
1025+ expected = {
1026+ 'ml2': {
1027+ 'type_drivers': 'gre,vxlan',
1028+ 'tenant_network_types': 'gre,vxlan',
1029+ 'mechanism_drivers': 'openvswitch'
1030+ },
1031+ 'ml2_type_gre': {
1032+ 'tunnel_id_ranges': '1:1000'
1033+ },
1034+ 'ml2_type_vxlan': {
1035+ 'vni_ranges': '1001:2000'
1036+ },
1037+ 'ovs': {
1038+ 'enable_tunneling': 'True',
1039+ 'local_ip': quantum_gateway_relation['private-address']
1040+ },
1041+ 'agent': {
1042+ 'tunnel_types': 'gre'
1043+ },
1044+ 'securitygroup': {
1045+ 'firewall_driver': 'neutron.agent.linux.iptables_firewall.'
1046+ 'OVSHybridIptablesFirewallDriver'
1047+ }
1048+ }
1049+
1050+ for section, pairs in expected.iteritems():
1051+ ret = u.validate_config_data(unit, conf, section, pairs)
1052+ if ret:
1053+ message = "ml2 config error: {}".format(ret)
1054+ amulet.raise_status(amulet.FAIL, msg=message)
1055+
1056+ def test_api_paste_config(self):
1057+ """Verify the data in the api paste config file."""
1058+ unit = self.quantum_gateway_sentry
1059+ if self._get_openstack_release() >= self.precise_havana:
1060+ conf = '/etc/neutron/api-paste.ini'
1061+ expected = {
1062+ 'composite:neutron': {
1063+ 'use': 'egg:Paste#urlmap',
1064+ '/': 'neutronversions',
1065+ '/v2.0': 'neutronapi_v2_0'
1066+ },
1067+ 'filter:keystonecontext': {
1068+ 'paste.filter_factory': 'neutron.auth:'
1069+ 'NeutronKeystoneContext.factory'
1070+ },
1071+ 'filter:authtoken': {
1072+ 'paste.filter_factory': 'keystoneclient.middleware.'
1073+ 'auth_token:filter_factory'
1074+ },
1075+ 'filter:extensions': {
1076+ 'paste.filter_factory': 'neutron.api.extensions:'
1077+ 'plugin_aware_extension_middleware_'
1078+ 'factory'
1079+ },
1080+ 'app:neutronversions': {
1081+ 'paste.app_factory': 'neutron.api.versions:Versions.factory'
1082+ },
1083+ 'app:neutronapiapp_v2_0': {
1084+ 'paste.app_factory': 'neutron.api.v2.router:APIRouter.'
1085+ 'factory'
1086+ }
1087+ }
1088+ if self._get_openstack_release() == self.precise_havana:
1089+ expected_additional = {
1090+ 'composite:neutronapi_v2_0': {
1091+ 'use': 'call:neutron.auth:pipeline_factory',
1092+ 'noauth': 'extensions neutronapiapp_v2_0',
1093+ 'keystone': 'authtoken keystonecontext extensions '
1094+ 'neutronapiapp_v2_0'
1095+ }
1096+ }
1097+ else:
1098+ expected_additional = {
1099+ 'composite:neutronapi_v2_0': {
1100+ 'use': 'call:neutron.auth:pipeline_factory',
1101+ 'noauth': 'request_id catch_errors extensions '
1102+ 'neutronapiapp_v2_0',
1103+ 'keystone': 'request_id catch_errors authtoken '
1104+ 'keystonecontext extensions '
1105+ 'neutronapiapp_v2_0'
1106+ }
1107+ }
1108+ expected = dict(expected.items() + expected_additional.items())
1109+ else:
1110+ conf = '/etc/quantum/api-paste.ini'
1111+ expected = {
1112+ 'composite:quantum': {
1113+ 'use': 'egg:Paste#urlmap',
1114+ '/': 'quantumversions',
1115+ '/v2.0': 'quantumapi_v2_0'
1116+ },
1117+ 'composite:quantumapi_v2_0': {
1118+ 'use': 'call:quantum.auth:pipeline_factory',
1119+ 'noauth': 'extensions quantumapiapp_v2_0',
1120+ 'keystone': 'authtoken keystonecontext extensions '
1121+ 'quantumapiapp_v2_0',
1122+ },
1123+ 'filter:keystonecontext': {
1124+ 'paste.filter_factory': 'quantum.auth:'
1125+ 'QuantumKeystoneContext.factory'
1126+ },
1127+ 'filter:authtoken': {
1128+ 'paste.filter_factory': 'keystoneclient.middleware.'
1129+ 'auth_token:filter_factory'
1130+ },
1131+ 'filter:extensions': {
1132+ 'paste.filter_factory': 'quantum.api.extensions:'
1133+ 'plugin_aware_extension_middleware_'
1134+ 'factory'
1135+ },
1136+ 'app:quantumversions': {
1137+ 'paste.app_factory': 'quantum.api.versions:Versions.factory'
1138+ },
1139+ 'app:quantumapiapp_v2_0': {
1140+ 'paste.app_factory': 'quantum.api.v2.router:APIRouter.'
1141+ 'factory'
1142+ }
1143+ }
1144+
1145+ for section, pairs in expected.iteritems():
1146+ ret = u.validate_config_data(unit, conf, section, pairs)
1147+ if ret:
1148+ message = "api paste config error: {}".format(ret)
1149+ amulet.raise_status(amulet.FAIL, msg=message)
1150+
1151+ def test_dhcp_agent_config(self):
1152+ """Verify the data in the dhcp agent config file."""
1153+ unit = self.quantum_gateway_sentry
1154+ if self._get_openstack_release() >= self.precise_havana:
1155+ conf = '/etc/neutron/dhcp_agent.ini'
1156+ expected = {
1157+ 'state_path': '/var/lib/neutron',
1158+ 'interface_driver': 'neutron.agent.linux.interface.'
1159+ 'OVSInterfaceDriver',
1160+ 'dhcp_driver': 'neutron.agent.linux.dhcp.Dnsmasq',
1161+ 'root_helper': 'sudo /usr/bin/neutron-rootwrap '
1162+ '/etc/neutron/rootwrap.conf',
1163+ 'ovs_use_veth': 'True'
1164+ }
1165+ else:
1166+ conf = '/etc/quantum/dhcp_agent.ini'
1167+ expected = {
1168+ 'state_path': '/var/lib/quantum',
1169+ 'interface_driver': 'quantum.agent.linux.interface.'
1170+ 'OVSInterfaceDriver',
1171+ 'dhcp_driver': 'quantum.agent.linux.dhcp.Dnsmasq',
1172+ 'root_helper': 'sudo /usr/bin/quantum-rootwrap '
1173+ '/etc/quantum/rootwrap.conf'
1174+ }
1175+
1176+ ret = u.validate_config_data(unit, conf, 'DEFAULT', expected)
1177+ if ret:
1178+ message = "dhcp agent config error: {}".format(ret)
1179+ amulet.raise_status(amulet.FAIL, msg=message)
1180+
1181+ def test_fwaas_driver_config(self):
1182+ """Verify the data in the fwaas driver config file. This is only
1183+ available since havana."""
1184+ if self._get_openstack_release() < self.precise_havana:
1185+ return
1186+
1187+ unit = self.quantum_gateway_sentry
1188+ conf = '/etc/neutron/fwaas_driver.ini'
1189+ expected = {
1190+ 'driver': 'neutron.services.firewall.drivers.linux.'
1191+ 'iptables_fwaas.IptablesFwaasDriver',
1192+ 'enabled': 'True'
1193+ }
1194+
1195+ ret = u.validate_config_data(unit, conf, 'fwaas', expected)
1196+ if ret:
1197+ message = "fwaas driver config error: {}".format(ret)
1198+ amulet.raise_status(amulet.FAIL, msg=message)
1199+
1200+ def test_l3_agent_config(self):
1201+ """Verify the data in the l3 agent config file."""
1202+ unit = self.quantum_gateway_sentry
1203+ nova_cc_relation = self.nova_cc_sentry.relation(\
1204+ 'quantum-network-service',
1205+ 'quantum-gateway:quantum-network-service')
1206+ ep = self.keystone.service_catalog.url_for(service_type='identity',
1207+ endpoint_type='publicURL')
1208+
1209+ if self._get_openstack_release() >= self.precise_havana:
1210+ conf = '/etc/neutron/l3_agent.ini'
1211+ expected = {
1212+ 'interface_driver': 'neutron.agent.linux.interface.'
1213+ 'OVSInterfaceDriver',
1214+ 'auth_url': ep,
1215+ 'auth_region': 'RegionOne',
1216+ 'admin_tenant_name': 'services',
1217+ 'admin_user': 'quantum_s3_ec2_nova',
1218+ 'admin_password': nova_cc_relation['service_password'],
1219+ 'root_helper': 'sudo /usr/bin/neutron-rootwrap '
1220+ '/etc/neutron/rootwrap.conf',
1221+ 'ovs_use_veth': 'True',
1222+ 'handle_internal_only_routers': 'True'
1223+ }
1224+ else:
1225+ conf = '/etc/quantum/l3_agent.ini'
1226+ expected = {
1227+ 'interface_driver': 'quantum.agent.linux.interface.'
1228+ 'OVSInterfaceDriver',
1229+ 'auth_url': ep,
1230+ 'auth_region': 'RegionOne',
1231+ 'admin_tenant_name': 'services',
1232+ 'admin_user': 'quantum_s3_ec2_nova',
1233+ 'admin_password': nova_cc_relation['service_password'],
1234+ 'root_helper': 'sudo /usr/bin/quantum-rootwrap '
1235+ '/etc/quantum/rootwrap.conf'
1236+ }
1237+
1238+ ret = u.validate_config_data(unit, conf, 'DEFAULT', expected)
1239+ if ret:
1240+ message = "l3 agent config error: {}".format(ret)
1241+ amulet.raise_status(amulet.FAIL, msg=message)
1242+
1243+ def test_lbaas_agent_config(self):
1244+ """Verify the data in the lbaas agent config file. This is only
1245+ available since havana."""
1246+ if self._get_openstack_release() < self.precise_havana:
1247+ return
1248+
1249+ unit = self.quantum_gateway_sentry
1250+ conf = '/etc/neutron/lbaas_agent.ini'
1251+ expected = {
1252+ 'DEFAULT': {
1253+ 'periodic_interval': '10',
1254+ 'interface_driver': 'neutron.agent.linux.interface.'
1255+ 'OVSInterfaceDriver',
1256+ 'ovs_use_veth': 'False',
1257+ 'device_driver': 'neutron.services.loadbalancer.drivers.'
1258+ 'haproxy.namespace_driver.HaproxyNSDriver'
1259+ },
1260+ 'haproxy': {
1261+ 'loadbalancer_state_path': '$state_path/lbaas',
1262+ 'user_group': 'nogroup'
1263+ }
1264+ }
1265+
1266+ for section, pairs in expected.iteritems():
1267+ ret = u.validate_config_data(unit, conf, section, pairs)
1268+ if ret:
1269+ message = "lbaas agent config error: {}".format(ret)
1270+ amulet.raise_status(amulet.FAIL, msg=message)
1271+
1272+ def test_metadata_agent_config(self):
1273+ """Verify the data in the metadata agent config file."""
1274+ unit = self.quantum_gateway_sentry
1275+ ep = self.keystone.service_catalog.url_for(service_type='identity',
1276+ endpoint_type='publicURL')
1277+ quantum_gateway_relation = unit.relation('shared-db', 'mysql:shared-db')
1278+ nova_cc_relation = self.nova_cc_sentry.relation(\
1279+ 'quantum-network-service',
1280+ 'quantum-gateway:quantum-network-service')
1281+
1282+ if self._get_openstack_release() >= self.precise_havana:
1283+ conf = '/etc/neutron/metadata_agent.ini'
1284+ expected = {
1285+ 'auth_url': ep,
1286+ 'auth_region': 'RegionOne',
1287+ 'admin_tenant_name': 'services',
1288+ 'admin_user': 'quantum_s3_ec2_nova',
1289+ 'admin_password': nova_cc_relation['service_password'],
1290+ 'root_helper': 'sudo neutron-rootwrap '
1291+ '/etc/neutron/rootwrap.conf',
1292+ 'state_path': '/var/lib/neutron',
1293+ 'nova_metadata_ip': quantum_gateway_relation['private-address'],
1294+ 'nova_metadata_port': '8775'
1295+ }
1296+ else:
1297+ conf = '/etc/quantum/metadata_agent.ini'
1298+ expected = {
1299+ 'auth_url': ep,
1300+ 'auth_region': 'RegionOne',
1301+ 'admin_tenant_name': 'services',
1302+ 'admin_user': 'quantum_s3_ec2_nova',
1303+ 'admin_password': nova_cc_relation['service_password'],
1304+ 'root_helper': 'sudo quantum-rootwrap '
1305+ '/etc/quantum/rootwrap.conf',
1306+ 'state_path': '/var/lib/quantum',
1307+ 'nova_metadata_ip': quantum_gateway_relation['private-address'],
1308+ 'nova_metadata_port': '8775'
1309+ }
1310+
1311+ ret = u.validate_config_data(unit, conf, 'DEFAULT', expected)
1312+ if ret:
1313+ message = "metadata agent config error: {}".format(ret)
1314+ amulet.raise_status(amulet.FAIL, msg=message)
1315+
1316+ def test_metering_agent_config(self):
1317+ """Verify the data in the metering agent config file. This is only
1318+ available since havana."""
1319+ if self._get_openstack_release() < self.precise_havana:
1320+ return
1321+
1322+ unit = self.quantum_gateway_sentry
1323+ conf = '/etc/neutron/metering_agent.ini'
1324+ expected = {
1325+ 'driver': 'neutron.services.metering.drivers.iptables.'
1326+ 'iptables_driver.IptablesMeteringDriver',
1327+ 'measure_interval': '30',
1328+ 'report_interval': '300',
1329+ 'interface_driver': 'neutron.agent.linux.interface.'
1330+ 'OVSInterfaceDriver',
1331+ 'use_namespaces': 'True'
1332+ }
1333+
1334+ ret = u.validate_config_data(unit, conf, 'DEFAULT', expected)
1335+ if ret:
1336+ message = "metering agent config error: {}".format(ret)
1337+
1338+ def test_nova_config(self):
1339+ """Verify the data in the nova config file."""
1340+ unit = self.quantum_gateway_sentry
1341+ conf = '/etc/nova/nova.conf'
1342+ mysql_relation = self.mysql_sentry.relation('shared-db',
1343+ 'quantum-gateway:shared-db')
1344+ db_uri = "mysql://{}:{}@{}/{}".format('nova',
1345+ mysql_relation['password'],
1346+ mysql_relation['db_host'],
1347+ 'nova')
1348+ rabbitmq_relation = self.rabbitmq_sentry.relation('amqp',
1349+ 'quantum-gateway:amqp')
1350+ nova_cc_relation = self.nova_cc_sentry.relation(\
1351+ 'quantum-network-service',
1352+ 'quantum-gateway:quantum-network-service')
1353+ ep = self.keystone.service_catalog.url_for(service_type='identity',
1354+ endpoint_type='publicURL')
1355+
1356+ if self._get_openstack_release() >= self.precise_havana:
1357+ expected = {
1358+ 'logdir': '/var/log/nova',
1359+ 'state_path': '/var/lib/nova',
1360+ 'lock_path': '/var/lock/nova',
1361+ 'root_helper': 'sudo nova-rootwrap /etc/nova/rootwrap.conf',
1362+ 'verbose': 'False',
1363+ 'use_syslog': 'False',
1364+ 'api_paste_config': '/etc/nova/api-paste.ini',
1365+ 'enabled_apis': 'metadata',
1366+ 'multi_host': 'True',
1367+ 'sql_connection': db_uri,
1368+ 'service_neutron_metadata_proxy': 'True',
1369+ 'rabbit_userid': 'neutron',
1370+ 'rabbit_virtual_host': 'openstack',
1371+ 'rabbit_password': rabbitmq_relation['password'],
1372+ 'rabbit_host': rabbitmq_relation['hostname'],
1373+ 'network_api_class': 'nova.network.neutronv2.api.API',
1374+ 'neutron_auth_strategy': 'keystone',
1375+ 'neutron_url': nova_cc_relation['quantum_url'],
1376+ 'neutron_admin_tenant_name': 'services',
1377+ 'neutron_admin_username': 'quantum_s3_ec2_nova',
1378+ 'neutron_admin_password': nova_cc_relation['service_password'],
1379+ 'neutron_admin_auth_url': ep
1380+
1381+ }
1382+ else:
1383+ expected = {
1384+ 'logdir': '/var/log/nova',
1385+ 'state_path': '/var/lib/nova',
1386+ 'lock_path': '/var/lock/nova',
1387+ 'root_helper': 'sudo nova-rootwrap /etc/nova/rootwrap.conf',
1388+ 'verbose': 'True',
1389+ 'api_paste_config': '/etc/nova/api-paste.ini',
1390+ 'enabled_apis': 'metadata',
1391+ 'multi_host': 'True',
1392+ 'sql_connection': db_uri,
1393+ 'service_quantum_metadata_proxy': 'True',
1394+ 'rabbit_userid': 'neutron',
1395+ 'rabbit_virtual_host': 'openstack',
1396+ 'rabbit_password': rabbitmq_relation['password'],
1397+ 'rabbit_host': rabbitmq_relation['hostname'],
1398+ 'network_api_class': 'nova.network.quantumv2.api.API',
1399+ 'quantum_auth_strategy': 'keystone',
1400+ 'quantum_url': nova_cc_relation['quantum_url'],
1401+ 'quantum_admin_tenant_name': 'services',
1402+ 'quantum_admin_username': 'quantum_s3_ec2_nova',
1403+ 'quantum_admin_password': nova_cc_relation['service_password'],
1404+ 'quantum_admin_auth_url': ep
1405+ }
1406+
1407+ ret = u.validate_config_data(unit, conf, 'DEFAULT', expected)
1408+ if ret:
1409+ message = "nova config error: {}".format(ret)
1410+ amulet.raise_status(amulet.FAIL, msg=message)
1411+
1412+ def test_ovs_neutron_plugin_config(self):
1413+ """Verify the data in the ovs neutron plugin config file. The ovs
1414+ plugin is not used by default since icehouse."""
1415+ if self._get_openstack_release() >= self.precise_icehouse:
1416+ return
1417+
1418+ unit = self.quantum_gateway_sentry
1419+ quantum_gateway_relation = unit.relation('shared-db', 'mysql:shared-db')
1420+
1421+ if self._get_openstack_release() >= self.precise_havana:
1422+ conf = '/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini'
1423+ expected = {
1424+ 'ovs': {
1425+ 'local_ip': quantum_gateway_relation['private-address'],
1426+ 'tenant_network_type': 'gre',
1427+ 'enable_tunneling': 'True',
1428+ 'tunnel_id_ranges': '1:1000'
1429+ }
1430+ }
1431+ if self._get_openstack_release() > self.precise_havana:
1432+ expected_additional = {
1433+ 'agent': {
1434+ 'polling_interval': '10',
1435+ 'root_helper': 'sudo /usr/bin/neutron-rootwrap '
1436+ '/etc/neutron/rootwrap.conf'
1437+ }
1438+ }
1439+ expected = dict(expected.items() + expected_additional.items())
1440+ else:
1441+ conf = '/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini'
1442+ expected = {
1443+ 'OVS': {
1444+ 'local_ip': quantum_gateway_relation['private-address'],
1445+ 'tenant_network_type': 'gre',
1446+ 'enable_tunneling': 'True',
1447+ 'tunnel_id_ranges': '1:1000'
1448+ },
1449+ 'AGENT': {
1450+ 'polling_interval': '10',
1451+ 'root_helper': 'sudo /usr/bin/quantum-rootwrap '
1452+ '/etc/quantum/rootwrap.conf'
1453+ }
1454+ }
1455+
1456+ for section, pairs in expected.iteritems():
1457+ ret = u.validate_config_data(unit, conf, section, pairs)
1458+ if ret:
1459+ message = "ovs neutron plugin config error: {}".format(ret)
1460+ amulet.raise_status(amulet.FAIL, msg=message)
1461+
1462+ def test_vpn_agent_config(self):
1463+ """Verify the data in the vpn agent config file. This isn't available
1464+ prior to havana."""
1465+ if self._get_openstack_release() < self.precise_havana:
1466+ return
1467+
1468+ unit = self.quantum_gateway_sentry
1469+ conf = '/etc/neutron/vpn_agent.ini'
1470+ expected = {
1471+ 'vpnagent': {
1472+ 'vpn_device_driver': 'neutron.services.vpn.device_drivers.'
1473+ 'ipsec.OpenSwanDriver'
1474+ },
1475+ 'ipsec': {
1476+ 'ipsec_status_check_interval': '60'
1477+ }
1478+ }
1479+
1480+ for section, pairs in expected.iteritems():
1481+ ret = u.validate_config_data(unit, conf, section, pairs)
1482+ if ret:
1483+ message = "vpn agent config error: {}".format(ret)
1484+ amulet.raise_status(amulet.FAIL, msg=message)
1485+
1486+ def test_create_network(self):
1487+ """Create a network, verify that it exists, and then delete it."""
1488+ self.neutron.format = 'json'
1489+ net_name = 'ext_net'
1490+
1491+ #Verify that the network doesn't exist
1492+ networks = self.neutron.list_networks(name=net_name)
1493+ net_count = len(networks['networks'])
1494+ if net_count != 0:
1495+ msg = "Expected zero networks, found {}".format(net_count)
1496+ amulet.raise_status(amulet.FAIL, msg=msg)
1497+
1498+ # Create a network and verify that it exists
1499+ network = {'name': net_name}
1500+ self.neutron.create_network({'network':network})
1501+
1502+ networks = self.neutron.list_networks(name=net_name)
1503+ net_len = len(networks['networks'])
1504+ if net_len != 1:
1505+ msg = "Expected 1 network, found {}".format(net_len)
1506+ amulet.raise_status(amulet.FAIL, msg=msg)
1507+
1508+ network = networks['networks'][0]
1509+ if network['name'] != net_name:
1510+ amulet.raise_status(amulet.FAIL, msg="network ext_net not found")
1511+
1512+ #Cleanup
1513+ self.neutron.delete_network(network['id'])
1514
1515=== added directory 'tests/charmhelpers'
1516=== added file 'tests/charmhelpers/__init__.py'
1517=== added directory 'tests/charmhelpers/contrib'
1518=== added file 'tests/charmhelpers/contrib/__init__.py'
1519=== added directory 'tests/charmhelpers/contrib/amulet'
1520=== added file 'tests/charmhelpers/contrib/amulet/__init__.py'
1521=== added file 'tests/charmhelpers/contrib/amulet/deployment.py'
1522--- tests/charmhelpers/contrib/amulet/deployment.py 1970-01-01 00:00:00 +0000
1523+++ tests/charmhelpers/contrib/amulet/deployment.py 2014-07-17 15:16:47 +0000
1524@@ -0,0 +1,63 @@
1525+import amulet
1526+import re
1527+
1528+
class AmuletDeployment(object):
    """This class provides generic Amulet deployment and test runner
    methods."""

    def __init__(self, series):
        """Initialize the deployment environment for the given series."""
        self.series = series
        self.d = amulet.Deployment(series=self.series)

    def _get_charm_name(self, service_name):
        """Get the charm name from the service name.

        Unique service names can be specified with a '-service#' suffix
        (e.g. mysql-service1).
        """
        if re.match(r"^.*-service\d{1,3}$", service_name):
            return re.sub(r'\-service\d{1,3}$', '', service_name)
        return service_name

    def _add_services(self, this_service, other_services):
        """Add services to the deployment.

        this_service is the local charm that we're focused on testing;
        other_services are the other charms that come from the charm store.
        """
        name, units = range(2)

        # The local charm deploys from the filesystem, so its charm store
        # name is not needed (the original computed it and never used it).
        self.d.add(this_service[name], units=this_service[units])

        for svc in other_services:
            charm_name = self._get_charm_name(svc[name])
            self.d.add(svc[name],
                       charm='cs:{}/{}'.format(self.series, charm_name),
                       units=svc[units])

    def _add_relations(self, relations):
        """Add all of the relations for the services."""
        for k, v in relations.iteritems():
            self.d.relate(k, v)

    def _configure_services(self, configs):
        """Configure all of the services."""
        for service, config in configs.iteritems():
            self.d.configure(service, config)

    def _deploy(self):
        """Deploy environment and wait for all hooks to finish executing."""
        # A bare 'except: raise' clause here was a no-op and has been
        # removed; any other exception simply propagates.
        try:
            self.d.setup()
            self.d.sentry.wait()
        except amulet.helpers.TimeoutError:
            amulet.raise_status(amulet.FAIL, msg="Deployment timed out")

    def run_tests(self):
        """Run all of the methods that are prefixed with 'test_'."""
        for test in dir(self):
            if test.startswith('test_'):
                getattr(self, test)()
1588
1589=== added file 'tests/charmhelpers/contrib/amulet/utils.py'
1590--- tests/charmhelpers/contrib/amulet/utils.py 1970-01-01 00:00:00 +0000
1591+++ tests/charmhelpers/contrib/amulet/utils.py 2014-07-17 15:16:47 +0000
1592@@ -0,0 +1,157 @@
1593+import ConfigParser
1594+import io
1595+import logging
1596+import re
1597+import sys
1598+from time import sleep
1599+
1600+
class AmuletUtils(object):
    """This class provides common utility functions that are used by Amulet
    tests."""

    def __init__(self, log_level=logging.ERROR):
        self.log = self.get_logger(level=log_level)

    def get_logger(self, name="amulet-logger", level=logging.DEBUG):
        """Get a logger object that will log to stdout."""
        log = logging
        logger = log.getLogger(name)
        fmt = \
            log.Formatter("%(asctime)s %(funcName)s %(levelname)s: %(message)s")

        handler = log.StreamHandler(stream=sys.stdout)
        handler.setLevel(level)
        handler.setFormatter(fmt)

        logger.addHandler(handler)
        logger.setLevel(level)

        return logger

    def valid_ip(self, ip):
        """Return True if ip looks like a dotted-quad IPv4 address."""
        return bool(re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip))

    def valid_url(self, url):
        """Return True if url looks like an http/https/ftp/ftps URL."""
        p = re.compile(
            r'^(?:http|ftp)s?://'
            r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # flake8: noqa
            r'localhost|'
            r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
            r'(?::\d+)?'
            r'(?:/?|[/?]\S+)$',
            re.IGNORECASE)
        return bool(p.match(url))

    def validate_services(self, commands):
        """Verify the specified services are running on the corresponding
        service units. Returns an error string, or None on success."""
        for k, v in commands.iteritems():
            for cmd in v:
                output, code = k.run(cmd)
                if code != 0:
                    return "command `{}` returned {}".format(cmd, str(code))
        return None

    def _get_config(self, unit, filename):
        """Get a ConfigParser object for parsing a unit's config file."""
        file_contents = unit.file_contents(filename)
        config = ConfigParser.ConfigParser()
        config.readfp(io.StringIO(file_contents))
        return config

    def validate_config_data(self, sentry_unit, config_file, section, expected):
        """Verify that the specified section of the config file contains
        the expected option key:value pairs. Returns an error string, or
        None on success."""
        config = self._get_config(sentry_unit, config_file)

        # ConfigParser treats DEFAULT specially; it is always present.
        if section != 'DEFAULT' and not config.has_section(section):
            return "section [{}] does not exist".format(section)

        for k in expected.keys():
            if not config.has_option(section, k):
                return "section [{}] is missing option {}".format(section, k)
            if config.get(section, k) != expected[k]:
                return "section [{}] {}:{} != expected {}:{}".format(section,
                       k, config.get(section, k), k, expected[k])
        return None

    def _validate_dict_data(self, expected, actual):
        """Compare expected dictionary data vs actual dictionary data.
        The values in the 'expected' dictionary can be strings, bools, ints,
        longs, or can be a function that evaluates a variable and returns a
        bool. Returns an error string, or None on success."""
        for k, v in expected.iteritems():
            if k in actual:
                if isinstance(v, basestring) or \
                        isinstance(v, bool) or \
                        isinstance(v, (int, long)):
                    if v != actual[k]:
                        return "{}:{}".format(k, actual[k])
                elif not v(actual[k]):
                    # v is a validator callable (e.g. valid_ip, not_null).
                    return "{}:{}".format(k, actual[k])
            else:
                return "key '{}' does not exist".format(k)
        return None

    def validate_relation_data(self, sentry_unit, relation, expected):
        """Validate actual relation data based on expected relation data."""
        actual = sentry_unit.relation(relation[0], relation[1])
        self.log.debug('actual: {}'.format(repr(actual)))
        return self._validate_dict_data(expected, actual)

    def _validate_list_data(self, expected, actual):
        """Compare expected list vs actual list data. Returns an error
        string, or None on success."""
        for e in expected:
            if e not in actual:
                return "expected item {} not found in actual list".format(e)
        return None

    def not_null(self, string):
        """Return True if string is not None."""
        # Fixed: use identity comparison with None (PEP 8) rather than !=.
        return string is not None

    def _get_file_mtime(self, sentry_unit, filename):
        """Get last modification time of file."""
        return sentry_unit.file_stat(filename)['mtime']

    def _get_dir_mtime(self, sentry_unit, directory):
        """Get last modification time of directory."""
        return sentry_unit.directory_stat(directory)['mtime']

    def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
        """Determine start time of the process based on the last modification
        time of the /proc/pid directory. If pgrep_full is True, the process
        name is matched against the full command line."""
        if pgrep_full:
            cmd = 'pgrep -o -f {}'.format(service)
        else:
            cmd = 'pgrep -o {}'.format(service)
        proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip())
        return self._get_dir_mtime(sentry_unit, proc_dir)

    def service_restarted(self, sentry_unit, service, filename,
                          pgrep_full=False, sleep_time=20):
        """Compare a service's start time vs a file's last modification time
        (such as a config file for that service) to determine if the service
        has been restarted."""
        sleep(sleep_time)
        return self._get_proc_start_time(sentry_unit, service, pgrep_full) >= \
            self._get_file_mtime(sentry_unit, filename)

    def relation_error(self, name, data):
        """Format a relation-data validation failure message."""
        return 'unexpected relation data in {} - {}'.format(name, data)

    def endpoint_error(self, name, data):
        """Format an endpoint-data validation failure message."""
        return 'unexpected endpoint data in {} - {}'.format(name, data)
1750
1751=== added directory 'tests/charmhelpers/contrib/openstack'
1752=== added file 'tests/charmhelpers/contrib/openstack/__init__.py'
1753=== added directory 'tests/charmhelpers/contrib/openstack/amulet'
1754=== added file 'tests/charmhelpers/contrib/openstack/amulet/__init__.py'
1755=== added file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py'
1756--- tests/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
1757+++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2014-07-17 15:16:47 +0000
1758@@ -0,0 +1,57 @@
1759+from charmhelpers.contrib.amulet.deployment import (
1760+ AmuletDeployment
1761+)
1762+
1763+
class OpenStackAmuletDeployment(AmuletDeployment):
    """This class inherits from AmuletDeployment and has additional support
    that is specifically for use by OpenStack charms."""

    def __init__(self, series, openstack=None, source=None):
        """Initialize the deployment environment.

        :param series: Ubuntu series to deploy on (e.g. 'precise').
        :param openstack: openstack-origin value for OpenStack charms.
        :param source: source value for supporting charms (mysql, ceph...).
        """
        super(OpenStackAmuletDeployment, self).__init__(series)
        self.openstack = openstack
        self.source = source

    def _add_services(self, this_service, other_services):
        """Add services to the deployment and set openstack-origin/source.

        Each service is a (name, unit_count) style sequence whose first
        element is the service name.

        Fix vs original: build a new list instead of appending
        this_service onto the caller's other_services list (the original
        mutated the caller's argument)."""
        super(OpenStackAmuletDeployment, self)._add_services(this_service,
                                                             other_services)
        services = list(other_services) + [this_service]
        # Charms configured via a 'source' option rather than
        # 'openstack-origin'.
        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']

        if self.openstack:
            for svc in services:
                charm_name = self._get_charm_name(svc[0])
                if charm_name not in use_source:
                    config = {'openstack-origin': self.openstack}
                    self.d.configure(svc[0], config)

        if self.source:
            for svc in services:
                charm_name = self._get_charm_name(svc[0])
                if charm_name in use_source:
                    config = {'source': self.source}
                    self.d.configure(svc[0], config)

    def _configure_services(self, configs):
        """Configure all of the services from a {service: config} dict."""
        # items() instead of iteritems() works on Python 2 and 3.
        for service, config in configs.items():
            self.d.configure(service, config)

    def _get_openstack_release(self):
        """Return an integer representing the enum value of the openstack
        release.

        Also sets the per-release attributes (self.precise_essex, ...) as
        a side effect, for use in test comparisons."""
        self.precise_essex, self.precise_folsom, self.precise_grizzly, \
            self.precise_havana, self.precise_icehouse, \
            self.trusty_icehouse = range(6)
        releases = {
            ('precise', None): self.precise_essex,
            ('precise', 'cloud:precise-folsom'): self.precise_folsom,
            ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
            ('precise', 'cloud:precise-havana'): self.precise_havana,
            ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
            ('trusty', None): self.trusty_icehouse}
        return releases[(self.series, self.openstack)]
1816
1817=== added file 'tests/charmhelpers/contrib/openstack/amulet/utils.py'
1818--- tests/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
1819+++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2014-07-17 15:16:47 +0000
1820@@ -0,0 +1,253 @@
1821+import logging
1822+import os
1823+import time
1824+import urllib
1825+
1826+import glanceclient.v1.client as glance_client
1827+import keystoneclient.v2_0 as keystone_client
1828+import novaclient.v1_1.client as nova_client
1829+
1830+from charmhelpers.contrib.amulet.utils import (
1831+ AmuletUtils
1832+)
1833+
1834+DEBUG = logging.DEBUG
1835+ERROR = logging.ERROR
1836+
1837+
class OpenStackAmuletUtils(AmuletUtils):
    """This class inherits from AmuletUtils and has additional support
    that is specifically for use by OpenStack charms."""

    def __init__(self, log_level=ERROR):
        """Initialize the deployment environment."""
        super(OpenStackAmuletUtils, self).__init__(log_level)

    def validate_endpoint_data(self, endpoints, admin_port, internal_port,
                               public_port, expected):
        """Validate actual endpoint data vs expected endpoint data. The ports
        are used to find the matching endpoint.

        Returns None on success, otherwise an error string."""
        found = False
        for ep in endpoints:
            self.log.debug('endpoint: {}'.format(repr(ep)))
            if admin_port in ep.adminurl and internal_port in ep.internalurl \
                    and public_port in ep.publicurl:
                found = True
                actual = {'id': ep.id,
                          'region': ep.region,
                          'adminurl': ep.adminurl,
                          'internalurl': ep.internalurl,
                          'publicurl': ep.publicurl,
                          'service_id': ep.service_id}
                ret = self._validate_dict_data(expected, actual)
                if ret:
                    return 'unexpected endpoint data - {}'.format(ret)

        if not found:
            return 'endpoint not found'
        # Explicit success value (original fell off the end returning None).
        return None

    def validate_svc_catalog_endpoint_data(self, expected, actual):
        """Validate a list of actual service catalog endpoints vs a list of
        expected service catalog endpoints.

        Returns None on success, otherwise an error string.

        Fix vs original: the final 'return ret' raised UnboundLocalError
        when 'expected' was empty; also items() for py2/py3."""
        self.log.debug('actual: {}'.format(repr(actual)))
        for k, v in expected.items():
            if k not in actual:
                return "endpoint {} does not exist".format(k)
            # Only the first endpoint entry per service is compared.
            ret = self._validate_dict_data(expected[k][0], actual[k][0])
            if ret:
                return self.endpoint_error(k, ret)
        return None

    def validate_tenant_data(self, expected, actual):
        """Validate a list of actual tenant data vs list of expected tenant
        data.

        Returns None on success, otherwise an error string. (Fixed the
        final 'return ret', which was unbound for empty 'expected'.)"""
        self.log.debug('actual: {}'.format(repr(actual)))
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'description': act.description,
                     'name': act.name, 'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected tenant data - {}".format(ret)
            if not found:
                return "tenant {} does not exist".format(e['name'])
        return None

    def validate_role_data(self, expected, actual):
        """Validate a list of actual role data vs a list of expected role
        data.

        Returns None on success, otherwise an error string. (Fixed the
        final 'return ret', which was unbound for empty 'expected'.)"""
        self.log.debug('actual: {}'.format(repr(actual)))
        for e in expected:
            found = False
            for act in actual:
                a = {'name': act.name, 'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected role data - {}".format(ret)
            if not found:
                return "role {} does not exist".format(e['name'])
        return None

    def validate_user_data(self, expected, actual):
        """Validate a list of actual user data vs a list of expected user
        data.

        Returns None on success, otherwise an error string. (Fixed the
        final 'return ret', which was unbound for empty 'expected'.)"""
        self.log.debug('actual: {}'.format(repr(actual)))
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'name': act.name,
                     'email': act.email, 'tenantId': act.tenantId,
                     'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected user data - {}".format(ret)
            if not found:
                return "user {} does not exist".format(e['name'])
        return None

    def validate_flavor_data(self, expected, actual):
        """Validate a list of actual flavors vs a list of expected flavors."""
        self.log.debug('actual: {}'.format(repr(actual)))
        act = [a.name for a in actual]
        return self._validate_list_data(expected, act)

    def tenant_exists(self, keystone, tenant):
        """Return True if tenant exists"""
        return tenant in [t.name for t in keystone.tenants.list()]

    def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                    tenant):
        """Authenticates admin user with the keystone admin endpoint."""
        # The keystone unit's private address is taken from its shared-db
        # relation data; 35357 is the keystone admin port.
        service_ip = \
            keystone_sentry.relation('shared-db',
                                     'mysql:shared-db')['private-address']
        ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_keystone_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with the keystone public endpoint."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_glance_admin(self, keystone):
        """Authenticates admin user with glance."""
        ep = keystone.service_catalog.url_for(service_type='image',
                                              endpoint_type='adminURL')
        return glance_client.Client(ep, token=keystone.auth_token)

    def authenticate_nova_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with nova-api."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return nova_client.Client(username=user, api_key=password,
                                  project_id=tenant, auth_url=ep)

    def create_cirros_image(self, glance, image_name):
        """Download the latest cirros image and upload it to glance.

        Returns the active glance image, or None on timeout.

        Fix vs original: the download URL was built from the local cache
        path ('tests/cirros-...'), so the 'tests/' directory leaked into
        the URL path and the fetch could never succeed; the image file is
        now also opened in binary mode for a byte-exact upload."""
        http_proxy = os.getenv('AMULET_HTTP_PROXY')
        self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
        if http_proxy:
            proxies = {'http': http_proxy}
            opener = urllib.FancyURLopener(proxies)
        else:
            opener = urllib.FancyURLopener()

        f = opener.open("http://download.cirros-cloud.net/version/released")
        version = f.read().strip()
        cirros_img = "cirros-{}-x86_64-disk.img".format(version)
        local_path = os.path.join('tests', cirros_img)

        if not os.path.exists(local_path):
            cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
                                                  version, cirros_img)
            opener.retrieve(cirros_url, local_path)
        f.close()

        with open(local_path, 'rb') as f:
            image = glance.images.create(name=image_name, is_public=True,
                                         disk_format='qcow2',
                                         container_format='bare', data=f)
        # Poll (up to ~30s) for the image to become active.
        count = 1
        status = image.status
        while status != 'active' and count < 10:
            time.sleep(3)
            image = glance.images.get(image.id)
            status = image.status
            self.log.debug('image status: {}'.format(status))
            count += 1

        if status != 'active':
            self.log.error('image creation timed out')
            return None

        return image

    def delete_image(self, glance, image):
        """Delete the specified image; return True on success, False on
        timeout (~30s)."""
        num_before = len(list(glance.images.list()))
        glance.images.delete(image)

        count = 1
        num_after = len(list(glance.images.list()))
        while num_after != (num_before - 1) and count < 10:
            time.sleep(3)
            num_after = len(list(glance.images.list()))
            self.log.debug('number of images: {}'.format(num_after))
            count += 1

        if num_after != (num_before - 1):
            self.log.error('image deletion timed out')
            return False

        return True

    def create_instance(self, nova, image_name, instance_name, flavor):
        """Create the specified instance; return the ACTIVE server, or
        None on timeout (~3 minutes)."""
        image = nova.images.find(name=image_name)
        flavor = nova.flavors.find(name=flavor)
        instance = nova.servers.create(name=instance_name, image=image,
                                       flavor=flavor)

        count = 1
        status = instance.status
        while status != 'ACTIVE' and count < 60:
            time.sleep(3)
            instance = nova.servers.get(instance.id)
            status = instance.status
            self.log.debug('instance status: {}'.format(status))
            count += 1

        if status != 'ACTIVE':
            self.log.error('instance creation timed out')
            return None

        return instance

    def delete_instance(self, nova, instance):
        """Delete the specified instance; return True on success, False on
        timeout (~30s)."""
        num_before = len(list(nova.servers.list()))
        nova.servers.delete(instance)

        count = 1
        num_after = len(list(nova.servers.list()))
        while num_after != (num_before - 1) and count < 10:
            time.sleep(3)
            num_after = len(list(nova.servers.list()))
            self.log.debug('number of instances: {}'.format(num_after))
            count += 1

        if num_after != (num_before - 1):
            self.log.error('instance deletion timed out')
            return False

        return True

Subscribers

People subscribed via source and target branches