Merge lp:~corey.bryant/charms/trusty/nova-cloud-controller/amulet-basic into lp:~openstack-charmers-archive/charms/trusty/nova-cloud-controller/next

Proposed by Corey Bryant
Status: Merged
Merged at revision: 84
Proposed branch: lp:~corey.bryant/charms/trusty/nova-cloud-controller/amulet-basic
Merge into: lp:~openstack-charmers-archive/charms/trusty/nova-cloud-controller/next
Diff against target: 2296 lines (+1781/-86)
30 files modified
Makefile (+12/-4)
charm-helpers-hooks.yaml (+10/-0)
charm-helpers-tests.yaml (+5/-0)
charm-helpers.yaml (+0/-10)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+57/-0)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+253/-0)
hooks/charmhelpers/contrib/openstack/context.py (+45/-13)
hooks/charmhelpers/contrib/openstack/neutron.py (+14/-0)
hooks/charmhelpers/contrib/openstack/templating.py (+22/-23)
hooks/charmhelpers/contrib/openstack/utils.py (+5/-2)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+1/-1)
hooks/charmhelpers/contrib/storage/linux/utils.py (+1/-0)
hooks/charmhelpers/core/fstab.py (+116/-0)
hooks/charmhelpers/core/hookenv.py (+5/-4)
hooks/charmhelpers/core/host.py (+28/-12)
hooks/charmhelpers/fetch/__init__.py (+24/-16)
hooks/charmhelpers/fetch/bzrurl.py (+2/-1)
tests/00-setup (+10/-0)
tests/10-basic-precise-essex (+10/-0)
tests/11-basic-precise-folsom (+18/-0)
tests/12-basic-precise-grizzly (+12/-0)
tests/13-basic-precise-havana (+12/-0)
tests/14-basic-precise-icehouse (+12/-0)
tests/15-basic-trusty-icehouse (+10/-0)
tests/README (+47/-0)
tests/basic_deployment.py (+520/-0)
tests/charmhelpers/contrib/amulet/deployment.py (+63/-0)
tests/charmhelpers/contrib/amulet/utils.py (+157/-0)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+57/-0)
tests/charmhelpers/contrib/openstack/amulet/utils.py (+253/-0)
To merge this branch: bzr merge lp:~corey.bryant/charms/trusty/nova-cloud-controller/amulet-basic
Reviewer Review Type Date Requested Status
Liam Young (community) Needs Fixing
Review via email: mp+226503@code.launchpad.net
To post a comment you must log in.
84. By Corey Bryant

Add Amulet basic tests

Revision history for this message
Liam Young (gnuoy) wrote :

Looks good but some lint fixes are needed

review: Needs Fixing
Revision history for this message
Corey Bryant (corey.bryant) wrote :

Thanks for the review Liam. Good catch on the noqa issue. I'll fix that in the charm-helpers branch and will fix up any lint issues throughout the charm tests.

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1=== modified file 'Makefile'
2--- Makefile 2014-05-21 10:14:28 +0000
3+++ Makefile 2014-07-11 17:34:59 +0000
4@@ -2,15 +2,23 @@
5 PYTHON := /usr/bin/env python
6
7 lint:
8- @flake8 --exclude hooks/charmhelpers hooks unit_tests
9+ @flake8 --exclude hooks/charmhelpers hooks unit_tests tests
10 @charm proof
11
12+unit_test:
13+ @echo Starting unit tests...
14+ @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
15+
16 test:
17- @echo Starting tests...
18- @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
19+ @echo Starting Amulet tests...
20+ # coreycb note: The -v should only be temporary until Amulet sends
21+ # raise_status() messages to stderr:
22+ # https://bugs.launchpad.net/amulet/+bug/1320357
23+ @juju test -v -p AMULET_HTTP_PROXY
24
25 sync:
26- @charm-helper-sync -c charm-helpers.yaml
27+ @charm-helper-sync -c charm-helpers-hooks.yaml
28+ @charm-helper-sync -c charm-helpers-tests.yaml
29
30 publish: lint test
31 bzr push lp:charms/nova-cloud-controller
32
33=== added file 'charm-helpers-hooks.yaml'
34--- charm-helpers-hooks.yaml 1970-01-01 00:00:00 +0000
35+++ charm-helpers-hooks.yaml 2014-07-11 17:34:59 +0000
36@@ -0,0 +1,10 @@
37+branch: lp:charm-helpers
38+destination: hooks/charmhelpers
39+include:
40+ - core
41+ - fetch
42+ - contrib.openstack|inc=*
43+ - contrib.storage
44+ - contrib.hahelpers:
45+ - apache
46+ - payload.execd
47
48=== added file 'charm-helpers-tests.yaml'
49--- charm-helpers-tests.yaml 1970-01-01 00:00:00 +0000
50+++ charm-helpers-tests.yaml 2014-07-11 17:34:59 +0000
51@@ -0,0 +1,5 @@
52+branch: lp:charm-helpers
53+destination: tests/charmhelpers
54+include:
55+ - contrib.amulet
56+ - contrib.openstack.amulet
57
58=== removed file 'charm-helpers.yaml'
59--- charm-helpers.yaml 2014-05-10 02:00:22 +0000
60+++ charm-helpers.yaml 1970-01-01 00:00:00 +0000
61@@ -1,10 +0,0 @@
62-branch: lp:charm-helpers
63-destination: hooks/charmhelpers
64-include:
65- - core
66- - fetch
67- - contrib.openstack|inc=*
68- - contrib.storage
69- - contrib.hahelpers:
70- - apache
71- - payload.execd
72
73=== added directory 'hooks/charmhelpers/contrib/openstack/amulet'
74=== added file 'hooks/charmhelpers/contrib/openstack/amulet/__init__.py'
75=== added file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
76--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
77+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-07-11 17:34:59 +0000
78@@ -0,0 +1,57 @@
79+from charmhelpers.contrib.amulet.deployment import (
80+ AmuletDeployment
81+)
82+
83+
class OpenStackAmuletDeployment(AmuletDeployment):
    """OpenStack-aware Amulet deployment helper.

    Extends AmuletDeployment with support for setting the package install
    source on deployed charms: OpenStack charms take 'openstack-origin',
    while supporting charms (mysql, ceph, ...) take 'source'.
    """

    def __init__(self, series, openstack=None, source=None):
        """Initialize the deployment environment.

        :param series: Ubuntu series to deploy on (e.g. 'precise').
        :param openstack: 'openstack-origin' value for OpenStack charms
            (e.g. 'cloud:precise-havana'); None leaves the charm default.
        :param source: 'source' value for supporting charms; None leaves
            the charm default.
        """
        super(OpenStackAmuletDeployment, self).__init__(series)
        self.openstack = openstack
        self.source = source

    def _add_services(self, this_service, other_services):
        """Add services to the deployment and set their install source."""
        super(OpenStackAmuletDeployment, self)._add_services(this_service,
                                                             other_services)
        # Build a new list instead of appending to other_services, which
        # would mutate the caller's list in place as a side effect.
        services = other_services + [this_service]
        # Charms that expose a 'source' option rather than
        # 'openstack-origin'.
        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']

        # Each svc is a sequence whose first element is the service name;
        # a single pass replaces the two near-identical loops.
        for svc in services:
            charm_name = self._get_charm_name(svc[0])
            if self.openstack and charm_name not in use_source:
                self.d.configure(svc[0], {'openstack-origin': self.openstack})
            elif self.source and charm_name in use_source:
                self.d.configure(svc[0], {'source': self.source})

    def _configure_services(self, configs):
        """Apply a {service_name: config_dict} mapping to the deployment."""
        for service, config in configs.iteritems():
            self.d.configure(service, config)

    def _get_openstack_release(self):
        """Return an integer enum value for the (series, origin) pair.

        Also binds self.precise_essex .. self.trusty_icehouse so callers
        can compare the result against named releases.

        :raises KeyError: if the (series, openstack) combination is not a
            known release.
        """
        self.precise_essex, self.precise_folsom, self.precise_grizzly, \
            self.precise_havana, self.precise_icehouse, \
            self.trusty_icehouse = range(6)
        releases = {
            ('precise', None): self.precise_essex,
            ('precise', 'cloud:precise-folsom'): self.precise_folsom,
            ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
            ('precise', 'cloud:precise-havana'): self.precise_havana,
            ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
            ('trusty', None): self.trusty_icehouse}
        return releases[(self.series, self.openstack)]
136
137=== added file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
138--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
139+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-07-11 17:34:59 +0000
140@@ -0,0 +1,253 @@
141+import logging
142+import os
143+import time
144+import urllib
145+
146+import glanceclient.v1.client as glance_client
147+import keystoneclient.v2_0 as keystone_client
148+import novaclient.v1_1.client as nova_client
149+
150+from charmhelpers.contrib.amulet.utils import (
151+ AmuletUtils
152+)
153+
154+DEBUG = logging.DEBUG
155+ERROR = logging.ERROR
156+
157+
class OpenStackAmuletUtils(AmuletUtils):
    """Amulet test utilities specialised for OpenStack charms.

    Inherits the generic helpers from AmuletUtils and adds validation and
    authentication helpers built on the keystone, glance and nova python
    clients.  Validation methods follow a common contract: they return
    None on success and an error string on failure.
    """

    def __init__(self, log_level=ERROR):
        """Initialize the utility class with the given logging level."""
        super(OpenStackAmuletUtils, self).__init__(log_level)

    def validate_endpoint_data(self, endpoints, admin_port, internal_port,
                               public_port, expected):
        """Validate actual endpoint data vs expected endpoint data.

        The ports are used to find the matching endpoint by substring
        match against the admin/internal/public URLs, so they must be
        passed as strings.  Returns an error string on mismatch or when
        no endpoint matches; None on success.
        """
        found = False
        for ep in endpoints:
            self.log.debug('endpoint: {}'.format(repr(ep)))
            if admin_port in ep.adminurl and internal_port in ep.internalurl \
                    and public_port in ep.publicurl:
                found = True
                actual = {'id': ep.id,
                          'region': ep.region,
                          'adminurl': ep.adminurl,
                          'internalurl': ep.internalurl,
                          'publicurl': ep.publicurl,
                          'service_id': ep.service_id}
                ret = self._validate_dict_data(expected, actual)
                if ret:
                    return 'unexpected endpoint data - {}'.format(ret)

        if not found:
            return 'endpoint not found'

    def validate_svc_catalog_endpoint_data(self, expected, actual):
        """Validate a list of actual service catalog endpoints vs a list of
        expected service catalog endpoints.

        Returns None on success, otherwise an error string.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        for k, v in expected.iteritems():
            if k in actual:
                ret = self._validate_dict_data(expected[k][0], actual[k][0])
                if ret:
                    return self.endpoint_error(k, ret)
            else:
                return "endpoint {} does not exist".format(k)
        # NOTE(review): 'ret' is unbound if 'expected' is empty, which
        # would raise UnboundLocalError here; callers currently always
        # pass a non-empty dict.
        return ret

    def validate_tenant_data(self, expected, actual):
        """Validate a list of actual tenant data vs list of expected tenant
        data.

        Returns None on success, otherwise an error string.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'description': act.description,
                     'name': act.name, 'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected tenant data - {}".format(ret)
            if not found:
                return "tenant {} does not exist".format(e['name'])
        # NOTE(review): same unbound-'ret' pattern as above when
        # 'expected' is empty.
        return ret

    def validate_role_data(self, expected, actual):
        """Validate a list of actual role data vs a list of expected role
        data.

        Returns None on success, otherwise an error string.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        for e in expected:
            found = False
            for act in actual:
                a = {'name': act.name, 'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected role data - {}".format(ret)
            if not found:
                return "role {} does not exist".format(e['name'])
        return ret

    def validate_user_data(self, expected, actual):
        """Validate a list of actual user data vs a list of expected user
        data.

        Returns None on success, otherwise an error string.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'name': act.name,
                     'email': act.email, 'tenantId': act.tenantId,
                     'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected user data - {}".format(ret)
            if not found:
                return "user {} does not exist".format(e['name'])
        return ret

    def validate_flavor_data(self, expected, actual):
        """Validate a list of actual flavors vs a list of expected flavor
        names."""
        self.log.debug('actual: {}'.format(repr(actual)))
        # Compare by flavor name only.
        act = [a.name for a in actual]
        return self._validate_list_data(expected, act)

    def tenant_exists(self, keystone, tenant):
        """Return True if tenant exists"""
        return tenant in [t.name for t in keystone.tenants.list()]

    def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                    tenant):
        """Authenticates admin user with the keystone admin endpoint."""
        # The keystone unit's private address is taken from its shared-db
        # relation data; 35357 is the keystone admin API port.
        service_ip = \
            keystone_sentry.relation('shared-db',
                                     'mysql:shared-db')['private-address']
        ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_keystone_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with the keystone public endpoint."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_glance_admin(self, keystone):
        """Authenticates admin user with glance.

        Reuses the token from an already-authenticated keystone client.
        """
        ep = keystone.service_catalog.url_for(service_type='image',
                                              endpoint_type='adminURL')
        return glance_client.Client(ep, token=keystone.auth_token)

    def authenticate_nova_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with nova-api."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return nova_client.Client(username=user, api_key=password,
                                  project_id=tenant, auth_url=ep)

    def create_cirros_image(self, glance, image_name):
        """Download the latest cirros image and upload it to glance.

        Honours the AMULET_HTTP_PROXY environment variable for the
        download.  Polls glance until the image is 'active' (up to ~27s).
        Returns the glance image, or None on timeout.
        """
        http_proxy = os.getenv('AMULET_HTTP_PROXY')
        self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
        if http_proxy:
            proxies = {'http': http_proxy}
            opener = urllib.FancyURLopener(proxies)
        else:
            opener = urllib.FancyURLopener()

        f = opener.open("http://download.cirros-cloud.net/version/released")
        version = f.read().strip()
        # Cached under tests/ so repeated runs skip the download.
        cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version)

        if not os.path.exists(cirros_img):
            # NOTE(review): the URL embeds the local 'tests/...' relative
            # path after the version segment — verify this matches the
            # server's actual layout.
            cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
                                                  version, cirros_img)
            opener.retrieve(cirros_url, cirros_img)
        f.close()

        with open(cirros_img) as f:
            image = glance.images.create(name=image_name, is_public=True,
                                         disk_format='qcow2',
                                         container_format='bare', data=f)
        # Poll until the image goes 'active', at most 9 x 3s sleeps.
        count = 1
        status = image.status
        while status != 'active' and count < 10:
            time.sleep(3)
            image = glance.images.get(image.id)
            status = image.status
            self.log.debug('image status: {}'.format(status))
            count += 1

        if status != 'active':
            self.log.error('image creation timed out')
            return None

        return image

    def delete_image(self, glance, image):
        """Delete the specified image.

        Polls the image count until it drops by one (up to ~27s).
        Returns True on success, False on timeout.
        """
        num_before = len(list(glance.images.list()))
        glance.images.delete(image)

        count = 1
        num_after = len(list(glance.images.list()))
        while num_after != (num_before - 1) and count < 10:
            time.sleep(3)
            num_after = len(list(glance.images.list()))
            self.log.debug('number of images: {}'.format(num_after))
            count += 1

        if num_after != (num_before - 1):
            self.log.error('image deletion timed out')
            return False

        return True

    def create_instance(self, nova, image_name, instance_name, flavor):
        """Create the specified instance.

        Polls until the instance is 'ACTIVE' (up to ~3 minutes — instance
        boot is slower than image upload).  Returns the nova server, or
        None on timeout.
        """
        image = nova.images.find(name=image_name)
        flavor = nova.flavors.find(name=flavor)
        instance = nova.servers.create(name=instance_name, image=image,
                                       flavor=flavor)

        count = 1
        status = instance.status
        while status != 'ACTIVE' and count < 60:
            time.sleep(3)
            instance = nova.servers.get(instance.id)
            status = instance.status
            self.log.debug('instance status: {}'.format(status))
            count += 1

        if status != 'ACTIVE':
            self.log.error('instance creation timed out')
            return None

        return instance

    def delete_instance(self, nova, instance):
        """Delete the specified instance.

        Polls the server count until it drops by one (up to ~27s).
        Returns True on success, False on timeout.
        """
        num_before = len(list(nova.servers.list()))
        nova.servers.delete(instance)

        count = 1
        num_after = len(list(nova.servers.list()))
        while num_after != (num_before - 1) and count < 10:
            time.sleep(3)
            num_after = len(list(nova.servers.list()))
            self.log.debug('number of instances: {}'.format(num_after))
            count += 1

        if num_after != (num_before - 1):
            self.log.error('instance deletion timed out')
            return False

        return True
394
395=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
396--- hooks/charmhelpers/contrib/openstack/context.py 2014-05-21 10:28:14 +0000
397+++ hooks/charmhelpers/contrib/openstack/context.py 2014-07-11 17:34:59 +0000
398@@ -243,23 +243,31 @@
399
400
401 class AMQPContext(OSContextGenerator):
402- interfaces = ['amqp']
403
404- def __init__(self, ssl_dir=None):
405+ def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
406 self.ssl_dir = ssl_dir
407+ self.rel_name = rel_name
408+ self.relation_prefix = relation_prefix
409+ self.interfaces = [rel_name]
410
411 def __call__(self):
412 log('Generating template context for amqp')
413 conf = config()
414+ user_setting = 'rabbit-user'
415+ vhost_setting = 'rabbit-vhost'
416+ if self.relation_prefix:
417+ user_setting = self.relation_prefix + '-rabbit-user'
418+ vhost_setting = self.relation_prefix + '-rabbit-vhost'
419+
420 try:
421- username = conf['rabbit-user']
422- vhost = conf['rabbit-vhost']
423+ username = conf[user_setting]
424+ vhost = conf[vhost_setting]
425 except KeyError as e:
426 log('Could not generate shared_db context. '
427 'Missing required charm config options: %s.' % e)
428 raise OSContextError
429 ctxt = {}
430- for rid in relation_ids('amqp'):
431+ for rid in relation_ids(self.rel_name):
432 ha_vip_only = False
433 for unit in related_units(rid):
434 if relation_get('clustered', rid=rid, unit=unit):
435@@ -418,12 +426,13 @@
436 """
437 Generates a context for an apache vhost configuration that configures
438 HTTPS reverse proxying for one or many endpoints. Generated context
439- looks something like:
440- {
441- 'namespace': 'cinder',
442- 'private_address': 'iscsi.mycinderhost.com',
443- 'endpoints': [(8776, 8766), (8777, 8767)]
444- }
445+ looks something like::
446+
447+ {
448+ 'namespace': 'cinder',
449+ 'private_address': 'iscsi.mycinderhost.com',
450+ 'endpoints': [(8776, 8766), (8777, 8767)]
451+ }
452
453 The endpoints list consists of a tuples mapping external ports
454 to internal ports.
455@@ -541,6 +550,26 @@
456
457 return nvp_ctxt
458
    def n1kv_ctxt(self):
        """Build the template context for the Cisco N1KV neutron plugin.

        Reads the plugin driver and config-file path from the neutron
        plugin attribute table, and the VSM connection settings from the
        charm config.
        """
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
                                               self.network_manager)
        n1kv_ctxt = {
            'core_plugin': driver,
            'neutron_plugin': 'n1kv',
            'neutron_security_groups': self.neutron_security_groups,
            'local_ip': unit_private_ip(),
            'config': n1kv_config,
            'vsm_ip': config('n1kv-vsm-ip'),
            'vsm_username': config('n1kv-vsm-username'),
            'vsm_password': config('n1kv-vsm-password'),
            # NOTE(review): this config key uses underscores while the
            # vsm keys above use dashes — confirm against config.yaml.
            'restrict_policy_profiles': config(
                'n1kv_restrict_policy_profiles'),
        }

        return n1kv_ctxt
478+
479 def neutron_ctxt(self):
480 if https():
481 proto = 'https'
482@@ -572,6 +601,8 @@
483 ctxt.update(self.ovs_ctxt())
484 elif self.plugin in ['nvp', 'nsx']:
485 ctxt.update(self.nvp_ctxt())
486+ elif self.plugin == 'n1kv':
487+ ctxt.update(self.n1kv_ctxt())
488
489 alchemy_flags = config('neutron-alchemy-flags')
490 if alchemy_flags:
491@@ -611,7 +642,7 @@
492 The subordinate interface allows subordinates to export their
493 configuration requirements to the principle for multiple config
494 files and multiple serivces. Ie, a subordinate that has interfaces
495- to both glance and nova may export to following yaml blob as json:
496+ to both glance and nova may export to following yaml blob as json::
497
498 glance:
499 /etc/glance/glance-api.conf:
500@@ -630,7 +661,8 @@
501
502 It is then up to the principle charms to subscribe this context to
503 the service+config file it is interestd in. Configuration data will
504- be available in the template context, in glance's case, as:
505+ be available in the template context, in glance's case, as::
506+
507 ctxt = {
508 ... other context ...
509 'subordinate_config': {
510
511=== modified file 'hooks/charmhelpers/contrib/openstack/neutron.py'
512--- hooks/charmhelpers/contrib/openstack/neutron.py 2014-05-21 10:28:14 +0000
513+++ hooks/charmhelpers/contrib/openstack/neutron.py 2014-07-11 17:34:59 +0000
514@@ -128,6 +128,20 @@
515 'server_packages': ['neutron-server',
516 'neutron-plugin-vmware'],
517 'server_services': ['neutron-server']
518+ },
519+ 'n1kv': {
520+ 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
521+ 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
522+ 'contexts': [
523+ context.SharedDBContext(user=config('neutron-database-user'),
524+ database=config('neutron-database'),
525+ relation_prefix='neutron',
526+ ssl_dir=NEUTRON_CONF_DIR)],
527+ 'services': [],
528+ 'packages': [['neutron-plugin-cisco']],
529+ 'server_packages': ['neutron-server',
530+ 'neutron-plugin-cisco'],
531+ 'server_services': ['neutron-server']
532 }
533 }
534 if release >= 'icehouse':
535
536=== modified file 'hooks/charmhelpers/contrib/openstack/templating.py'
537--- hooks/charmhelpers/contrib/openstack/templating.py 2014-02-24 19:31:57 +0000
538+++ hooks/charmhelpers/contrib/openstack/templating.py 2014-07-11 17:34:59 +0000
539@@ -30,17 +30,17 @@
540 loading dir.
541
542 A charm may also ship a templates dir with this module
543- and it will be appended to the bottom of the search list, eg:
544- hooks/charmhelpers/contrib/openstack/templates.
545-
546- :param templates_dir: str: Base template directory containing release
547- sub-directories.
548- :param os_release : str: OpenStack release codename to construct template
549- loader.
550-
551- :returns : jinja2.ChoiceLoader constructed with a list of
552- jinja2.FilesystemLoaders, ordered in descending
553- order by OpenStack release.
554+ and it will be appended to the bottom of the search list, eg::
555+
556+ hooks/charmhelpers/contrib/openstack/templates
557+
558+ :param templates_dir (str): Base template directory containing release
559+ sub-directories.
560+ :param os_release (str): OpenStack release codename to construct template
561+ loader.
562+ :returns: jinja2.ChoiceLoader constructed with a list of
563+ jinja2.FilesystemLoaders, ordered in descending
564+ order by OpenStack release.
565 """
566 tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
567 for rel in OPENSTACK_CODENAMES.itervalues()]
568@@ -111,7 +111,8 @@
569 and ease the burden of managing config templates across multiple OpenStack
570 releases.
571
572- Basic usage:
573+ Basic usage::
574+
575 # import some common context generates from charmhelpers
576 from charmhelpers.contrib.openstack import context
577
578@@ -131,21 +132,19 @@
579 # write out all registered configs
580 configs.write_all()
581
582- Details:
583+ **OpenStack Releases and template loading**
584
585- OpenStack Releases and template loading
586- ---------------------------------------
587 When the object is instantiated, it is associated with a specific OS
588 release. This dictates how the template loader will be constructed.
589
590 The constructed loader attempts to load the template from several places
591 in the following order:
592- - from the most recent OS release-specific template dir (if one exists)
593- - the base templates_dir
594- - a template directory shipped in the charm with this helper file.
595-
596-
597- For the example above, '/tmp/templates' contains the following structure:
598+ - from the most recent OS release-specific template dir (if one exists)
599+ - the base templates_dir
600+ - a template directory shipped in the charm with this helper file.
601+
602+ For the example above, '/tmp/templates' contains the following structure::
603+
604 /tmp/templates/nova.conf
605 /tmp/templates/api-paste.ini
606 /tmp/templates/grizzly/api-paste.ini
607@@ -169,8 +168,8 @@
608 $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
609 us to ship common templates (haproxy, apache) with the helpers.
610
611- Context generators
612- ---------------------------------------
613+ **Context generators**
614+
615 Context generators are used to generate template contexts during hook
616 execution. Doing so may require inspecting service relations, charm
617 config, etc. When registered, a config file is associated with a list
618
619=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py'
620--- hooks/charmhelpers/contrib/openstack/utils.py 2014-06-16 14:47:23 +0000
621+++ hooks/charmhelpers/contrib/openstack/utils.py 2014-07-11 17:34:59 +0000
622@@ -3,7 +3,6 @@
623 # Common python helper functions used for OpenStack charms.
624 from collections import OrderedDict
625
626-import apt_pkg as apt
627 import subprocess
628 import os
629 import socket
630@@ -85,6 +84,8 @@
631 '''Derive OpenStack release codename from a given installation source.'''
632 ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
633 rel = ''
634+ if src is None:
635+ return rel
636 if src in ['distro', 'distro-proposed']:
637 try:
638 rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
639@@ -132,6 +133,7 @@
640
641 def get_os_codename_package(package, fatal=True):
642 '''Derive OpenStack release codename from an installed package.'''
643+ import apt_pkg as apt
644 apt.init()
645
646 # Tell apt to build an in-memory cache to prevent race conditions (if
647@@ -189,7 +191,7 @@
648 for version, cname in vers_map.iteritems():
649 if cname == codename:
650 return version
651- #e = "Could not determine OpenStack version for package: %s" % pkg
652+ # e = "Could not determine OpenStack version for package: %s" % pkg
653 # error_out(e)
654
655
656@@ -325,6 +327,7 @@
657
658 """
659
660+ import apt_pkg as apt
661 src = config('openstack-origin')
662 cur_vers = get_os_version_package(package)
663 available_vers = get_os_version_install_source(src)
664
665=== modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
666--- hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-03-27 11:02:24 +0000
667+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-07-11 17:34:59 +0000
668@@ -303,7 +303,7 @@
669 blk_device, fstype, system_services=[]):
670 """
671 NOTE: This function must only be called from a single service unit for
672- the same rbd_img otherwise data loss will occur.
673+ the same rbd_img otherwise data loss will occur.
674
675 Ensures given pool and RBD image exists, is mapped to a block device,
676 and the device is formatted and mounted at the given mount_point.
677
678=== modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py'
679--- hooks/charmhelpers/contrib/storage/linux/utils.py 2014-05-21 10:28:14 +0000
680+++ hooks/charmhelpers/contrib/storage/linux/utils.py 2014-07-11 17:34:59 +0000
681@@ -37,6 +37,7 @@
682 check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
683 'bs=512', 'count=100', 'seek=%s' % (gpt_end)])
684
685+
686 def is_device_mounted(device):
687 '''Given a device path, return True if that device is mounted, and False
688 if it isn't.
689
690=== added file 'hooks/charmhelpers/core/fstab.py'
691--- hooks/charmhelpers/core/fstab.py 1970-01-01 00:00:00 +0000
692+++ hooks/charmhelpers/core/fstab.py 2014-07-11 17:34:59 +0000
693@@ -0,0 +1,116 @@
694+#!/usr/bin/env python
695+# -*- coding: utf-8 -*-
696+
697+__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
698+
699+import os
700+
701+
class Fstab(file):
    """This class extends file in order to implement a file reader/writer
    for file `/etc/fstab`

    NOTE: Python 2 only — it subclasses the `file` builtin, which does
    not exist in Python 3.
    """

    class Entry(object):
        """Entry class represents a non-comment line on the `/etc/fstab` file
        """
        def __init__(self, device, mountpoint, filesystem,
                     options, d=0, p=0):
            # d and p are the dump and fsck-pass fields; entries parsed
            # from the file carry them as strings, which is fine because
            # equality is defined on the string form.
            self.device = device
            self.mountpoint = mountpoint
            self.filesystem = filesystem

            if not options:
                options = "defaults"

            self.options = options
            self.d = d
            self.p = p

        def __eq__(self, o):
            # Entries are equal iff their rendered fstab lines are equal.
            return str(self) == str(o)

        def __str__(self):
            return "{} {} {} {} {} {}".format(self.device,
                                              self.mountpoint,
                                              self.filesystem,
                                              self.options,
                                              self.d,
                                              self.p)

    # Default fstab location; overridable via the path argument (used by
    # tests to point at a temp file).
    DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')

    def __init__(self, path=None):
        if path:
            self._path = path
        else:
            self._path = self.DEFAULT_PATH
        # Open read/write without truncating the existing file.
        file.__init__(self, self._path, 'r+')

    def _hydrate_entry(self, line):
        """Parse one fstab line into an Entry.

        Raises ValueError (via Entry's positional args) on malformed
        lines with too few fields.
        """
        # NOTE: use split with no arguments to split on any
        # whitespace including tabs
        return Fstab.Entry(*filter(
            lambda x: x not in ('', None),
            line.strip("\n").split()))

    @property
    def entries(self):
        """Yield an Entry for each parseable non-comment line."""
        self.seek(0)
        for line in self.readlines():
            try:
                if not line.startswith("#"):
                    yield self._hydrate_entry(line)
            except ValueError:
                # Skip malformed lines rather than failing the whole read.
                pass

    def get_entry_by_attr(self, attr, value):
        """Return the first Entry whose `attr` equals `value`, else None."""
        for entry in self.entries:
            e_attr = getattr(entry, attr)
            if e_attr == value:
                return entry
        return None

    def add_entry(self, entry):
        """Append `entry` unless its device already has an entry.

        Returns the entry on success, False if the device is already
        present.  (Note the asymmetric return types.)
        """
        if self.get_entry_by_attr('device', entry.device):
            return False

        # Iterating `entries` above left the file position at EOF, so
        # this write appends.
        self.write(str(entry) + '\n')
        self.truncate()
        return entry

    def remove_entry(self, entry):
        """Remove the line matching `entry`; True if removed, else False."""
        self.seek(0)

        lines = self.readlines()

        found = False
        for index, line in enumerate(lines):
            if not line.startswith("#"):
                if self._hydrate_entry(line) == entry:
                    found = True
                    break

        if not found:
            return False

        lines.remove(line)

        # Rewrite the remaining lines from the start and truncate the
        # leftover tail of the old, longer content.
        self.seek(0)
        self.write(''.join(lines))
        self.truncate()
        return True

    @classmethod
    def remove_by_mountpoint(cls, mountpoint, path=None):
        """Remove the entry mounted at `mountpoint`; True if removed."""
        fstab = cls(path=path)
        entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
        if entry:
            return fstab.remove_entry(entry)
        return False

    @classmethod
    def add(cls, device, mountpoint, filesystem, options=None, path=None):
        """Convenience wrapper: build an Entry and add it to the file."""
        return cls(path=path).add_entry(Fstab.Entry(device,
                                                    mountpoint, filesystem,
                                                    options=options))
810
811=== modified file 'hooks/charmhelpers/core/hookenv.py'
812--- hooks/charmhelpers/core/hookenv.py 2014-05-19 11:38:09 +0000
813+++ hooks/charmhelpers/core/hookenv.py 2014-07-11 17:34:59 +0000
814@@ -25,7 +25,7 @@
815 def cached(func):
816 """Cache return values for multiple executions of func + args
817
818- For example:
819+ For example::
820
821 @cached
822 def unit_get(attribute):
823@@ -445,18 +445,19 @@
824 class Hooks(object):
825 """A convenient handler for hook functions.
826
827- Example:
828+ Example::
829+
830 hooks = Hooks()
831
832 # register a hook, taking its name from the function name
833 @hooks.hook()
834 def install():
835- ...
836+ pass # your code here
837
838 # register a hook, providing a custom hook name
839 @hooks.hook("config-changed")
840 def config_changed():
841- ...
842+ pass # your code here
843
844 if __name__ == "__main__":
845 # execute a hook based on the name the program is called by
846
847=== modified file 'hooks/charmhelpers/core/host.py'
848--- hooks/charmhelpers/core/host.py 2014-05-19 11:38:09 +0000
849+++ hooks/charmhelpers/core/host.py 2014-07-11 17:34:59 +0000
850@@ -12,11 +12,11 @@
851 import string
852 import subprocess
853 import hashlib
854-import apt_pkg
855
856 from collections import OrderedDict
857
858 from hookenv import log
859+from fstab import Fstab
860
861
862 def service_start(service_name):
863@@ -35,7 +35,8 @@
864
865
866 def service_reload(service_name, restart_on_failure=False):
867- """Reload a system service, optionally falling back to restart if reload fails"""
868+ """Reload a system service, optionally falling back to restart if
869+ reload fails"""
870 service_result = service('reload', service_name)
871 if not service_result and restart_on_failure:
872 service_result = service('restart', service_name)
873@@ -144,7 +145,19 @@
874 target.write(content)
875
876
877-def mount(device, mountpoint, options=None, persist=False):
878+def fstab_remove(mp):
879+ """Remove the given mountpoint entry from /etc/fstab
880+ """
881+ return Fstab.remove_by_mountpoint(mp)
882+
883+
884+def fstab_add(dev, mp, fs, options=None):
885+ """Adds the given device entry to the /etc/fstab file
886+ """
887+ return Fstab.add(dev, mp, fs, options=options)
888+
889+
890+def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
891 """Mount a filesystem at a particular mountpoint"""
892 cmd_args = ['mount']
893 if options is not None:
894@@ -155,9 +168,9 @@
895 except subprocess.CalledProcessError, e:
896 log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
897 return False
898+
899 if persist:
900- # TODO: update fstab
901- pass
902+ return fstab_add(device, mountpoint, filesystem, options=options)
903 return True
904
905
906@@ -169,9 +182,9 @@
907 except subprocess.CalledProcessError, e:
908 log('Error unmounting {}\n{}'.format(mountpoint, e.output))
909 return False
910+
911 if persist:
912- # TODO: update fstab
913- pass
914+ return fstab_remove(mountpoint)
915 return True
916
917
918@@ -198,13 +211,13 @@
919 def restart_on_change(restart_map, stopstart=False):
920 """Restart services based on configuration files changing
921
922- This function is used a decorator, for example
 923+    This function is used as a decorator, for example::
924
925 @restart_on_change({
926 '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
927 })
928 def ceph_client_changed():
929- ...
930+ pass # your code here
931
932 In this example, the cinder-api and cinder-volume services
933 would be restarted if /etc/ceph/ceph.conf is changed by the
934@@ -300,10 +313,13 @@
935
936 def cmp_pkgrevno(package, revno, pkgcache=None):
937 '''Compare supplied revno with the revno of the installed package
938- 1 => Installed revno is greater than supplied arg
939- 0 => Installed revno is the same as supplied arg
940- -1 => Installed revno is less than supplied arg
941+
942+ * 1 => Installed revno is greater than supplied arg
943+ * 0 => Installed revno is the same as supplied arg
944+ * -1 => Installed revno is less than supplied arg
945+
946 '''
947+ import apt_pkg
948 if not pkgcache:
949 apt_pkg.init()
950 pkgcache = apt_pkg.Cache()
951
952=== modified file 'hooks/charmhelpers/fetch/__init__.py'
953--- hooks/charmhelpers/fetch/__init__.py 2014-06-04 13:06:13 +0000
954+++ hooks/charmhelpers/fetch/__init__.py 2014-07-11 17:34:59 +0000
955@@ -13,7 +13,6 @@
956 config,
957 log,
958 )
959-import apt_pkg
960 import os
961
962
963@@ -117,6 +116,7 @@
964
965 def filter_installed_packages(packages):
966 """Returns a list of packages that require installation"""
967+ import apt_pkg
968 apt_pkg.init()
969
970 # Tell apt to build an in-memory cache to prevent race conditions (if
971@@ -235,31 +235,39 @@
972 sources_var='install_sources',
973 keys_var='install_keys'):
974 """
975- Configure multiple sources from charm configuration
976+ Configure multiple sources from charm configuration.
977+
978+ The lists are encoded as yaml fragments in the configuration.
 979+    The fragment needs to be included as a string.
980
981 Example config:
982- install_sources:
983+ install_sources: |
984 - "ppa:foo"
985 - "http://example.com/repo precise main"
986- install_keys:
987+ install_keys: |
988 - null
989 - "a1b2c3d4"
990
991 Note that 'null' (a.k.a. None) should not be quoted.
992 """
993- sources = safe_load(config(sources_var))
994- keys = config(keys_var)
995- if keys is not None:
996- keys = safe_load(keys)
997- if isinstance(sources, basestring) and (
998- keys is None or isinstance(keys, basestring)):
999- add_source(sources, keys)
1000+ sources = safe_load((config(sources_var) or '').strip()) or []
1001+ keys = safe_load((config(keys_var) or '').strip()) or None
1002+
1003+ if isinstance(sources, basestring):
1004+ sources = [sources]
1005+
1006+ if keys is None:
1007+ for source in sources:
1008+ add_source(source, None)
1009 else:
1010- if not len(sources) == len(keys):
1011- msg = 'Install sources and keys lists are different lengths'
1012- raise SourceConfigError(msg)
1013- for src_num in range(len(sources)):
1014- add_source(sources[src_num], keys[src_num])
1015+ if isinstance(keys, basestring):
1016+ keys = [keys]
1017+
1018+ if len(sources) != len(keys):
1019+ raise SourceConfigError(
1020+ 'Install sources and keys lists are different lengths')
1021+ for source, key in zip(sources, keys):
1022+ add_source(source, key)
1023 if update:
1024 apt_update(fatal=True)
1025
1026
1027=== modified file 'hooks/charmhelpers/fetch/bzrurl.py'
1028--- hooks/charmhelpers/fetch/bzrurl.py 2013-11-06 03:48:26 +0000
1029+++ hooks/charmhelpers/fetch/bzrurl.py 2014-07-11 17:34:59 +0000
1030@@ -39,7 +39,8 @@
1031 def install(self, source):
1032 url_parts = self.parse_url(source)
1033 branch_name = url_parts.path.strip("/").split("/")[-1]
1034- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name)
1035+ dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
1036+ branch_name)
1037 if not os.path.exists(dest_dir):
1038 mkdir(dest_dir, perms=0755)
1039 try:
1040
1041=== added directory 'tests'
1042=== added file 'tests/00-setup'
1043--- tests/00-setup 1970-01-01 00:00:00 +0000
1044+++ tests/00-setup 2014-07-11 17:34:59 +0000
1045@@ -0,0 +1,10 @@
1046+#!/bin/bash
1047+
1048+set -ex
1049+
1050+sudo add-apt-repository --yes ppa:juju/stable
1051+sudo apt-get update --yes
1052+sudo apt-get install --yes python-amulet
1053+sudo apt-get install --yes python-glanceclient
1054+sudo apt-get install --yes python-keystoneclient
1055+sudo apt-get install --yes python-novaclient
1056
1057=== added file 'tests/10-basic-precise-essex'
1058--- tests/10-basic-precise-essex 1970-01-01 00:00:00 +0000
1059+++ tests/10-basic-precise-essex 2014-07-11 17:34:59 +0000
1060@@ -0,0 +1,10 @@
1061+#!/usr/bin/python
1062+
1063+"""Amulet tests on a basic nova cloud controller deployment on
1064+ precise-essex."""
1065+
1066+from basic_deployment import NovaCCBasicDeployment
1067+
1068+if __name__ == '__main__':
1069+ deployment = NovaCCBasicDeployment(series='precise')
1070+ deployment.run_tests()
1071
1072=== added file 'tests/11-basic-precise-folsom'
1073--- tests/11-basic-precise-folsom 1970-01-01 00:00:00 +0000
1074+++ tests/11-basic-precise-folsom 2014-07-11 17:34:59 +0000
1075@@ -0,0 +1,18 @@
1076+#!/usr/bin/python
1077+
1078+"""Amulet tests on a basic nova cloud controller deployment on
1079+ precise-folsom."""
1080+
1081+import amulet
1082+from basic_deployment import NovaCCBasicDeployment
1083+
1084+if __name__ == '__main__':
1085+ # NOTE(coreycb): Skipping failing test until resolved. 'nova-manage db sync'
1086+ # fails in shared-db-relation-changed (only fails on folsom)
1087+ message = "Skipping failing test until resolved"
1088+ amulet.raise_status(amulet.SKIP, msg=message)
1089+
1090+ deployment = NovaCCBasicDeployment(series='precise',
1091+ openstack='cloud:precise-folsom',
1092+ source='cloud:precise-updates/folsom')
1093+ deployment.run_tests()
1094
1095=== added file 'tests/12-basic-precise-grizzly'
1096--- tests/12-basic-precise-grizzly 1970-01-01 00:00:00 +0000
1097+++ tests/12-basic-precise-grizzly 2014-07-11 17:34:59 +0000
1098@@ -0,0 +1,12 @@
1099+#!/usr/bin/python
1100+
1101+"""Amulet tests on a basic nova cloud controller deployment on
1102+ precise-grizzly."""
1103+
1104+from basic_deployment import NovaCCBasicDeployment
1105+
1106+if __name__ == '__main__':
1107+ deployment = NovaCCBasicDeployment(series='precise',
1108+ openstack='cloud:precise-grizzly',
1109+ source='cloud:precise-updates/grizzly')
1110+ deployment.run_tests()
1111
1112=== added file 'tests/13-basic-precise-havana'
1113--- tests/13-basic-precise-havana 1970-01-01 00:00:00 +0000
1114+++ tests/13-basic-precise-havana 2014-07-11 17:34:59 +0000
1115@@ -0,0 +1,12 @@
1116+#!/usr/bin/python
1117+
1118+"""Amulet tests on a basic nova cloud controller deployment on
1119+ precise-havana."""
1120+
1121+from basic_deployment import NovaCCBasicDeployment
1122+
1123+if __name__ == '__main__':
1124+ deployment = NovaCCBasicDeployment(series='precise',
1125+ openstack='cloud:precise-havana',
1126+ source='cloud:precise-updates/havana')
1127+ deployment.run_tests()
1128
1129=== added file 'tests/14-basic-precise-icehouse'
1130--- tests/14-basic-precise-icehouse 1970-01-01 00:00:00 +0000
1131+++ tests/14-basic-precise-icehouse 2014-07-11 17:34:59 +0000
1132@@ -0,0 +1,12 @@
1133+#!/usr/bin/python
1134+
1135+"""Amulet tests on a basic nova cloud controller deployment on
1136+ precise-icehouse."""
1137+
1138+from basic_deployment import NovaCCBasicDeployment
1139+
1140+if __name__ == '__main__':
1141+ deployment = NovaCCBasicDeployment(series='precise',
1142+ openstack='cloud:precise-icehouse',
1143+ source='cloud:precise-updates/icehouse')
1144+ deployment.run_tests()
1145
1146=== added file 'tests/15-basic-trusty-icehouse'
1147--- tests/15-basic-trusty-icehouse 1970-01-01 00:00:00 +0000
1148+++ tests/15-basic-trusty-icehouse 2014-07-11 17:34:59 +0000
1149@@ -0,0 +1,10 @@
1150+#!/usr/bin/python
1151+
1152+"""Amulet tests on a basic nova cloud controller deployment on
1153+ trusty-icehouse."""
1154+
1155+from basic_deployment import NovaCCBasicDeployment
1156+
1157+if __name__ == '__main__':
1158+ deployment = NovaCCBasicDeployment(series='trusty')
1159+ deployment.run_tests()
1160
1161=== added file 'tests/README'
1162--- tests/README 1970-01-01 00:00:00 +0000
1163+++ tests/README 2014-07-11 17:34:59 +0000
1164@@ -0,0 +1,47 @@
1165+This directory provides Amulet tests that focus on verification of Nova Cloud
1166+Controller deployments.
1167+
1168+If you use a web proxy server to access the web, you'll need to set the
1169+AMULET_HTTP_PROXY environment variable to the http URL of the proxy server.
1170+
1171+The following examples demonstrate different ways that tests can be executed.
1172+All examples are run from the charm's root directory.
1173+
1174+ * To run all tests (starting with 00-setup):
1175+
1176+ make test
1177+
1178+ * To run a specific test module (or modules):
1179+
1180+ juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
1181+
1182+ * To run a specific test module (or modules), and keep the environment
1183+ deployed after a failure:
1184+
1185+ juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
1186+
1187+ * To re-run a test module against an already deployed environment (one
1188+ that was deployed by a previous call to 'juju test --set-e'):
1189+
1190+ ./tests/15-basic-trusty-icehouse
1191+
1192+For debugging and test development purposes, all code should be idempotent.
1193+In other words, the code should have the ability to be re-run without changing
1194+the results beyond the initial run. This enables editing and re-running of a
1195+test module against an already deployed environment, as described above.
1196+
1197+Manual debugging tips:
1198+
1199+ * Set the following env vars before using the OpenStack CLI as admin:
1200+ export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
1201+ export OS_TENANT_NAME=admin
1202+ export OS_USERNAME=admin
1203+ export OS_PASSWORD=openstack
1204+ export OS_REGION_NAME=RegionOne
1205+
1206+ * Set the following env vars before using the OpenStack CLI as demoUser:
1207+ export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
1208+ export OS_TENANT_NAME=demoTenant
1209+ export OS_USERNAME=demoUser
1210+ export OS_PASSWORD=password
1211+ export OS_REGION_NAME=RegionOne
1212
1213=== added file 'tests/basic_deployment.py'
1214--- tests/basic_deployment.py 1970-01-01 00:00:00 +0000
1215+++ tests/basic_deployment.py 2014-07-11 17:34:59 +0000
1216@@ -0,0 +1,520 @@
1217+#!/usr/bin/python
1218+
1219+import amulet
1220+
1221+from charmhelpers.contrib.openstack.amulet.deployment import (
1222+ OpenStackAmuletDeployment
1223+)
1224+
1225+from charmhelpers.contrib.openstack.amulet.utils import (
1226+ OpenStackAmuletUtils,
1227+ DEBUG, # flake8: noqa
1228+ ERROR
1229+)
1230+
1231+# Use DEBUG to turn on debug logging
1232+u = OpenStackAmuletUtils(ERROR)
1233+
1234+
1235+class NovaCCBasicDeployment(OpenStackAmuletDeployment):
1236+ """Amulet tests on a basic nova cloud controller deployment."""
1237+
1238+ def __init__(self, series=None, openstack=None, source=None):
1239+ """Deploy the entire test environment."""
1240+ super(NovaCCBasicDeployment, self).__init__(series, openstack, source)
1241+ self._add_services()
1242+ self._add_relations()
1243+ self._configure_services()
1244+ self._deploy()
1245+ self._initialize_tests()
1246+
1247+ def _add_services(self):
1248+ """Add the service that we're testing, including the number of units,
1249+ where nova-cloud-controller is local, and the other charms are from
1250+ the charm store."""
1251+ this_service = ('nova-cloud-controller', 1)
1252+ other_services = [('mysql', 1), ('rabbitmq-server', 1),
1253+ ('nova-compute', 2), ('keystone', 1), ('glance', 1)]
1254+ super(NovaCCBasicDeployment, self)._add_services(this_service,
1255+ other_services)
1256+
1257+ def _add_relations(self):
1258+ """Add all of the relations for the services."""
1259+ relations = {
1260+ 'nova-cloud-controller:shared-db': 'mysql:shared-db',
1261+ 'nova-cloud-controller:identity-service': 'keystone:identity-service',
1262+ 'nova-cloud-controller:amqp': 'rabbitmq-server:amqp',
1263+ 'nova-cloud-controller:cloud-compute': 'nova-compute:cloud-compute',
1264+ 'nova-cloud-controller:image-service': 'glance:image-service',
1265+ 'nova-compute:image-service': 'glance:image-service',
1266+ 'nova-compute:shared-db': 'mysql:shared-db',
1267+ 'nova-compute:amqp': 'rabbitmq-server:amqp',
1268+ 'keystone:shared-db': 'mysql:shared-db',
1269+ 'glance:identity-service': 'keystone:identity-service',
1270+ 'glance:shared-db': 'mysql:shared-db',
1271+ 'glance:amqp': 'rabbitmq-server:amqp'
1272+ }
1273+ super(NovaCCBasicDeployment, self)._add_relations(relations)
1274+
1275+ def _configure_services(self):
1276+ """Configure all of the services."""
1277+ keystone_config = {'admin-password': 'openstack',
1278+ 'admin-token': 'ubuntutesting'}
1279+ configs = {'keystone': keystone_config}
1280+ super(NovaCCBasicDeployment, self)._configure_services(configs)
1281+
1282+ def _initialize_tests(self):
1283+ """Perform final initialization before tests get run."""
1284+ # Access the sentries for inspecting service units
1285+ self.mysql_sentry = self.d.sentry.unit['mysql/0']
1286+ self.keystone_sentry = self.d.sentry.unit['keystone/0']
1287+ self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0']
1288+ self.nova_cc_sentry = self.d.sentry.unit['nova-cloud-controller/0']
1289+ self.nova_compute_sentry = self.d.sentry.unit['nova-compute/0']
1290+ self.glance_sentry = self.d.sentry.unit['glance/0']
1291+
1292+ # Authenticate admin with keystone
1293+ self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
1294+ user='admin',
1295+ password='openstack',
1296+ tenant='admin')
1297+
1298+ # Authenticate admin with glance endpoint
1299+ self.glance = u.authenticate_glance_admin(self.keystone)
1300+
1301+ # Create a demo tenant/role/user
1302+ self.demo_tenant = 'demoTenant'
1303+ self.demo_role = 'demoRole'
1304+ self.demo_user = 'demoUser'
1305+ if not u.tenant_exists(self.keystone, self.demo_tenant):
1306+ tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant,
1307+ description='demo tenant',
1308+ enabled=True)
1309+ self.keystone.roles.create(name=self.demo_role)
1310+ self.keystone.users.create(name=self.demo_user,
1311+ password='password',
1312+ tenant_id=tenant.id,
1313+ email='demo@demo.com')
1314+
1315+ # Authenticate demo user with keystone
1316+ self.keystone_demo = \
1317+ u.authenticate_keystone_user(self.keystone, user=self.demo_user,
1318+ password='password',
1319+ tenant=self.demo_tenant)
1320+
1321+ # Authenticate demo user with nova-api
1322+ self.nova_demo = u.authenticate_nova_user(self.keystone,
1323+ user=self.demo_user,
1324+ password='password',
1325+ tenant=self.demo_tenant)
1326+
1327+ def test_services(self):
1328+ """Verify the expected services are running on the corresponding
1329+ service units."""
1330+ commands = {
1331+ self.mysql_sentry: ['status mysql'],
1332+ self.rabbitmq_sentry: ['sudo service rabbitmq-server status'],
1333+ self.nova_cc_sentry: ['status nova-api-ec2',
1334+ 'status nova-api-os-compute',
1335+ 'status nova-objectstore',
1336+ 'status nova-cert',
1337+ 'status nova-scheduler'],
1338+ self.nova_compute_sentry: ['status nova-compute',
1339+ 'status nova-network',
1340+ 'status nova-api'],
1341+ self.keystone_sentry: ['status keystone'],
1342+ self.glance_sentry: ['status glance-registry', 'status glance-api']
1343+ }
1344+ if self._get_openstack_release() >= self.precise_grizzly:
1345+            commands[self.nova_cc_sentry].append('status nova-conductor')
1346+
1347+ ret = u.validate_services(commands)
1348+ if ret:
1349+ amulet.raise_status(amulet.FAIL, msg=ret)
1350+
1351+ def test_service_catalog(self):
1352+ """Verify that the service catalog endpoint data is valid."""
1353+ endpoint_vol = {'adminURL': u.valid_url,
1354+ 'region': 'RegionOne',
1355+ 'publicURL': u.valid_url,
1356+ 'internalURL': u.valid_url}
1357+ endpoint_id = {'adminURL': u.valid_url,
1358+ 'region': 'RegionOne',
1359+ 'publicURL': u.valid_url,
1360+ 'internalURL': u.valid_url}
1361+ if self._get_openstack_release() >= self.precise_folsom:
1362+ endpoint_vol['id'] = u.not_null
1363+ endpoint_id['id'] = u.not_null
1364+ expected = {'s3': [endpoint_vol], 'compute': [endpoint_vol],
1365+ 'ec2': [endpoint_vol], 'identity': [endpoint_id]}
1366+ actual = self.keystone_demo.service_catalog.get_endpoints()
1367+
1368+ ret = u.validate_svc_catalog_endpoint_data(expected, actual)
1369+ if ret:
1370+ amulet.raise_status(amulet.FAIL, msg=ret)
1371+
1372+ def test_openstack_compute_api_endpoint(self):
1373+ """Verify the openstack compute api (osapi) endpoint data."""
1374+ endpoints = self.keystone.endpoints.list()
1375+ admin_port = internal_port = public_port = '8774'
1376+ expected = {'id': u.not_null,
1377+ 'region': 'RegionOne',
1378+ 'adminurl': u.valid_url,
1379+ 'internalurl': u.valid_url,
1380+ 'publicurl': u.valid_url,
1381+ 'service_id': u.not_null}
1382+
1383+ ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
1384+ public_port, expected)
1385+ if ret:
1386+ message = 'osapi endpoint: {}'.format(ret)
1387+ amulet.raise_status(amulet.FAIL, msg=message)
1388+
1389+ def test_ec2_api_endpoint(self):
1390+ """Verify the EC2 api endpoint data."""
1391+ endpoints = self.keystone.endpoints.list()
1392+ admin_port = internal_port = public_port = '8773'
1393+ expected = {'id': u.not_null,
1394+ 'region': 'RegionOne',
1395+ 'adminurl': u.valid_url,
1396+ 'internalurl': u.valid_url,
1397+ 'publicurl': u.valid_url,
1398+ 'service_id': u.not_null}
1399+
1400+ ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
1401+ public_port, expected)
1402+ if ret:
1403+ message = 'EC2 endpoint: {}'.format(ret)
1404+ amulet.raise_status(amulet.FAIL, msg=message)
1405+
1406+ def test_s3_api_endpoint(self):
1407+ """Verify the S3 api endpoint data."""
1408+ endpoints = self.keystone.endpoints.list()
1409+ admin_port = internal_port = public_port = '3333'
1410+ expected = {'id': u.not_null,
1411+ 'region': 'RegionOne',
1412+ 'adminurl': u.valid_url,
1413+ 'internalurl': u.valid_url,
1414+ 'publicurl': u.valid_url,
1415+ 'service_id': u.not_null}
1416+
1417+ ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
1418+ public_port, expected)
1419+ if ret:
1420+ message = 'S3 endpoint: {}'.format(ret)
1421+ amulet.raise_status(amulet.FAIL, msg=message)
1422+
1423+ def test_nova_cc_shared_db_relation(self):
1424+ """Verify the nova-cc to mysql shared-db relation data"""
1425+ unit = self.nova_cc_sentry
1426+ relation = ['shared-db', 'mysql:shared-db']
1427+ expected = {
1428+ 'private-address': u.valid_ip,
1429+ 'nova_database': 'nova',
1430+ 'nova_username': 'nova',
1431+ 'nova_hostname': u.valid_ip
1432+ }
1433+
1434+ ret = u.validate_relation_data(unit, relation, expected)
1435+ if ret:
1436+ message = u.relation_error('nova-cc shared-db', ret)
1437+ amulet.raise_status(amulet.FAIL, msg=message)
1438+
1439+ def test_mysql_shared_db_relation(self):
1440+ """Verify the mysql to nova-cc shared-db relation data"""
1441+ unit = self.mysql_sentry
1442+ relation = ['shared-db', 'nova-cloud-controller:shared-db']
1443+ expected = {
1444+ 'private-address': u.valid_ip,
1445+ 'nova_password': u.not_null,
1446+ 'db_host': u.valid_ip
1447+ }
1448+
1449+ ret = u.validate_relation_data(unit, relation, expected)
1450+ if ret:
1451+ message = u.relation_error('mysql shared-db', ret)
1452+ amulet.raise_status(amulet.FAIL, msg=message)
1453+
1454+ def test_nova_cc_identity_service_relation(self):
1455+ """Verify the nova-cc to keystone identity-service relation data"""
1456+ unit = self.nova_cc_sentry
1457+ relation = ['identity-service', 'keystone:identity-service']
1458+ expected = {
1459+ 'nova_internal_url': u.valid_url,
1460+ 'nova_public_url': u.valid_url,
1461+ 's3_public_url': u.valid_url,
1462+ 's3_service': 's3',
1463+ 'ec2_admin_url': u.valid_url,
1464+ 'ec2_internal_url': u.valid_url,
1465+ 'nova_service': 'nova',
1466+ 's3_region': 'RegionOne',
1467+ 'private-address': u.valid_ip,
1468+ 'nova_region': 'RegionOne',
1469+ 'ec2_public_url': u.valid_url,
1470+ 'ec2_region': 'RegionOne',
1471+ 's3_internal_url': u.valid_url,
1472+ 's3_admin_url': u.valid_url,
1473+ 'nova_admin_url': u.valid_url,
1474+ 'ec2_service': 'ec2'
1475+ }
1476+
1477+ ret = u.validate_relation_data(unit, relation, expected)
1478+ if ret:
1479+ message = u.relation_error('nova-cc identity-service', ret)
1480+ amulet.raise_status(amulet.FAIL, msg=message)
1481+
1482+ def test_keystone_identity_service_relation(self):
1483+ """Verify the keystone to nova-cc identity-service relation data"""
1484+ unit = self.keystone_sentry
1485+ relation = ['identity-service',
1486+ 'nova-cloud-controller:identity-service']
1487+ expected = {
1488+ 'service_protocol': 'http',
1489+ 'service_tenant': 'services',
1490+ 'admin_token': 'ubuntutesting',
1491+ 'service_password': u.not_null,
1492+ 'service_port': '5000',
1493+ 'auth_port': '35357',
1494+ 'auth_protocol': 'http',
1495+ 'private-address': u.valid_ip,
1496+ 'https_keystone': 'False',
1497+ 'auth_host': u.valid_ip,
1498+ 'service_username': 's3_ec2_nova',
1499+ 'service_tenant_id': u.not_null,
1500+ 'service_host': u.valid_ip
1501+ }
1502+
1503+ ret = u.validate_relation_data(unit, relation, expected)
1504+ if ret:
1505+ message = u.relation_error('keystone identity-service', ret)
1506+ amulet.raise_status(amulet.FAIL, msg=message)
1507+
1508+ def test_nova_cc_amqp_relation(self):
1509+ """Verify the nova-cc to rabbitmq-server amqp relation data"""
1510+ unit = self.nova_cc_sentry
1511+ relation = ['amqp', 'rabbitmq-server:amqp']
1512+ expected = {
1513+ 'username': 'nova',
1514+ 'private-address': u.valid_ip,
1515+ 'vhost': 'openstack'
1516+ }
1517+
1518+ ret = u.validate_relation_data(unit, relation, expected)
1519+ if ret:
1520+ message = u.relation_error('nova-cc amqp', ret)
1521+ amulet.raise_status(amulet.FAIL, msg=message)
1522+
1523+ def test_rabbitmq_amqp_relation(self):
1524+ """Verify the rabbitmq-server to nova-cc amqp relation data"""
1525+ unit = self.rabbitmq_sentry
1526+ relation = ['amqp', 'nova-cloud-controller:amqp']
1527+ expected = {
1528+ 'private-address': u.valid_ip,
1529+ 'password': u.not_null,
1530+ 'hostname': u.valid_ip
1531+ }
1532+
1533+ ret = u.validate_relation_data(unit, relation, expected)
1534+ if ret:
1535+ message = u.relation_error('rabbitmq amqp', ret)
1536+ amulet.raise_status(amulet.FAIL, msg=message)
1537+
1538+ def test_nova_cc_cloud_compute_relation(self):
1539+ """Verify the nova-cc to nova-compute cloud-compute relation data"""
1540+ unit = self.nova_cc_sentry
1541+ relation = ['cloud-compute', 'nova-compute:cloud-compute']
1542+ expected = {
1543+ 'volume_service': 'cinder',
1544+ 'network_manager': 'flatdhcpmanager',
1545+ 'ec2_host': u.valid_ip,
1546+ 'private-address': u.valid_ip,
1547+ 'restart_trigger': u.not_null
1548+ }
1549+ if self._get_openstack_release() == self.precise_essex:
1550+ expected['volume_service'] = 'nova-volume'
1551+
1552+ ret = u.validate_relation_data(unit, relation, expected)
1553+ if ret:
1554+ message = u.relation_error('nova-cc cloud-compute', ret)
1555+ amulet.raise_status(amulet.FAIL, msg=message)
1556+
1557+ def test_nova_cloud_compute_relation(self):
1558+ """Verify the nova-compute to nova-cc cloud-compute relation data"""
1559+ unit = self.nova_compute_sentry
1560+ relation = ['cloud-compute', 'nova-cloud-controller:cloud-compute']
1561+ expected = {
1562+ 'private-address': u.valid_ip,
1563+ }
1564+
1565+ ret = u.validate_relation_data(unit, relation, expected)
1566+ if ret:
1567+ message = u.relation_error('nova-compute cloud-compute', ret)
1568+ amulet.raise_status(amulet.FAIL, msg=message)
1569+
1570+ def test_nova_cc_image_service_relation(self):
1571+ """Verify the nova-cc to glance image-service relation data"""
1572+ unit = self.nova_cc_sentry
1573+ relation = ['image-service', 'glance:image-service']
1574+ expected = {
1575+ 'private-address': u.valid_ip,
1576+ }
1577+
1578+ ret = u.validate_relation_data(unit, relation, expected)
1579+ if ret:
1580+ message = u.relation_error('nova-cc image-service', ret)
1581+ amulet.raise_status(amulet.FAIL, msg=message)
1582+
1583+ def test_glance_image_service_relation(self):
1584+ """Verify the glance to nova-cc image-service relation data"""
1585+ unit = self.glance_sentry
1586+ relation = ['image-service', 'nova-cloud-controller:image-service']
1587+ expected = {
1588+ 'private-address': u.valid_ip,
1589+ 'glance-api-server': u.valid_url
1590+ }
1591+
1592+ ret = u.validate_relation_data(unit, relation, expected)
1593+ if ret:
1594+ message = u.relation_error('glance image-service', ret)
1595+ amulet.raise_status(amulet.FAIL, msg=message)
1596+
1597+ def test_restart_on_config_change(self):
1598+ """Verify that the specified services are restarted when the config
1599+ is changed."""
1600+ # NOTE(coreycb): Skipping failing test on essex until resolved.
1601+ # config-flags don't take effect on essex.
1602+ if self._get_openstack_release() == self.precise_essex:
1603+ u.log.error("Skipping failing test until resolved")
1604+ return
1605+
1606+ services = ['nova-api-ec2', 'nova-api-os-compute', 'nova-objectstore',
1607+ 'nova-cert', 'nova-scheduler', 'nova-conductor']
1608+ self.d.configure('nova-cloud-controller',
1609+ {'config-flags': 'quota_cores=20,quota_instances=40,quota_ram=102400'})
1610+ pgrep_full = True
1611+
1612+ time = 20
1613+ conf = '/etc/nova/nova.conf'
1614+ for s in services:
1615+ if not u.service_restarted(self.nova_cc_sentry, s, conf,
1616+ pgrep_full=True, sleep_time=time):
1617+ msg = "service {} didn't restart after config change".format(s)
1618+ amulet.raise_status(amulet.FAIL, msg=msg)
1619+ time = 0
1620+
1621+ def test_nova_default_config(self):
1622+ """Verify the data in the nova config file's default section."""
1623+ # NOTE(coreycb): Currently no way to test on essex because config file
1624+ # has no section headers.
1625+ if self._get_openstack_release() == self.precise_essex:
1626+ return
1627+
1628+ unit = self.nova_cc_sentry
1629+ conf = '/etc/nova/nova.conf'
1630+ rabbitmq_relation = self.rabbitmq_sentry.relation('amqp',
1631+ 'nova-cloud-controller:amqp')
1632+ glance_relation = self.glance_sentry.relation('image-service',
1633+ 'nova-cloud-controller:image-service')
1634+ mysql_relation = self.mysql_sentry.relation('shared-db',
1635+ 'nova-cloud-controller:shared-db')
1636+ db_uri = "mysql://{}:{}@{}/{}".format('nova',
1637+ mysql_relation['nova_password'],
1638+ mysql_relation['db_host'],
1639+ 'nova')
1640+ keystone_ep = self.keystone_demo.service_catalog.url_for(\
1641+ service_type='identity',
1642+ endpoint_type='publicURL')
1643+ keystone_ec2 = "{}/ec2tokens".format(keystone_ep)
1644+
1645+ expected = {'dhcpbridge_flagfile': '/etc/nova/nova.conf',
1646+ 'dhcpbridge': '/usr/bin/nova-dhcpbridge',
1647+ 'logdir': '/var/log/nova',
1648+ 'state_path': '/var/lib/nova',
1649+ 'lock_path': '/var/lock/nova',
1650+ 'force_dhcp_release': 'True',
1651+ 'iscsi_helper': 'tgtadm',
1652+ 'libvirt_use_virtio_for_bridges': 'True',
1653+ 'connection_type': 'libvirt',
1654+ 'root_helper': 'sudo nova-rootwrap /etc/nova/rootwrap.conf',
1655+ 'verbose': 'True',
1656+ 'ec2_private_dns_show_ip': 'True',
1657+ 'api_paste_config': '/etc/nova/api-paste.ini',
1658+ 'volumes_path': '/var/lib/nova/volumes',
1659+ 'enabled_apis': 'ec2,osapi_compute,metadata',
1660+ 'auth_strategy': 'keystone',
1661+ 'compute_driver': 'libvirt.LibvirtDriver',
1662+ 'keystone_ec2_url': keystone_ec2,
1663+ 'sql_connection': db_uri,
1664+ 'rabbit_userid': 'nova',
1665+ 'rabbit_virtual_host': 'openstack',
1666+ 'rabbit_password': rabbitmq_relation['password'],
1667+ 'rabbit_host': rabbitmq_relation['hostname'],
1668+ 'glance_api_servers': glance_relation['glance-api-server'],
1669+ 'network_manager': 'nova.network.manager.FlatDHCPManager',
1670+ 's3_listen_port': '3333',
1671+ 'osapi_compute_listen_port': '8774',
1672+ 'ec2_listen_port': '8773'}
1673+
1674+ ret = u.validate_config_data(unit, conf, 'DEFAULT', expected)
1675+ if ret:
1676+ message = "nova config error: {}".format(ret)
1677+ amulet.raise_status(amulet.FAIL, msg=message)
1678+
1679+
1680+ def test_nova_keystone_authtoken_config(self):
1681+ """Verify the data in the nova config file's keystone_authtoken
1682+ section. This data only exists since icehouse."""
1683+ if self._get_openstack_release() < self.precise_icehouse:
1684+ return
1685+
1686+ unit = self.nova_cc_sentry
1687+ conf = '/etc/nova/nova.conf'
1688+ keystone_relation = self.keystone_sentry.relation('identity-service',
1689+ 'nova-cloud-controller:identity-service')
1690+ keystone_uri = "http://{}:{}/".format(keystone_relation['service_host'],
1691+ keystone_relation['service_port'])
1692+ expected = {'auth_uri': keystone_uri,
1693+ 'auth_host': keystone_relation['service_host'],
1694+ 'auth_port': keystone_relation['auth_port'],
1695+ 'auth_protocol': keystone_relation['auth_protocol'],
1696+ 'admin_tenant_name': keystone_relation['service_tenant'],
1697+ 'admin_user': keystone_relation['service_username'],
1698+ 'admin_password': keystone_relation['service_password']}
1699+
1700+ ret = u.validate_config_data(unit, conf, 'keystone_authtoken', expected)
1701+ if ret:
1702+ message = "nova config error: {}".format(ret)
1703+ amulet.raise_status(amulet.FAIL, msg=message)
1704+
1705+ def test_image_instance_create(self):
1706+ """Create an image/instance, verify they exist, and delete them."""
1707+ # NOTE(coreycb): Skipping failing test on essex until resolved. essex
1708+ # nova API calls are getting "Malformed request url (HTTP
1709+ # 400)".
1710+ if self._get_openstack_release() == self.precise_essex:
1711+ u.log.error("Skipping failing test until resolved")
1712+ return
1713+
1714+ image = u.create_cirros_image(self.glance, "cirros-image")
1715+ if not image:
1716+ amulet.raise_status(amulet.FAIL, msg="Image create failed")
1717+
1718+ instance = u.create_instance(self.nova_demo, "cirros-image", "cirros",
1719+ "m1.tiny")
1720+ if not instance:
1721+ amulet.raise_status(amulet.FAIL, msg="Instance create failed")
1722+
1723+ found = False
1724+ for instance in self.nova_demo.servers.list():
1725+ if instance.name == 'cirros':
1726+ found = True
1727+ if instance.status != 'ACTIVE':
1728+ msg = "cirros instance is not active"
1729+                    amulet.raise_status(amulet.FAIL, msg=msg)
1730+
1731+ if not found:
1732+ message = "nova cirros instance does not exist"
1733+ amulet.raise_status(amulet.FAIL, msg=message)
1734+
1735+ u.delete_image(self.glance, image)
1736+ u.delete_instance(self.nova_demo, instance)
1737
1738=== added directory 'tests/charmhelpers'
1739=== added file 'tests/charmhelpers/__init__.py'
1740=== added directory 'tests/charmhelpers/contrib'
1741=== added file 'tests/charmhelpers/contrib/__init__.py'
1742=== added directory 'tests/charmhelpers/contrib/amulet'
1743=== added file 'tests/charmhelpers/contrib/amulet/__init__.py'
1744=== added file 'tests/charmhelpers/contrib/amulet/deployment.py'
1745--- tests/charmhelpers/contrib/amulet/deployment.py 1970-01-01 00:00:00 +0000
1746+++ tests/charmhelpers/contrib/amulet/deployment.py 2014-07-11 17:34:59 +0000
1747@@ -0,0 +1,63 @@
1748+import amulet
1749+import re
1750+
1751+
class AmuletDeployment(object):
    """Generic Amulet deployment and test runner helpers.

    Subclasses define test_* methods, which run_tests() discovers and
    executes against the deployed environment.
    """

    def __init__(self, series):
        """Initialize the deployment environment for the given Ubuntu
        series (e.g. 'precise', 'trusty')."""
        self.series = series
        self.d = amulet.Deployment(series=self.series)

    def _get_charm_name(self, service_name):
        """Return the charm name for a service name.

        Unique service names can be specified with a '-service#' suffix
        (e.g. mysql-service1 -> mysql).
        """
        # Raw string fixes the invalid '\-' escape in the original sub().
        if re.match(r"^.*-service\d{1,3}$", service_name):
            return re.sub(r'-service\d{1,3}$', '', service_name)
        return service_name

    def _add_services(self, this_service, other_services):
        """Add services to the deployment.

        this_service is the local charm under test and other_services are
        the charms that come from the charm store. Each is a
        (service_name, num_units) tuple.
        """
        name, units = range(2)

        # The local charm is deployed from the filesystem, so no charm
        # store URL is required (the original computed an unused
        # charm_name here).
        self.d.add(this_service[name], units=this_service[units])

        for svc in other_services:
            charm_name = self._get_charm_name(svc[name])
            self.d.add(svc[name],
                       charm='cs:{}/{}'.format(self.series, charm_name),
                       units=svc[units])

    def _add_relations(self, relations):
        """Add all of the relations for the services."""
        for k, v in relations.iteritems():
            self.d.relate(k, v)

    def _configure_services(self, configs):
        """Configure all of the services."""
        for service, config in configs.iteritems():
            self.d.configure(service, config)

    def _deploy(self):
        """Deploy environment and wait for all hooks to finish executing.

        Raises amulet.FAIL on deployment timeout; the original's
        'except: raise' clause was a no-op and has been removed.
        """
        try:
            self.d.setup()
            self.d.sentry.wait()
        except amulet.helpers.TimeoutError:
            amulet.raise_status(amulet.FAIL, msg="Deployment timed out")

    def run_tests(self):
        """Run all of the methods that are prefixed with 'test_'."""
        for test in dir(self):
            if test.startswith('test_'):
                getattr(self, test)()
1811
1812=== added file 'tests/charmhelpers/contrib/amulet/utils.py'
1813--- tests/charmhelpers/contrib/amulet/utils.py 1970-01-01 00:00:00 +0000
1814+++ tests/charmhelpers/contrib/amulet/utils.py 2014-07-11 17:34:59 +0000
1815@@ -0,0 +1,157 @@
1816+import ConfigParser
1817+import io
1818+import logging
1819+import re
1820+import sys
1821+from time import sleep
1822+
1823+
class AmuletUtils(object):
    """Common utility functions used by Amulet tests.

    Validation helpers return an error string on failure and None on
    success, matching the conventions used by the amulet test files.
    """

    def __init__(self, log_level=logging.ERROR):
        self.log = self.get_logger(level=log_level)

    def get_logger(self, name="amulet-logger", level=logging.DEBUG):
        """Get a logger object that will log to stdout."""
        logger = logging.getLogger(name)
        fmt = logging.Formatter(
            "%(asctime)s %(funcName)s %(levelname)s: %(message)s")

        handler = logging.StreamHandler(stream=sys.stdout)
        handler.setLevel(level)
        handler.setFormatter(fmt)

        logger.addHandler(handler)
        logger.setLevel(level)

        return logger

    def valid_ip(self, ip):
        """Return True if ip looks like a dotted-quad IPv4 address.

        NOTE(review): octet values are not range-checked, so strings like
        '999.1.1.1' also pass — kept for backward compatibility.
        """
        return bool(re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip))

    def valid_url(self, url):
        """Return True if url parses as an http(s)/ftp(s) URL."""
        p = re.compile(
            r'^(?:http|ftp)s?://'
            r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # flake8: noqa
            r'localhost|'
            r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
            r'(?::\d+)?'
            r'(?:/?|[/?]\S+)$',
            re.IGNORECASE)
        return bool(p.match(url))

    def validate_services(self, commands):
        """Verify the specified services are running on the corresponding
        service units.

        commands maps a sentry unit to a list of shell commands; each
        command must exit 0. Returns an error string or None.
        """
        for k, v in commands.iteritems():
            for cmd in v:
                output, code = k.run(cmd)
                if code != 0:
                    return "command `{}` returned {}".format(cmd, str(code))
        return None

    def _get_config(self, unit, filename):
        """Get a ConfigParser object for parsing a unit's config file."""
        file_contents = unit.file_contents(filename)
        config = ConfigParser.ConfigParser()
        config.readfp(io.StringIO(file_contents))
        return config

    def validate_config_data(self, sentry_unit, config_file, section,
                             expected):
        """Verify that the specified section of the config file contains
        the expected option key:value pairs. Returns an error string or
        None."""
        config = self._get_config(sentry_unit, config_file)

        if section != 'DEFAULT' and not config.has_section(section):
            return "section [{}] does not exist".format(section)

        for k in expected.keys():
            if not config.has_option(section, k):
                return "section [{}] is missing option {}".format(section, k)
            if config.get(section, k) != expected[k]:
                return "section [{}] {}:{} != expected {}:{}".format(
                    section, k, config.get(section, k), k, expected[k])
        return None

    def _validate_dict_data(self, expected, actual):
        """Compare expected dictionary data vs actual dictionary data.

        Values in 'expected' can be strings, bools, ints, longs, or a
        callable that takes the actual value and returns a bool.
        Returns an error string or None.
        """
        for k, v in expected.iteritems():
            # Guard clause replaces the original if/else nesting.
            if k not in actual:
                return "key '{}' does not exist".format(k)
            if isinstance(v, (basestring, bool, int, long)):
                if v != actual[k]:
                    return "{}:{}".format(k, actual[k])
            elif not v(actual[k]):
                return "{}:{}".format(k, actual[k])
        return None

    def validate_relation_data(self, sentry_unit, relation, expected):
        """Validate actual relation data based on expected relation data."""
        actual = sentry_unit.relation(relation[0], relation[1])
        self.log.debug('actual: {}'.format(repr(actual)))
        return self._validate_dict_data(expected, actual)

    def _validate_list_data(self, expected, actual):
        """Compare expected list vs actual list data."""
        for e in expected:
            if e not in actual:
                return "expected item {} not found in actual list".format(e)
        return None

    def not_null(self, string):
        """Return True if string is not None.

        Fixes the original's '!= None' comparison; identity is the
        correct way to test for None.
        """
        return string is not None

    def _get_file_mtime(self, sentry_unit, filename):
        """Get last modification time of file."""
        return sentry_unit.file_stat(filename)['mtime']

    def _get_dir_mtime(self, sentry_unit, directory):
        """Get last modification time of directory."""
        return sentry_unit.directory_stat(directory)['mtime']

    def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
        """Determine start time of the process based on the last
        modification time of the /proc/pid directory. If pgrep_full is
        True, the process name is matched against the full command line."""
        if pgrep_full:
            cmd = 'pgrep -o -f {}'.format(service)
        else:
            cmd = 'pgrep -o {}'.format(service)
        proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip())
        return self._get_dir_mtime(sentry_unit, proc_dir)

    def service_restarted(self, sentry_unit, service, filename,
                          pgrep_full=False, sleep_time=20):
        """Compare a service's start time vs a file's last modification
        time (such as a config file for that service) to determine if the
        service has been restarted."""
        # Sleep first to give the service time to restart after the
        # config change that triggered this check.
        sleep(sleep_time)
        return (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
                self._get_file_mtime(sentry_unit, filename))

    def relation_error(self, name, data):
        """Format a standard unexpected-relation-data error message."""
        return 'unexpected relation data in {} - {}'.format(name, data)

    def endpoint_error(self, name, data):
        """Format a standard unexpected-endpoint-data error message."""
        return 'unexpected endpoint data in {} - {}'.format(name, data)
1973
1974=== added directory 'tests/charmhelpers/contrib/openstack'
1975=== added file 'tests/charmhelpers/contrib/openstack/__init__.py'
1976=== added directory 'tests/charmhelpers/contrib/openstack/amulet'
1977=== added file 'tests/charmhelpers/contrib/openstack/amulet/__init__.py'
1978=== added file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py'
1979--- tests/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
1980+++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2014-07-11 17:34:59 +0000
1981@@ -0,0 +1,57 @@
1982+from charmhelpers.contrib.amulet.deployment import (
1983+ AmuletDeployment
1984+)
1985+
1986+
class OpenStackAmuletDeployment(AmuletDeployment):
    """Amulet deployment helpers specific to OpenStack charms.

    Adds openstack-origin / source configuration on top of the generic
    AmuletDeployment behavior.
    """

    def __init__(self, series, openstack=None, source=None):
        """Initialize the deployment environment.

        openstack: an openstack-origin value (e.g. 'cloud:precise-havana')
                   applied to OpenStack charms.
        source:    a 'source' config value applied to the support charms
                   (mysql, mongodb, rabbitmq-server, ceph).
        """
        super(OpenStackAmuletDeployment, self).__init__(series)
        self.openstack = openstack
        self.source = source

    def _add_services(self, this_service, other_services):
        """Add services to the deployment and set openstack-origin."""
        super(OpenStackAmuletDeployment, self)._add_services(this_service,
                                                             other_services)
        name = 0
        # Bug fix: the original did 'services = other_services' followed by
        # services.append(this_service), mutating the caller's list in
        # place. Build a new list instead.
        services = other_services + [this_service]
        # Charms that take a 'source' option rather than 'openstack-origin'.
        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']

        if self.openstack:
            for svc in services:
                charm_name = self._get_charm_name(svc[name])
                if charm_name not in use_source:
                    config = {'openstack-origin': self.openstack}
                    self.d.configure(svc[name], config)

        if self.source:
            for svc in services:
                charm_name = self._get_charm_name(svc[name])
                if charm_name in use_source:
                    config = {'source': self.source}
                    self.d.configure(svc[name], config)

    def _configure_services(self, configs):
        """Configure all of the services."""
        for service, config in configs.iteritems():
            self.d.configure(service, config)

    def _get_openstack_release(self):
        """Return an integer representing the enum value of the openstack
        release, and set the per-release attributes used for comparison
        (e.g. self.precise_essex ... self.trusty_icehouse)."""
        self.precise_essex, self.precise_folsom, self.precise_grizzly, \
            self.precise_havana, self.precise_icehouse, \
            self.trusty_icehouse = range(6)
        releases = {
            ('precise', None): self.precise_essex,
            ('precise', 'cloud:precise-folsom'): self.precise_folsom,
            ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
            ('precise', 'cloud:precise-havana'): self.precise_havana,
            ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
            ('trusty', None): self.trusty_icehouse}
        return releases[(self.series, self.openstack)]
2039
2040=== added file 'tests/charmhelpers/contrib/openstack/amulet/utils.py'
2041--- tests/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
2042+++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2014-07-11 17:34:59 +0000
2043@@ -0,0 +1,253 @@
2044+import logging
2045+import os
2046+import time
2047+import urllib
2048+
2049+import glanceclient.v1.client as glance_client
2050+import keystoneclient.v2_0 as keystone_client
2051+import novaclient.v1_1.client as nova_client
2052+
2053+from charmhelpers.contrib.amulet.utils import (
2054+ AmuletUtils
2055+)
2056+
DEBUG = logging.DEBUG
ERROR = logging.ERROR


class OpenStackAmuletUtils(AmuletUtils):
    """Amulet utilities specific to OpenStack charms.

    Validation helpers return an error string on failure and None on
    success, matching AmuletUtils conventions.
    """

    def __init__(self, log_level=ERROR):
        """Initialize the deployment environment."""
        super(OpenStackAmuletUtils, self).__init__(log_level)

    def validate_endpoint_data(self, endpoints, admin_port, internal_port,
                               public_port, expected):
        """Validate actual endpoint data vs expected endpoint data. The
        ports are used to find the matching endpoint. Returns an error
        string or None."""
        found = False
        for ep in endpoints:
            self.log.debug('endpoint: {}'.format(repr(ep)))
            if admin_port in ep.adminurl and internal_port in ep.internalurl \
                    and public_port in ep.publicurl:
                found = True
                actual = {'id': ep.id,
                          'region': ep.region,
                          'adminurl': ep.adminurl,
                          'internalurl': ep.internalurl,
                          'publicurl': ep.publicurl,
                          'service_id': ep.service_id}
                ret = self._validate_dict_data(expected, actual)
                if ret:
                    return 'unexpected endpoint data - {}'.format(ret)

        if not found:
            return 'endpoint not found'

    def validate_svc_catalog_endpoint_data(self, expected, actual):
        """Validate a list of actual service catalog endpoints vs a list
        of expected service catalog endpoints. Returns an error string or
        None."""
        self.log.debug('actual: {}'.format(repr(actual)))
        for k, v in expected.iteritems():
            if k not in actual:
                return "endpoint {} does not exist".format(k)
            ret = self._validate_dict_data(expected[k][0], actual[k][0])
            if ret:
                return self.endpoint_error(k, ret)
        # Bug fix: the original ended with 'return ret', which raises
        # NameError when 'expected' is empty. All failures return above,
        # so success is always None.
        return None

    def validate_tenant_data(self, expected, actual):
        """Validate a list of actual tenant data vs list of expected
        tenant data. Returns an error string or None."""
        self.log.debug('actual: {}'.format(repr(actual)))
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'description': act.description,
                     'name': act.name, 'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected tenant data - {}".format(ret)
            if not found:
                return "tenant {} does not exist".format(e['name'])
        # Bug fix: 'return ret' raised NameError for empty 'expected'.
        return None

    def validate_role_data(self, expected, actual):
        """Validate a list of actual role data vs a list of expected role
        data. Returns an error string or None."""
        self.log.debug('actual: {}'.format(repr(actual)))
        for e in expected:
            found = False
            for act in actual:
                a = {'name': act.name, 'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected role data - {}".format(ret)
            if not found:
                return "role {} does not exist".format(e['name'])
        # Bug fix: 'return ret' raised NameError for empty 'expected'.
        return None

    def validate_user_data(self, expected, actual):
        """Validate a list of actual user data vs a list of expected user
        data. Returns an error string or None."""
        self.log.debug('actual: {}'.format(repr(actual)))
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'name': act.name,
                     'email': act.email, 'tenantId': act.tenantId,
                     'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected user data - {}".format(ret)
            if not found:
                return "user {} does not exist".format(e['name'])
        # Bug fix: 'return ret' raised NameError for empty 'expected'.
        return None

    def validate_flavor_data(self, expected, actual):
        """Validate a list of actual flavors vs a list of expected
        flavors. Returns an error string or None."""
        self.log.debug('actual: {}'.format(repr(actual)))
        act = [a.name for a in actual]
        return self._validate_list_data(expected, act)

    def tenant_exists(self, keystone, tenant):
        """Return True if tenant exists."""
        return tenant in [t.name for t in keystone.tenants.list()]

    def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                    tenant):
        """Authenticates admin user with the keystone admin endpoint."""
        # The keystone unit address is taken from its shared-db relation
        # with mysql; 35357 is the keystone admin port.
        service_ip = \
            keystone_sentry.relation('shared-db',
                                     'mysql:shared-db')['private-address']
        ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_keystone_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with the keystone public endpoint."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_glance_admin(self, keystone):
        """Authenticates admin user with glance."""
        ep = keystone.service_catalog.url_for(service_type='image',
                                              endpoint_type='adminURL')
        return glance_client.Client(ep, token=keystone.auth_token)

    def authenticate_nova_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with nova-api."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return nova_client.Client(username=user, api_key=password,
                                  project_id=tenant, auth_url=ep)

    def create_cirros_image(self, glance, image_name):
        """Download the latest cirros image and upload it to glance.

        Returns the glance image on success or None if the image does not
        become active within the retry window.
        """
        http_proxy = os.getenv('AMULET_HTTP_PROXY')
        self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
        if http_proxy:
            proxies = {'http': http_proxy}
            opener = urllib.FancyURLopener(proxies)
        else:
            opener = urllib.FancyURLopener()

        f = opener.open("http://download.cirros-cloud.net/version/released")
        version = f.read().strip()
        f.close()

        cirros_filename = "cirros-{}-x86_64-disk.img".format(version)
        local_path = os.path.join('tests', cirros_filename)

        if not os.path.exists(local_path):
            # Bug fix: the original built the download URL from the local
            # path ('tests/cirros-...'), producing an invalid remote URL
            # http://download.cirros-cloud.net/<ver>/tests/cirros-...
            # Use only the image filename in the URL.
            cirros_url = "http://{}/{}/{}".format(
                "download.cirros-cloud.net", version, cirros_filename)
            opener.retrieve(cirros_url, local_path)

        with open(local_path) as f:
            image = glance.images.create(name=image_name, is_public=True,
                                         disk_format='qcow2',
                                         container_format='bare', data=f)
        count = 1
        status = image.status
        while status != 'active' and count < 10:
            time.sleep(3)
            image = glance.images.get(image.id)
            status = image.status
            self.log.debug('image status: {}'.format(status))
            count += 1

        if status != 'active':
            self.log.error('image creation timed out')
            return None

        return image

    def delete_image(self, glance, image):
        """Delete the specified image.

        Returns True on success, False if the image count does not drop
        within the retry window.
        """
        num_before = len(list(glance.images.list()))
        glance.images.delete(image)

        count = 1
        num_after = len(list(glance.images.list()))
        while num_after != (num_before - 1) and count < 10:
            time.sleep(3)
            num_after = len(list(glance.images.list()))
            self.log.debug('number of images: {}'.format(num_after))
            count += 1

        if num_after != (num_before - 1):
            self.log.error('image deletion timed out')
            return False

        return True

    def create_instance(self, nova, image_name, instance_name, flavor):
        """Create the specified instance.

        Returns the instance on success or None if it does not become
        ACTIVE within the retry window.
        """
        image = nova.images.find(name=image_name)
        flavor = nova.flavors.find(name=flavor)
        instance = nova.servers.create(name=instance_name, image=image,
                                       flavor=flavor)

        count = 1
        status = instance.status
        while status != 'ACTIVE' and count < 60:
            time.sleep(3)
            instance = nova.servers.get(instance.id)
            status = instance.status
            self.log.debug('instance status: {}'.format(status))
            count += 1

        if status != 'ACTIVE':
            self.log.error('instance creation timed out')
            return None

        return instance

    def delete_instance(self, nova, instance):
        """Delete the specified instance.

        Returns True on success, False if the server count does not drop
        within the retry window.
        """
        num_before = len(list(nova.servers.list()))
        nova.servers.delete(instance)

        count = 1
        num_after = len(list(nova.servers.list()))
        while num_after != (num_before - 1) and count < 10:
            time.sleep(3)
            num_after = len(list(nova.servers.list()))
            self.log.debug('number of instances: {}'.format(num_after))
            count += 1

        if num_after != (num_before - 1):
            self.log.error('instance deletion timed out')
            return False

        return True

Subscribers

People subscribed via source and target branches