Merge lp:~corey.bryant/charms/trusty/ceph-radosgw/amulet-basics into lp:~openstack-charmers-archive/charms/trusty/ceph-radosgw/next

Proposed by Corey Bryant
Status: Merged
Merged at revision: 27
Proposed branch: lp:~corey.bryant/charms/trusty/ceph-radosgw/amulet-basics
Merge into: lp:~openstack-charmers-archive/charms/trusty/ceph-radosgw/next
Diff against target: 1623 lines (+1291/-94)
19 files modified
Makefile (+11/-2)
charm-helpers-hooks.yaml (+9/-0)
charm-helpers-sync.yaml (+0/-9)
charm-helpers-tests.yaml (+5/-0)
hooks/charmhelpers/core/hookenv.py (+6/-4)
hooks/charmhelpers/core/host.py (+30/-5)
hooks/charmhelpers/core/services/helpers.py (+119/-5)
hooks/charmhelpers/fetch/__init__.py (+19/-5)
hooks/charmhelpers/fetch/archiveurl.py (+49/-44)
templates/ceph.conf (+20/-20)
tests/00-setup (+10/-0)
tests/14-basic-precise-icehouse (+11/-0)
tests/15-basic-trusty-icehouse (+9/-0)
tests/README (+53/-0)
tests/basic_deployment.py (+320/-0)
tests/charmhelpers/contrib/amulet/deployment.py (+77/-0)
tests/charmhelpers/contrib/amulet/utils.py (+176/-0)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+91/-0)
tests/charmhelpers/contrib/openstack/amulet/utils.py (+276/-0)
To merge this branch: bzr merge lp:~corey.bryant/charms/trusty/ceph-radosgw/amulet-basics
Reviewer: OpenStack Charmers
Status: Pending
Review via email: mp+236274@code.launchpad.net
Revision 31, by Corey Bryant:

Sync charm-helpers to pick up lint fix.

Preview Diff

1=== modified file 'Makefile'
2--- Makefile 2014-09-17 14:11:53 +0000
3+++ Makefile 2014-09-29 20:46:18 +0000
4@@ -2,16 +2,25 @@
5 PYTHON := /usr/bin/env python
6
7 lint:
8- @flake8 --exclude hooks/charmhelpers hooks
9+ @flake8 --exclude hooks/charmhelpers hooks tests
10 @charm proof
11
12+test:
13+ @echo Starting Amulet tests...
14+ # coreycb note: The -v should only be temporary until Amulet sends
15+ # raise_status() messages to stderr:
16+ # https://bugs.launchpad.net/amulet/+bug/1320357
17+ @juju test -v -p AMULET_HTTP_PROXY --timeout 900 \
18+ 00-setup 14-basic-precise-icehouse 15-basic-trusty-icehouse
19+
20 bin/charm_helpers_sync.py:
21 @mkdir -p bin
22 @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
23 > bin/charm_helpers_sync.py
24
25 sync: bin/charm_helpers_sync.py
26- @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-sync.yaml
27+ @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
28+ @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
29
30 publish: lint
31 bzr push lp:charms/ceph-radosgw
32
33=== added file 'charm-helpers-hooks.yaml'
34--- charm-helpers-hooks.yaml 1970-01-01 00:00:00 +0000
35+++ charm-helpers-hooks.yaml 2014-09-29 20:46:18 +0000
36@@ -0,0 +1,9 @@
37+branch: lp:charm-helpers
38+destination: hooks/charmhelpers
39+include:
40+ - core
41+ - fetch
42+ - contrib.storage.linux:
43+ - utils
44+ - payload.execd
45+ - contrib.openstack.alternatives
46
47=== removed file 'charm-helpers-sync.yaml'
48--- charm-helpers-sync.yaml 2014-01-24 16:02:57 +0000
49+++ charm-helpers-sync.yaml 1970-01-01 00:00:00 +0000
50@@ -1,9 +0,0 @@
51-branch: lp:charm-helpers
52-destination: hooks/charmhelpers
53-include:
54- - core
55- - fetch
56- - contrib.storage.linux:
57- - utils
58- - payload.execd
59- - contrib.openstack.alternatives
60
61=== added file 'charm-helpers-tests.yaml'
62--- charm-helpers-tests.yaml 1970-01-01 00:00:00 +0000
63+++ charm-helpers-tests.yaml 2014-09-29 20:46:18 +0000
64@@ -0,0 +1,5 @@
65+branch: lp:charm-helpers
66+destination: tests/charmhelpers
67+include:
68+ - contrib.amulet
69+ - contrib.openstack.amulet
70
71=== modified file 'hooks/charmhelpers/core/hookenv.py'
72--- hooks/charmhelpers/core/hookenv.py 2014-09-17 14:11:53 +0000
73+++ hooks/charmhelpers/core/hookenv.py 2014-09-29 20:46:18 +0000
74@@ -486,9 +486,10 @@
75 hooks.execute(sys.argv)
76 """
77
78- def __init__(self):
79+ def __init__(self, config_save=True):
80 super(Hooks, self).__init__()
81 self._hooks = {}
82+ self._config_save = config_save
83
84 def register(self, name, function):
85 """Register a hook"""
86@@ -499,9 +500,10 @@
87 hook_name = os.path.basename(args[0])
88 if hook_name in self._hooks:
89 self._hooks[hook_name]()
90- cfg = config()
91- if cfg.implicit_save:
92- cfg.save()
93+ if self._config_save:
94+ cfg = config()
95+ if cfg.implicit_save:
96+ cfg.save()
97 else:
98 raise UnregisteredHookError(hook_name)
99
100
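The hookenv change above makes the implicit config save opt-out. A minimal sketch of how a charm hook script could use it; the hook name and body are hypothetical, only the Hooks(config_save=...) keyword and register()/execute() calls come from the synced code:

    import sys

    from charmhelpers.core.hookenv import Hooks, UnregisteredHookError, log

    # config_save=False skips the implicit config().save() that
    # Hooks.execute() otherwise performs after a registered hook runs.
    hooks = Hooks(config_save=False)

    def config_changed():
        pass  # hypothetical hook body

    hooks.register('config-changed', config_changed)

    if __name__ == '__main__':
        try:
            hooks.execute(sys.argv)
        except UnregisteredHookError as e:
            log('Unknown hook {} - skipping.'.format(e))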
101=== modified file 'hooks/charmhelpers/core/host.py'
102--- hooks/charmhelpers/core/host.py 2014-09-17 14:11:53 +0000
103+++ hooks/charmhelpers/core/host.py 2014-09-29 20:46:18 +0000
104@@ -68,8 +68,8 @@
105 """Determine whether a system service is available"""
106 try:
107 subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
108- except subprocess.CalledProcessError:
109- return False
110+ except subprocess.CalledProcessError as e:
111+ return 'unrecognized service' not in e.output
112 else:
113 return True
114
115@@ -209,10 +209,15 @@
116 return system_mounts
117
118
119-def file_hash(path):
120- """Generate a md5 hash of the contents of 'path' or None if not found """
121+def file_hash(path, hash_type='md5'):
122+ """
123+ Generate a hash checksum of the contents of 'path' or None if not found.
124+
 125+ :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
126+ such as md5, sha1, sha256, sha512, etc.
127+ """
128 if os.path.exists(path):
129- h = hashlib.md5()
130+ h = getattr(hashlib, hash_type)()
131 with open(path, 'r') as source:
132 h.update(source.read()) # IGNORE:E1101 - it does have update
133 return h.hexdigest()
134@@ -220,6 +225,26 @@
135 return None
136
137
138+def check_hash(path, checksum, hash_type='md5'):
139+ """
140+ Validate a file using a cryptographic checksum.
141+
142+ :param str checksum: Value of the checksum used to validate the file.
143+ :param str hash_type: Hash algorithm used to generate `checksum`.
 144+ Can be any hash algorithm supported by :mod:`hashlib`,
145+ such as md5, sha1, sha256, sha512, etc.
146+ :raises ChecksumError: If the file fails the checksum
147+
148+ """
149+ actual_checksum = file_hash(path, hash_type)
150+ if checksum != actual_checksum:
151+ raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
152+
153+
154+class ChecksumError(ValueError):
155+ pass
156+
157+
158 def restart_on_change(restart_map, stopstart=False):
159 """Restart services based on configuration files changing
160
161
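file_hash() now accepts a hash_type, and check_hash()/ChecksumError are new, so callers can validate files with any hashlib algorithm. A minimal sketch, assuming an illustrative file path:

    from charmhelpers.core.host import ChecksumError, check_hash, file_hash

    path = '/tmp/archive.tgz'                       # hypothetical file
    expected = file_hash(path, hash_type='sha256')  # None if the file is missing

    try:
        # Raises ChecksumError when the computed digest does not match.
        check_hash(path, expected, hash_type='sha256')
    except ChecksumError as err:
        print('validation failed: {}'.format(err))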
162=== modified file 'hooks/charmhelpers/core/services/helpers.py'
163--- hooks/charmhelpers/core/services/helpers.py 2014-09-17 14:11:53 +0000
164+++ hooks/charmhelpers/core/services/helpers.py 2014-09-29 20:46:18 +0000
165@@ -1,3 +1,5 @@
166+import os
167+import yaml
168 from charmhelpers.core import hookenv
169 from charmhelpers.core import templating
170
171@@ -19,15 +21,21 @@
172 the `name` attribute that are complete will used to populate the dictionary
173 values (see `get_data`, below).
174
175- The generated context will be namespaced under the interface type, to prevent
176- potential naming conflicts.
177+ The generated context will be namespaced under the relation :attr:`name`,
178+ to prevent potential naming conflicts.
179+
180+ :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
181+ :param list additional_required_keys: Extend the list of :attr:`required_keys`
182 """
183 name = None
184 interface = None
185 required_keys = []
186
187- def __init__(self, *args, **kwargs):
188- super(RelationContext, self).__init__(*args, **kwargs)
189+ def __init__(self, name=None, additional_required_keys=None):
190+ if name is not None:
191+ self.name = name
192+ if additional_required_keys is not None:
193+ self.required_keys.extend(additional_required_keys)
194 self.get_data()
195
196 def __bool__(self):
197@@ -101,9 +109,115 @@
198 return {}
199
200
201+class MysqlRelation(RelationContext):
202+ """
203+ Relation context for the `mysql` interface.
204+
205+ :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
206+ :param list additional_required_keys: Extend the list of :attr:`required_keys`
207+ """
208+ name = 'db'
209+ interface = 'mysql'
210+ required_keys = ['host', 'user', 'password', 'database']
211+
212+
213+class HttpRelation(RelationContext):
214+ """
215+ Relation context for the `http` interface.
216+
217+ :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
218+ :param list additional_required_keys: Extend the list of :attr:`required_keys`
219+ """
220+ name = 'website'
221+ interface = 'http'
222+ required_keys = ['host', 'port']
223+
224+ def provide_data(self):
225+ return {
226+ 'host': hookenv.unit_get('private-address'),
227+ 'port': 80,
228+ }
229+
230+
231+class RequiredConfig(dict):
232+ """
233+ Data context that loads config options with one or more mandatory options.
234+
235+ Once the required options have been changed from their default values, all
236+ config options will be available, namespaced under `config` to prevent
237+ potential naming conflicts (for example, between a config option and a
238+ relation property).
239+
240+ :param list *args: List of options that must be changed from their default values.
241+ """
242+
243+ def __init__(self, *args):
244+ self.required_options = args
245+ self['config'] = hookenv.config()
246+ with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
247+ self.config = yaml.load(fp).get('options', {})
248+
249+ def __bool__(self):
250+ for option in self.required_options:
251+ if option not in self['config']:
252+ return False
253+ current_value = self['config'][option]
254+ default_value = self.config[option].get('default')
255+ if current_value == default_value:
256+ return False
257+ if current_value in (None, '') and default_value in (None, ''):
258+ return False
259+ return True
260+
261+ def __nonzero__(self):
262+ return self.__bool__()
263+
264+
265+class StoredContext(dict):
266+ """
267+ A data context that always returns the data that it was first created with.
268+
269+ This is useful to do a one-time generation of things like passwords, that
270+ will thereafter use the same value that was originally generated, instead
271+ of generating a new value each time it is run.
272+ """
273+ def __init__(self, file_name, config_data):
274+ """
275+ If the file exists, populate `self` with the data from the file.
276+ Otherwise, populate with the given data and persist it to the file.
277+ """
278+ if os.path.exists(file_name):
279+ self.update(self.read_context(file_name))
280+ else:
281+ self.store_context(file_name, config_data)
282+ self.update(config_data)
283+
284+ def store_context(self, file_name, config_data):
285+ if not os.path.isabs(file_name):
286+ file_name = os.path.join(hookenv.charm_dir(), file_name)
287+ with open(file_name, 'w') as file_stream:
288+ os.fchmod(file_stream.fileno(), 0600)
289+ yaml.dump(config_data, file_stream)
290+
291+ def read_context(self, file_name):
292+ if not os.path.isabs(file_name):
293+ file_name = os.path.join(hookenv.charm_dir(), file_name)
294+ with open(file_name, 'r') as file_stream:
295+ data = yaml.load(file_stream)
296+ if not data:
297+ raise OSError("%s is empty" % file_name)
298+ return data
299+
300+
301 class TemplateCallback(ManagerCallback):
302 """
303- Callback class that will render a template, for use as a ready action.
304+ Callback class that will render a Jinja2 template, for use as a ready action.
305+
306+ :param str source: The template source file, relative to `$CHARM_DIR/templates`
307+ :param str target: The target to write the rendered template to
308+ :param str owner: The owner of the rendered file
309+ :param str group: The group of the rendered file
310+ :param int perms: The permissions of the rendered file
311 """
312 def __init__(self, source, target, owner='root', group='root', perms=0444):
313 self.source = source
314
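The new context classes above are small dict subclasses that only evaluate truthy once their data is complete, so a services-framework charm can gate template rendering on them. A rough sketch under assumed names (the 'vip' option and 'secrets.yaml' file are hypothetical):

    from charmhelpers.core.services.helpers import (
        HttpRelation,
        RequiredConfig,
        StoredContext,
    )

    website = HttpRelation()                 # namespaced under 'website'
    required = RequiredConfig('vip')         # true once 'vip' leaves its default
    secrets = StoredContext('secrets.yaml',  # generated once, re-read thereafter
                            {'password': 'changeme'})

    if website and required:
        pass  # safe to render templates that reference these contexts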
315=== modified file 'hooks/charmhelpers/fetch/__init__.py'
316--- hooks/charmhelpers/fetch/__init__.py 2014-09-17 14:11:53 +0000
317+++ hooks/charmhelpers/fetch/__init__.py 2014-09-29 20:46:18 +0000
318@@ -208,7 +208,8 @@
319 """Add a package source to this system.
320
321 @param source: a URL or sources.list entry, as supported by
322- add-apt-repository(1). Examples:
323+ add-apt-repository(1). Examples::
324+
325 ppa:charmers/example
326 deb https://stub:key@private.example.com/ubuntu trusty main
327
328@@ -311,22 +312,35 @@
329 apt_update(fatal=True)
330
331
332-def install_remote(source):
333+def install_remote(source, *args, **kwargs):
334 """
335 Install a file tree from a remote source
336
337 The specified source should be a url of the form:
338 scheme://[host]/path[#[option=value][&...]]
339
340- Schemes supported are based on this modules submodules
341- Options supported are submodule-specific"""
 342+ Schemes supported are based on this module's submodules.
343+ Options supported are submodule-specific.
344+ Additional arguments are passed through to the submodule.
345+
346+ For example::
347+
348+ dest = install_remote('http://example.com/archive.tgz',
349+ checksum='deadbeef',
350+ hash_type='sha1')
351+
352+ This will download `archive.tgz`, validate it using SHA1 and, if
353+ the file is ok, extract it and return the directory in which it
354+ was extracted. If the checksum fails, it will raise
355+ :class:`charmhelpers.core.host.ChecksumError`.
356+ """
357 # We ONLY check for True here because can_handle may return a string
358 # explaining why it can't handle a given source.
359 handlers = [h for h in plugins() if h.can_handle(source) is True]
360 installed_to = None
361 for handler in handlers:
362 try:
363- installed_to = handler.install(source)
364+ installed_to = handler.install(source, *args, **kwargs)
365 except UnhandledSource:
366 pass
367 if not installed_to:
368
369=== modified file 'hooks/charmhelpers/fetch/archiveurl.py'
370--- hooks/charmhelpers/fetch/archiveurl.py 2014-09-17 14:11:53 +0000
371+++ hooks/charmhelpers/fetch/archiveurl.py 2014-09-29 20:46:18 +0000
372@@ -12,21 +12,19 @@
373 get_archive_handler,
374 extract,
375 )
376-from charmhelpers.core.host import mkdir
377-
378-"""
379-This class is a plugin for charmhelpers.fetch.install_remote.
380-
381-It grabs, validates and installs remote archives fetched over "http", "https", "ftp" or "file" protocols. The contents of the archive are installed in $CHARM_DIR/fetched/.
382-
383-Example usage:
384-install_remote("https://example.com/some/archive.tar.gz")
385-# Installs the contents of archive.tar.gz in $CHARM_DIR/fetched/.
386-
387-See charmhelpers.fetch.archiveurl.get_archivehandler for supported archive types.
388-"""
389+from charmhelpers.core.host import mkdir, check_hash
390+
391+
392 class ArchiveUrlFetchHandler(BaseFetchHandler):
393- """Handler for archives via generic URLs"""
394+ """
395+ Handler to download archive files from arbitrary URLs.
396+
397+ Can fetch from http, https, ftp, and file URLs.
398+
399+ Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.
400+
401+ Installs the contents of the archive in $CHARM_DIR/fetched/.
402+ """
403 def can_handle(self, source):
404 url_parts = self.parse_url(source)
405 if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
406@@ -36,6 +34,12 @@
407 return False
408
409 def download(self, source, dest):
410+ """
411+ Download an archive file.
412+
413+ :param str source: URL pointing to an archive file.
414+ :param str dest: Local path location to download archive file to.
415+ """
 416+ # propagate all exceptions
417 # URLError, OSError, etc
418 proto, netloc, path, params, query, fragment = urlparse.urlparse(source)
419@@ -60,7 +64,30 @@
420 os.unlink(dest)
421 raise e
422
423- def install(self, source):
424+ # Mandatory file validation via Sha1 or MD5 hashing.
425+ def download_and_validate(self, url, hashsum, validate="sha1"):
426+ tempfile, headers = urlretrieve(url)
427+ check_hash(tempfile, hashsum, validate)
428+ return tempfile
429+
430+ def install(self, source, dest=None, checksum=None, hash_type='sha1'):
431+ """
432+ Download and install an archive file, with optional checksum validation.
433+
434+ The checksum can also be given on the `source` URL's fragment.
435+ For example::
436+
437+ handler.install('http://example.com/file.tgz#sha1=deadbeef')
438+
439+ :param str source: URL pointing to an archive file.
440+ :param str dest: Local destination path to install to. If not given,
441+ installs to `$CHARM_DIR/archives/archive_file_name`.
442+ :param str checksum: If given, validate the archive file after download.
443+ :param str hash_type: Algorithm used to generate `checksum`.
 444+ Can be any hash algorithm supported by :mod:`hashlib`,
445+ such as md5, sha1, sha256, sha512, etc.
446+
447+ """
448 url_parts = self.parse_url(source)
449 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
450 if not os.path.exists(dest_dir):
451@@ -72,32 +99,10 @@
452 raise UnhandledSource(e.reason)
453 except OSError as e:
454 raise UnhandledSource(e.strerror)
455- return extract(dld_file)
456-
457- # Mandatory file validation via Sha1 or MD5 hashing.
458- def download_and_validate(self, url, hashsum, validate="sha1"):
459- if validate == 'sha1' and len(hashsum) != 40:
460- raise ValueError("HashSum must be = 40 characters when using sha1"
461- " validation")
462- if validate == 'md5' and len(hashsum) != 32:
463- raise ValueError("HashSum must be = 32 characters when using md5"
464- " validation")
465- tempfile, headers = urlretrieve(url)
466- self.validate_file(tempfile, hashsum, validate)
467- return tempfile
468-
469- # Predicate method that returns status of hash matching expected hash.
470- def validate_file(self, source, hashsum, vmethod='sha1'):
471- if vmethod != 'sha1' and vmethod != 'md5':
472- raise ValueError("Validation Method not supported")
473-
474- if vmethod == 'md5':
475- m = hashlib.md5()
476- if vmethod == 'sha1':
477- m = hashlib.sha1()
478- with open(source) as f:
479- for line in f:
480- m.update(line)
481- if hashsum != m.hexdigest():
482- msg = "Hash Mismatch on {} expected {} got {}"
483- raise ValueError(msg.format(source, hashsum, m.hexdigest()))
484+ options = urlparse.parse_qs(url_parts.fragment)
485+ for key, value in options.items():
486+ if key in hashlib.algorithms:
487+ check_hash(dld_file, value, key)
488+ if checksum:
489+ check_hash(dld_file, checksum, hash_type)
490+ return extract(dld_file, dest)
491
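With the rework above, install() takes an explicit checksum or picks one up from the URL fragment: any fragment key matching a hashlib algorithm name is passed to check_hash(). A hedged sketch via install_remote, using a made-up URL and truncated digest:

    from charmhelpers.fetch import install_remote

    # Checksum carried on the URL fragment; install() parses it with
    # urlparse.parse_qs and validates with check_hash().
    dest = install_remote('http://example.com/pkg.tgz#sha1=0f1e2d3c...')

    # Or passed explicitly; extra args are forwarded to the handler's install().
    dest = install_remote('http://example.com/pkg.tgz',
                          checksum='0f1e2d3c...', hash_type='sha1')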
492=== modified file 'templates/ceph.conf'
493--- templates/ceph.conf 2014-08-07 10:09:14 +0000
494+++ templates/ceph.conf 2014-09-29 20:46:18 +0000
495@@ -1,29 +1,29 @@
496 [global]
497 {% if old_auth %}
498- auth supported = {{ auth_supported }}
499+auth supported = {{ auth_supported }}
500 {% else %}
501- auth cluster required = {{ auth_supported }}
502- auth service required = {{ auth_supported }}
503- auth client required = {{ auth_supported }}
504+auth cluster required = {{ auth_supported }}
505+auth service required = {{ auth_supported }}
506+auth client required = {{ auth_supported }}
507 {% endif %}
508- mon host = {{ mon_hosts }}
509- log to syslog = {{ use_syslog }}
510- err to syslog = {{ use_syslog }}
511- clog to syslog = {{ use_syslog }}
512+mon host = {{ mon_hosts }}
513+log to syslog = {{ use_syslog }}
514+err to syslog = {{ use_syslog }}
515+clog to syslog = {{ use_syslog }}
516
517 [client.radosgw.gateway]
518- host = {{ hostname }}
519- keyring = /etc/ceph/keyring.rados.gateway
520- rgw socket path = /tmp/radosgw.sock
521- log file = /var/log/ceph/radosgw.log
522- # Turn off 100-continue optimization as stock mod_fastcgi
523- # does not support it
524- rgw print continue = false
525+host = {{ hostname }}
526+keyring = /etc/ceph/keyring.rados.gateway
527+rgw socket path = /tmp/radosgw.sock
528+log file = /var/log/ceph/radosgw.log
529+# Turn off 100-continue optimization as stock mod_fastcgi
530+# does not support it
531+rgw print continue = false
532 {% if auth_type == 'keystone' %}
533- rgw keystone url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/
534- rgw keystone admin token = {{ admin_token }}
535- rgw keystone accepted roles = {{ user_roles }}
536- rgw keystone token cache size = {{ cache_size }}
537- rgw keystone revocation interval = {{ revocation_check_interval }}
538+rgw keystone url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/
539+rgw keystone admin token = {{ admin_token }}
540+rgw keystone accepted roles = {{ user_roles }}
541+rgw keystone token cache size = {{ cache_size }}
542+rgw keystone revocation interval = {{ revocation_check_interval }}
543 #nss db path = /var/lib/ceph/nss
544 {% endif %}
545
546=== added directory 'tests'
547=== added file 'tests/00-setup'
548--- tests/00-setup 1970-01-01 00:00:00 +0000
549+++ tests/00-setup 2014-09-29 20:46:18 +0000
550@@ -0,0 +1,10 @@
551+#!/bin/bash
552+
553+set -ex
554+
555+sudo add-apt-repository --yes ppa:juju/stable
556+sudo apt-get update --yes
557+sudo apt-get install --yes python-amulet \
558+ python-keystoneclient \
559+ python-glanceclient \
560+ python-novaclient
561
562=== added file 'tests/14-basic-precise-icehouse'
563--- tests/14-basic-precise-icehouse 1970-01-01 00:00:00 +0000
564+++ tests/14-basic-precise-icehouse 2014-09-29 20:46:18 +0000
565@@ -0,0 +1,11 @@
566+#!/usr/bin/python
567+
568+"""Amulet tests on a basic ceph-radosgw deployment on precise-icehouse."""
569+
570+from basic_deployment import CephRadosGwBasicDeployment
571+
572+if __name__ == '__main__':
573+ deployment = CephRadosGwBasicDeployment(series='precise',
574+ openstack='cloud:precise-icehouse',
575+ source='cloud:precise-updates/icehouse')
576+ deployment.run_tests()
577
578=== added file 'tests/15-basic-trusty-icehouse'
579--- tests/15-basic-trusty-icehouse 1970-01-01 00:00:00 +0000
580+++ tests/15-basic-trusty-icehouse 2014-09-29 20:46:18 +0000
581@@ -0,0 +1,9 @@
582+#!/usr/bin/python
583+
584+"""Amulet tests on a basic ceph-radosgw deployment on trusty-icehouse."""
585+
586+from basic_deployment import CephRadosGwBasicDeployment
587+
588+if __name__ == '__main__':
589+ deployment = CephRadosGwBasicDeployment(series='trusty')
590+ deployment.run_tests()
591
592=== added file 'tests/README'
593--- tests/README 1970-01-01 00:00:00 +0000
594+++ tests/README 2014-09-29 20:46:18 +0000
595@@ -0,0 +1,53 @@
596+This directory provides Amulet tests that focus on verification of
597+ceph-radosgw deployments.
598+
599+In order to run tests, you'll need charm-tools installed (in addition to
600+juju, of course):
601+ sudo add-apt-repository ppa:juju/stable
602+ sudo apt-get update
603+ sudo apt-get install charm-tools
604+
605+If you use a web proxy server to access the web, you'll need to set the
606+AMULET_HTTP_PROXY environment variable to the http URL of the proxy server.
607+
608+The following examples demonstrate different ways that tests can be executed.
609+All examples are run from the charm's root directory.
610+
611+ * To run all tests (starting with 00-setup):
612+
613+ make test
614+
615+ * To run a specific test module (or modules):
616+
617+ juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
618+
619+ * To run a specific test module (or modules), and keep the environment
620+ deployed after a failure:
621+
622+ juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
623+
624+ * To re-run a test module against an already deployed environment (one
625+ that was deployed by a previous call to 'juju test --set-e'):
626+
627+ ./tests/15-basic-trusty-icehouse
628+
629+For debugging and test development purposes, all code should be idempotent.
630+In other words, the code should have the ability to be re-run without changing
631+the results beyond the initial run. This enables editing and re-running of a
632+test module against an already deployed environment, as described above.
633+
634+Manual debugging tips:
635+
636+ * Set the following env vars before using the OpenStack CLI as admin:
637+ export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
638+ export OS_TENANT_NAME=admin
639+ export OS_USERNAME=admin
640+ export OS_PASSWORD=openstack
641+ export OS_REGION_NAME=RegionOne
642+
643+ * Set the following env vars before using the OpenStack CLI as demoUser:
644+ export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
645+ export OS_TENANT_NAME=demoTenant
646+ export OS_USERNAME=demoUser
647+ export OS_PASSWORD=password
648+ export OS_REGION_NAME=RegionOne
649
650=== added file 'tests/basic_deployment.py'
651--- tests/basic_deployment.py 1970-01-01 00:00:00 +0000
652+++ tests/basic_deployment.py 2014-09-29 20:46:18 +0000
653@@ -0,0 +1,320 @@
654+#!/usr/bin/python
655+
656+import amulet
657+from charmhelpers.contrib.openstack.amulet.deployment import (
658+ OpenStackAmuletDeployment
659+)
660+from charmhelpers.contrib.openstack.amulet.utils import ( # noqa
661+ OpenStackAmuletUtils,
662+ DEBUG,
663+ ERROR
664+)
665+
666+# Use DEBUG to turn on debug logging
667+u = OpenStackAmuletUtils(ERROR)
668+
669+
670+class CephRadosGwBasicDeployment(OpenStackAmuletDeployment):
671+ """Amulet tests on a basic ceph-radosgw deployment."""
672+
673+ def __init__(self, series=None, openstack=None, source=None, stable=False):
674+ """Deploy the entire test environment."""
675+ super(CephRadosGwBasicDeployment, self).__init__(series, openstack,
676+ source, stable)
677+ self._add_services()
678+ self._add_relations()
679+ self._configure_services()
680+ self._deploy()
681+ self._initialize_tests()
682+
683+ def _add_services(self):
684+ """Add services
685+
686+ Add the services that we're testing, where ceph-radosgw is local,
 687+ and the rest of the services are from lp branches that are
688+ compatible with the local charm (e.g. stable or next).
689+ """
690+ this_service = {'name': 'ceph-radosgw'}
691+ other_services = [{'name': 'ceph', 'units': 3}, {'name': 'mysql'},
692+ {'name': 'keystone'}, {'name': 'rabbitmq-server'},
693+ {'name': 'nova-compute'}, {'name': 'glance'},
694+ {'name': 'cinder'}]
695+ super(CephRadosGwBasicDeployment, self)._add_services(this_service,
696+ other_services)
697+
698+ def _add_relations(self):
699+ """Add all of the relations for the services."""
700+ relations = {
701+ 'nova-compute:shared-db': 'mysql:shared-db',
702+ 'nova-compute:amqp': 'rabbitmq-server:amqp',
703+ 'nova-compute:image-service': 'glance:image-service',
704+ 'nova-compute:ceph': 'ceph:client',
705+ 'keystone:shared-db': 'mysql:shared-db',
706+ 'glance:shared-db': 'mysql:shared-db',
707+ 'glance:identity-service': 'keystone:identity-service',
708+ 'glance:amqp': 'rabbitmq-server:amqp',
709+ 'glance:ceph': 'ceph:client',
710+ 'cinder:shared-db': 'mysql:shared-db',
711+ 'cinder:identity-service': 'keystone:identity-service',
712+ 'cinder:amqp': 'rabbitmq-server:amqp',
713+ 'cinder:image-service': 'glance:image-service',
714+ 'cinder:ceph': 'ceph:client',
715+ 'ceph-radosgw:mon': 'ceph:radosgw',
716+ 'ceph-radosgw:identity-service': 'keystone:identity-service'
717+ }
718+ super(CephRadosGwBasicDeployment, self)._add_relations(relations)
719+
720+ def _configure_services(self):
721+ """Configure all of the services."""
722+ keystone_config = {'admin-password': 'openstack',
723+ 'admin-token': 'ubuntutesting'}
724+ mysql_config = {'dataset-size': '50%'}
725+ cinder_config = {'block-device': 'None', 'glance-api-version': '2'}
726+ ceph_config = {
727+ 'monitor-count': '3',
728+ 'auth-supported': 'none',
729+ 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc',
730+ 'monitor-secret': 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==',
731+ 'osd-reformat': 'yes',
732+ 'ephemeral-unmount': '/mnt',
733+ 'osd-devices': '/dev/vdb /srv/ceph'
734+ }
735+
736+ configs = {'keystone': keystone_config,
737+ 'mysql': mysql_config,
738+ 'cinder': cinder_config,
739+ 'ceph': ceph_config}
740+ super(CephRadosGwBasicDeployment, self)._configure_services(configs)
741+
742+ def _initialize_tests(self):
743+ """Perform final initialization before tests get run."""
744+ # Access the sentries for inspecting service units
745+ self.mysql_sentry = self.d.sentry.unit['mysql/0']
746+ self.keystone_sentry = self.d.sentry.unit['keystone/0']
747+ self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0']
748+ self.nova_compute_sentry = self.d.sentry.unit['nova-compute/0']
749+ self.glance_sentry = self.d.sentry.unit['glance/0']
750+ self.cinder_sentry = self.d.sentry.unit['cinder/0']
751+ self.ceph0_sentry = self.d.sentry.unit['ceph/0']
752+ self.ceph1_sentry = self.d.sentry.unit['ceph/1']
753+ self.ceph2_sentry = self.d.sentry.unit['ceph/2']
754+ self.ceph_radosgw_sentry = self.d.sentry.unit['ceph-radosgw/0']
755+
756+ # Authenticate admin with keystone
757+ self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
758+ user='admin',
759+ password='openstack',
760+ tenant='admin')
761+
762+ # Authenticate admin with glance endpoint
763+ self.glance = u.authenticate_glance_admin(self.keystone)
764+
765+ # Create a demo tenant/role/user
766+ self.demo_tenant = 'demoTenant'
767+ self.demo_role = 'demoRole'
768+ self.demo_user = 'demoUser'
769+ if not u.tenant_exists(self.keystone, self.demo_tenant):
770+ tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant,
771+ description='demo tenant',
772+ enabled=True)
773+ self.keystone.roles.create(name=self.demo_role)
774+ self.keystone.users.create(name=self.demo_user,
775+ password='password',
776+ tenant_id=tenant.id,
777+ email='demo@demo.com')
778+
779+ # Authenticate demo user with keystone
780+ self.keystone_demo = u.authenticate_keystone_user(self.keystone,
781+ self.demo_user,
782+ 'password',
783+ self.demo_tenant)
784+
785+ # Authenticate demo user with nova-api
786+ self.nova_demo = u.authenticate_nova_user(self.keystone,
787+ self.demo_user,
788+ 'password',
789+ self.demo_tenant)
790+
791+ def _ceph_osd_id(self, index):
792+ """Produce a shell command that will return a ceph-osd id."""
793+ return "`initctl list | grep 'ceph-osd ' | awk 'NR=={} {{ print $2 }}' | grep -o '[0-9]*'`".format(index + 1) # noqa
794+
795+ def test_services(self):
796+ """Verify the expected services are running on the service units."""
797+ ceph_services = ['status ceph-mon-all',
798+ 'status ceph-mon id=`hostname`']
799+ commands = {
800+ self.mysql_sentry: ['status mysql'],
801+ self.rabbitmq_sentry: ['sudo service rabbitmq-server status'],
802+ self.nova_compute_sentry: ['status nova-compute'],
803+ self.keystone_sentry: ['status keystone'],
804+ self.glance_sentry: ['status glance-registry',
805+ 'status glance-api'],
806+ self.cinder_sentry: ['status cinder-api',
807+ 'status cinder-scheduler',
808+ 'status cinder-volume'],
809+ self.ceph_radosgw_sentry: ['status radosgw-all']
810+ }
811+ ceph_osd0 = 'status ceph-osd id={}'.format(self._ceph_osd_id(0))
812+ ceph_osd1 = 'status ceph-osd id={}'.format(self._ceph_osd_id(1))
813+ ceph_services.extend([ceph_osd0, ceph_osd1, 'status ceph-osd-all'])
814+ commands[self.ceph0_sentry] = ceph_services
815+ commands[self.ceph1_sentry] = ceph_services
816+ commands[self.ceph2_sentry] = ceph_services
817+
818+ ret = u.validate_services(commands)
819+ if ret:
820+ amulet.raise_status(amulet.FAIL, msg=ret)
821+
822+ def test_ceph_radosgw_ceph_relation(self):
823+ """Verify the ceph-radosgw to ceph relation data."""
824+ unit = self.ceph_radosgw_sentry
825+ relation = ['mon', 'ceph:radosgw']
826+ expected = {
827+ 'private-address': u.valid_ip
828+ }
829+
830+ ret = u.validate_relation_data(unit, relation, expected)
831+ if ret:
832+ message = u.relation_error('ceph-radosgw to ceph', ret)
833+ amulet.raise_status(amulet.FAIL, msg=message)
834+
835+ def test_ceph0_ceph_radosgw_relation(self):
836+ """Verify the ceph0 to ceph-radosgw relation data."""
837+ unit = self.ceph0_sentry
838+ relation = ['radosgw', 'ceph-radosgw:mon']
839+ expected = {
840+ 'private-address': u.valid_ip,
841+ 'radosgw_key': u.not_null,
842+ 'auth': 'none',
843+ 'ceph-public-address': u.valid_ip,
844+ 'fsid': u'6547bd3e-1397-11e2-82e5-53567c8d32dc'
845+ }
846+
847+ ret = u.validate_relation_data(unit, relation, expected)
848+ if ret:
849+ message = u.relation_error('ceph0 to ceph-radosgw', ret)
850+ amulet.raise_status(amulet.FAIL, msg=message)
851+
852+ def test_ceph1_ceph_radosgw_relation(self):
853+ """Verify the ceph1 to ceph-radosgw relation data."""
854+ unit = self.ceph1_sentry
855+ relation = ['radosgw', 'ceph-radosgw:mon']
856+ expected = {
857+ 'private-address': u.valid_ip,
858+ 'radosgw_key': u.not_null,
859+ 'auth': 'none',
860+ 'ceph-public-address': u.valid_ip,
861+ 'fsid': u'6547bd3e-1397-11e2-82e5-53567c8d32dc'
862+ }
863+
864+ ret = u.validate_relation_data(unit, relation, expected)
865+ if ret:
866+ message = u.relation_error('ceph1 to ceph-radosgw', ret)
867+ amulet.raise_status(amulet.FAIL, msg=message)
868+
869+ def test_ceph2_ceph_radosgw_relation(self):
870+ """Verify the ceph2 to ceph-radosgw relation data."""
871+ unit = self.ceph2_sentry
872+ relation = ['radosgw', 'ceph-radosgw:mon']
873+ expected = {
874+ 'private-address': u.valid_ip,
875+ 'radosgw_key': u.not_null,
876+ 'auth': 'none',
877+ 'ceph-public-address': u.valid_ip,
878+ 'fsid': u'6547bd3e-1397-11e2-82e5-53567c8d32dc'
879+ }
880+
881+ ret = u.validate_relation_data(unit, relation, expected)
882+ if ret:
883+ message = u.relation_error('ceph2 to ceph-radosgw', ret)
884+ amulet.raise_status(amulet.FAIL, msg=message)
885+
886+ def test_ceph_radosgw_keystone_relation(self):
887+ """Verify the ceph-radosgw to keystone relation data."""
888+ unit = self.ceph_radosgw_sentry
889+ relation = ['identity-service', 'keystone:identity-service']
890+ expected = {
891+ 'service': 'swift',
892+ 'region': 'RegionOne',
893+ 'public_url': u.valid_url,
894+ 'internal_url': u.valid_url,
895+ 'private-address': u.valid_ip,
896+ 'requested_roles': 'Member,Admin',
897+ 'admin_url': u.valid_url
898+ }
899+
900+ ret = u.validate_relation_data(unit, relation, expected)
901+ if ret:
902+ message = u.relation_error('ceph-radosgw to keystone', ret)
903+ amulet.raise_status(amulet.FAIL, msg=message)
904+
905+ def test_keystone_ceph_radosgw_relation(self):
906+ """Verify the keystone to ceph-radosgw relation data."""
907+ unit = self.keystone_sentry
908+ relation = ['identity-service', 'ceph-radosgw:identity-service']
909+ expected = {
910+ 'service_protocol': 'http',
911+ 'service_tenant': 'services',
912+ 'admin_token': 'ubuntutesting',
913+ 'service_password': u.not_null,
914+ 'service_port': '5000',
915+ 'auth_port': '35357',
916+ 'auth_protocol': 'http',
917+ 'private-address': u.valid_ip,
918+ 'https_keystone': 'False',
919+ 'auth_host': u.valid_ip,
920+ 'service_username': 'swift',
921+ 'service_tenant_id': u.not_null,
922+ 'service_host': u.valid_ip
923+ }
924+
925+ ret = u.validate_relation_data(unit, relation, expected)
926+ if ret:
927+ message = u.relation_error('keystone to ceph-radosgw', ret)
928+ amulet.raise_status(amulet.FAIL, msg=message)
929+
930+ def test_ceph_config(self):
931+ """Verify the data in the ceph config file."""
932+ unit = self.ceph_radosgw_sentry
933+ conf = '/etc/ceph/ceph.conf'
934+ keystone_sentry = self.keystone_sentry
935+ relation = keystone_sentry.relation('identity-service',
936+ 'ceph-radosgw:identity-service')
937+ keystone_ip = relation['auth_host']
938+ expected = {
939+ 'global': {
940+ 'auth cluster required': 'none',
941+ 'auth service required': 'none',
942+ 'auth client required': 'none',
943+ 'log to syslog': 'false',
944+ 'err to syslog': 'false',
945+ 'clog to syslog': 'false'
946+ },
947+ 'client.radosgw.gateway': {
948+ 'keyring': '/etc/ceph/keyring.rados.gateway',
949+ 'rgw socket path': '/tmp/radosgw.sock',
950+ 'log file': '/var/log/ceph/radosgw.log',
951+ 'rgw print continue': 'false',
952+ 'rgw keystone url': 'http://{}:35357/'.format(keystone_ip),
953+ 'rgw keystone admin token': 'ubuntutesting',
954+ 'rgw keystone accepted roles': 'Member,Admin',
955+ 'rgw keystone token cache size': '500',
956+ 'rgw keystone revocation interval': '600'
957+ },
958+ }
959+
960+ for section, pairs in expected.iteritems():
961+ ret = u.validate_config_data(unit, conf, section, pairs)
962+ if ret:
963+ message = "ceph config error: {}".format(ret)
964+ amulet.raise_status(amulet.FAIL, msg=message)
965+
966+ def test_restart_on_config_change(self):
967+ """Verify the specified services are restarted on config change."""
968+ # NOTE(coreycb): Test not implemented but should it be? ceph-radosgw
969+ # svcs aren't restarted by charm after config change
970+ # Should they be restarted?
971+ if self._get_openstack_release() >= self.precise_essex:
972+ u.log.error("Test not implemented")
973+ return
974
975=== added directory 'tests/charmhelpers'
976=== added file 'tests/charmhelpers/__init__.py'
977=== added directory 'tests/charmhelpers/contrib'
978=== added file 'tests/charmhelpers/contrib/__init__.py'
979=== added directory 'tests/charmhelpers/contrib/amulet'
980=== added file 'tests/charmhelpers/contrib/amulet/__init__.py'
981=== added file 'tests/charmhelpers/contrib/amulet/deployment.py'
982--- tests/charmhelpers/contrib/amulet/deployment.py 1970-01-01 00:00:00 +0000
983+++ tests/charmhelpers/contrib/amulet/deployment.py 2014-09-29 20:46:18 +0000
984@@ -0,0 +1,77 @@
985+import amulet
986+
987+import os
988+
989+
990+class AmuletDeployment(object):
991+ """Amulet deployment.
992+
993+ This class provides generic Amulet deployment and test runner
994+ methods.
995+ """
996+
997+ def __init__(self, series=None):
998+ """Initialize the deployment environment."""
999+ self.series = None
1000+
1001+ if series:
1002+ self.series = series
1003+ self.d = amulet.Deployment(series=self.series)
1004+ else:
1005+ self.d = amulet.Deployment()
1006+
1007+ def _add_services(self, this_service, other_services):
1008+ """Add services.
1009+
1010+ Add services to the deployment where this_service is the local charm
1011+ that we're testing and other_services are the other services that
1012+ are being used in the local amulet tests.
1013+ """
1014+ if this_service['name'] != os.path.basename(os.getcwd()):
1015+ s = this_service['name']
1016+ msg = "The charm's root directory name needs to be {}".format(s)
1017+ amulet.raise_status(amulet.FAIL, msg=msg)
1018+
1019+ if 'units' not in this_service:
1020+ this_service['units'] = 1
1021+
1022+ self.d.add(this_service['name'], units=this_service['units'])
1023+
1024+ for svc in other_services:
1025+ if 'location' in svc:
1026+ branch_location = svc['location']
1027+ elif self.series:
1028+ branch_location = 'cs:{}/{}'.format(self.series, svc['name']),
1029+ else:
1030+ branch_location = None
1031+
1032+ if 'units' not in svc:
1033+ svc['units'] = 1
1034+
1035+ self.d.add(svc['name'], charm=branch_location, units=svc['units'])
1036+
1037+ def _add_relations(self, relations):
1038+ """Add all of the relations for the services."""
1039+ for k, v in relations.iteritems():
1040+ self.d.relate(k, v)
1041+
1042+ def _configure_services(self, configs):
1043+ """Configure all of the services."""
1044+ for service, config in configs.iteritems():
1045+ self.d.configure(service, config)
1046+
1047+ def _deploy(self):
1048+ """Deploy environment and wait for all hooks to finish executing."""
1049+ try:
1050+ self.d.setup(timeout=900)
1051+ self.d.sentry.wait(timeout=900)
1052+ except amulet.helpers.TimeoutError:
1053+ amulet.raise_status(amulet.FAIL, msg="Deployment timed out")
1054+ except Exception:
1055+ raise
1056+
1057+ def run_tests(self):
1058+ """Run all of the methods that are prefixed with 'test_'."""
1059+ for test in dir(self):
1060+ if test.startswith('test_'):
1061+ getattr(self, test)()
1062
1063=== added file 'tests/charmhelpers/contrib/amulet/utils.py'
1064--- tests/charmhelpers/contrib/amulet/utils.py 1970-01-01 00:00:00 +0000
1065+++ tests/charmhelpers/contrib/amulet/utils.py 2014-09-29 20:46:18 +0000
1066@@ -0,0 +1,176 @@
1067+import ConfigParser
1068+import io
1069+import logging
1070+import re
1071+import sys
1072+import time
1073+
1074+
1075+class AmuletUtils(object):
1076+ """Amulet utilities.
1077+
1078+ This class provides common utility functions that are used by Amulet
1079+ tests.
1080+ """
1081+
1082+ def __init__(self, log_level=logging.ERROR):
1083+ self.log = self.get_logger(level=log_level)
1084+
1085+ def get_logger(self, name="amulet-logger", level=logging.DEBUG):
1086+ """Get a logger object that will log to stdout."""
1087+ log = logging
1088+ logger = log.getLogger(name)
1089+ fmt = log.Formatter("%(asctime)s %(funcName)s "
1090+ "%(levelname)s: %(message)s")
1091+
1092+ handler = log.StreamHandler(stream=sys.stdout)
1093+ handler.setLevel(level)
1094+ handler.setFormatter(fmt)
1095+
1096+ logger.addHandler(handler)
1097+ logger.setLevel(level)
1098+
1099+ return logger
1100+
1101+ def valid_ip(self, ip):
1102+ if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
1103+ return True
1104+ else:
1105+ return False
1106+
1107+ def valid_url(self, url):
1108+ p = re.compile(
1109+ r'^(?:http|ftp)s?://'
1110+ r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa
1111+ r'localhost|'
1112+ r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
1113+ r'(?::\d+)?'
1114+ r'(?:/?|[/?]\S+)$',
1115+ re.IGNORECASE)
1116+ if p.match(url):
1117+ return True
1118+ else:
1119+ return False
1120+
1121+ def validate_services(self, commands):
1122+ """Validate services.
1123+
1124+ Verify the specified services are running on the corresponding
1125+ service units.
1126+ """
1127+ for k, v in commands.iteritems():
1128+ for cmd in v:
1129+ output, code = k.run(cmd)
1130+ if code != 0:
1131+ return "command `{}` returned {}".format(cmd, str(code))
1132+ return None
1133+
1134+ def _get_config(self, unit, filename):
1135+ """Get a ConfigParser object for parsing a unit's config file."""
1136+ file_contents = unit.file_contents(filename)
1137+ config = ConfigParser.ConfigParser()
1138+ config.readfp(io.StringIO(file_contents))
1139+ return config
1140+
1141+ def validate_config_data(self, sentry_unit, config_file, section,
1142+ expected):
1143+ """Validate config file data.
1144+
1145+ Verify that the specified section of the config file contains
1146+ the expected option key:value pairs.
1147+ """
1148+ config = self._get_config(sentry_unit, config_file)
1149+
1150+ if section != 'DEFAULT' and not config.has_section(section):
1151+ return "section [{}] does not exist".format(section)
1152+
1153+ for k in expected.keys():
1154+ if not config.has_option(section, k):
1155+ return "section [{}] is missing option {}".format(section, k)
1156+ if config.get(section, k) != expected[k]:
1157+ return "section [{}] {}:{} != expected {}:{}".format(
1158+ section, k, config.get(section, k), k, expected[k])
1159+ return None
1160+
1161+ def _validate_dict_data(self, expected, actual):
1162+ """Validate dictionary data.
1163+
1164+ Compare expected dictionary data vs actual dictionary data.
1165+ The values in the 'expected' dictionary can be strings, bools, ints,
 1166+ longs, or can be a function that evaluates a variable and returns a
1167+ bool.
1168+ """
1169+ for k, v in expected.iteritems():
1170+ if k in actual:
1171+ if (isinstance(v, basestring) or
1172+ isinstance(v, bool) or
1173+ isinstance(v, (int, long))):
1174+ if v != actual[k]:
1175+ return "{}:{}".format(k, actual[k])
1176+ elif not v(actual[k]):
1177+ return "{}:{}".format(k, actual[k])
1178+ else:
1179+ return "key '{}' does not exist".format(k)
1180+ return None
1181+
1182+ def validate_relation_data(self, sentry_unit, relation, expected):
1183+ """Validate actual relation data based on expected relation data."""
1184+ actual = sentry_unit.relation(relation[0], relation[1])
1185+ self.log.debug('actual: {}'.format(repr(actual)))
1186+ return self._validate_dict_data(expected, actual)
1187+
1188+ def _validate_list_data(self, expected, actual):
1189+ """Compare expected list vs actual list data."""
1190+ for e in expected:
1191+ if e not in actual:
1192+ return "expected item {} not found in actual list".format(e)
1193+ return None
1194+
1195+ def not_null(self, string):
1196+ if string is not None:
1197+ return True
1198+ else:
1199+ return False
1200+
1201+ def _get_file_mtime(self, sentry_unit, filename):
1202+ """Get last modification time of file."""
1203+ return sentry_unit.file_stat(filename)['mtime']
1204+
1205+ def _get_dir_mtime(self, sentry_unit, directory):
1206+ """Get last modification time of directory."""
1207+ return sentry_unit.directory_stat(directory)['mtime']
1208+
1209+ def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
1210+ """Get process' start time.
1211+
1212+ Determine start time of the process based on the last modification
1213+ time of the /proc/pid directory. If pgrep_full is True, the process
1214+ name is matched against the full command line.
1215+ """
1216+ if pgrep_full:
1217+ cmd = 'pgrep -o -f {}'.format(service)
1218+ else:
1219+ cmd = 'pgrep -o {}'.format(service)
1220+ proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip())
1221+ return self._get_dir_mtime(sentry_unit, proc_dir)
1222+
1223+ def service_restarted(self, sentry_unit, service, filename,
1224+ pgrep_full=False, sleep_time=20):
1225+ """Check if service was restarted.
1226+
1227+ Compare a service's start time vs a file's last modification time
1228+ (such as a config file for that service) to determine if the service
1229+ has been restarted.
1230+ """
1231+ time.sleep(sleep_time)
1232+ if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
1233+ self._get_file_mtime(sentry_unit, filename)):
1234+ return True
1235+ else:
1236+ return False
1237+
1238+ def relation_error(self, name, data):
1239+ return 'unexpected relation data in {} - {}'.format(name, data)
1240+
1241+ def endpoint_error(self, name, data):
1242+ return 'unexpected endpoint data in {} - {}'.format(name, data)
1243
1244=== added directory 'tests/charmhelpers/contrib/openstack'
1245=== added file 'tests/charmhelpers/contrib/openstack/__init__.py'
1246=== added directory 'tests/charmhelpers/contrib/openstack/amulet'
1247=== added file 'tests/charmhelpers/contrib/openstack/amulet/__init__.py'
1248=== added file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py'
1249--- tests/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
1250+++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2014-09-29 20:46:18 +0000
1251@@ -0,0 +1,91 @@
1252+from charmhelpers.contrib.amulet.deployment import (
1253+ AmuletDeployment
1254+)
1255+
1256+
1257+class OpenStackAmuletDeployment(AmuletDeployment):
1258+ """OpenStack amulet deployment.
1259+
1260+ This class inherits from AmuletDeployment and has additional support
1261+ that is specifically for use by OpenStack charms.
1262+ """
1263+
1264+ def __init__(self, series=None, openstack=None, source=None, stable=True):
1265+ """Initialize the deployment environment."""
1266+ super(OpenStackAmuletDeployment, self).__init__(series)
1267+ self.openstack = openstack
1268+ self.source = source
1269+ self.stable = stable
1270+ # Note(coreycb): this needs to be changed when new next branches come
1271+ # out.
1272+ self.current_next = "trusty"
1273+
1274+ def _determine_branch_locations(self, other_services):
1275+ """Determine the branch locations for the other services.
1276+
1277+ Determine if the local branch being tested is derived from its
 1278+ stable or next (dev) branch, and based on this, use the corresponding
1279+ stable or next branches for the other_services."""
1280+ base_charms = ['mysql', 'mongodb', 'rabbitmq-server']
1281+
1282+ if self.stable:
1283+ for svc in other_services:
1284+ temp = 'lp:charms/{}'
1285+ svc['location'] = temp.format(svc['name'])
1286+ else:
1287+ for svc in other_services:
1288+ if svc['name'] in base_charms:
1289+ temp = 'lp:charms/{}'
1290+ svc['location'] = temp.format(svc['name'])
1291+ else:
1292+ temp = 'lp:~openstack-charmers/charms/{}/{}/next'
1293+ svc['location'] = temp.format(self.current_next,
1294+ svc['name'])
1295+ return other_services
1296+
1297+ def _add_services(self, this_service, other_services):
1298+ """Add services to the deployment and set openstack-origin/source."""
1299+ other_services = self._determine_branch_locations(other_services)
1300+
1301+ super(OpenStackAmuletDeployment, self)._add_services(this_service,
1302+ other_services)
1303+
1304+ services = other_services
1305+ services.append(this_service)
1306+ use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
1307+ 'ceph-osd', 'ceph-radosgw']
1308+
1309+ if self.openstack:
1310+ for svc in services:
1311+ if svc['name'] not in use_source:
1312+ config = {'openstack-origin': self.openstack}
1313+ self.d.configure(svc['name'], config)
1314+
1315+ if self.source:
1316+ for svc in services:
1317+ if svc['name'] in use_source:
1318+ config = {'source': self.source}
1319+ self.d.configure(svc['name'], config)
1320+
1321+ def _configure_services(self, configs):
1322+ """Configure all of the services."""
1323+ for service, config in configs.iteritems():
1324+ self.d.configure(service, config)
1325+
1326+ def _get_openstack_release(self):
1327+ """Get openstack release.
1328+
1329+ Return an integer representing the enum value of the openstack
1330+ release.
1331+ """
1332+ (self.precise_essex, self.precise_folsom, self.precise_grizzly,
1333+ self.precise_havana, self.precise_icehouse,
1334+ self.trusty_icehouse) = range(6)
1335+ releases = {
1336+ ('precise', None): self.precise_essex,
1337+ ('precise', 'cloud:precise-folsom'): self.precise_folsom,
1338+ ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
1339+ ('precise', 'cloud:precise-havana'): self.precise_havana,
1340+ ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
1341+ ('trusty', None): self.trusty_icehouse}
1342+ return releases[(self.series, self.openstack)]
1343
1344=== added file 'tests/charmhelpers/contrib/openstack/amulet/utils.py'
1345--- tests/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
1346+++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2014-09-29 20:46:18 +0000
1347@@ -0,0 +1,276 @@
1348+import logging
1349+import os
1350+import time
1351+import urllib
1352+
1353+import glanceclient.v1.client as glance_client
1354+import keystoneclient.v2_0 as keystone_client
1355+import novaclient.v1_1.client as nova_client
1356+
1357+from charmhelpers.contrib.amulet.utils import (
1358+ AmuletUtils
1359+)
1360+
1361+DEBUG = logging.DEBUG
1362+ERROR = logging.ERROR
1363+
1364+
1365+class OpenStackAmuletUtils(AmuletUtils):
1366+ """OpenStack amulet utilities.
1367+
1368+ This class inherits from AmuletUtils and has additional support
1369+ that is specifically for use by OpenStack charms.
1370+ """
1371+
1372+ def __init__(self, log_level=ERROR):
1373+ """Initialize the deployment environment."""
1374+ super(OpenStackAmuletUtils, self).__init__(log_level)
1375+
1376+ def validate_endpoint_data(self, endpoints, admin_port, internal_port,
1377+ public_port, expected):
1378+ """Validate endpoint data.
1379+
1380+ Validate actual endpoint data vs expected endpoint data. The ports
1381+ are used to find the matching endpoint.
1382+ """
1383+ found = False
1384+ for ep in endpoints:
1385+ self.log.debug('endpoint: {}'.format(repr(ep)))
1386+ if (admin_port in ep.adminurl and
1387+ internal_port in ep.internalurl and
1388+ public_port in ep.publicurl):
1389+ found = True
1390+ actual = {'id': ep.id,
1391+ 'region': ep.region,
1392+ 'adminurl': ep.adminurl,
1393+ 'internalurl': ep.internalurl,
1394+ 'publicurl': ep.publicurl,
1395+ 'service_id': ep.service_id}
1396+ ret = self._validate_dict_data(expected, actual)
1397+ if ret:
1398+ return 'unexpected endpoint data - {}'.format(ret)
1399+
1400+ if not found:
1401+ return 'endpoint not found'
1402+
1403+ def validate_svc_catalog_endpoint_data(self, expected, actual):
1404+ """Validate service catalog endpoint data.
1405+
1406+ Validate a list of actual service catalog endpoints vs a list of
1407+ expected service catalog endpoints.
1408+ """
1409+ self.log.debug('actual: {}'.format(repr(actual)))
1410+ for k, v in expected.iteritems():
1411+ if k in actual:
1412+ ret = self._validate_dict_data(expected[k][0], actual[k][0])
1413+ if ret:
1414+ return self.endpoint_error(k, ret)
1415+ else:
1416+ return "endpoint {} does not exist".format(k)
1417+ return ret
1418+
1419+ def validate_tenant_data(self, expected, actual):
1420+ """Validate tenant data.
1421+
1422+ Validate a list of actual tenant data vs list of expected tenant
1423+ data.
1424+ """
1425+ self.log.debug('actual: {}'.format(repr(actual)))
1426+ for e in expected:
1427+ found = False
1428+ for act in actual:
1429+ a = {'enabled': act.enabled, 'description': act.description,
1430+ 'name': act.name, 'id': act.id}
1431+ if e['name'] == a['name']:
1432+ found = True
1433+ ret = self._validate_dict_data(e, a)
1434+ if ret:
1435+ return "unexpected tenant data - {}".format(ret)
1436+ if not found:
1437+ return "tenant {} does not exist".format(e['name'])
1438+ return ret
1439+
1440+ def validate_role_data(self, expected, actual):
1441+ """Validate role data.
1442+
1443+ Validate a list of actual role data vs a list of expected role
1444+ data.
1445+ """
1446+ self.log.debug('actual: {}'.format(repr(actual)))
1447+ for e in expected:
1448+ found = False
1449+ for act in actual:
1450+ a = {'name': act.name, 'id': act.id}
1451+ if e['name'] == a['name']:
1452+ found = True
1453+ ret = self._validate_dict_data(e, a)
1454+ if ret:
1455+ return "unexpected role data - {}".format(ret)
1456+ if not found:
1457+ return "role {} does not exist".format(e['name'])
1458+ return ret
1459+
1460+ def validate_user_data(self, expected, actual):
1461+ """Validate user data.
1462+
1463+ Validate a list of actual user data vs a list of expected user
1464+ data.
1465+ """
1466+ self.log.debug('actual: {}'.format(repr(actual)))
1467+ for e in expected:
1468+ found = False
1469+ for act in actual:
1470+ a = {'enabled': act.enabled, 'name': act.name,
1471+ 'email': act.email, 'tenantId': act.tenantId,
1472+ 'id': act.id}
1473+ if e['name'] == a['name']:
1474+ found = True
1475+ ret = self._validate_dict_data(e, a)
1476+ if ret:
1477+ return "unexpected user data - {}".format(ret)
1478+ if not found:
1479+ return "user {} does not exist".format(e['name'])
1480+ return ret
1481+
1482+ def validate_flavor_data(self, expected, actual):
1483+ """Validate flavor data.
1484+
1485+ Validate a list of actual flavors vs a list of expected flavors.
1486+ """
1487+ self.log.debug('actual: {}'.format(repr(actual)))
1488+ act = [a.name for a in actual]
1489+ return self._validate_list_data(expected, act)
1490+
1491+ def tenant_exists(self, keystone, tenant):
1492+ """Return True if tenant exists."""
1493+ return tenant in [t.name for t in keystone.tenants.list()]
1494+
1495+ def authenticate_keystone_admin(self, keystone_sentry, user, password,
1496+ tenant):
1497+ """Authenticates admin user with the keystone admin endpoint."""
1498+ unit = keystone_sentry
1499+ service_ip = unit.relation('shared-db',
1500+ 'mysql:shared-db')['private-address']
1501+ ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
1502+ return keystone_client.Client(username=user, password=password,
1503+ tenant_name=tenant, auth_url=ep)
1504+
1505+ def authenticate_keystone_user(self, keystone, user, password, tenant):
1506+ """Authenticates a regular user with the keystone public endpoint."""
1507+ ep = keystone.service_catalog.url_for(service_type='identity',
1508+ endpoint_type='publicURL')
1509+ return keystone_client.Client(username=user, password=password,
1510+ tenant_name=tenant, auth_url=ep)
1511+
1512+ def authenticate_glance_admin(self, keystone):
1513+ """Authenticates admin user with glance."""
1514+ ep = keystone.service_catalog.url_for(service_type='image',
1515+ endpoint_type='adminURL')
1516+ return glance_client.Client(ep, token=keystone.auth_token)
1517+
1518+ def authenticate_nova_user(self, keystone, user, password, tenant):
1519+ """Authenticates a regular user with nova-api."""
1520+ ep = keystone.service_catalog.url_for(service_type='identity',
1521+ endpoint_type='publicURL')
1522+ return nova_client.Client(username=user, api_key=password,
1523+ project_id=tenant, auth_url=ep)
1524+
1525+ def create_cirros_image(self, glance, image_name):
1526+ """Download the latest cirros image and upload it to glance."""
1527+ http_proxy = os.getenv('AMULET_HTTP_PROXY')
1528+ self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
1529+ if http_proxy:
1530+ proxies = {'http': http_proxy}
1531+ opener = urllib.FancyURLopener(proxies)
1532+ else:
1533+ opener = urllib.FancyURLopener()
1534+
1535+ f = opener.open("http://download.cirros-cloud.net/version/released")
1536+ version = f.read().strip()
1537+ cirros_img = "cirros-{}-x86_64-disk.img".format(version)
1538+ local_path = os.path.join('tests', cirros_img)
1539+
1540+ if not os.path.exists(local_path):
1541+ cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
1542+ version, cirros_img)
1543+ opener.retrieve(cirros_url, local_path)
1544+ f.close()
1545+
1546+ with open(local_path) as f:
1547+ image = glance.images.create(name=image_name, is_public=True,
1548+ disk_format='qcow2',
1549+ container_format='bare', data=f)
1550+ count = 1
1551+ status = image.status
1552+ while status != 'active' and count < 10:
1553+ time.sleep(3)
1554+ image = glance.images.get(image.id)
1555+ status = image.status
1556+ self.log.debug('image status: {}'.format(status))
1557+ count += 1
1558+
1559+ if status != 'active':
1560+ self.log.error('image creation timed out')
1561+ return None
1562+
1563+ return image
1564+
1565+ def delete_image(self, glance, image):
1566+ """Delete the specified image."""
1567+ num_before = len(list(glance.images.list()))
1568+ glance.images.delete(image)
1569+
1570+ count = 1
1571+ num_after = len(list(glance.images.list()))
1572+ while num_after != (num_before - 1) and count < 10:
1573+ time.sleep(3)
1574+ num_after = len(list(glance.images.list()))
1575+ self.log.debug('number of images: {}'.format(num_after))
1576+ count += 1
1577+
1578+ if num_after != (num_before - 1):
1579+ self.log.error('image deletion timed out')
1580+ return False
1581+
1582+ return True
1583+
1584+ def create_instance(self, nova, image_name, instance_name, flavor):
1585+ """Create the specified instance."""
1586+ image = nova.images.find(name=image_name)
1587+ flavor = nova.flavors.find(name=flavor)
1588+ instance = nova.servers.create(name=instance_name, image=image,
1589+ flavor=flavor)
1590+
1591+ count = 1
1592+ status = instance.status
1593+ while status != 'ACTIVE' and count < 60:
1594+ time.sleep(3)
1595+ instance = nova.servers.get(instance.id)
1596+ status = instance.status
1597+ self.log.debug('instance status: {}'.format(status))
1598+ count += 1
1599+
1600+ if status != 'ACTIVE':
1601+ self.log.error('instance creation timed out')
1602+ return None
1603+
1604+ return instance
1605+
1606+ def delete_instance(self, nova, instance):
1607+ """Delete the specified instance."""
1608+ num_before = len(list(nova.servers.list()))
1609+ nova.servers.delete(instance)
1610+
1611+ count = 1
1612+ num_after = len(list(nova.servers.list()))
1613+ while num_after != (num_before - 1) and count < 10:
1614+ time.sleep(3)
1615+ num_after = len(list(nova.servers.list()))
1616+ self.log.debug('number of instances: {}'.format(num_after))
1617+ count += 1
1618+
1619+ if num_after != (num_before - 1):
1620+ self.log.error('instance deletion timed out')
1621+ return False
1622+
1623+ return True
